code (stringlengths 13–6.09M) | order_type (stringclasses 2 values) | original_example (dict) | step_ids (listlengths 1–5) |
---|---|---|---|
from PrStatusWorker import PrStatusWorker
import threading
def initialize_worker():
worker = PrStatusWorker()
worker.start_pr_status_polling()
print("Starting the PR status monitor worker thread...")
worker_thread = threading.Thread(target=initialize_worker, name="pr_status_worker")
worker_thread.start()
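# A small variant sketch (an assumption, not part of the original snippet): the
# thread above is non-daemonic, so it keeps the process alive while polling. If
# the poller should not block interpreter shutdown, it could be created with
#     threading.Thread(target=initialize_worker, name="pr_status_worker", daemon=True)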
|
normal
|
{
"blob_id": "4b5f58d471b05428caef3ca7a3bdc0d30a7e3881",
"index": 5265,
"step-1": "<mask token>\n\n\ndef initialize_worker():\n worker = PrStatusWorker()\n worker.start_pr_status_polling()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef initialize_worker():\n worker = PrStatusWorker()\n worker.start_pr_status_polling()\n\n\nprint('Starting the PR status monitor worker thread...')\n<mask token>\nworker_thread.start()\n",
"step-3": "<mask token>\n\n\ndef initialize_worker():\n worker = PrStatusWorker()\n worker.start_pr_status_polling()\n\n\nprint('Starting the PR status monitor worker thread...')\nworker_thread = threading.Thread(target=initialize_worker, name=\n 'pr_status_worker')\nworker_thread.start()\n",
"step-4": "from PrStatusWorker import PrStatusWorker\nimport threading\n\n\ndef initialize_worker():\n worker = PrStatusWorker()\n worker.start_pr_status_polling()\n\n\nprint('Starting the PR status monitor worker thread...')\nworker_thread = threading.Thread(target=initialize_worker, name=\n 'pr_status_worker')\nworker_thread.start()\n",
"step-5": "\nfrom PrStatusWorker import PrStatusWorker\nimport threading\n\ndef initialize_worker():\n worker = PrStatusWorker()\n worker.start_pr_status_polling()\n\nprint(\"Starting the PR status monitor worker thread...\")\nworker_thread = threading.Thread(target=initialize_worker, name=\"pr_status_worker\")\nworker_thread.start()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Code Rodrigo
'''
This script generates all the possible combinations to be analyzed
according to the Dempster-Shafer theory.
It requires defining beforehand, via the sensitivity analysis, the
combination of variables that leads to the upper and lower bound
for a given combination of random sets.
'''
import itertools as itt
import numpy as np
import matplotlib.pyplot as plt
def read_input_RS ():
low=np.loadtxt('LowerArray.csv', delimiter=',', skiprows=1)
lower_bound = np.ravel(low)
upper_bound = (np.ravel(np.transpose(np.loadtxt('UpperArray.csv',
delimiter=',', skiprows=1))))
return lower_bound, upper_bound, low[0,:].size
def generate_combinations (lower, upper, n):
lower_input = itt.combinations(lower, n)
upper_input = np.array(list(itt.product(upper, repeat=n)))
return lower_input, upper_input,
def independent_probability ():
probability_assignment = (np.loadtxt('ProbabilityAssignment.csv',
delimiter=',', skiprows=1))
return probability_assignment
if __name__ == "__main__":
a,b,r=read_input_RS ()
#c=a[0,:].size
d,e=generate_combinations (a,b,r)
print(b)
print(e)
np.savetxt('test.out', e, delimiter=',')
#b=read_input_RS ()
#c=generate_combinations (a,b)
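# A minimal sketch of the two itertools calls used above, on toy values (the
# arrays here are assumptions for illustration, not project data): combinations
# yields unordered n-subsets without repetition, while product enumerates every
# ordered n-tuple, so only product covers all len(upper)**n corners of the sets.
#     import itertools as itt
#     list(itt.combinations([1, 2, 3], 2))   # [(1, 2), (1, 3), (2, 3)]
#     list(itt.product([1, 2], repeat=2))    # [(1, 1), (1, 2), (2, 1), (2, 2)]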
|
normal
|
{
"blob_id": "4b44f4343da1677b5436ec2b153e573fda3c0cee",
"index": 2280,
"step-1": "<mask token>\n\n\ndef read_input_RS():\n low = np.loadtxt('LowerArray.csv', delimiter=',', skiprows=1)\n lower_bound = np.ravel(low)\n upper_bound = np.ravel(np.transpose(np.loadtxt('UpperArray.csv',\n delimiter=',', skiprows=1)))\n return lower_bound, upper_bound, low[0, :].size\n\n\n<mask token>\n\n\ndef independent_probability():\n probability_assignment = np.loadtxt('ProbabilityAssignment.csv',\n delimiter=',', skiprows=1)\n return probability_assignment\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_input_RS():\n low = np.loadtxt('LowerArray.csv', delimiter=',', skiprows=1)\n lower_bound = np.ravel(low)\n upper_bound = np.ravel(np.transpose(np.loadtxt('UpperArray.csv',\n delimiter=',', skiprows=1)))\n return lower_bound, upper_bound, low[0, :].size\n\n\ndef generate_combinations(lower, upper, n):\n lower_input = itt.combinations(lower, n)\n upper_input = np.array(list(itt.product(upper, repeat=n)))\n return lower_input, upper_input\n\n\ndef independent_probability():\n probability_assignment = np.loadtxt('ProbabilityAssignment.csv',\n delimiter=',', skiprows=1)\n return probability_assignment\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef read_input_RS():\n low = np.loadtxt('LowerArray.csv', delimiter=',', skiprows=1)\n lower_bound = np.ravel(low)\n upper_bound = np.ravel(np.transpose(np.loadtxt('UpperArray.csv',\n delimiter=',', skiprows=1)))\n return lower_bound, upper_bound, low[0, :].size\n\n\ndef generate_combinations(lower, upper, n):\n lower_input = itt.combinations(lower, n)\n upper_input = np.array(list(itt.product(upper, repeat=n)))\n return lower_input, upper_input\n\n\ndef independent_probability():\n probability_assignment = np.loadtxt('ProbabilityAssignment.csv',\n delimiter=',', skiprows=1)\n return probability_assignment\n\n\nif __name__ == '__main__':\n a, b, r = read_input_RS()\n d, e = generate_combinations(a, b, r)\n print(b)\n print(e)\n np.savetxt('test.out', e, delimiter=',')\n",
"step-4": "<mask token>\nimport itertools as itt\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef read_input_RS():\n low = np.loadtxt('LowerArray.csv', delimiter=',', skiprows=1)\n lower_bound = np.ravel(low)\n upper_bound = np.ravel(np.transpose(np.loadtxt('UpperArray.csv',\n delimiter=',', skiprows=1)))\n return lower_bound, upper_bound, low[0, :].size\n\n\ndef generate_combinations(lower, upper, n):\n lower_input = itt.combinations(lower, n)\n upper_input = np.array(list(itt.product(upper, repeat=n)))\n return lower_input, upper_input\n\n\ndef independent_probability():\n probability_assignment = np.loadtxt('ProbabilityAssignment.csv',\n delimiter=',', skiprows=1)\n return probability_assignment\n\n\nif __name__ == '__main__':\n a, b, r = read_input_RS()\n d, e = generate_combinations(a, b, r)\n print(b)\n print(e)\n np.savetxt('test.out', e, delimiter=',')\n",
"step-5": "# Code Rodrigo\n\n'''\nThis script, basically generates all he possible combinations\nto be analyzed according to the Dempster Shafer Theory.\nIt requires to define beforehand, the combination of variables\nthat lead to the higher and lower bound for a given combination\nof random sets, via the sensitivity analysis\n'''\n\nimport itertools as itt\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef read_input_RS ():\n low=np.loadtxt('LowerArray.csv', delimiter=',', skiprows=1)\n lower_bound = np.ravel(low)\n upper_bound = (np.ravel(np.transpose(np.loadtxt('UpperArray.csv',\n delimiter=',', skiprows=1))))\n return lower_bound, upper_bound, low[0,:].size\n\ndef generate_combinations (lower, upper, n):\n lower_input = itt.combinations(lower, n)\n upper_input = np.array(list(itt.product(upper, repeat=n)))\n return lower_input, upper_input,\n\ndef independent_probability ():\n probability_assignment = (np.loadtxt('ProbabilityAssignment.csv',\n delimiter=',', skiprows=1))\n return probability_assignment\n\nif __name__ == \"__main__\":\n a,b,r=read_input_RS ()\n #c=a[0,:].size\n d,e=generate_combinations (a,b,r)\n print(b)\n print(e)\n np.savetxt('test.out', e, delimiter=',')\n\n#b=read_input_RS ()\n#c=generate_combinations (a,b)\n\n\n\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
n, x0, y0 = list(map(int, input().split()))
cards = [y0] + list(map(int, input().split()))
# y's starting card takes part in the game, so it is added to the list as well
xs = [[-1] * (n+1) for i in range(n+1)]
ys = [[-1] * (n+1) for i in range(n+1)]
# xs[i][j] = best score when it is x's turn, x holds cards[i] and y holds cards[j] (i<j)
# ys[i][j] = best score when it is y's turn, x holds cards[j] and y holds cards[i] (i<j)
for i in range(n+1):
xs[i][-1] = abs(cards[-1] - cards[i])
ys[i][-1] = abs(cards[-1] - cards[i])
for j in range(n-1, -1, -1):
	# xs[i][j] = max(ys[j][j+1], ys[j][j+2], ..., ys[j][n])
xs_temp = max(ys[j][j+1:n+1])
ys_temp = min(xs[j][j+1:n+1])
for i in range(0, j):
xs[i][j] = xs_temp
ys[i][j] = ys_temp
# print(xs)
# print(ys)
print(max(ys[0][1:]))
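# A small worked note (toy case, an editorial assumption): with n = 2 and
# cards = [y0, c1, c2], the j-loop runs j = 1 then j = 0, so
#     xs[0][1] = max(ys[1][2:3])   and   ys[0][1] = min(xs[1][2:3])
# are filled from the already-initialized last column before the answer
# max(ys[0][1:]) is read, i.e. plain backward induction over the card indices.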
|
normal
|
{
"blob_id": "81b9fc78d92fdc4392cb71a77fdfd354ff950ae3",
"index": 6153,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n + 1):\n xs[i][-1] = abs(cards[-1] - cards[i])\n ys[i][-1] = abs(cards[-1] - cards[i])\nfor j in range(n - 1, -1, -1):\n xs_temp = max(ys[j][j + 1:n + 1])\n ys_temp = min(xs[j][j + 1:n + 1])\n for i in range(0, j):\n xs[i][j] = xs_temp\n ys[i][j] = ys_temp\nprint(max(ys[0][1:]))\n",
"step-3": "n, x0, y0 = list(map(int, input().split()))\ncards = [y0] + list(map(int, input().split()))\nxs = [([-1] * (n + 1)) for i in range(n + 1)]\nys = [([-1] * (n + 1)) for i in range(n + 1)]\nfor i in range(n + 1):\n xs[i][-1] = abs(cards[-1] - cards[i])\n ys[i][-1] = abs(cards[-1] - cards[i])\nfor j in range(n - 1, -1, -1):\n xs_temp = max(ys[j][j + 1:n + 1])\n ys_temp = min(xs[j][j + 1:n + 1])\n for i in range(0, j):\n xs[i][j] = xs_temp\n ys[i][j] = ys_temp\nprint(max(ys[0][1:]))\n",
"step-4": "n, x0, y0 = list(map(int, input().split()))\n\ncards = [y0] + list(map(int, input().split()))\n# yの手持ちはゲームに関与するため、リストに加えてしまう\n\nxs = [[-1] * (n+1) for i in range(n+1)]\nys = [[-1] * (n+1) for i in range(n+1)] \n#xs[i][j] = xの手番で、xがcards[i]を持ちyがcards[j]を持っているとき(i<j)の最善スコア\n#ys[i][j] = yの手番で、xがcards[j]を持ちyがcards[i]を持っているとき(i<j)の最善スコア\n\nfor i in range(n+1):\n\txs[i][-1] = abs(cards[-1] - cards[i])\n\tys[i][-1] = abs(cards[-1] - cards[i])\n\nfor j in range(n-1, -1, -1):\n\n\t# x[i][j] = max (y[j][j+1] , y[j][j+2] , ……, y[j][n] )\n\txs_temp = max(ys[j][j+1:n+1])\n\tys_temp = min(xs[j][j+1:n+1])\n\tfor i in range(0, j):\n\t\txs[i][j] = xs_temp\n\t\tys[i][j] = ys_temp\n\n# print(xs)\n# print(ys)\nprint(max(ys[0][1:]))\t\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
web = Blueprint('web', __name__)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from flask import Blueprint
web = Blueprint('web', __name__)
from app.web import auth
from app.web import user
from app.web import book
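# A note on the ordering above (an inference, not stated in the source): auth,
# user and book are imported only after `web` exists, presumably because those
# modules import `web` back from this package to register their routes, and
# importing them at the top would create a circular import.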
|
flexible
|
{
"blob_id": "02182f0379e58b64bbe17cc5f433e8aae7814976",
"index": 196,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nweb = Blueprint('web', __name__)\n<mask token>\n",
"step-3": "from flask import Blueprint\nweb = Blueprint('web', __name__)\nfrom app.web import auth\nfrom app.web import user\nfrom app.web import book\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for p in G['Pos']:
Pos.append(p)
<|reserved_special_token_0|>
for v in G['Vel']:
Vel.append(v)
<|reserved_special_token_0|>
for s in G['Spin']:
Spin.append(s)
<|reserved_special_token_0|>
for d in G['DiscRadii']:
Disc_r.append(d)
<|reserved_special_token_0|>
for g in G['DiscGas']:
Disc_gas.append(g)
<|reserved_special_token_0|>
for g in G['DiscStars']:
Disc_stars.append(g)
<|reserved_special_token_0|>
for g in G['SpinStars']:
SpinStars.append(g)
<|reserved_special_token_0|>
for g in G['SpinGas']:
SpinGas.append(g)
<|reserved_special_token_0|>
for g in G['SpinClassicalBulge']:
SpinClassicalBulge.append(g)
<|reserved_special_token_0|>
for g in G['DiscHI']:
DiscHI.append(g)
<|reserved_special_token_0|>
for g in G['DiscH2']:
DiscH2.append(g)
<|reserved_special_token_0|>
for g in G['DiscSFR']:
DiscSFR.append(g)
<|reserved_special_token_0|>
for g in G['DiscGasMetals']:
DiscGasMetals.append(g)
<|reserved_special_token_0|>
for g in G['DiscStarsMetals']:
DiscStarsMetals.append(g)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
Pos = []
for p in G['Pos']:
Pos.append(p)
Pos_df = pd.Series(Pos, dtype=np.dtype('object'))
Vel = []
for v in G['Vel']:
Vel.append(v)
Vel_df = pd.Series(Vel, dtype=np.dtype('object'))
Spin = []
for s in G['Spin']:
Spin.append(s)
Spin_df = pd.Series(Spin, dtype=np.dtype('object'))
Disc_r = []
for d in G['DiscRadii']:
Disc_r.append(d)
Disc_df = pd.Series(Disc_r, dtype=np.dtype('object'))
Disc_gas = []
for g in G['DiscGas']:
Disc_gas.append(g)
Disc_gas_df = pd.Series(Disc_gas, dtype=np.dtype('object'))
Disc_stars = []
for g in G['DiscStars']:
Disc_stars.append(g)
Disc_stars_df = pd.Series(Disc_stars, dtype=np.dtype('object'))
SpinStars = []
for g in G['SpinStars']:
SpinStars.append(g)
SpinStars_df = pd.Series(SpinStars, dtype=np.dtype('object'))
SpinGas = []
for g in G['SpinGas']:
SpinGas.append(g)
SpinGas_df = pd.Series(SpinGas, dtype=np.dtype('object'))
SpinClassicalBulge = []
for g in G['SpinClassicalBulge']:
SpinClassicalBulge.append(g)
SpinClassicalBulge_df = pd.Series(SpinClassicalBulge, dtype=np.dtype('object'))
DiscHI = []
for g in G['DiscHI']:
DiscHI.append(g)
DiscHI_df = pd.Series(DiscHI, dtype=np.dtype('object'))
DiscH2 = []
for g in G['DiscH2']:
DiscH2.append(g)
DiscH2_df = pd.Series(DiscH2, dtype=np.dtype('object'))
DiscSFR = []
for g in G['DiscSFR']:
DiscSFR.append(g)
DiscSFR_df = pd.Series(DiscSFR, dtype=np.dtype('object'))
DiscGasMetals = []
for g in G['DiscGasMetals']:
DiscGasMetals.append(g)
DiscGasMetals_df = pd.Series(DiscGasMetals, dtype=np.dtype('object'))
DiscStarsMetals = []
for g in G['DiscStarsMetals']:
DiscStarsMetals.append(g)
DiscStarsMetals_df = pd.Series(DiscStarsMetals, dtype=np.dtype('object'))
DS = pd.DataFrame({'Type': G['Type'], 'GalaxyIndex': G['GalaxyIndex'],
'HaloIndex': G['HaloIndex'], 'SimulationHaloIndex': G[
'SimulationHaloIndex'], 'TreeIndex': G['TreeIndex'], 'SnapNum': G[
'SnapNum'], 'CentralGalaxyIndex': G['CentralGalaxyIndex'],
'CentralMvir': G['CentralMvir'], 'mergeType': G['mergeType'],
'mergeIntoID': G['mergeIntoID'], 'mergeIntoSnapNum': G[
'mergeIntoSnapNum'], 'dT': G['dT'], 'Pos': Pos_df, 'Vel': Vel_df,
'Spin': Spin_df, 'Len': G['Len'], 'LenMax': G['LenMax'], 'Mvir': G[
'Mvir'], 'Rvir': G['Rvir'], 'Vvir': G['Vvir'], 'Vmax': G['Vmax'],
'VelDisp': G['VelDisp'], 'DiscRadii': Disc_df, 'ColdGas': G['ColdGas'],
'StellarMass': G['StellarMass'], 'MergerBulgeMass': G['MergerBulgeMass'
], 'InstabilityBulgeMass': G['InstabilityBulgeMass'], 'HotGas': G[
'HotGas'], 'EjectedMass': G['EjectedMass'], 'BlackHoleMass': G[
'BlackHoleMass'], 'IntraClusterStars': G['IntraClusterStars'],
'DiscGas': Disc_gas_df, 'DiscStars': Disc_stars_df, 'SpinStars':
SpinStars_df, 'SpinGas': SpinGas_df, 'SpinClassicalBulge':
SpinClassicalBulge_df, 'StarsInSitu': G['StarsInSitu'],
'StarsInstability': G['StarsInstability'], 'StarsMergeBurst': G[
'StarsMergeBurst'], 'DiscHI': DiscHI_df, 'DiscH2': DiscH2_df, 'DiscSFR':
DiscSFR_df, 'MetalsColdGas': G['MetalsColdGas'], 'MetalsStellarMass': G
['MetalsStellarMass'], 'ClassicalMetalsBulgeMass': G[
'ClassicalMetalsBulgeMass'], 'SecularMetalsBulgeMass': G[
'SecularMetalsBulgeMass'], 'MetalsHotGas': G['MetalsHotGas'],
'MetalsEjectedMass': G['MetalsEjectedMass'], 'MetalsIntraClusterStars':
G['MetalsIntraClusterStars'], 'DiscGasMetals': DiscGasMetals_df,
'DiscStarsMetals': DiscStarsMetals_df, 'SfrFromH2': G['SfrFromH2'],
'SfrInstab': G['SfrInstab'], 'SfrMergeBurst': G['SfrMergeBurst'],
'SfrDiskZ': G['SfrDiskZ'], 'SfrBulgeZ': G['SfrBulgeZ'],
'DiskScaleRadius': G['DiskScaleRadius'], 'CoolScaleRadius': G[
'CoolScaleRadius'], 'StellarDiscScaleRadius': G[
'StellarDiscScaleRadius'], 'Cooling': G['Cooling'], 'Heating': G[
'Heating'], 'LastMajorMerger': G['LastMajorMerger'], 'LastMinorMerger':
G['LastMinorMerger'], 'OutflowRate': G['OutflowRate'], 'infallMvir': G[
'infallMvir'], 'infallVvir': G['infallVvir'], 'infallVmax': G[
'infallVmax']})
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
Pos = []
for p in G['Pos']:
Pos.append(p)
Pos_df = pd.Series(Pos, dtype=np.dtype('object'))
Vel = []
for v in G['Vel']:
Vel.append(v)
Vel_df = pd.Series(Vel, dtype=np.dtype('object'))
Spin = []
for s in G['Spin']:
Spin.append(s)
Spin_df = pd.Series(Spin, dtype=np.dtype('object'))
Disc_r = []
for d in G['DiscRadii']:
Disc_r.append(d)
Disc_df = pd.Series(Disc_r, dtype=np.dtype('object'))
Disc_gas = []
for g in G['DiscGas']:
Disc_gas.append(g)
Disc_gas_df = pd.Series(Disc_gas, dtype=np.dtype('object'))
Disc_stars = []
for g in G['DiscStars']:
Disc_stars.append(g)
Disc_stars_df = pd.Series(Disc_stars, dtype=np.dtype('object'))
SpinStars = []
for g in G['SpinStars']:
SpinStars.append(g)
SpinStars_df = pd.Series(SpinStars, dtype=np.dtype('object'))
SpinGas = []
for g in G['SpinGas']:
SpinGas.append(g)
SpinGas_df = pd.Series(SpinGas, dtype=np.dtype('object'))
SpinClassicalBulge = []
for g in G['SpinClassicalBulge']:
SpinClassicalBulge.append(g)
SpinClassicalBulge_df = pd.Series(SpinClassicalBulge, dtype=np.dtype('object'))
DiscHI = []
for g in G['DiscHI']:
DiscHI.append(g)
DiscHI_df = pd.Series(DiscHI, dtype=np.dtype('object'))
DiscH2 = []
for g in G['DiscH2']:
DiscH2.append(g)
DiscH2_df = pd.Series(DiscH2, dtype=np.dtype('object'))
DiscSFR = []
for g in G['DiscSFR']:
DiscSFR.append(g)
DiscSFR_df = pd.Series(DiscSFR, dtype=np.dtype('object'))
DiscGasMetals = []
for g in G['DiscGasMetals']:
DiscGasMetals.append(g)
DiscGasMetals_df = pd.Series(DiscGasMetals, dtype=np.dtype('object'))
DiscStarsMetals = []
for g in G['DiscStarsMetals']:
DiscStarsMetals.append(g)
DiscStarsMetals_df = pd.Series(DiscStarsMetals, dtype=np.dtype('object'))
DS = pd.DataFrame({'Type': G['Type'], 'GalaxyIndex': G['GalaxyIndex'],
'HaloIndex': G['HaloIndex'], 'SimulationHaloIndex': G[
'SimulationHaloIndex'], 'TreeIndex': G['TreeIndex'], 'SnapNum': G[
'SnapNum'], 'CentralGalaxyIndex': G['CentralGalaxyIndex'],
'CentralMvir': G['CentralMvir'], 'mergeType': G['mergeType'],
'mergeIntoID': G['mergeIntoID'], 'mergeIntoSnapNum': G[
'mergeIntoSnapNum'], 'dT': G['dT'], 'Pos': Pos_df, 'Vel': Vel_df,
'Spin': Spin_df, 'Len': G['Len'], 'LenMax': G['LenMax'], 'Mvir': G[
'Mvir'], 'Rvir': G['Rvir'], 'Vvir': G['Vvir'], 'Vmax': G['Vmax'],
'VelDisp': G['VelDisp'], 'DiscRadii': Disc_df, 'ColdGas': G['ColdGas'],
'StellarMass': G['StellarMass'], 'MergerBulgeMass': G['MergerBulgeMass'
], 'InstabilityBulgeMass': G['InstabilityBulgeMass'], 'HotGas': G[
'HotGas'], 'EjectedMass': G['EjectedMass'], 'BlackHoleMass': G[
'BlackHoleMass'], 'IntraClusterStars': G['IntraClusterStars'],
'DiscGas': Disc_gas_df, 'DiscStars': Disc_stars_df, 'SpinStars':
SpinStars_df, 'SpinGas': SpinGas_df, 'SpinClassicalBulge':
SpinClassicalBulge_df, 'StarsInSitu': G['StarsInSitu'],
'StarsInstability': G['StarsInstability'], 'StarsMergeBurst': G[
'StarsMergeBurst'], 'DiscHI': DiscHI_df, 'DiscH2': DiscH2_df, 'DiscSFR':
DiscSFR_df, 'MetalsColdGas': G['MetalsColdGas'], 'MetalsStellarMass': G
['MetalsStellarMass'], 'ClassicalMetalsBulgeMass': G[
'ClassicalMetalsBulgeMass'], 'SecularMetalsBulgeMass': G[
'SecularMetalsBulgeMass'], 'MetalsHotGas': G['MetalsHotGas'],
'MetalsEjectedMass': G['MetalsEjectedMass'], 'MetalsIntraClusterStars':
G['MetalsIntraClusterStars'], 'DiscGasMetals': DiscGasMetals_df,
'DiscStarsMetals': DiscStarsMetals_df, 'SfrFromH2': G['SfrFromH2'],
'SfrInstab': G['SfrInstab'], 'SfrMergeBurst': G['SfrMergeBurst'],
'SfrDiskZ': G['SfrDiskZ'], 'SfrBulgeZ': G['SfrBulgeZ'],
'DiskScaleRadius': G['DiskScaleRadius'], 'CoolScaleRadius': G[
'CoolScaleRadius'], 'StellarDiscScaleRadius': G[
'StellarDiscScaleRadius'], 'Cooling': G['Cooling'], 'Heating': G[
'Heating'], 'LastMajorMerger': G['LastMajorMerger'], 'LastMinorMerger':
G['LastMinorMerger'], 'OutflowRate': G['OutflowRate'], 'infallMvir': G[
'infallMvir'], 'infallVvir': G['infallVvir'], 'infallVmax': G[
'infallVmax']})
<|reserved_special_token_1|>
# Create a pandas DataFrame from the DarkSage output G['']
import pandas as pd
import numpy as np
# This is a way to convert multi-dimensional data into pd.Series and then load these into the pandas dataframe
Pos = []
for p in G['Pos']:
Pos.append(p)
Pos_df = pd.Series(Pos, dtype=np.dtype("object"))
Vel = []
for v in G['Vel']:
Vel.append(v)
Vel_df = pd.Series(Vel, dtype=np.dtype("object"))
Spin = []
for s in G['Spin']:
Spin.append(s)
Spin_df = pd.Series(Spin, dtype=np.dtype("object"))
Disc_r = []
for d in G['DiscRadii']:
Disc_r.append(d)
Disc_df = pd.Series(Disc_r, dtype=np.dtype("object"))
Disc_gas = []
for g in G['DiscGas']:
Disc_gas.append(g)
Disc_gas_df = pd.Series(Disc_gas, dtype=np.dtype("object"))
Disc_stars = []
for g in G['DiscStars']:
Disc_stars.append(g)
Disc_stars_df = pd.Series(Disc_stars, dtype=np.dtype("object"))
SpinStars = []
for g in G['SpinStars']:
SpinStars.append(g)
SpinStars_df = pd.Series(SpinStars, dtype=np.dtype("object"))
SpinGas = []
for g in G['SpinGas']:
SpinGas.append(g)
SpinGas_df = pd.Series(SpinGas , dtype=np.dtype("object"))
SpinClassicalBulge = []
for g in G['SpinClassicalBulge']:
SpinClassicalBulge.append(g)
SpinClassicalBulge_df = pd.Series(SpinClassicalBulge, dtype=np.dtype("object"))
DiscHI = []
for g in G['DiscHI']:
DiscHI.append(g)
DiscHI_df = pd.Series(DiscHI, dtype=np.dtype("object"))
DiscH2 = []
for g in G['DiscH2']:
DiscH2.append(g)
DiscH2_df = pd.Series(DiscH2, dtype=np.dtype("object"))
DiscSFR = []
for g in G['DiscSFR']:
DiscSFR.append(g)
DiscSFR_df = pd.Series(DiscSFR, dtype=np.dtype("object"))
DiscGasMetals = []
for g in G['DiscGasMetals']:
DiscGasMetals.append(g)
DiscGasMetals_df = pd.Series(DiscGasMetals, dtype=np.dtype("object"))
DiscStarsMetals = []
for g in G['DiscStarsMetals']:
DiscStarsMetals.append(g)
DiscStarsMetals_df = pd.Series(DiscStarsMetals, dtype=np.dtype("object"))
######################################
DS = pd.DataFrame({'Type' : G['Type' ],
'GalaxyIndex' : G['GalaxyIndex' ],
'HaloIndex' : G['HaloIndex' ],
'SimulationHaloIndex' : G['SimulationHaloIndex' ],
'TreeIndex' : G['TreeIndex' ],
'SnapNum' : G['SnapNum' ],
'CentralGalaxyIndex' : G['CentralGalaxyIndex' ],
'CentralMvir' : G['CentralMvir' ],
'mergeType' : G['mergeType' ],
'mergeIntoID' : G['mergeIntoID' ],
'mergeIntoSnapNum' : G['mergeIntoSnapNum' ],
'dT' : G['dT' ],
'Pos' : Pos_df,
'Vel' : Vel_df ,
'Spin' : Spin_df ,
'Len' : G['Len' ],
'LenMax' : G['LenMax' ],
'Mvir' : G['Mvir' ],
'Rvir' : G['Rvir' ],
'Vvir' : G['Vvir' ],
'Vmax' : G['Vmax' ],
'VelDisp' : G['VelDisp' ],
'DiscRadii' : Disc_df,
'ColdGas' : G['ColdGas' ],
'StellarMass' : G['StellarMass' ],
'MergerBulgeMass' : G['MergerBulgeMass' ],
'InstabilityBulgeMass' : G['InstabilityBulgeMass' ],
'HotGas' : G['HotGas' ],
'EjectedMass' : G['EjectedMass' ],
'BlackHoleMass' : G['BlackHoleMass' ],
'IntraClusterStars' : G['IntraClusterStars' ],
'DiscGas' : Disc_gas_df,
'DiscStars' : Disc_stars_df,
'SpinStars' : SpinStars_df,
'SpinGas' : SpinGas_df,
'SpinClassicalBulge' : SpinClassicalBulge_df,
'StarsInSitu' : G['StarsInSitu' ],
'StarsInstability' : G['StarsInstability' ],
'StarsMergeBurst' : G['StarsMergeBurst' ],
'DiscHI' : DiscHI_df,
'DiscH2' : DiscH2_df,
'DiscSFR' : DiscSFR_df,
'MetalsColdGas' : G['MetalsColdGas' ],
'MetalsStellarMass' : G['MetalsStellarMass' ],
'ClassicalMetalsBulgeMass' : G['ClassicalMetalsBulgeMass' ],
'SecularMetalsBulgeMass' : G['SecularMetalsBulgeMass' ],
'MetalsHotGas' : G['MetalsHotGas' ],
'MetalsEjectedMass' : G['MetalsEjectedMass' ],
'MetalsIntraClusterStars' : G['MetalsIntraClusterStars' ],
'DiscGasMetals' : DiscGasMetals_df,
'DiscStarsMetals' : DiscStarsMetals_df,
'SfrFromH2' : G['SfrFromH2' ],
'SfrInstab' : G['SfrInstab' ],
'SfrMergeBurst' : G['SfrMergeBurst' ],
'SfrDiskZ' : G['SfrDiskZ' ],
'SfrBulgeZ' : G['SfrBulgeZ' ],
'DiskScaleRadius' : G['DiskScaleRadius' ],
'CoolScaleRadius' : G['CoolScaleRadius' ],
'StellarDiscScaleRadius' : G['StellarDiscScaleRadius' ],
'Cooling' : G['Cooling' ],
'Heating' : G['Heating' ],
'LastMajorMerger' : G['LastMajorMerger' ],
'LastMinorMerger' : G['LastMinorMerger' ],
'OutflowRate' : G['OutflowRate' ],
'infallMvir' : G['infallMvir' ],
'infallVvir' : G['infallVvir' ],
'infallVmax' : G['infallVmax' ]})
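# A minimal sketch of the pattern used above (toy values, assumed purely for
# illustration): a pd.Series with dtype 'object' lets a whole per-galaxy
# vector sit inside a single DataFrame cell.
#     import numpy as np, pandas as pd
#     pos = [np.array([0.0, 1.0, 2.0]), np.array([3.0, 4.0, 5.0])]
#     df = pd.DataFrame({'Pos': pd.Series(pos, dtype=np.dtype('object'))})
#     df['Pos'][0]   # -> array([0., 1., 2.])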
|
flexible
|
{
"blob_id": "0d565c9f92a60d25f28c903c0a27e7b93d547a4f",
"index": 2971,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor p in G['Pos']:\n Pos.append(p)\n<mask token>\nfor v in G['Vel']:\n Vel.append(v)\n<mask token>\nfor s in G['Spin']:\n Spin.append(s)\n<mask token>\nfor d in G['DiscRadii']:\n Disc_r.append(d)\n<mask token>\nfor g in G['DiscGas']:\n Disc_gas.append(g)\n<mask token>\nfor g in G['DiscStars']:\n Disc_stars.append(g)\n<mask token>\nfor g in G['SpinStars']:\n SpinStars.append(g)\n<mask token>\nfor g in G['SpinGas']:\n SpinGas.append(g)\n<mask token>\nfor g in G['SpinClassicalBulge']:\n SpinClassicalBulge.append(g)\n<mask token>\nfor g in G['DiscHI']:\n DiscHI.append(g)\n<mask token>\nfor g in G['DiscH2']:\n DiscH2.append(g)\n<mask token>\nfor g in G['DiscSFR']:\n DiscSFR.append(g)\n<mask token>\nfor g in G['DiscGasMetals']:\n DiscGasMetals.append(g)\n<mask token>\nfor g in G['DiscStarsMetals']:\n DiscStarsMetals.append(g)\n<mask token>\n",
"step-3": "<mask token>\nPos = []\nfor p in G['Pos']:\n Pos.append(p)\nPos_df = pd.Series(Pos, dtype=np.dtype('object'))\nVel = []\nfor v in G['Vel']:\n Vel.append(v)\nVel_df = pd.Series(Vel, dtype=np.dtype('object'))\nSpin = []\nfor s in G['Spin']:\n Spin.append(s)\nSpin_df = pd.Series(Spin, dtype=np.dtype('object'))\nDisc_r = []\nfor d in G['DiscRadii']:\n Disc_r.append(d)\nDisc_df = pd.Series(Disc_r, dtype=np.dtype('object'))\nDisc_gas = []\nfor g in G['DiscGas']:\n Disc_gas.append(g)\nDisc_gas_df = pd.Series(Disc_gas, dtype=np.dtype('object'))\nDisc_stars = []\nfor g in G['DiscStars']:\n Disc_stars.append(g)\nDisc_stars_df = pd.Series(Disc_stars, dtype=np.dtype('object'))\nSpinStars = []\nfor g in G['SpinStars']:\n SpinStars.append(g)\nSpinStars_df = pd.Series(SpinStars, dtype=np.dtype('object'))\nSpinGas = []\nfor g in G['SpinGas']:\n SpinGas.append(g)\nSpinGas_df = pd.Series(SpinGas, dtype=np.dtype('object'))\nSpinClassicalBulge = []\nfor g in G['SpinClassicalBulge']:\n SpinClassicalBulge.append(g)\nSpinClassicalBulge_df = pd.Series(SpinClassicalBulge, dtype=np.dtype('object'))\nDiscHI = []\nfor g in G['DiscHI']:\n DiscHI.append(g)\nDiscHI_df = pd.Series(DiscHI, dtype=np.dtype('object'))\nDiscH2 = []\nfor g in G['DiscH2']:\n DiscH2.append(g)\nDiscH2_df = pd.Series(DiscH2, dtype=np.dtype('object'))\nDiscSFR = []\nfor g in G['DiscSFR']:\n DiscSFR.append(g)\nDiscSFR_df = pd.Series(DiscSFR, dtype=np.dtype('object'))\nDiscGasMetals = []\nfor g in G['DiscGasMetals']:\n DiscGasMetals.append(g)\nDiscGasMetals_df = pd.Series(DiscGasMetals, dtype=np.dtype('object'))\nDiscStarsMetals = []\nfor g in G['DiscStarsMetals']:\n DiscStarsMetals.append(g)\nDiscStarsMetals_df = pd.Series(DiscStarsMetals, dtype=np.dtype('object'))\nDS = pd.DataFrame({'Type': G['Type'], 'GalaxyIndex': G['GalaxyIndex'],\n 'HaloIndex': G['HaloIndex'], 'SimulationHaloIndex': G[\n 'SimulationHaloIndex'], 'TreeIndex': G['TreeIndex'], 'SnapNum': G[\n 'SnapNum'], 'CentralGalaxyIndex': G['CentralGalaxyIndex'],\n 'CentralMvir': G['CentralMvir'], 'mergeType': G['mergeType'],\n 'mergeIntoID': G['mergeIntoID'], 'mergeIntoSnapNum': G[\n 'mergeIntoSnapNum'], 'dT': G['dT'], 'Pos': Pos_df, 'Vel': Vel_df,\n 'Spin': Spin_df, 'Len': G['Len'], 'LenMax': G['LenMax'], 'Mvir': G[\n 'Mvir'], 'Rvir': G['Rvir'], 'Vvir': G['Vvir'], 'Vmax': G['Vmax'],\n 'VelDisp': G['VelDisp'], 'DiscRadii': Disc_df, 'ColdGas': G['ColdGas'],\n 'StellarMass': G['StellarMass'], 'MergerBulgeMass': G['MergerBulgeMass'\n ], 'InstabilityBulgeMass': G['InstabilityBulgeMass'], 'HotGas': G[\n 'HotGas'], 'EjectedMass': G['EjectedMass'], 'BlackHoleMass': G[\n 'BlackHoleMass'], 'IntraClusterStars': G['IntraClusterStars'],\n 'DiscGas': Disc_gas_df, 'DiscStars': Disc_stars_df, 'SpinStars':\n SpinStars_df, 'SpinGas': SpinGas_df, 'SpinClassicalBulge':\n SpinClassicalBulge_df, 'StarsInSitu': G['StarsInSitu'],\n 'StarsInstability': G['StarsInstability'], 'StarsMergeBurst': G[\n 'StarsMergeBurst'], 'DiscHI': DiscHI_df, 'DiscH2': DiscH2_df, 'DiscSFR':\n DiscSFR_df, 'MetalsColdGas': G['MetalsColdGas'], 'MetalsStellarMass': G\n ['MetalsStellarMass'], 'ClassicalMetalsBulgeMass': G[\n 'ClassicalMetalsBulgeMass'], 'SecularMetalsBulgeMass': G[\n 'SecularMetalsBulgeMass'], 'MetalsHotGas': G['MetalsHotGas'],\n 'MetalsEjectedMass': G['MetalsEjectedMass'], 'MetalsIntraClusterStars':\n G['MetalsIntraClusterStars'], 'DiscGasMetals': DiscGasMetals_df,\n 'DiscStarsMetals': DiscStarsMetals_df, 'SfrFromH2': G['SfrFromH2'],\n 'SfrInstab': G['SfrInstab'], 'SfrMergeBurst': G['SfrMergeBurst'],\n 
'SfrDiskZ': G['SfrDiskZ'], 'SfrBulgeZ': G['SfrBulgeZ'],\n 'DiskScaleRadius': G['DiskScaleRadius'], 'CoolScaleRadius': G[\n 'CoolScaleRadius'], 'StellarDiscScaleRadius': G[\n 'StellarDiscScaleRadius'], 'Cooling': G['Cooling'], 'Heating': G[\n 'Heating'], 'LastMajorMerger': G['LastMajorMerger'], 'LastMinorMerger':\n G['LastMinorMerger'], 'OutflowRate': G['OutflowRate'], 'infallMvir': G[\n 'infallMvir'], 'infallVvir': G['infallVvir'], 'infallVmax': G[\n 'infallVmax']})\n",
"step-4": "import pandas as pd\nimport numpy as np\nPos = []\nfor p in G['Pos']:\n Pos.append(p)\nPos_df = pd.Series(Pos, dtype=np.dtype('object'))\nVel = []\nfor v in G['Vel']:\n Vel.append(v)\nVel_df = pd.Series(Vel, dtype=np.dtype('object'))\nSpin = []\nfor s in G['Spin']:\n Spin.append(s)\nSpin_df = pd.Series(Spin, dtype=np.dtype('object'))\nDisc_r = []\nfor d in G['DiscRadii']:\n Disc_r.append(d)\nDisc_df = pd.Series(Disc_r, dtype=np.dtype('object'))\nDisc_gas = []\nfor g in G['DiscGas']:\n Disc_gas.append(g)\nDisc_gas_df = pd.Series(Disc_gas, dtype=np.dtype('object'))\nDisc_stars = []\nfor g in G['DiscStars']:\n Disc_stars.append(g)\nDisc_stars_df = pd.Series(Disc_stars, dtype=np.dtype('object'))\nSpinStars = []\nfor g in G['SpinStars']:\n SpinStars.append(g)\nSpinStars_df = pd.Series(SpinStars, dtype=np.dtype('object'))\nSpinGas = []\nfor g in G['SpinGas']:\n SpinGas.append(g)\nSpinGas_df = pd.Series(SpinGas, dtype=np.dtype('object'))\nSpinClassicalBulge = []\nfor g in G['SpinClassicalBulge']:\n SpinClassicalBulge.append(g)\nSpinClassicalBulge_df = pd.Series(SpinClassicalBulge, dtype=np.dtype('object'))\nDiscHI = []\nfor g in G['DiscHI']:\n DiscHI.append(g)\nDiscHI_df = pd.Series(DiscHI, dtype=np.dtype('object'))\nDiscH2 = []\nfor g in G['DiscH2']:\n DiscH2.append(g)\nDiscH2_df = pd.Series(DiscH2, dtype=np.dtype('object'))\nDiscSFR = []\nfor g in G['DiscSFR']:\n DiscSFR.append(g)\nDiscSFR_df = pd.Series(DiscSFR, dtype=np.dtype('object'))\nDiscGasMetals = []\nfor g in G['DiscGasMetals']:\n DiscGasMetals.append(g)\nDiscGasMetals_df = pd.Series(DiscGasMetals, dtype=np.dtype('object'))\nDiscStarsMetals = []\nfor g in G['DiscStarsMetals']:\n DiscStarsMetals.append(g)\nDiscStarsMetals_df = pd.Series(DiscStarsMetals, dtype=np.dtype('object'))\nDS = pd.DataFrame({'Type': G['Type'], 'GalaxyIndex': G['GalaxyIndex'],\n 'HaloIndex': G['HaloIndex'], 'SimulationHaloIndex': G[\n 'SimulationHaloIndex'], 'TreeIndex': G['TreeIndex'], 'SnapNum': G[\n 'SnapNum'], 'CentralGalaxyIndex': G['CentralGalaxyIndex'],\n 'CentralMvir': G['CentralMvir'], 'mergeType': G['mergeType'],\n 'mergeIntoID': G['mergeIntoID'], 'mergeIntoSnapNum': G[\n 'mergeIntoSnapNum'], 'dT': G['dT'], 'Pos': Pos_df, 'Vel': Vel_df,\n 'Spin': Spin_df, 'Len': G['Len'], 'LenMax': G['LenMax'], 'Mvir': G[\n 'Mvir'], 'Rvir': G['Rvir'], 'Vvir': G['Vvir'], 'Vmax': G['Vmax'],\n 'VelDisp': G['VelDisp'], 'DiscRadii': Disc_df, 'ColdGas': G['ColdGas'],\n 'StellarMass': G['StellarMass'], 'MergerBulgeMass': G['MergerBulgeMass'\n ], 'InstabilityBulgeMass': G['InstabilityBulgeMass'], 'HotGas': G[\n 'HotGas'], 'EjectedMass': G['EjectedMass'], 'BlackHoleMass': G[\n 'BlackHoleMass'], 'IntraClusterStars': G['IntraClusterStars'],\n 'DiscGas': Disc_gas_df, 'DiscStars': Disc_stars_df, 'SpinStars':\n SpinStars_df, 'SpinGas': SpinGas_df, 'SpinClassicalBulge':\n SpinClassicalBulge_df, 'StarsInSitu': G['StarsInSitu'],\n 'StarsInstability': G['StarsInstability'], 'StarsMergeBurst': G[\n 'StarsMergeBurst'], 'DiscHI': DiscHI_df, 'DiscH2': DiscH2_df, 'DiscSFR':\n DiscSFR_df, 'MetalsColdGas': G['MetalsColdGas'], 'MetalsStellarMass': G\n ['MetalsStellarMass'], 'ClassicalMetalsBulgeMass': G[\n 'ClassicalMetalsBulgeMass'], 'SecularMetalsBulgeMass': G[\n 'SecularMetalsBulgeMass'], 'MetalsHotGas': G['MetalsHotGas'],\n 'MetalsEjectedMass': G['MetalsEjectedMass'], 'MetalsIntraClusterStars':\n G['MetalsIntraClusterStars'], 'DiscGasMetals': DiscGasMetals_df,\n 'DiscStarsMetals': DiscStarsMetals_df, 'SfrFromH2': G['SfrFromH2'],\n 'SfrInstab': G['SfrInstab'], 'SfrMergeBurst': 
G['SfrMergeBurst'],\n 'SfrDiskZ': G['SfrDiskZ'], 'SfrBulgeZ': G['SfrBulgeZ'],\n 'DiskScaleRadius': G['DiskScaleRadius'], 'CoolScaleRadius': G[\n 'CoolScaleRadius'], 'StellarDiscScaleRadius': G[\n 'StellarDiscScaleRadius'], 'Cooling': G['Cooling'], 'Heating': G[\n 'Heating'], 'LastMajorMerger': G['LastMajorMerger'], 'LastMinorMerger':\n G['LastMinorMerger'], 'OutflowRate': G['OutflowRate'], 'infallMvir': G[\n 'infallMvir'], 'infallVvir': G['infallVvir'], 'infallVmax': G[\n 'infallVmax']})\n",
"step-5": "#Create Pandas dataframe from the DarkSage output G['']\n\nimport pandas as pd\nimport numpy as np\n\n\n# This is a way to converte multi dimensional data into pd.Series and then load these into the pandas dataframe\nPos = []\nfor p in G['Pos']:\n Pos.append(p)\nPos_df = pd.Series(Pos, dtype=np.dtype(\"object\"))\n\nVel = []\nfor v in G['Vel']:\n Vel.append(v)\nVel_df = pd.Series(Vel, dtype=np.dtype(\"object\"))\n\nSpin = []\nfor s in G['Spin']:\n Spin.append(s)\nSpin_df = pd.Series(Spin, dtype=np.dtype(\"object\"))\n\nDisc_r = []\nfor d in G['DiscRadii']:\n Disc_r.append(d)\nDisc_df = pd.Series(Disc_r, dtype=np.dtype(\"object\"))\n\nDisc_gas = []\nfor g in G['DiscGas']:\n Disc_gas.append(g)\nDisc_gas_df = pd.Series(Disc_gas, dtype=np.dtype(\"object\"))\n\nDisc_stars = []\nfor g in G['DiscStars']:\n Disc_stars.append(g)\nDisc_stars_df = pd.Series(Disc_stars, dtype=np.dtype(\"object\"))\n\nSpinStars = []\nfor g in G['SpinStars']:\n SpinStars.append(g)\nSpinStars_df = pd.Series(SpinStars, dtype=np.dtype(\"object\"))\n\nSpinGas = []\nfor g in G['SpinGas']:\n SpinGas.append(g)\nSpinGas_df = pd.Series(SpinGas , dtype=np.dtype(\"object\"))\n\nSpinClassicalBulge = []\nfor g in G['SpinClassicalBulge']:\n SpinClassicalBulge.append(g)\nSpinClassicalBulge_df = pd.Series(SpinClassicalBulge, dtype=np.dtype(\"object\"))\n\nDiscHI = []\nfor g in G['DiscHI']:\n DiscHI.append(g)\nDiscHI_df = pd.Series(DiscHI, dtype=np.dtype(\"object\"))\n\nDiscH2 = []\nfor g in G['DiscH2']:\n DiscH2.append(g)\nDiscH2_df = pd.Series(DiscH2, dtype=np.dtype(\"object\"))\n\nDiscSFR = []\nfor g in G['DiscSFR']:\n DiscSFR.append(g)\nDiscSFR_df = pd.Series(DiscSFR, dtype=np.dtype(\"object\"))\n\nDiscGasMetals = []\nfor g in G['DiscGasMetals']:\n DiscGasMetals.append(g)\nDiscGasMetals_df = pd.Series(DiscGasMetals, dtype=np.dtype(\"object\"))\n\nDiscStarsMetals = []\nfor g in G['DiscStarsMetals']:\n DiscStarsMetals.append(g)\nDiscStarsMetals_df = pd.Series(DiscStarsMetals, dtype=np.dtype(\"object\"))\n\n\n\n\n######################################\n\n\nDS = pd.DataFrame({'Type' : G['Type' ],\n'GalaxyIndex' : G['GalaxyIndex' ],\n'HaloIndex' : G['HaloIndex' ],\n'SimulationHaloIndex' : G['SimulationHaloIndex' ],\n'TreeIndex' : G['TreeIndex' ],\n'SnapNum' : G['SnapNum' ],\n'CentralGalaxyIndex' : G['CentralGalaxyIndex' ],\n'CentralMvir' : G['CentralMvir' ],\n'mergeType' : G['mergeType' ],\n'mergeIntoID' : G['mergeIntoID' ],\n'mergeIntoSnapNum' : G['mergeIntoSnapNum' ],\n'dT' : G['dT' ],\n'Pos' : Pos_df,\n'Vel' : Vel_df ,\n'Spin' : Spin_df ,\n'Len' : G['Len' ],\n'LenMax' : G['LenMax' ],\n'Mvir' : G['Mvir' ],\n'Rvir' : G['Rvir' ],\n'Vvir' : G['Vvir' ],\n'Vmax' : G['Vmax' ],\n'VelDisp' : G['VelDisp' ],\n'DiscRadii' : Disc_df,\n'ColdGas' : G['ColdGas' ],\n'StellarMass' : G['StellarMass' ],\n'MergerBulgeMass' : G['MergerBulgeMass' ],\n'InstabilityBulgeMass' : G['InstabilityBulgeMass' ],\n'HotGas' : G['HotGas' ],\n'EjectedMass' : G['EjectedMass' ],\n'BlackHoleMass' : G['BlackHoleMass' ],\n'IntraClusterStars' : G['IntraClusterStars' ],\n'DiscGas' : Disc_gas_df,\n'DiscStars' : Disc_stars_df,\n'SpinStars' : SpinStars_df,\n'SpinGas' : SpinGas_df,\n'SpinClassicalBulge' : SpinClassicalBulge_df,\n'StarsInSitu' : G['StarsInSitu' ],\n'StarsInstability' : G['StarsInstability' ],\n'StarsMergeBurst' : G['StarsMergeBurst' ],\n'DiscHI' : DiscHI_df,\n'DiscH2' : DiscH2_df,\n'DiscSFR' : DiscSFR_df,\n'MetalsColdGas' : G['MetalsColdGas' ],\n'MetalsStellarMass' : G['MetalsStellarMass' ],\n'ClassicalMetalsBulgeMass' : G['ClassicalMetalsBulgeMass' 
],\n'SecularMetalsBulgeMass' : G['SecularMetalsBulgeMass' ],\n'MetalsHotGas' : G['MetalsHotGas' ],\n'MetalsEjectedMass' : G['MetalsEjectedMass' ],\n'MetalsIntraClusterStars' : G['MetalsIntraClusterStars' ],\n'DiscGasMetals' : DiscGasMetals_df,\n'DiscStarsMetals' : DiscStarsMetals_df,\n'SfrFromH2' : G['SfrFromH2' ],\n'SfrInstab' : G['SfrInstab' ],\n'SfrMergeBurst' : G['SfrMergeBurst' ],\n'SfrDiskZ' : G['SfrDiskZ' ],\n'SfrBulgeZ' : G['SfrBulgeZ' ],\n'DiskScaleRadius' : G['DiskScaleRadius' ],\n'CoolScaleRadius' : G['CoolScaleRadius' ],\n'StellarDiscScaleRadius' : G['StellarDiscScaleRadius' ],\n'Cooling' : G['Cooling' ],\n'Heating' : G['Heating' ],\n'LastMajorMerger' : G['LastMajorMerger' ],\n'LastMinorMerger' : G['LastMinorMerger' ],\n'OutflowRate' : G['OutflowRate' ],\n'infallMvir' : G['infallMvir' ],\n'infallVvir' : G['infallVvir' ],\n'infallVmax' : G['infallVmax' ]})\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
__all__ = ["kepler", "quad_solution_vector", "contact_points"]
import numpy as np
from .. import driver
def kepler(mean_anomaly, eccentricity):
mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)
eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)
sinf = np.empty_like(mean_anomaly)
cosf = np.empty_like(mean_anomaly)
driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)
return sinf, cosf
def quad_solution_vector(b, r):
b = np.ascontiguousarray(b, dtype=np.float64)
r = np.ascontiguousarray(r, dtype=np.float64)
s = np.empty(r.shape + (3,), dtype=np.float64)
driver.quad_solution_vector(b, r, s)
return s
def contact_points(a, e, cosw, sinw, cosi, sini, L):
a = np.ascontiguousarray(a, dtype=np.float64)
e = np.ascontiguousarray(e, dtype=np.float64)
cosw = np.ascontiguousarray(cosw, dtype=np.float64)
sinw = np.ascontiguousarray(sinw, dtype=np.float64)
cosi = np.ascontiguousarray(cosi, dtype=np.float64)
sini = np.ascontiguousarray(sini, dtype=np.float64)
L = np.ascontiguousarray(L, dtype=np.float64)
M_left = np.empty_like(a)
M_right = np.empty_like(a)
flag = np.empty_like(a, dtype=np.int32)
driver.contact_points(
a, e, cosw, sinw, cosi, sini, L, M_left, M_right, flag
)
return M_left, M_right, flag
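# A hypothetical usage sketch (the values below are assumptions, not from this
# module's docs): each wrapper allocates its output buffers and lets the
# compiled `driver` routine fill them in place.
#     M = np.linspace(0.0, 2.0 * np.pi, 5)
#     sinf, cosf = kepler(M, np.full_like(M, 0.3))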
|
normal
|
{
"blob_id": "ccd32a6ca98c205a6f5d4936288392251522db29",
"index": 4896,
"step-1": "<mask token>\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\n<mask token>\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,\n flag)\n return M_left, M_right, flag\n",
"step-2": "<mask token>\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\ndef quad_solution_vector(b, r):\n b = np.ascontiguousarray(b, dtype=np.float64)\n r = np.ascontiguousarray(r, dtype=np.float64)\n s = np.empty(r.shape + (3,), dtype=np.float64)\n driver.quad_solution_vector(b, r, s)\n return s\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,\n flag)\n return M_left, M_right, flag\n",
"step-3": "__all__ = ['kepler', 'quad_solution_vector', 'contact_points']\n<mask token>\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\ndef quad_solution_vector(b, r):\n b = np.ascontiguousarray(b, dtype=np.float64)\n r = np.ascontiguousarray(r, dtype=np.float64)\n s = np.empty(r.shape + (3,), dtype=np.float64)\n driver.quad_solution_vector(b, r, s)\n return s\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,\n flag)\n return M_left, M_right, flag\n",
"step-4": "__all__ = ['kepler', 'quad_solution_vector', 'contact_points']\nimport numpy as np\nfrom .. import driver\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\ndef quad_solution_vector(b, r):\n b = np.ascontiguousarray(b, dtype=np.float64)\n r = np.ascontiguousarray(r, dtype=np.float64)\n s = np.empty(r.shape + (3,), dtype=np.float64)\n driver.quad_solution_vector(b, r, s)\n return s\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(a, e, cosw, sinw, cosi, sini, L, M_left, M_right,\n flag)\n return M_left, M_right, flag\n",
"step-5": "# -*- coding: utf-8 -*-\n\n__all__ = [\"kepler\", \"quad_solution_vector\", \"contact_points\"]\n\n\nimport numpy as np\n\nfrom .. import driver\n\n\ndef kepler(mean_anomaly, eccentricity):\n mean_anomaly = np.ascontiguousarray(mean_anomaly, dtype=np.float64)\n eccentricity = np.ascontiguousarray(eccentricity, dtype=np.float64)\n sinf = np.empty_like(mean_anomaly)\n cosf = np.empty_like(mean_anomaly)\n driver.solve_kepler(mean_anomaly, eccentricity, sinf, cosf)\n return sinf, cosf\n\n\ndef quad_solution_vector(b, r):\n b = np.ascontiguousarray(b, dtype=np.float64)\n r = np.ascontiguousarray(r, dtype=np.float64)\n s = np.empty(r.shape + (3,), dtype=np.float64)\n driver.quad_solution_vector(b, r, s)\n return s\n\n\ndef contact_points(a, e, cosw, sinw, cosi, sini, L):\n a = np.ascontiguousarray(a, dtype=np.float64)\n e = np.ascontiguousarray(e, dtype=np.float64)\n cosw = np.ascontiguousarray(cosw, dtype=np.float64)\n sinw = np.ascontiguousarray(sinw, dtype=np.float64)\n cosi = np.ascontiguousarray(cosi, dtype=np.float64)\n sini = np.ascontiguousarray(sini, dtype=np.float64)\n L = np.ascontiguousarray(L, dtype=np.float64)\n M_left = np.empty_like(a)\n M_right = np.empty_like(a)\n flag = np.empty_like(a, dtype=np.int32)\n driver.contact_points(\n a, e, cosw, sinw, cosi, sini, L, M_left, M_right, flag\n )\n return M_left, M_right, flag\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def convert(self, s: str, numRows: int) ->str:
res = ''
for i in range(numRows):
pass
return res
<|reserved_special_token_1|>
# https://leetcode-cn.com/problems/zigzag-conversion/
# 6. Zigzag Conversion
class Solution:
def convert(self, s: str, numRows: int) -> str:
res = ''
for i in range(numRows):
pass
return res
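# One standard approach, as a hedged sketch (not the repository's own solution,
# which is left unfinished above): walk the string once, appending each
# character to its current row and bouncing direction at the first and last row.
#     def convert(s: str, numRows: int) -> str:
#         if numRows == 1 or numRows >= len(s):
#             return s
#         rows, cur, step = [''] * numRows, 0, 1
#         for ch in s:
#             rows[cur] += ch
#             if cur == 0:
#                 step = 1
#             elif cur == numRows - 1:
#                 step = -1
#             cur += step
#         return ''.join(rows)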
|
flexible
|
{
"blob_id": "aa952e8f9a1855b5578cb26d6e5aca42605ee585",
"index": 5454,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def convert(self, s: str, numRows: int) ->str:\n res = ''\n for i in range(numRows):\n pass\n return res\n",
"step-4": "# https://leetcode-cn.com/problems/zigzag-conversion/\n# 6. Z 字形变换\n\n\nclass Solution:\n def convert(self, s: str, numRows: int) -> str:\n res = ''\n for i in range(numRows):\n pass\n return res\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def takeInput():
x = input()
while not validInput(x):
print('Invalid input. Try another one:')
x = input()
return x
def main():
stats = {'Council': 0, 'United': 0, 'Faceless': 0, 'Warband': 0}
print(
"Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz."
)
    print('Answer the following questions by typing a, b, c or d.')
print('1/8')
print("I'd die without my...\na. Freedom\nb. Knowledge\nc. Talent\nd. Hope"
)
x = takeInput()
if x == 'a':
stats['Faceless'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['United'] += 1
print('2/8')
    print('Pick an animal:\na. Owl\nb. Leopard\nc. Elephant\nd. Octopus')
x = takeInput()
if x == 'a':
stats['Warband'] += 1
elif x == 'b':
stats['Faceless'] += 1
elif x == 'c':
stats['United'] += 1
else:
stats['Council'] += 1
print('3/8')
print(
"""Wars are won...
a. In the heat of battle
b. In the planning room
c. With unbreaking resolve
d. By the unpredictable"""
)
x = takeInput()
if x == 'a':
stats['Warband'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['United'] += 1
else:
stats['Faceless'] += 1
print('4/8')
print(
"""The perfect team would never...
a. Give up
b. Lose focus
c. Tell me what to do
d. Feed my opponent"""
)
x = takeInput()
if x == 'a':
stats['United'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['Faceless'] += 1
print('5/8')
print(
"""The enemy team is winning on all fronts. What do you do?
a. Outmaneuver them to steal some objectives
b. Rally my team for a final stand
c. Go pentakill them, like I always do
d. This is right where I want them--I'll explain later"""
)
x = takeInput()
if x == 'a':
stats['Faceless'] += 1
elif x == 'b':
stats['United'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['Council'] += 1
print('6/8')
print(
"What's your favorite time of the day\na. Dawn\nb. Day\nc. Dusk\nd. Night"
)
x = takeInput()
if x == 'a':
stats['United'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Faceless'] += 1
else:
stats['Warband'] += 1
print('7/8')
print(
"""Which of these sounds like you
a. "Can we please group"
b. "Trust me. I'm not trolling"
c. "ez"
d. "WINNABLE\""""
)
x = takeInput()
if x == 'a':
stats['Council'] += 1
elif x == 'b':
stats['Faceless'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['United'] += 1
print('8/8')
print(
"""I want to be seen as a(n)...
a. Selfless leader
b. Brilliant tactician
c. Crafty wildcard
d. Elite fighter"""
)
x = takeInput()
if x == 'a':
stats['United'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Faceless'] += 1
else:
stats['Warband'] += 1
print('\n')
result = max(stats.items(), key=operator.itemgetter(1))[0]
print('Congratulations! You are a ' + result)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def validInput(x):
if x == 'a':
return True
elif x == 'b':
return True
elif x == 'c':
return True
elif x == 'd':
return True
else:
return False
def takeInput():
x = input()
while not validInput(x):
print('Invalid input. Try another one:')
x = input()
return x
def main():
stats = {'Council': 0, 'United': 0, 'Faceless': 0, 'Warband': 0}
print(
"Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz."
)
    print('Answer the following questions by typing a, b, c or d.')
print('1/8')
print("I'd die without my...\na. Freedom\nb. Knowledge\nc. Talent\nd. Hope"
)
x = takeInput()
if x == 'a':
stats['Faceless'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['United'] += 1
print('2/8')
    print('Pick an animal:\na. Owl\nb. Leopard\nc. Elephant\nd. Octopus')
x = takeInput()
if x == 'a':
stats['Warband'] += 1
elif x == 'b':
stats['Faceless'] += 1
elif x == 'c':
stats['United'] += 1
else:
stats['Council'] += 1
print('3/8')
print(
"""Wars are won...
a. In the heat of battle
b. In the planning room
c. With unbreaking resolve
d. By the unpredictable"""
)
x = takeInput()
if x == 'a':
stats['Warband'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['United'] += 1
else:
stats['Faceless'] += 1
print('4/8')
print(
"""The perfect team would never...
a. Give up
b. Lose focus
c. Tell me what to do
d. Feed my opponent"""
)
x = takeInput()
if x == 'a':
stats['United'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['Faceless'] += 1
print('5/8')
print(
"""The enemy team is winning on all fronts. What do you do?
a. Outmaneuver them to steal some objectives
b. Rally my team for a final stand
c. Go pentakill them, like I always do
d. This is right where I want them--I'll explain later"""
)
x = takeInput()
if x == 'a':
stats['Faceless'] += 1
elif x == 'b':
stats['United'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['Council'] += 1
print('6/8')
print(
"What's your favorite time of the day\na. Dawn\nb. Day\nc. Dusk\nd. Night"
)
x = takeInput()
if x == 'a':
stats['United'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Faceless'] += 1
else:
stats['Warband'] += 1
print('7/8')
print(
"""Which of these sounds like you
a. "Can we please group"
b. "Trust me. I'm not trolling"
c. "ez"
d. "WINNABLE\""""
)
x = takeInput()
if x == 'a':
stats['Council'] += 1
elif x == 'b':
stats['Faceless'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['United'] += 1
print('8/8')
print(
"""I want to be seen as a(n)...
a. Selfless leader
b. Brilliant tactician
c. Crafty wildcard
d. Elite fighter"""
)
x = takeInput()
if x == 'a':
stats['United'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Faceless'] += 1
else:
stats['Warband'] += 1
print('\n')
result = max(stats.items(), key=operator.itemgetter(1))[0]
print('Congratulations! You are a ' + result)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def validInput(x):
if x == 'a':
return True
elif x == 'b':
return True
elif x == 'c':
return True
elif x == 'd':
return True
else:
return False
def takeInput():
x = input()
while not validInput(x):
print('Invalid input. Try another one:')
x = input()
return x
def main():
stats = {'Council': 0, 'United': 0, 'Faceless': 0, 'Warband': 0}
print(
"Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz."
)
    print('Answer the following questions by typing a, b, c or d.')
print('1/8')
print("I'd die without my...\na. Freedom\nb. Knowledge\nc. Talent\nd. Hope"
)
x = takeInput()
if x == 'a':
stats['Faceless'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['United'] += 1
print('2/8')
    print('Pick an animal:\na. Owl\nb. Leopard\nc. Elephant\nd. Octopus')
x = takeInput()
if x == 'a':
stats['Warband'] += 1
elif x == 'b':
stats['Faceless'] += 1
elif x == 'c':
stats['United'] += 1
else:
stats['Council'] += 1
print('3/8')
print(
"""Wars are won...
a. In the heat of battle
b. In the planning room
c. With unbreaking resolve
d. By the unpredictable"""
)
x = takeInput()
if x == 'a':
stats['Warband'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['United'] += 1
else:
stats['Faceless'] += 1
print('4/8')
print(
"""The perfect team would never...
a. Give up
b. Lose focus
c. Tell me what to do
d. Feed my opponent"""
)
x = takeInput()
if x == 'a':
stats['United'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['Faceless'] += 1
print('5/8')
print(
"""The enemy team is winning on all fronts. What do you do?
a. Outmaneuver them to steal some objectives
b. Rally my team for a final stand
c. Go pentakill them, like I always do
d. This is right where I want them--I'll explain later"""
)
x = takeInput()
if x == 'a':
stats['Faceless'] += 1
elif x == 'b':
stats['United'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['Council'] += 1
print('6/8')
print(
"What's your favorite time of the day\na. Dawn\nb. Day\nc. Dusk\nd. Night"
)
x = takeInput()
if x == 'a':
stats['United'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Faceless'] += 1
else:
stats['Warband'] += 1
print('7/8')
print(
"""Which of these sounds like you
a. "Can we please group"
b. "Trust me. I'm not trolling"
c. "ez"
d. "WINNABLE\""""
)
x = takeInput()
if x == 'a':
stats['Council'] += 1
elif x == 'b':
stats['Faceless'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['United'] += 1
print('8/8')
print(
"""I want to be seen as a(n)...
a. Selfless leader
b. Brilliant tactician
c. Crafty wildcard
d. Elite fighter"""
)
x = takeInput()
if x == 'a':
stats['United'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Faceless'] += 1
else:
stats['Warband'] += 1
print('\n')
result = max(stats.items(), key=operator.itemgetter(1))[0]
print('Congratulations! You are a ' + result)
main()
<|reserved_special_token_1|>
import operator
def validInput(x):
if x == 'a':
return True
elif x == 'b':
return True
elif x == 'c':
return True
elif x == 'd':
return True
else:
return False
def takeInput():
x = input()
while not validInput(x):
print('Invalid input. Try another one:')
x = input()
return x
def main():
stats = {'Council': 0, 'United': 0, 'Faceless': 0, 'Warband': 0}
print(
"Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz."
)
    print('Answer the following questions by typing a, b, c or d.')
print('1/8')
print("I'd die without my...\na. Freedom\nb. Knowledge\nc. Talent\nd. Hope"
)
x = takeInput()
if x == 'a':
stats['Faceless'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['United'] += 1
print('2/8')
    print('Pick an animal:\na. Owl\nb. Leopard\nc. Elephant\nd. Octopus')
x = takeInput()
if x == 'a':
stats['Warband'] += 1
elif x == 'b':
stats['Faceless'] += 1
elif x == 'c':
stats['United'] += 1
else:
stats['Council'] += 1
print('3/8')
print(
"""Wars are won...
a. In the heat of battle
b. In the planning room
c. With unbreaking resolve
d. By the unpredictable"""
)
x = takeInput()
if x == 'a':
stats['Warband'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['United'] += 1
else:
stats['Faceless'] += 1
print('4/8')
print(
"""The perfect team would never...
a. Give up
b. Lose focus
c. Tell me what to do
d. Feed my opponent"""
)
x = takeInput()
if x == 'a':
stats['United'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['Faceless'] += 1
print('5/8')
print(
"""The enemy team is winning on all fronts. What do you do?
a. Outmaneuver them to steal some objectives
b. Rally my team for a final stand
c. Go pentakill them, like I always do
d. This is right where I want them--I'll explain later"""
)
x = takeInput()
if x == 'a':
stats['Faceless'] += 1
elif x == 'b':
stats['United'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['Council'] += 1
print('6/8')
print(
"What's your favorite time of the day\na. Dawn\nb. Day\nc. Dusk\nd. Night"
)
x = takeInput()
if x == 'a':
stats['United'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Faceless'] += 1
else:
stats['Warband'] += 1
print('7/8')
print(
"""Which of these sounds like you
a. "Can we please group"
b. "Trust me. I'm not trolling"
c. "ez"
d. "WINNABLE\""""
)
x = takeInput()
if x == 'a':
stats['Council'] += 1
elif x == 'b':
stats['Faceless'] += 1
elif x == 'c':
stats['Warband'] += 1
else:
stats['United'] += 1
print('8/8')
print(
"""I want to be seen as a(n)...
a. Selfless leader
b. Brilliant tactician
c. Crafty wildcard
d. Elite fighter"""
)
x = takeInput()
if x == 'a':
stats['United'] += 1
elif x == 'b':
stats['Council'] += 1
elif x == 'c':
stats['Faceless'] += 1
else:
stats['Warband'] += 1
print('\n')
result = max(stats.items(), key=operator.itemgetter(1))[0]
print('Congratulations! You are a ' + result)
main()
<|reserved_special_token_1|>
# ----------------------------------------------------------------------------
# Written by Khanh Nguyen Le
# May 4th 2019
# Discord: https://discord.io/skyrst
# ----------------------------------------------------------------------------
import operator
def validInput(x):
if x=="a": return True
elif x=="b": return True
elif x=="c": return True
elif x=="d": return True
else: return False
def takeInput():
x=input()
while not validInput(x):
print("Invalid input. Try another one:")
x=input()
return x
def main():
stats = {'Council':0, 'United':0, 'Faceless': 0, 'Warband':0}
print("Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz.")
print("Answer the the following questions by typing a, b, c or d.")
print("1/8")
print("I'd die without my...\na. Freedom\nb. Knowledge\nc. Talent\nd. Hope")
x = takeInput()
if x=="a":
stats['Faceless'] += 1
elif x=="b":
stats['Council']+= 1
elif x=="c":
stats['Warband']+= 1
else:
stats['United']+= 1
print("2/8")
print("Pick an animal:\na. Owl\nb. Leopard\nc. Elepant\nd. Octopus")
x = takeInput()
if x=="a":
stats['Warband'] += 1
elif x=="b":
stats['Faceless']+= 1
elif x=="c":
stats['United']+= 1
else:
stats['Council']+= 1
print("3/8")
print("Wars are won...\na. In the heat of battle\nb. In the planning room\nc. With unbreaking resolve\nd. By the unpredictable")
x = takeInput()
if x=="a":
stats['Warband'] += 1
elif x=="b":
stats['Council']+= 1
elif x=="c":
stats['United']+= 1
else:
stats['Faceless']+= 1
print("4/8")
print("The perfect team would never...\na. Give up\nb. Lose focus\nc. Tell me what to do\nd. Feed my opponent")
x = takeInput()
if x=="a":
stats['United'] += 1
elif x=="b":
stats['Council']+= 1
elif x=="c":
stats['Warband']+= 1
else:
stats['Faceless']+= 1
print("5/8")
print("The enemy team is winning on all fronts. What do you do?\na. Outmaneuver them to steal some objectives\nb. Rally my team for a final stand\nc. Go pentakill them, like I always do\nd. This is right where I want them--I'll explain later")
x = takeInput()
if x=="a":
stats['Faceless'] += 1
elif x=="b":
stats['United']+= 1
elif x=="c":
stats['Warband']+= 1
else:
stats['Council']+= 1
print("6/8")
print("What's your favorite time of the day\na. Dawn\nb. Day\nc. Dusk\nd. Night")
x = takeInput()
if x=="a":
stats['United'] += 1
elif x=="b":
stats['Council']+= 1
elif x=="c":
stats['Faceless']+= 1
else:
stats['Warband']+= 1
print("7/8")
print("Which of these sounds like you\na. \"Can we please group\"\nb. \"Trust me. I'm not trolling\"\nc. \"ez\"\nd. \"WINNABLE\"")
x = takeInput()
if x=="a":
stats['Council'] += 1
elif x=="b":
stats['Faceless']+= 1
elif x=="c":
stats['Warband']+= 1
else:
stats['United']+= 1
print("8/8")
print("I want to be seen as a(n)...\na. Selfless leader\nb. Brilliant tactician\nc. Crafty wildcard\nd. Elite fighter")
x = takeInput()
if x=="a":
stats['United'] += 1
elif x=="b":
stats['Council']+= 1
elif x=="c":
stats['Faceless']+= 1
else:
stats['Warband']+= 1
print("\n")
result = max(stats.items(), key=operator.itemgetter(1))[0]
print("Congratulations! You are a " +result)
main()
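# A minimal sketch (not part of the original quiz) of how the result line
# above resolves ties: max() with operator.itemgetter(1) returns the first
# key holding the highest count, so dict insertion order breaks ties.
# The scores below are hypothetical.
demo_stats = {'Council': 3, 'United': 3, 'Faceless': 1, 'Warband': 1}
print(max(demo_stats.items(), key=operator.itemgetter(1))[0])  # prints 'Council'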
|
flexible
|
{
"blob_id": "5209638ec97a666783c102bec7a2b00991c41a08",
"index": 5438,
"step-1": "<mask token>\n\n\ndef takeInput():\n x = input()\n while not validInput(x):\n print('Invalid input. Try another one:')\n x = input()\n return x\n\n\ndef main():\n stats = {'Council': 0, 'United': 0, 'Faceless': 0, 'Warband': 0}\n print(\n \"Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz.\"\n )\n print('Answer the the following questions by typing a, b, c or d.')\n print('1/8')\n print(\"I'd die without my...\\na. Freedom\\nb. Knowledge\\nc. Talent\\nd. Hope\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('2/8')\n print('Pick an animal:\\na. Owl\\nb. Leopard\\nc. Elepant\\nd. Octopus')\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Council'] += 1\n print('3/8')\n print(\n \"\"\"Wars are won...\na. In the heat of battle\nb. In the planning room\nc. With unbreaking resolve\nd. By the unpredictable\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Faceless'] += 1\n print('4/8')\n print(\n \"\"\"The perfect team would never...\na. Give up\nb. Lose focus\nc. Tell me what to do\nd. Feed my opponent\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Faceless'] += 1\n print('5/8')\n print(\n \"\"\"The enemy team is winning on all fronts. What do you do?\na. Outmaneuver them to steal some objectives\nb. Rally my team for a final stand\nc. Go pentakill them, like I always do\nd. This is right where I want them--I'll explain later\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['United'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Council'] += 1\n print('6/8')\n print(\n \"What's your favorite time of the day\\na. Dawn\\nb. Day\\nc. Dusk\\nd. Night\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('7/8')\n print(\n \"\"\"Which of these sounds like you\na. \"Can we please group\"\nb. \"Trust me. I'm not trolling\"\nc. \"ez\"\nd. \"WINNABLE\\\"\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Council'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('8/8')\n print(\n \"\"\"I want to be seen as a(n)...\na. Selfless leader\nb. Brilliant tactician\nc. Crafty wildcard\nd. Elite fighter\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('\\n')\n result = max(stats.items(), key=operator.itemgetter(1))[0]\n print('Congratulations! You are a ' + result)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef validInput(x):\n if x == 'a':\n return True\n elif x == 'b':\n return True\n elif x == 'c':\n return True\n elif x == 'd':\n return True\n else:\n return False\n\n\ndef takeInput():\n x = input()\n while not validInput(x):\n print('Invalid input. Try another one:')\n x = input()\n return x\n\n\ndef main():\n stats = {'Council': 0, 'United': 0, 'Faceless': 0, 'Warband': 0}\n print(\n \"Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz.\"\n )\n print('Answer the the following questions by typing a, b, c or d.')\n print('1/8')\n print(\"I'd die without my...\\na. Freedom\\nb. Knowledge\\nc. Talent\\nd. Hope\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('2/8')\n print('Pick an animal:\\na. Owl\\nb. Leopard\\nc. Elepant\\nd. Octopus')\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Council'] += 1\n print('3/8')\n print(\n \"\"\"Wars are won...\na. In the heat of battle\nb. In the planning room\nc. With unbreaking resolve\nd. By the unpredictable\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Faceless'] += 1\n print('4/8')\n print(\n \"\"\"The perfect team would never...\na. Give up\nb. Lose focus\nc. Tell me what to do\nd. Feed my opponent\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Faceless'] += 1\n print('5/8')\n print(\n \"\"\"The enemy team is winning on all fronts. What do you do?\na. Outmaneuver them to steal some objectives\nb. Rally my team for a final stand\nc. Go pentakill them, like I always do\nd. This is right where I want them--I'll explain later\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['United'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Council'] += 1\n print('6/8')\n print(\n \"What's your favorite time of the day\\na. Dawn\\nb. Day\\nc. Dusk\\nd. Night\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('7/8')\n print(\n \"\"\"Which of these sounds like you\na. \"Can we please group\"\nb. \"Trust me. I'm not trolling\"\nc. \"ez\"\nd. \"WINNABLE\\\"\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Council'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('8/8')\n print(\n \"\"\"I want to be seen as a(n)...\na. Selfless leader\nb. Brilliant tactician\nc. Crafty wildcard\nd. Elite fighter\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('\\n')\n result = max(stats.items(), key=operator.itemgetter(1))[0]\n print('Congratulations! You are a ' + result)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef validInput(x):\n if x == 'a':\n return True\n elif x == 'b':\n return True\n elif x == 'c':\n return True\n elif x == 'd':\n return True\n else:\n return False\n\n\ndef takeInput():\n x = input()\n while not validInput(x):\n print('Invalid input. Try another one:')\n x = input()\n return x\n\n\ndef main():\n stats = {'Council': 0, 'United': 0, 'Faceless': 0, 'Warband': 0}\n print(\n \"Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz.\"\n )\n print('Answer the the following questions by typing a, b, c or d.')\n print('1/8')\n print(\"I'd die without my...\\na. Freedom\\nb. Knowledge\\nc. Talent\\nd. Hope\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('2/8')\n print('Pick an animal:\\na. Owl\\nb. Leopard\\nc. Elepant\\nd. Octopus')\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Council'] += 1\n print('3/8')\n print(\n \"\"\"Wars are won...\na. In the heat of battle\nb. In the planning room\nc. With unbreaking resolve\nd. By the unpredictable\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Faceless'] += 1\n print('4/8')\n print(\n \"\"\"The perfect team would never...\na. Give up\nb. Lose focus\nc. Tell me what to do\nd. Feed my opponent\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Faceless'] += 1\n print('5/8')\n print(\n \"\"\"The enemy team is winning on all fronts. What do you do?\na. Outmaneuver them to steal some objectives\nb. Rally my team for a final stand\nc. Go pentakill them, like I always do\nd. This is right where I want them--I'll explain later\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['United'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Council'] += 1\n print('6/8')\n print(\n \"What's your favorite time of the day\\na. Dawn\\nb. Day\\nc. Dusk\\nd. Night\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('7/8')\n print(\n \"\"\"Which of these sounds like you\na. \"Can we please group\"\nb. \"Trust me. I'm not trolling\"\nc. \"ez\"\nd. \"WINNABLE\\\"\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Council'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('8/8')\n print(\n \"\"\"I want to be seen as a(n)...\na. Selfless leader\nb. Brilliant tactician\nc. Crafty wildcard\nd. Elite fighter\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('\\n')\n result = max(stats.items(), key=operator.itemgetter(1))[0]\n print('Congratulations! You are a ' + result)\n\n\nmain()\n",
"step-4": "import operator\n\n\ndef validInput(x):\n if x == 'a':\n return True\n elif x == 'b':\n return True\n elif x == 'c':\n return True\n elif x == 'd':\n return True\n else:\n return False\n\n\ndef takeInput():\n x = input()\n while not validInput(x):\n print('Invalid input. Try another one:')\n x = input()\n return x\n\n\ndef main():\n stats = {'Council': 0, 'United': 0, 'Faceless': 0, 'Warband': 0}\n print(\n \"Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz.\"\n )\n print('Answer the the following questions by typing a, b, c or d.')\n print('1/8')\n print(\"I'd die without my...\\na. Freedom\\nb. Knowledge\\nc. Talent\\nd. Hope\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('2/8')\n print('Pick an animal:\\na. Owl\\nb. Leopard\\nc. Elepant\\nd. Octopus')\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Council'] += 1\n print('3/8')\n print(\n \"\"\"Wars are won...\na. In the heat of battle\nb. In the planning room\nc. With unbreaking resolve\nd. By the unpredictable\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Faceless'] += 1\n print('4/8')\n print(\n \"\"\"The perfect team would never...\na. Give up\nb. Lose focus\nc. Tell me what to do\nd. Feed my opponent\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Faceless'] += 1\n print('5/8')\n print(\n \"\"\"The enemy team is winning on all fronts. What do you do?\na. Outmaneuver them to steal some objectives\nb. Rally my team for a final stand\nc. Go pentakill them, like I always do\nd. This is right where I want them--I'll explain later\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['United'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Council'] += 1\n print('6/8')\n print(\n \"What's your favorite time of the day\\na. Dawn\\nb. Day\\nc. Dusk\\nd. Night\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('7/8')\n print(\n \"\"\"Which of these sounds like you\na. \"Can we please group\"\nb. \"Trust me. I'm not trolling\"\nc. \"ez\"\nd. \"WINNABLE\\\"\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Council'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('8/8')\n print(\n \"\"\"I want to be seen as a(n)...\na. Selfless leader\nb. Brilliant tactician\nc. Crafty wildcard\nd. Elite fighter\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('\\n')\n result = max(stats.items(), key=operator.itemgetter(1))[0]\n print('Congratulations! You are a ' + result)\n\n\nmain()\n",
"step-5": "# ----------------------------------------------------------------------------\r\n# Written by Khanh Nguyen Le\r\n# May 4th 2019\r\n# Discord: https://discord.io/skyrst\r\n# ----------------------------------------------------------------------------\r\nimport operator\r\ndef validInput(x):\r\n if x==\"a\": return True\r\n elif x==\"b\": return True\r\n elif x==\"c\": return True\r\n elif x==\"d\": return True\r\n else: return False\r\n \r\ndef takeInput():\r\n x=input()\r\n while not validInput(x):\r\n print(\"Invalid input. Try another one:\")\r\n x=input()\r\n return x\r\n \r\ndef main():\r\n stats = {'Council':0, 'United':0, 'Faceless': 0, 'Warband':0}\r\n print(\"Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz.\")\r\n print(\"Answer the the following questions by typing a, b, c or d.\")\r\n print(\"1/8\")\r\n print(\"I'd die without my...\\na. Freedom\\nb. Knowledge\\nc. Talent\\nd. Hope\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['Faceless'] += 1\r\n elif x==\"b\":\r\n stats['Council']+= 1\r\n elif x==\"c\":\r\n stats['Warband']+= 1\r\n else:\r\n stats['United']+= 1\r\n print(\"2/8\")\r\n print(\"Pick an animal:\\na. Owl\\nb. Leopard\\nc. Elepant\\nd. Octopus\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['Warband'] += 1\r\n elif x==\"b\":\r\n stats['Faceless']+= 1\r\n elif x==\"c\":\r\n stats['United']+= 1\r\n else:\r\n stats['Council']+= 1\r\n print(\"3/8\")\r\n print(\"Wars are won...\\na. In the heat of battle\\nb. In the planning room\\nc. With unbreaking resolve\\nd. By the unpredictable\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['Warband'] += 1\r\n elif x==\"b\":\r\n stats['Council']+= 1\r\n elif x==\"c\":\r\n stats['United']+= 1\r\n else:\r\n stats['Faceless']+= 1\r\n print(\"4/8\")\r\n print(\"The perfect team would never...\\na. Give up\\nb. Lose focus\\nc. Tell me what to do\\nd. Feed my opponent\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['United'] += 1\r\n elif x==\"b\":\r\n stats['Council']+= 1\r\n elif x==\"c\":\r\n stats['Warband']+= 1\r\n else:\r\n stats['Faceless']+= 1\r\n print(\"5/8\")\r\n print(\"The enemy team is winning on all fronts. What do you do?\\na. Outmaneuver them to steal some objectives\\nb. Rally my team for a final stand\\nc. Go pentakill them, like I always do\\nd. This is right where I want them--I'll explain later\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['Faceless'] += 1\r\n elif x==\"b\":\r\n stats['United']+= 1\r\n elif x==\"c\":\r\n stats['Warband']+= 1\r\n else:\r\n stats['Council']+= 1\r\n print(\"6/8\")\r\n print(\"What's your favorite time of the day\\na. Dawn\\nb. Day\\nc. Dusk\\nd. Night\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['United'] += 1\r\n elif x==\"b\":\r\n stats['Council']+= 1\r\n elif x==\"c\":\r\n stats['Faceless']+= 1\r\n else:\r\n stats['Warband']+= 1\r\n print(\"7/8\")\r\n print(\"Which of these sounds like you\\na. \\\"Can we please group\\\"\\nb. \\\"Trust me. I'm not trolling\\\"\\nc. \\\"ez\\\"\\nd. \\\"WINNABLE\\\"\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['Council'] += 1\r\n elif x==\"b\":\r\n stats['Faceless']+= 1\r\n elif x==\"c\":\r\n stats['Warband']+= 1\r\n else:\r\n stats['United']+= 1\r\n print(\"8/8\")\r\n print(\"I want to be seen as a(n)...\\na. Selfless leader\\nb. Brilliant tactician\\nc. Crafty wildcard\\nd. 
Elite fighter\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['United'] += 1\r\n elif x==\"b\":\r\n stats['Council']+= 1\r\n elif x==\"c\":\r\n stats['Faceless']+= 1\r\n else:\r\n stats['Warband']+= 1\r\n print(\"\\n\")\r\n \r\n result = max(stats.items(), key=operator.itemgetter(1))[0]\r\n print(\"Congratulations! You are a \" +result)\r\n\r\n\r\nmain()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import cv2
import openslide
class QualityPatch():
def __init__(self, original_img_path,label_img_path,patch_level,patch_size):
"""
parameter:
original_img_path(str): the source of image
label_img_path(str): label image
patch_level(int): the level that the patch belongs to
patch_size(tuple): size of patch(x,y)
attributes:
self.slide(Openslide): the slide that the patch belongs to
            self.original_img_path(str) : the path of the slide
self.label_img_path(str) : label_img_path
self.patch_level(int) : the level that the patch belongs to
self.patch_size = patch_size
self.scale(int) : the magnification of the slide that the patch belongs to with level_max baseline
self.label(np array) : the image of label
self.label_size(tuple) : the size of label
            self.adj_patch_size_label(tuple) : the patch size rescaled to label resolution, +1 in each dimension so it is never zero
"""
self.slide = openslide.OpenSlide(original_img_path)
slide_width, slide_height = self.slide.dimensions
self.label = (cv2.imread(label_img_path,cv2.IMREAD_GRAYSCALE)/255)
self.patch_coors = [(w,h) for w in range(0, slide_width - patch_size[0], patch_size[0]) for h in range(0, slide_height - patch_size[1],patch_size[1])]
self.original_img_path = original_img_path
self.label_img_path = label_img_path
self.patch_level = patch_level
self.patch_size = patch_size
self.label = self.label.T
self.level_dim = self.slide.level_dimensions[patch_level]
self.label_size = self.label.shape
self.scale = (self.label_size[0]/self.level_dim[0], self.label_size[1]/self.level_dim[1])
self.adj_patch_size_label = self.calculateAdjPatchSize()
def calculateLabelCoordinates(self, patch_location):
return (int(self.scale[0]*patch_location[0]/2**(self.patch_level)), int(self.scale[1]*patch_location[1]/2**(self.patch_level)))
def calculateAdjPatchSize(self):
return (int(self.scale[0] * self.patch_size[0])+1, int(self.scale[1] * self.patch_size[1])+1)
def patchQualityInsurance(self, patch_location):
label_coordinates = self.calculateLabelCoordinates(patch_location)
percent = (np.sum(self.label[label_coordinates[0]:label_coordinates[0]+self.adj_patch_size_label[0],label_coordinates[1]:label_coordinates[1]+self.adj_patch_size_label[1]]))/(self.adj_patch_size_label[0]*self.adj_patch_size_label[1])
return percent
def getLabelWithPatchLocation(self, patch_location):
patch_image = np.ones(self.adj_patch_size_label)/2
label_with_patch_location = self.label.copy()
label_coordinates = self.calculateLabelCoordinates(patch_location)
label_with_patch_location[label_coordinates[0]:label_coordinates[0]+self.adj_patch_size_label[0],label_coordinates[1]:label_coordinates[1]+self.adj_patch_size_label[1]] = patch_image
return label_with_patch_location.T
def getReleventPatches(self):
relevent_patches = []
for i, coor in enumerate(self.patch_coors):
percent = self.patchQualityInsurance(coor)
if percent > .5:
relevent_patches.append([coor,percent])
if i % 10000 == 0:
print(i, "/",len(self.patch_coors), "dic len", len(relevent_patches), " from", len(self.patch_coors) )
return relevent_patches
def checkingfunction(self, checking_coors=(40000,90000)):
        if checking_coors[0] < 0 or checking_coors[1] < 0 or\
self.slide.level_dimensions[self.patch_level][0] < (checking_coors[0] / 2**(self.patch_level) + self.patch_size[0]) or\
self.slide.level_dimensions[self.patch_level][1] < ((checking_coors[1] / 2**(self.patch_level) + self.patch_size[1])):
raise ValueError("the patch location with patch size is not valid.")
image = self.slide.read_region(checking_coors, self.patch_level, self.patch_size)
percent = self.patchQualityInsurance(checking_coors)
fig, ax = plt.subplots(nrows=1, ncols=3)
plt.tight_layout()
ax[0].set_title("tissue percentage %.02f"%percent)
ax[0].axis('off')
ax[0].imshow(image)
ax[1].set_title("tissue label")
ax[1].axis('off')
ax[1].imshow(self.label.T, cmap='gray')
ax[2].set_title("label with patch")
ax[2].axis('off')
ax[2].imshow(self.getLabelWithPatchLocation(checking_coors))
plt.savefig("test/check_read_region"+str(self.patch_level)+'.png')
plt.close('all')
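# A hedged usage sketch for QualityPatch: the slide and label paths below are
# hypothetical placeholders, and the 0.5 threshold mirrors the one hard-coded
# in getReleventPatches().
if __name__ == '__main__':
    qp = QualityPatch('slides/slide.tif', 'slides/label.png',  # hypothetical files
                      patch_level=2, patch_size=(256, 256))
    kept = qp.getReleventPatches()
    print(len(kept), 'patches passed the 50% tissue check')
    if kept:
        qp.checkingfunction(kept[0][0])  # saves a figure for the first kept patch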
|
normal
|
{
"blob_id": "0ad71f02e37f2744036b134c33e037a724fd38a6",
"index": 8049,
"step-1": "<mask token>\n\n\nclass QualityPatch:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def getReleventPatches(self):\n relevent_patches = []\n for i, coor in enumerate(self.patch_coors):\n percent = self.patchQualityInsurance(coor)\n if percent > 0.5:\n relevent_patches.append([coor, percent])\n if i % 10000 == 0:\n print(i, '/', len(self.patch_coors), 'dic len', len(\n relevent_patches), ' from', len(self.patch_coors))\n return relevent_patches\n\n def checkingfunction(self, checking_coors=(40000, 90000)):\n if checking_coors[0] < 0 or checking_coors[0\n ] < 0 or self.slide.level_dimensions[self.patch_level][0\n ] < checking_coors[0] / 2 ** self.patch_level + self.patch_size[0\n ] or self.slide.level_dimensions[self.patch_level][1\n ] < checking_coors[1] / 2 ** self.patch_level + self.patch_size[1]:\n raise ValueError('the patch location with patch size is not valid.'\n )\n image = self.slide.read_region(checking_coors, self.patch_level,\n self.patch_size)\n percent = self.patchQualityInsurance(checking_coors)\n fig, ax = plt.subplots(nrows=1, ncols=3)\n plt.tight_layout()\n ax[0].set_title('tissue percentage %.02f' % percent)\n ax[0].axis('off')\n ax[0].imshow(image)\n ax[1].set_title('tissue label')\n ax[1].axis('off')\n ax[1].imshow(self.label.T, cmap='gray')\n ax[2].set_title('label with patch')\n ax[2].axis('off')\n ax[2].imshow(self.getLabelWithPatchLocation(checking_coors))\n plt.savefig('test/check_read_region' + str(self.patch_level) + '.png')\n plt.close('all')\n",
"step-2": "<mask token>\n\n\nclass QualityPatch:\n\n def __init__(self, original_img_path, label_img_path, patch_level,\n patch_size):\n \"\"\"\n parameter:\n original_img_path(str): the source of image\n label_img_path(str): label image\n patch_level(int): the level that the patch belongs to\n patch_size(tuple): size of patch(x,y)\n\n attributes:\n self.slide(Openslide): the slide that the patch belongs to \n self.original_img_path(str) : the path of the lide\n self.label_img_path(str) : label_img_path\n self.patch_level(int) : the level that the patch belongs to\n self.patch_size = patch_size\n\n self.scale(int) : the magnification of the slide that the patch belongs to with level_max baseline\n self.label(np array) : the image of label\n self.label_size(tuple) : the size of label\n self.adj_patch_size_label(tuple) : considering the slide is rescaled to self.label_size the size is zero, it is 1\n \"\"\"\n self.slide = openslide.OpenSlide(original_img_path)\n slide_width, slide_height = self.slide.dimensions\n self.label = cv2.imread(label_img_path, cv2.IMREAD_GRAYSCALE) / 255\n self.patch_coors = [(w, h) for w in range(0, slide_width -\n patch_size[0], patch_size[0]) for h in range(0, slide_height -\n patch_size[1], patch_size[1])]\n self.original_img_path = original_img_path\n self.label_img_path = label_img_path\n self.patch_level = patch_level\n self.patch_size = patch_size\n self.label = self.label.T\n self.level_dim = self.slide.level_dimensions[patch_level]\n self.label_size = self.label.shape\n self.scale = self.label_size[0] / self.level_dim[0], self.label_size[1\n ] / self.level_dim[1]\n self.adj_patch_size_label = self.calculateAdjPatchSize()\n <mask token>\n\n def calculateAdjPatchSize(self):\n return int(self.scale[0] * self.patch_size[0]) + 1, int(self.scale[\n 1] * self.patch_size[1]) + 1\n\n def patchQualityInsurance(self, patch_location):\n label_coordinates = self.calculateLabelCoordinates(patch_location)\n percent = np.sum(self.label[label_coordinates[0]:label_coordinates[\n 0] + self.adj_patch_size_label[0], label_coordinates[1]:\n label_coordinates[1] + self.adj_patch_size_label[1]]) / (self.\n adj_patch_size_label[0] * self.adj_patch_size_label[1])\n return percent\n <mask token>\n\n def getReleventPatches(self):\n relevent_patches = []\n for i, coor in enumerate(self.patch_coors):\n percent = self.patchQualityInsurance(coor)\n if percent > 0.5:\n relevent_patches.append([coor, percent])\n if i % 10000 == 0:\n print(i, '/', len(self.patch_coors), 'dic len', len(\n relevent_patches), ' from', len(self.patch_coors))\n return relevent_patches\n\n def checkingfunction(self, checking_coors=(40000, 90000)):\n if checking_coors[0] < 0 or checking_coors[0\n ] < 0 or self.slide.level_dimensions[self.patch_level][0\n ] < checking_coors[0] / 2 ** self.patch_level + self.patch_size[0\n ] or self.slide.level_dimensions[self.patch_level][1\n ] < checking_coors[1] / 2 ** self.patch_level + self.patch_size[1]:\n raise ValueError('the patch location with patch size is not valid.'\n )\n image = self.slide.read_region(checking_coors, self.patch_level,\n self.patch_size)\n percent = self.patchQualityInsurance(checking_coors)\n fig, ax = plt.subplots(nrows=1, ncols=3)\n plt.tight_layout()\n ax[0].set_title('tissue percentage %.02f' % percent)\n ax[0].axis('off')\n ax[0].imshow(image)\n ax[1].set_title('tissue label')\n ax[1].axis('off')\n ax[1].imshow(self.label.T, cmap='gray')\n ax[2].set_title('label with patch')\n ax[2].axis('off')\n 
ax[2].imshow(self.getLabelWithPatchLocation(checking_coors))\n plt.savefig('test/check_read_region' + str(self.patch_level) + '.png')\n plt.close('all')\n",
"step-3": "<mask token>\n\n\nclass QualityPatch:\n\n def __init__(self, original_img_path, label_img_path, patch_level,\n patch_size):\n \"\"\"\n parameter:\n original_img_path(str): the source of image\n label_img_path(str): label image\n patch_level(int): the level that the patch belongs to\n patch_size(tuple): size of patch(x,y)\n\n attributes:\n self.slide(Openslide): the slide that the patch belongs to \n self.original_img_path(str) : the path of the lide\n self.label_img_path(str) : label_img_path\n self.patch_level(int) : the level that the patch belongs to\n self.patch_size = patch_size\n\n self.scale(int) : the magnification of the slide that the patch belongs to with level_max baseline\n self.label(np array) : the image of label\n self.label_size(tuple) : the size of label\n self.adj_patch_size_label(tuple) : considering the slide is rescaled to self.label_size the size is zero, it is 1\n \"\"\"\n self.slide = openslide.OpenSlide(original_img_path)\n slide_width, slide_height = self.slide.dimensions\n self.label = cv2.imread(label_img_path, cv2.IMREAD_GRAYSCALE) / 255\n self.patch_coors = [(w, h) for w in range(0, slide_width -\n patch_size[0], patch_size[0]) for h in range(0, slide_height -\n patch_size[1], patch_size[1])]\n self.original_img_path = original_img_path\n self.label_img_path = label_img_path\n self.patch_level = patch_level\n self.patch_size = patch_size\n self.label = self.label.T\n self.level_dim = self.slide.level_dimensions[patch_level]\n self.label_size = self.label.shape\n self.scale = self.label_size[0] / self.level_dim[0], self.label_size[1\n ] / self.level_dim[1]\n self.adj_patch_size_label = self.calculateAdjPatchSize()\n\n def calculateLabelCoordinates(self, patch_location):\n return int(self.scale[0] * patch_location[0] / 2 ** self.patch_level\n ), int(self.scale[1] * patch_location[1] / 2 ** self.patch_level)\n\n def calculateAdjPatchSize(self):\n return int(self.scale[0] * self.patch_size[0]) + 1, int(self.scale[\n 1] * self.patch_size[1]) + 1\n\n def patchQualityInsurance(self, patch_location):\n label_coordinates = self.calculateLabelCoordinates(patch_location)\n percent = np.sum(self.label[label_coordinates[0]:label_coordinates[\n 0] + self.adj_patch_size_label[0], label_coordinates[1]:\n label_coordinates[1] + self.adj_patch_size_label[1]]) / (self.\n adj_patch_size_label[0] * self.adj_patch_size_label[1])\n return percent\n <mask token>\n\n def getReleventPatches(self):\n relevent_patches = []\n for i, coor in enumerate(self.patch_coors):\n percent = self.patchQualityInsurance(coor)\n if percent > 0.5:\n relevent_patches.append([coor, percent])\n if i % 10000 == 0:\n print(i, '/', len(self.patch_coors), 'dic len', len(\n relevent_patches), ' from', len(self.patch_coors))\n return relevent_patches\n\n def checkingfunction(self, checking_coors=(40000, 90000)):\n if checking_coors[0] < 0 or checking_coors[0\n ] < 0 or self.slide.level_dimensions[self.patch_level][0\n ] < checking_coors[0] / 2 ** self.patch_level + self.patch_size[0\n ] or self.slide.level_dimensions[self.patch_level][1\n ] < checking_coors[1] / 2 ** self.patch_level + self.patch_size[1]:\n raise ValueError('the patch location with patch size is not valid.'\n )\n image = self.slide.read_region(checking_coors, self.patch_level,\n self.patch_size)\n percent = self.patchQualityInsurance(checking_coors)\n fig, ax = plt.subplots(nrows=1, ncols=3)\n plt.tight_layout()\n ax[0].set_title('tissue percentage %.02f' % percent)\n ax[0].axis('off')\n ax[0].imshow(image)\n ax[1].set_title('tissue 
label')\n ax[1].axis('off')\n ax[1].imshow(self.label.T, cmap='gray')\n ax[2].set_title('label with patch')\n ax[2].axis('off')\n ax[2].imshow(self.getLabelWithPatchLocation(checking_coors))\n plt.savefig('test/check_read_region' + str(self.patch_level) + '.png')\n plt.close('all')\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport cv2\nimport openslide\n\n\nclass QualityPatch:\n\n def __init__(self, original_img_path, label_img_path, patch_level,\n patch_size):\n \"\"\"\n parameter:\n original_img_path(str): the source of image\n label_img_path(str): label image\n patch_level(int): the level that the patch belongs to\n patch_size(tuple): size of patch(x,y)\n\n attributes:\n self.slide(Openslide): the slide that the patch belongs to \n self.original_img_path(str) : the path of the lide\n self.label_img_path(str) : label_img_path\n self.patch_level(int) : the level that the patch belongs to\n self.patch_size = patch_size\n\n self.scale(int) : the magnification of the slide that the patch belongs to with level_max baseline\n self.label(np array) : the image of label\n self.label_size(tuple) : the size of label\n self.adj_patch_size_label(tuple) : considering the slide is rescaled to self.label_size the size is zero, it is 1\n \"\"\"\n self.slide = openslide.OpenSlide(original_img_path)\n slide_width, slide_height = self.slide.dimensions\n self.label = cv2.imread(label_img_path, cv2.IMREAD_GRAYSCALE) / 255\n self.patch_coors = [(w, h) for w in range(0, slide_width -\n patch_size[0], patch_size[0]) for h in range(0, slide_height -\n patch_size[1], patch_size[1])]\n self.original_img_path = original_img_path\n self.label_img_path = label_img_path\n self.patch_level = patch_level\n self.patch_size = patch_size\n self.label = self.label.T\n self.level_dim = self.slide.level_dimensions[patch_level]\n self.label_size = self.label.shape\n self.scale = self.label_size[0] / self.level_dim[0], self.label_size[1\n ] / self.level_dim[1]\n self.adj_patch_size_label = self.calculateAdjPatchSize()\n\n def calculateLabelCoordinates(self, patch_location):\n return int(self.scale[0] * patch_location[0] / 2 ** self.patch_level\n ), int(self.scale[1] * patch_location[1] / 2 ** self.patch_level)\n\n def calculateAdjPatchSize(self):\n return int(self.scale[0] * self.patch_size[0]) + 1, int(self.scale[\n 1] * self.patch_size[1]) + 1\n\n def patchQualityInsurance(self, patch_location):\n label_coordinates = self.calculateLabelCoordinates(patch_location)\n percent = np.sum(self.label[label_coordinates[0]:label_coordinates[\n 0] + self.adj_patch_size_label[0], label_coordinates[1]:\n label_coordinates[1] + self.adj_patch_size_label[1]]) / (self.\n adj_patch_size_label[0] * self.adj_patch_size_label[1])\n return percent\n\n def getLabelWithPatchLocation(self, patch_location):\n patch_image = np.ones(self.adj_patch_size_label) / 2\n label_with_patch_location = self.label.copy()\n label_coordinates = self.calculateLabelCoordinates(patch_location)\n label_with_patch_location[label_coordinates[0]:label_coordinates[0] +\n self.adj_patch_size_label[0], label_coordinates[1]:\n label_coordinates[1] + self.adj_patch_size_label[1]] = patch_image\n return label_with_patch_location.T\n\n def getReleventPatches(self):\n relevent_patches = []\n for i, coor in enumerate(self.patch_coors):\n percent = self.patchQualityInsurance(coor)\n if percent > 0.5:\n relevent_patches.append([coor, percent])\n if i % 10000 == 0:\n print(i, '/', len(self.patch_coors), 'dic len', len(\n relevent_patches), ' from', len(self.patch_coors))\n return relevent_patches\n\n def checkingfunction(self, checking_coors=(40000, 90000)):\n if checking_coors[0] < 0 or checking_coors[0\n ] < 0 or self.slide.level_dimensions[self.patch_level][0\n ] < checking_coors[0] / 2 ** self.patch_level + 
self.patch_size[0\n ] or self.slide.level_dimensions[self.patch_level][1\n ] < checking_coors[1] / 2 ** self.patch_level + self.patch_size[1]:\n raise ValueError('the patch location with patch size is not valid.'\n )\n image = self.slide.read_region(checking_coors, self.patch_level,\n self.patch_size)\n percent = self.patchQualityInsurance(checking_coors)\n fig, ax = plt.subplots(nrows=1, ncols=3)\n plt.tight_layout()\n ax[0].set_title('tissue percentage %.02f' % percent)\n ax[0].axis('off')\n ax[0].imshow(image)\n ax[1].set_title('tissue label')\n ax[1].axis('off')\n ax[1].imshow(self.label.T, cmap='gray')\n ax[2].set_title('label with patch')\n ax[2].axis('off')\n ax[2].imshow(self.getLabelWithPatchLocation(checking_coors))\n plt.savefig('test/check_read_region' + str(self.patch_level) + '.png')\n plt.close('all')\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport cv2\nimport openslide\n\nclass QualityPatch():\n def __init__(self, original_img_path,label_img_path,patch_level,patch_size):\n \"\"\"\n parameter:\n original_img_path(str): the source of image\n label_img_path(str): label image\n patch_level(int): the level that the patch belongs to\n patch_size(tuple): size of patch(x,y)\n\n attributes:\n self.slide(Openslide): the slide that the patch belongs to \n self.original_img_path(str) : the path of the lide\n self.label_img_path(str) : label_img_path\n self.patch_level(int) : the level that the patch belongs to\n self.patch_size = patch_size\n\n self.scale(int) : the magnification of the slide that the patch belongs to with level_max baseline\n self.label(np array) : the image of label\n self.label_size(tuple) : the size of label\n self.adj_patch_size_label(tuple) : considering the slide is rescaled to self.label_size the size is zero, it is 1\n \"\"\"\n self.slide = openslide.OpenSlide(original_img_path)\n slide_width, slide_height = self.slide.dimensions\n self.label = (cv2.imread(label_img_path,cv2.IMREAD_GRAYSCALE)/255)\n self.patch_coors = [(w,h) for w in range(0, slide_width - patch_size[0], patch_size[0]) for h in range(0, slide_height - patch_size[1],patch_size[1])]\n\n self.original_img_path = original_img_path\n self.label_img_path = label_img_path\n self.patch_level = patch_level\n self.patch_size = patch_size\n self.label = self.label.T\n self.level_dim = self.slide.level_dimensions[patch_level]\n\n self.label_size = self.label.shape\n self.scale = (self.label_size[0]/self.level_dim[0], self.label_size[1]/self.level_dim[1])\n self.adj_patch_size_label = self.calculateAdjPatchSize()\n\n def calculateLabelCoordinates(self, patch_location):\n return (int(self.scale[0]*patch_location[0]/2**(self.patch_level)), int(self.scale[1]*patch_location[1]/2**(self.patch_level)))\n \n def calculateAdjPatchSize(self):\n return (int(self.scale[0] * self.patch_size[0])+1, int(self.scale[1] * self.patch_size[1])+1)\n\n def patchQualityInsurance(self, patch_location):\n label_coordinates = self.calculateLabelCoordinates(patch_location)\n percent = (np.sum(self.label[label_coordinates[0]:label_coordinates[0]+self.adj_patch_size_label[0],label_coordinates[1]:label_coordinates[1]+self.adj_patch_size_label[1]]))/(self.adj_patch_size_label[0]*self.adj_patch_size_label[1])\n\n return percent\n\n def getLabelWithPatchLocation(self, patch_location):\n patch_image = np.ones(self.adj_patch_size_label)/2\n label_with_patch_location = self.label.copy()\n label_coordinates = self.calculateLabelCoordinates(patch_location)\n label_with_patch_location[label_coordinates[0]:label_coordinates[0]+self.adj_patch_size_label[0],label_coordinates[1]:label_coordinates[1]+self.adj_patch_size_label[1]] = patch_image\n return label_with_patch_location.T\n \n def getReleventPatches(self):\n relevent_patches = []\n\n\n for i, coor in enumerate(self.patch_coors):\n percent = self.patchQualityInsurance(coor)\n if percent > .5:\n relevent_patches.append([coor,percent])\n if i % 10000 == 0:\n print(i, \"/\",len(self.patch_coors), \"dic len\", len(relevent_patches), \" from\", len(self.patch_coors) )\n return relevent_patches\n\n def checkingfunction(self, checking_coors=(40000,90000)):\n if checking_coors[0] < 0 or checking_coors[0] < 0 or\\\n self.slide.level_dimensions[self.patch_level][0] < (checking_coors[0] / 2**(self.patch_level) + self.patch_size[0]) or\\\n 
self.slide.level_dimensions[self.patch_level][1] < ((checking_coors[1] / 2**(self.patch_level) + self.patch_size[1])):\n raise ValueError(\"the patch location with patch size is not valid.\")\n \n image = self.slide.read_region(checking_coors, self.patch_level, self.patch_size)\n percent = self.patchQualityInsurance(checking_coors)\n\n fig, ax = plt.subplots(nrows=1, ncols=3)\n plt.tight_layout()\n ax[0].set_title(\"tissue percentage %.02f\"%percent)\n ax[0].axis('off')\n ax[0].imshow(image)\n ax[1].set_title(\"tissue label\")\n ax[1].axis('off')\n ax[1].imshow(self.label.T, cmap='gray')\n ax[2].set_title(\"label with patch\")\n ax[2].axis('off')\n ax[2].imshow(self.getLabelWithPatchLocation(checking_coors))\n plt.savefig(\"test/check_read_region\"+str(self.patch_level)+'.png')\n plt.close('all')\n",
"step-ids": [
3,
6,
7,
9,
10
]
}
|
[
3,
6,
7,
9,
10
] |
<|reserved_special_token_0|>
def processFrame(image_message):
frame = CvBridge().imgmsg_to_cv2(image_message)
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))
mask = cv2.dilate(mask, None, iterations=1)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.
CHAIN_APPROX_SIMPLE)[-2]
center = None
c = max(cnts, key=cv2.contourArea)
x, y, w, h = cv2.boundingRect(c)
msg = DroneArray()
drone = Drone()
drone.id = -1
drone.name = 'parrot_bebop2'
drone.box.t.linear.x = x * 100 / 640
drone.box.t.linear.y = y * 100 / 480
drone.box.w = w * 100 / 640
drone.box.h = h * 100 / 480
msg.drones.append(drone)
pub.publish(msg)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def processFrame(image_message):
frame = CvBridge().imgmsg_to_cv2(image_message)
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))
mask = cv2.dilate(mask, None, iterations=1)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.
CHAIN_APPROX_SIMPLE)[-2]
center = None
c = max(cnts, key=cv2.contourArea)
x, y, w, h = cv2.boundingRect(c)
msg = DroneArray()
drone = Drone()
drone.id = -1
drone.name = 'parrot_bebop2'
drone.box.t.linear.x = x * 100 / 640
drone.box.t.linear.y = y * 100 / 480
drone.box.w = w * 100 / 640
drone.box.h = h * 100 / 480
msg.drones.append(drone)
pub.publish(msg)
if __name__ == '__main__':
rospy.init_node('gazeboTracking', anonymous=True)
rospy.Subscriber('camera_img', Image, processFrame)
pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)
rospy.spin()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pub = None
def processFrame(image_message):
frame = CvBridge().imgmsg_to_cv2(image_message)
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))
mask = cv2.dilate(mask, None, iterations=1)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.
CHAIN_APPROX_SIMPLE)[-2]
center = None
c = max(cnts, key=cv2.contourArea)
x, y, w, h = cv2.boundingRect(c)
msg = DroneArray()
drone = Drone()
drone.id = -1
drone.name = 'parrot_bebop2'
drone.box.t.linear.x = x * 100 / 640
drone.box.t.linear.y = y * 100 / 480
drone.box.w = w * 100 / 640
drone.box.h = h * 100 / 480
msg.drones.append(drone)
pub.publish(msg)
if __name__ == '__main__':
rospy.init_node('gazeboTracking', anonymous=True)
rospy.Subscriber('camera_img', Image, processFrame)
pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)
rospy.spin()
<|reserved_special_token_1|>
import rospy
import cv2
import numpy as np
from cv_bridge import CvBridge
from matplotlib import pyplot as plt
from sensor_msgs.msg import Image
from drone_app_msgs.msg import BBox, Drone, DroneArray
from rospy.numpy_msg import numpy_msg
pub = None
def processFrame(image_message):
frame = CvBridge().imgmsg_to_cv2(image_message)
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))
mask = cv2.dilate(mask, None, iterations=1)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.
CHAIN_APPROX_SIMPLE)[-2]
center = None
c = max(cnts, key=cv2.contourArea)
x, y, w, h = cv2.boundingRect(c)
msg = DroneArray()
drone = Drone()
drone.id = -1
drone.name = 'parrot_bebop2'
drone.box.t.linear.x = x * 100 / 640
drone.box.t.linear.y = y * 100 / 480
drone.box.w = w * 100 / 640
drone.box.h = h * 100 / 480
msg.drones.append(drone)
pub.publish(msg)
if __name__ == '__main__':
rospy.init_node('gazeboTracking', anonymous=True)
rospy.Subscriber('camera_img', Image, processFrame)
pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)
rospy.spin()
<|reserved_special_token_1|>
#!/usr/bin/env python
import rospy
import cv2
import numpy as np
from cv_bridge import CvBridge
from matplotlib import pyplot as plt
from sensor_msgs.msg import Image
from drone_app_msgs.msg import BBox, Drone, DroneArray
from rospy.numpy_msg import numpy_msg
# ---------------------------------------
# This is an implementation of a simple CV
# algorithm that can be used for testing
# --- Global variables initialization ---
pub = None
# ---------------------------------------
def processFrame(image_message):
# --- Convert from ROS to OpenCV
frame = CvBridge().imgmsg_to_cv2(image_message)
# --- Threshold the image and find a mask
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))
mask = cv2.dilate(mask, None, iterations=1)
    # --- Find contours in the mask and initialize the current center
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
c = max(cnts, key=cv2.contourArea)
x,y,w,h = cv2.boundingRect(c)
# --- Pack in the message
msg = DroneArray()
drone = Drone()
drone.id = -1
drone.name = 'parrot_bebop2'
drone.box.t.linear.x = x * 100 / 640
drone.box.t.linear.y = y * 100 / 480
drone.box.w = w * 100 / 640
drone.box.h = h * 100 / 480
msg.drones.append(drone)
pub.publish(msg)
if __name__ == '__main__' :
# --- Topics
rospy.init_node('gazeboTracking', anonymous=True)
rospy.Subscriber('camera_img', Image, processFrame)
pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)
rospy.spin()
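
# A self-contained sketch (no ROS required) of the same threshold -> contour ->
# bounding-box chain used in processFrame above, run on a synthetic 640x480
# frame containing one dark square. The [-2] index keeps it working on both
# OpenCV 3 (3-tuple return) and OpenCV 4 (2-tuple return).
def _demo_bounding_box():
    frame = np.full((480, 640, 3), 255, dtype=np.uint8)  # white background
    frame[100:180, 200:320] = 0                          # one dark "drone"
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, (0, 0, 0, 0), (180, 255, 30, 0))
    mask = cv2.dilate(mask, None, iterations=1)
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if cnts:  # unlike processFrame, guard against a frame with no dark pixels
        print(cv2.boundingRect(max(cnts, key=cv2.contourArea)))  # (200, 100, 120, 80)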
|
flexible
|
{
"blob_id": "e864dad3f46fc9c6c472823bd06ce74fb5cb3f41",
"index": 462,
"step-1": "<mask token>\n\n\ndef processFrame(image_message):\n frame = CvBridge().imgmsg_to_cv2(image_message)\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x, y, w, h = cv2.boundingRect(c)\n msg = DroneArray()\n drone = Drone()\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n msg.drones.append(drone)\n pub.publish(msg)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef processFrame(image_message):\n frame = CvBridge().imgmsg_to_cv2(image_message)\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x, y, w, h = cv2.boundingRect(c)\n msg = DroneArray()\n drone = Drone()\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n msg.drones.append(drone)\n pub.publish(msg)\n\n\nif __name__ == '__main__':\n rospy.init_node('gazeboTracking', anonymous=True)\n rospy.Subscriber('camera_img', Image, processFrame)\n pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)\n rospy.spin()\n",
"step-3": "<mask token>\npub = None\n\n\ndef processFrame(image_message):\n frame = CvBridge().imgmsg_to_cv2(image_message)\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x, y, w, h = cv2.boundingRect(c)\n msg = DroneArray()\n drone = Drone()\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n msg.drones.append(drone)\n pub.publish(msg)\n\n\nif __name__ == '__main__':\n rospy.init_node('gazeboTracking', anonymous=True)\n rospy.Subscriber('camera_img', Image, processFrame)\n pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)\n rospy.spin()\n",
"step-4": "import rospy\nimport cv2\nimport numpy as np\nfrom cv_bridge import CvBridge\nfrom matplotlib import pyplot as plt\nfrom sensor_msgs.msg import Image\nfrom drone_app_msgs.msg import BBox, Drone, DroneArray\nfrom rospy.numpy_msg import numpy_msg\npub = None\n\n\ndef processFrame(image_message):\n frame = CvBridge().imgmsg_to_cv2(image_message)\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x, y, w, h = cv2.boundingRect(c)\n msg = DroneArray()\n drone = Drone()\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n msg.drones.append(drone)\n pub.publish(msg)\n\n\nif __name__ == '__main__':\n rospy.init_node('gazeboTracking', anonymous=True)\n rospy.Subscriber('camera_img', Image, processFrame)\n pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)\n rospy.spin()\n",
"step-5": "#!/usr/bin/env python\nimport rospy\nimport cv2\nimport numpy as np\nfrom cv_bridge import CvBridge\nfrom matplotlib import pyplot as plt\nfrom sensor_msgs.msg import Image\nfrom drone_app_msgs.msg import BBox, Drone, DroneArray\nfrom rospy.numpy_msg import numpy_msg\n\n# ---------------------------------------\n# This is an implementation of a simple CV\n# algorithm that can be used for testing\n# --- Global variables initialization ---\npub = None\n# ---------------------------------------\n\ndef processFrame(image_message):\n # --- Convert from ROS to OpenCV\n frame = CvBridge().imgmsg_to_cv2(image_message)\n\n # --- Threshold the image and find a mask\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n\n # --- Find contours in the mask and initialize the current\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x,y,w,h = cv2.boundingRect(c)\n\n # --- Pack in the message\n msg = DroneArray()\n drone = Drone()\n\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n\n msg.drones.append(drone)\n pub.publish(msg)\n\nif __name__ == '__main__' :\n # --- Topics\n rospy.init_node('gazeboTracking', anonymous=True)\n rospy.Subscriber('camera_img', Image, processFrame)\n pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)\n \n rospy.spin()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os
import sys
sys.path.insert(0, "/path/to/mm-api/python")
sys.path.insert(0, "/path/to/mm-api/distrib/python_osx")
print(sys.path)
import mmapi
from mmRemote import *
import mm
# assumption: we are running
examples_dir = "/dir/of/models/"
part_filename1 = os.path.join( examples_dir, "model1.stl" )
part_filename2 = os.path.join( examples_dir, "model2.stl" )
# initialize connection
remote = mmRemote()
remote.connect()
cmd = mmapi.StoredCommands()
new_obj1 = mm.append_objects_from_file(remote, part_filename1)
new_obj2 = mm.append_objects_from_file(remote, part_filename2)
#done!
remote.shutdown()
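
# A hedged variant of the same session, using only the mm-api calls already
# shown above, with try/finally so the connection is closed even if a load
# fails. Shown as a second, separate session.
remote2 = mmRemote()
remote2.connect()
try:
    for filename in (part_filename1, part_filename2):
        mm.append_objects_from_file(remote2, filename)
finally:
    remote2.shutdown()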
|
normal
|
{
"blob_id": "bf6d1ddf66bc0d54320c0491e344925a5f507df7",
"index": 861,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, '/path/to/mm-api/python')\nsys.path.insert(0, '/path/to/mm-api/distrib/python_osx')\nprint(sys.path)\n<mask token>\nremote.connect()\n<mask token>\nremote.shutdown()\n",
"step-3": "<mask token>\nsys.path.insert(0, '/path/to/mm-api/python')\nsys.path.insert(0, '/path/to/mm-api/distrib/python_osx')\nprint(sys.path)\n<mask token>\nexamples_dir = '/dir/of/models/'\npart_filename1 = os.path.join(examples_dir, 'model1.stl')\npart_filename2 = os.path.join(examples_dir, 'model2.stl')\nremote = mmRemote()\nremote.connect()\ncmd = mmapi.StoredCommands()\nnew_obj1 = mm.append_objects_from_file(remote, part_filename1)\nnew_obj1 = mm.append_objects_from_file(remote, part_filename2)\nremote.shutdown()\n",
"step-4": "import os\nimport sys\nsys.path.insert(0, '/path/to/mm-api/python')\nsys.path.insert(0, '/path/to/mm-api/distrib/python_osx')\nprint(sys.path)\nimport mmapi\nfrom mmRemote import *\nimport mm\nexamples_dir = '/dir/of/models/'\npart_filename1 = os.path.join(examples_dir, 'model1.stl')\npart_filename2 = os.path.join(examples_dir, 'model2.stl')\nremote = mmRemote()\nremote.connect()\ncmd = mmapi.StoredCommands()\nnew_obj1 = mm.append_objects_from_file(remote, part_filename1)\nnew_obj1 = mm.append_objects_from_file(remote, part_filename2)\nremote.shutdown()\n",
"step-5": "import os\nimport sys\nsys.path.insert(0, \"/path/to/mm-api/python\")\nsys.path.insert(0, \"/path/to/mm-api/distrib/python_osx\")\nprint(sys.path)\n\n\nimport mmapi\nfrom mmRemote import *\nimport mm;\n\n# assumption: we are running\nexamples_dir = \"/dir/of/models/\"\npart_filename1 = os.path.join( examples_dir, \"model1.stl\" )\npart_filename2 = os.path.join( examples_dir, \"model2.stl\" )\n\n# initialize connection\nremote = mmRemote()\nremote.connect()\n\ncmd = mmapi.StoredCommands()\n\n\nnew_obj1 = mm.append_objects_from_file(remote, part_filename1);\nnew_obj1 = mm.append_objects_from_file(remote, part_filename2);\n\n#done!\nremote.shutdown()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#https://codecombat.com/play/level/village-champion
# Incoming munchkins! Defend the town!
# Define your own function to fight the enemy!
# In the function, find an enemy, then cleave or attack it.
def attttaaaaacccckkkk():
enemy = hero.findNearest(hero.findEnemies())
#enemy = hero.findNearestEnemy()
if enemy:
if enemy and hero.isReady('cleave'):
hero.cleave(enemy)
else:
hero.attack(enemy)
# Move between patrol points and call the function.
while True:
hero.moveXY(35, 34)
# Use whatever function name you defined above.
attttaaaaacccckkkk()
hero.moveXY(47, 27)
# Call the function again.
attttaaaaacccckkkk()
hero.moveXY(60, 31)
# Call the function again.
attttaaaaacccckkkk()
|
normal
|
{
"blob_id": "ce365e011d8cc88d9aa6b4df18ea3f4e70d48f5c",
"index": 4887,
"step-1": "<mask token>\n",
"step-2": "def attttaaaaacccckkkk():\n enemy = hero.findNearest(hero.findEnemies())\n if enemy:\n if enemy and hero.isReady('cleave'):\n hero.cleave(enemy)\n else:\n hero.attack(enemy)\n\n\n<mask token>\n",
"step-3": "def attttaaaaacccckkkk():\n enemy = hero.findNearest(hero.findEnemies())\n if enemy:\n if enemy and hero.isReady('cleave'):\n hero.cleave(enemy)\n else:\n hero.attack(enemy)\n\n\nwhile True:\n hero.moveXY(35, 34)\n attttaaaaacccckkkk()\n hero.moveXY(47, 27)\n attttaaaaacccckkkk()\n hero.moveXY(60, 31)\n attttaaaaacccckkkk()\n",
"step-4": "#https://codecombat.com/play/level/village-champion\n# Incoming munchkins! Defend the town!\n\n# Define your own function to fight the enemy!\n# In the function, find an enemy, then cleave or attack it.\ndef attttaaaaacccckkkk():\n enemy = hero.findNearest(hero.findEnemies())\n #enemy = hero.findNearestEnemy()\n if enemy:\n if enemy and hero.isReady('cleave'):\n hero.cleave(enemy)\n else:\n hero.attack(enemy)\n\n# Move between patrol points and call the function.\nwhile True:\n hero.moveXY(35, 34)\n # Use whatever function name you defined above.\n attttaaaaacccckkkk()\n hero.moveXY(47, 27)\n # Call the function again.\n attttaaaaacccckkkk()\n hero.moveXY(60, 31)\n # Call the function again.\n attttaaaaacccckkkk()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import xml.etree.ElementTree as ET
#tree = ET.parse('rutas/rutas_prueba.xml')
#treeToAdd = ET.parse('rutas/rutas_prueba_agregar.xml')
#root = tree.getroot()
# rootToAdd = treeToAdd.getroot()
#for child in root:
# for test in child:
# print(test.tag, test.attrib)
#for elem in root.iter():
# print(elem.tag)
#prueba = [elem.tag for elem in root.iter()]
#print(prueba)
#print(ET.tostring(root, encoding='utf8').decode('utf8'))
# for elem in rootToAdd:
# root.append(elem)
#
# tree.write('rutas/probando_agregados.xml')
#get the tree for each routes file
rutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')
rutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')
rutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')
rutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')
rutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')
rutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')
#root for each routes tree
root1 = rutas0k_10k.getroot()
root2 = rutas10k_30k.getroot()
root3 = rutas30k_50k.getroot()
root4 = rutas50k_70k.getroot()
root5 = rutas70k_90k.getroot()
root6 = rutas90k_110k.getroot()
#each root except first root
rootsToAdd = [root2,root3,root4,root5,root6]
#add each element to the first tree
for root in rootsToAdd:
for elem in root:
root1.append(elem)
#write the tree to a new file
rutas0k_10k.write('rutas/rutas0k-110k.xml')
|
normal
|
{
"blob_id": "b4b7e20c9558bd1b29a1c1fa24bfca8a2d292b27",
"index": 398,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor root in rootsToAdd:\n for elem in root:\n root1.append(elem)\nrutas0k_10k.write('rutas/rutas0k-110k.xml')\n",
"step-3": "<mask token>\nrutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')\nrutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')\nrutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')\nrutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')\nrutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')\nrutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')\nroot1 = rutas0k_10k.getroot()\nroot2 = rutas10k_30k.getroot()\nroot3 = rutas30k_50k.getroot()\nroot4 = rutas50k_70k.getroot()\nroot5 = rutas70k_90k.getroot()\nroot6 = rutas90k_110k.getroot()\nrootsToAdd = [root2, root3, root4, root5, root6]\nfor root in rootsToAdd:\n for elem in root:\n root1.append(elem)\nrutas0k_10k.write('rutas/rutas0k-110k.xml')\n",
"step-4": "import xml.etree.ElementTree as ET\nrutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')\nrutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')\nrutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')\nrutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')\nrutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')\nrutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')\nroot1 = rutas0k_10k.getroot()\nroot2 = rutas10k_30k.getroot()\nroot3 = rutas30k_50k.getroot()\nroot4 = rutas50k_70k.getroot()\nroot5 = rutas70k_90k.getroot()\nroot6 = rutas90k_110k.getroot()\nrootsToAdd = [root2, root3, root4, root5, root6]\nfor root in rootsToAdd:\n for elem in root:\n root1.append(elem)\nrutas0k_10k.write('rutas/rutas0k-110k.xml')\n",
"step-5": "import xml.etree.ElementTree as ET\n\n#tree = ET.parse('rutas/rutas_prueba.xml')\n#treeToAdd = ET.parse('rutas/rutas_prueba_agregar.xml')\n\n#root = tree.getroot()\n\n#git rootToAdd = treeToAdd.getroot()\n\n#for child in root:\n# for test in child:\n# print(test.tag, test.attrib)\n\n\n#for elem in root.iter():\n# print(elem.tag)\n\n#prueba = [elem.tag for elem in root.iter()]\n#print(prueba)\n#print(ET.tostring(root, encoding='utf8').decode('utf8'))\n\n# for elem in rootToAdd:\n# root.append(elem)\n#\n# tree.write('rutas/probando_agregados.xml')\n\n#get the tree for each routes file\nrutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')\nrutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')\nrutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')\nrutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')\nrutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')\nrutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')\n\n#root for each routes tree\nroot1 = rutas0k_10k.getroot()\nroot2 = rutas10k_30k.getroot()\nroot3 = rutas30k_50k.getroot()\nroot4 = rutas50k_70k.getroot()\nroot5 = rutas70k_90k.getroot()\nroot6 = rutas90k_110k.getroot()\n\n#each root except first root\nrootsToAdd = [root2,root3,root4,root5,root6]\n\n#add each element to the first tree\nfor root in rootsToAdd:\n for elem in root:\n root1.append(elem)\n\n#write the tree to a new file\nrutas0k_10k.write('rutas/rutas0k-110k.xml')\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
from matplotlib import pyplot as plt
from matplotlib import colors
import numpy as np
class figure:
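    # Collects simulated trend data and CIM prediction results, and saves one
    # figure per diagnostic (predictions, rule weights, chosen rules, losses, config).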
def __init__(self, dire, dpi, span, data, CIM,
learn_loss=None, eval_loss=None, different_dir_app=True, reference_steps=0, reveal_trend=1):
self.dire = self.new_num_directory(dire)
self.app_dire = [self.make_num_directory("app", i) for i in range(data.app_num)]
self.trend_dire = [self.make_num_directory("trend", i) for i in range(len(data.trend_rule.w))]
self.dpi = dpi
self.span = span
self.app = data.apps
self.trend_rule = data.trend_rule
self.prediction = CIM.prediction
self.prediction_e = CIM.prediction_est_rule
self.prediction_only_ci = CIM.prediction_only_ci
self.predfail_app_num = CIM.predfail_app_num
self.cap_rule_num = CIM.cap_rule_num
self.add_rule_num = CIM.add_rule_num
self.lost_rule_num = CIM.lost_rule_num
self.useless_rule_num = CIM.useless_rule_num
self.merge_rule_num = CIM.merge_rule_num
self.learn_loss = learn_loss
self.eval_loss = eval_loss
self.diff_dir = different_dir_app
self.reference_steps = reference_steps
self.reveal_trend = reveal_trend
def new_num_directory(self, path):
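        # create a fresh numbered run directory (path_1, path_2, ...) and return its path with a trailing slash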
n = 1
while True:
if not os.path.exists(path + "_" + str(n)):
os.mkdir(path + "_" + str(n))
break
else:
n += 1
return path + "_" + str(n) + "/"
def make_num_directory(self, name, num):
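        # create a numbered subdirectory (e.g. app_0, trend_1) under the run directory and return its path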
os.mkdir(self.dire + "/" + name + "_" + str(num))
return self.dire + "/" + name + "_" + str(num) + "/"
def find_min_max(self, data_list, length, standarize_zero=True):
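        # return (min, max) across all series over the first length points; with standarize_zero the range always includes 0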
if standarize_zero:
min = 0
max = 0
else:
min = data_list[0][0]
max = data_list[0][0]
for data in data_list:
for j in range(length):
if j < len(data):
if data[j] < min:
min = data[j]
if data[j] > max:
max = data[j]
return min, max
def savefig_result(self, name):
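        # plot true trend vs. LSTM/CIM predictions (per-app figures when diff_dir, one combined figure otherwise)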
x = list(range(self.span))
if self.diff_dir:
            # color per trend rule (from the chosenRule plot)
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
for i, app in enumerate(self.app):
min, max = self.find_min_max([self.prediction[i], self.prediction_e[i]], self.span)
plt.figure(figsize=(len(x) / 10, 5.5))
                # (from the chosenRule plot)
for j in range(len(self.trend_rule.w)):
plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1, max * 1.1 + 0.1],
[min * 1.1 - 0.1, min * 1.1 - 0.1],
facecolor=cycle_tr[j], alpha=0.2,
label="Chosenrule:" + str(j))
for j in range(self.span):
plt.fill_between([j - 0.5, j + 0.5], [max*1.1+0.1, max*1.1+0.1], [min*1.1-0.1, min*1.1-0.1],
facecolor=cycle_tr[self.app[i].trend_idx[j]], alpha=0.2)
plt.plot(x, app.trend, label="trend", linestyle="dotted", color="black")
plt.plot(x[self.reference_steps:], self.prediction[i],
label="LSTM pred", linestyle="dotted", color="blue")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.prediction_e[i],
label="CIM pred", color="orange")
if self.learn_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:], self.learn_loss[i], alpha=0.3,
label="learn loss")
if self.eval_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:], self.eval_loss[i], alpha=0.3, marker="X",
label="eval loss")
plt.xlabel('season')
plt.ylabel('trend value')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.app_dire[i] + name + ".png", dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x)/10, 5.5))
            # color per app
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
for i, app in enumerate(self.app):
plt.plot(x, self.app[i].trend, color=cycle_app[i], label="trend (app:" + str(i) + ")", linestyle="dotted")
plt.plot(x[self.reference_steps:], self.prediction[i], color=cycle_app[i], label="pred (app:" + str(i) + ")")
if self.learn_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:], self.learn_loss[i], color=cycle_app[i], alpha=0.3,
label="learn loss (app:" + str(i) + ")")
if self.eval_loss is not None:
                    plt.scatter(x[self.reference_steps + self.reveal_trend:], self.eval_loss[i], color=cycle_app[i], alpha=0.3, marker="X",
                                label="eval loss (app:" + str(i) + ")")
plt.xlabel('season')
plt.ylabel('trend value')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def savefig_ruleweight(self, name):
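        # plot the per-feature weights of each trend rule over the seasons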
x = list(range(self.span))
if self.diff_dir:
            # color per feature
if len(self.trend_rule.w[0]["value"]) <= 10:
cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w[0]["value"]) <= 20:
cycle_ft = plt.cm.get_cmap('tab20').colors
else:
cycle_ft = list(colors.XKCD_COLORS.items())[:100]
for i in range(len(self.trend_rule.w)):
plt.figure(figsize=(len(x) / 10, 5.5))
                # for each feature
for j in range(len(self.trend_rule.w[i]["value"])):
plt.plot(x, self.trend_rule.w[i]["value"][j][:-1], color=cycle_ft[j], label="feature:" + str(j))
plt.xlabel('season')
plt.ylabel('weight of trend rule')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.trend_dire[i] + name + ".png", dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x)/10, 5.5))
            # color per trend rule
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
            # color per feature
if len(self.trend_rule.w[0]["value"]) <= 10:
cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w[0]["value"]) <= 20:
cycle_ft = plt.cm.get_cmap('tab20').colors
else:
cycle_ft = list(colors.XKCD_COLORS.items())[:100]
width = 0.8 / len(self.trend_rule.w[0]["value"])
            # for each trend rule
for i in range(len(self.trend_rule.w)):
bottom = np.array(- i * 2.0)
                # for each feature
for j in range(len(self.trend_rule.w[i]["value"])):
if i == 0:
                        plt.bar(x + np.array([width * float(j)] * len(x)), self.trend_rule.w[i]["value"][j][:-1],
color=cycle_ft[j], align='edge', bottom=bottom, width=width, label="feature:" + str(j))
else:
plt.bar(x + np.array([width * float(j)] * len(x)), self.trend_rule.w[i]["value"][j][:-1],
color=cycle_ft[j], align='edge', bottom=bottom, width=width)
plt.fill_between(list(range(self.span+1)), [- i * 2.0 + 1] * (len(x)+1), [- (i+1) * 2.0 + 1] * (len(x)+1),
facecolor=cycle_tr[i], alpha=0.2, label="trendrule:" + str(i))
plt.xlabel('season')
plt.ylabel('weight of trend rule')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def savefig_chosenrule(self, name):
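        # visualize which trend rule each app followed each season (combined view; the per-app view is in savefig_result)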
x = list(range(self.span))
if self.diff_dir:
            pass  # merged into savefig_result
else:
plt.figure(figsize=(len(x)/10, 5.5))
            # color per app
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
            # color per trend rule
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
            # for legend display only
for i in range(len(self.trend_rule.w)):
plt.scatter(x, np.array([0] * len(x)), color=cycle_tr[i], s=1, marker="D",
label="trendrule:" + str(i))
for id in range(len(self.app)):
colorArr = []
for i in self.app[id].trend_idx:
colorArr.append(cycle_tr[i])
plt.scatter(x, np.array([- id] * len(x)), color=cycle_app[id], s=150, label="app:" + str(id))
plt.scatter(x, np.array([- id] * len(x)), color="w", s=70)
plt.scatter(x, np.array([- id] * len(x)), color=colorArr, s=15, marker="D", alpha=0.5)
            plt.xlabel('season')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def savefig_compare_prediction(self, name):
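        # plot the absolute prediction error of each predictor over time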
x = list(range(self.span))
if self.diff_dir:
for i in range(len(self.app)):
plt.figure(figsize=(len(x) / 10, 5.5))
                # TODO: change as needed
plt.plot(x[self.reference_steps + self.reveal_trend:],
np.abs(np.array(self.prediction_only_ci[i]) - np.array(self.app[i].trend[self.reference_steps + self.reveal_trend:])),
label="only CI loss", linestyle="dotted", color="green")
plt.plot(x[self.reference_steps:],
np.abs(np.array(self.prediction[i]) - np.array(self.app[i].trend[self.reference_steps:])),
label="LSTM loss", linestyle="dotted", color="blue")
plt.plot(x[self.reference_steps + self.reveal_trend:],
np.abs(np.array(self.prediction_e[i]) - np.array(self.app[i].trend[self.reference_steps + self.reveal_trend:])),
label="CIM loss", color="orange")
plt.xlabel('season')
plt.ylabel('prediction loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.app_dire[i] + name + ".png", dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x)/10, 5.5))
# アプリごとの色
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
for id in range(len(self.app)):
plt.plot(x[self.reference_steps:], np.abs(np.array(self.prediction[id]) - np.array(self.app[id].trend[self.reference_steps:])),
color=cycle_app[id], label="classify loss (app:" + str(id) + ")", linestyle="dotted")
plt.plot(x[self.reference_steps + self.reveal_trend:], np.abs(np.array(self.prediction_e[id]) - np.array(self.app[id].trend[self.reference_steps + self.reveal_trend:])),
color=cycle_app[id], label="analyse loss (app:" + str(id) + ")")
plt.xlabel('season')
plt.ylabel('prediction loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def savefig_compare_prediction_ave(self, name):
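        # plot squared prediction error averaged over all apps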
x = list(range(self.span))
if self.diff_dir:
prediction = []
prediction_e = []
prediction_ci = []
            # compute the average over all apps
for j in range(self.span - self.reference_steps):
sum = 0
sum_e = 0
sum_ci = 0
for i in range(len(self.app)):
sum += (self.prediction[i][j] - self.app[i].trend[j + self.reference_steps])**2
if j < self.span - self.reference_steps - self.reveal_trend:
sum_e += (self.prediction_e[i][j] - self.app[i].trend[j + self.reference_steps + self.reveal_trend])**2
                        sum_ci += (self.prediction_only_ci[i][j] - self.app[i].trend[j + self.reference_steps + self.reveal_trend])**2
prediction.append(sum / len(self.app))
if j < self.span - self.reference_steps - self.reveal_trend:
prediction_e.append(sum_e / len(self.app))
prediction_ci.append(sum_ci / len(self.app))
plt.figure(figsize=(len(x) / 10, 5.5))
plt.xlabel('season')
plt.ylabel('prediction loss average')
            # TODO: change as needed
plt.plot(x[self.reference_steps + self.reveal_trend:], prediction_ci,
label="only CI loss", linestyle="dotted")
plt.plot(x[self.reference_steps:], prediction, label="LSTM loss", linestyle="dotted")
plt.plot(x[self.reference_steps + self.reveal_trend:], prediction_e, label="CIM loss")
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
def savefig_rule_num(self, name):
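        # plot per-season counts: failed predictions plus captured/added/lost/useless/merged rules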
x = list(range(self.span))
plt.figure(figsize=(len(x)/10, 5.5))
        plt.plot(x[self.reference_steps + self.reveal_trend:], self.predfail_app_num, label="prediction fail app")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.cap_rule_num, label="captured rule")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.add_rule_num, label="add rule")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.lost_rule_num, label="lost rule")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.useless_rule_num, label="useless rule")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.merge_rule_num, label="merge rule")
plt.xlabel('season')
plt.ylabel('number')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def save_config(self, name, cfg):
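        # dump the run configuration to a JSON file alongside the figures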
import json
setting = dict(
APP_NUM = cfg.APP_NUM,
SPAN = cfg.SPAN,
REVEAL_TREND = cfg.REVEAL_TREND,
FIRST_RULE_NUM=cfg.FIRST_RULE_NUM,
SHIFT_TREND_RULE = cfg.SHIFT_TREND_RULE,
APPEAR_RATE = cfg.APPEAR_RATE,
DISAPPEAR_RATE = cfg.DISAPPEAR_RATE,
EVALUATE_THRESHOLD_PRED_FAIL = cfg.EVALUATE_THRESHOLD_PRED_FAIL,
SAMPLING = cfg.SAMPLING,
EVALUATE_THRESHOLD_DELETE_RULE = cfg.EVALUATE_THRESHOLD_DELETE_RULE,
EVALUATE_THRESHOLD_ADD_RULE = cfg.EVALUATE_THRESHOLD_ADD_RULE,
EVALUATE_THRESHOLD_MERGE_RULE = cfg.EVALUATE_THRESHOLD_MERGE_RULE,
THRESHOLD_APPNUM = cfg.THRESHOLD_APPNUM,
TRY_NEWRULE_NUM = cfg.TRY_NEWRULE_NUM,
LSTM_REFERENCE_STEPS = cfg.LSTM_REFERENCE_STEPS,
LSTM_EPOCHS = cfg.LSTM_EPOCHS,
NN_EPOCHS = cfg.NN_EPOCHS,
DATATYPE = [dict(
name = feat["name"],
type = str(type(feat["data"]))
) for feat in cfg.DATATYPE],
FIRST_BIN = cfg.FIRST_BIN
)
fw = open(self.dire + name + '.json', 'w')
json.dump(setting, fw, indent=4)
return
|
normal
|
{
"blob_id": "dce6ef64cf1a758ed25e11f626ce31206d18f960",
"index": 8645,
"step-1": "<mask token>\n\n\nclass figure:\n <mask token>\n\n def new_num_directory(self, path):\n n = 1\n while True:\n if not os.path.exists(path + '_' + str(n)):\n os.mkdir(path + '_' + str(n))\n break\n else:\n n += 1\n return path + '_' + str(n) + '/'\n\n def make_num_directory(self, name, num):\n os.mkdir(self.dire + '/' + name + '_' + str(num))\n return self.dire + '/' + name + '_' + str(num) + '/'\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def savefig_rule_num(self, name):\n x = list(range(self.span))\n plt.figure(figsize=(len(x) / 10, 5.5))\n chart_num = 6\n width = 0.8 / chart_num\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n predfail_app_num, label='truth rule number')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n predfail_app_num, label='prediction fail app')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n cap_rule_num, label='captured rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n add_rule_num, label='add rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n lost_rule_num, label='lost rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n useless_rule_num, label='useless rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n merge_rule_num, label='merge rule')\n plt.xlabel('season')\n plt.ylabel('number')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass figure:\n\n def __init__(self, dire, dpi, span, data, CIM, learn_loss=None,\n eval_loss=None, different_dir_app=True, reference_steps=0,\n reveal_trend=1):\n self.dire = self.new_num_directory(dire)\n self.app_dire = [self.make_num_directory('app', i) for i in range(\n data.app_num)]\n self.trend_dire = [self.make_num_directory('trend', i) for i in\n range(len(data.trend_rule.w))]\n self.dpi = dpi\n self.span = span\n self.app = data.apps\n self.trend_rule = data.trend_rule\n self.prediction = CIM.prediction\n self.prediction_e = CIM.prediction_est_rule\n self.prediction_only_ci = CIM.prediction_only_ci\n self.predfail_app_num = CIM.predfail_app_num\n self.cap_rule_num = CIM.cap_rule_num\n self.add_rule_num = CIM.add_rule_num\n self.lost_rule_num = CIM.lost_rule_num\n self.useless_rule_num = CIM.useless_rule_num\n self.merge_rule_num = CIM.merge_rule_num\n self.learn_loss = learn_loss\n self.eval_loss = eval_loss\n self.diff_dir = different_dir_app\n self.reference_steps = reference_steps\n self.reveal_trend = reveal_trend\n\n def new_num_directory(self, path):\n n = 1\n while True:\n if not os.path.exists(path + '_' + str(n)):\n os.mkdir(path + '_' + str(n))\n break\n else:\n n += 1\n return path + '_' + str(n) + '/'\n\n def make_num_directory(self, name, num):\n os.mkdir(self.dire + '/' + name + '_' + str(num))\n return self.dire + '/' + name + '_' + str(num) + '/'\n\n def find_min_max(self, data_list, length, standarize_zero=True):\n if standarize_zero:\n min = 0\n max = 0\n else:\n min = data_list[0][0]\n max = data_list[0][0]\n for data in data_list:\n for j in range(length):\n if j < len(data):\n if data[j] < min:\n min = data[j]\n if data[j] > max:\n max = data[j]\n return min, max\n\n def savefig_result(self, name):\n x = list(range(self.span))\n if self.diff_dir:\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n for i, app in enumerate(self.app):\n min, max = self.find_min_max([self.prediction[i], self.\n prediction_e[i]], self.span)\n plt.figure(figsize=(len(x) / 10, 5.5))\n for j in range(len(self.trend_rule.w)):\n plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1, \n max * 1.1 + 0.1], [min * 1.1 - 0.1, min * 1.1 - 0.1\n ], facecolor=cycle_tr[j], alpha=0.2, label=\n 'Chosenrule:' + str(j))\n for j in range(self.span):\n plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1, \n max * 1.1 + 0.1], [min * 1.1 - 0.1, min * 1.1 - 0.1\n ], facecolor=cycle_tr[self.app[i].trend_idx[j]],\n alpha=0.2)\n plt.plot(x, app.trend, label='trend', linestyle='dotted',\n color='black')\n plt.plot(x[self.reference_steps:], self.prediction[i],\n label='LSTM pred', linestyle='dotted', color='blue')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self\n .prediction_e[i], label='CIM pred', color='orange')\n if self.learn_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:\n ], self.learn_loss[i], alpha=0.3, label='learn loss')\n if self.eval_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:\n ], self.eval_loss[i], alpha=0.3, marker='X', label=\n 'eval loss')\n plt.xlabel('season')\n plt.ylabel('trend value')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.app_dire[i] + name + '.png', dpi=self.dpi)\n plt.clf()\n else:\n plt.figure(figsize=(len(x) / 10, 5.5))\n 
if len(self.app) <= 10:\n cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.app) <= 20:\n cycle_app = plt.cm.get_cmap('tab20').colors\n else:\n cycle_app = list(colors.XKCD_COLORS.items())[:100]\n for i, app in enumerate(self.app):\n plt.plot(x, self.app[i].trend, color=cycle_app[i], label=\n 'trend (app:' + str(i) + ')', linestyle='dotted')\n plt.plot(x[self.reference_steps:], self.prediction[i],\n color=cycle_app[i], label='pred (app:' + str(i) + ')')\n if self.learn_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:\n ], self.learn_loss[i], color=cycle_app[i], alpha=\n 0.3, label='learn loss (app:' + str(i) + ')')\n if self.eval_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:\n ], self.eval_loss[i], color=cycle_app[i], alpha=0.3,\n marker='X', label='evalu loss (app:' + str(i) + ')')\n plt.xlabel('season')\n plt.ylabel('trend value')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n\n def savefig_ruleweight(self, name):\n x = list(range(self.span))\n if self.diff_dir:\n if len(self.trend_rule.w[0]['value']) <= 10:\n cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w[0]['value']) <= 20:\n cycle_ft = plt.cm.get_cmap('tab20').colors\n else:\n cycle_ft = list(colors.XKCD_COLORS.items())[:100]\n for i in range(len(self.trend_rule.w)):\n plt.figure(figsize=(len(x) / 10, 5.5))\n for j in range(len(self.trend_rule.w[i]['value'])):\n plt.plot(x, self.trend_rule.w[i]['value'][j][:-1],\n color=cycle_ft[j], label='feature:' + str(j))\n plt.xlabel('season')\n plt.ylabel('weight of trend rule')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.trend_dire[i] + name + '.png', dpi=self.dpi)\n plt.clf()\n else:\n plt.figure(figsize=(len(x) / 10, 5.5))\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n if len(self.trend_rule.w[0]['value']) <= 10:\n cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w[0]['value']) <= 20:\n cycle_ft = plt.cm.get_cmap('tab20').colors\n else:\n cycle_ft = list(colors.XKCD_COLORS.items())[:100]\n width = 0.8 / len(self.trend_rule.w[0]['value'])\n for i in range(len(self.trend_rule.w)):\n bottom = np.array(-i * 2.0)\n for j in range(len(self.trend_rule.w[i]['value'])):\n if i == 0:\n plt.bar(x + np.array([width * float(j)] * len(x)),\n self.trend_rule.w[i][j][:-1], color=cycle_ft[j],\n align='edge', bottom=bottom, width=width, label\n ='feature:' + str(j))\n else:\n plt.bar(x + np.array([width * float(j)] * len(x)),\n self.trend_rule.w[i]['value'][j][:-1], color=\n cycle_ft[j], align='edge', bottom=bottom, width\n =width)\n plt.fill_between(list(range(self.span + 1)), [-i * 2.0 + 1] *\n (len(x) + 1), [-(i + 1) * 2.0 + 1] * (len(x) + 1),\n facecolor=cycle_tr[i], alpha=0.2, label='trendrule:' +\n str(i))\n plt.xlabel('season')\n plt.ylabel('weight of trend rule')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n\n def savefig_chosenrule(self, name):\n x = list(range(self.span))\n if self.diff_dir:\n pass\n else:\n plt.figure(figsize=(len(x) / 10, 
5.5))\n if len(self.app) <= 10:\n cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.app) <= 20:\n cycle_app = plt.cm.get_cmap('tab20').colors\n else:\n cycle_app = list(colors.XKCD_COLORS.items())[:100]\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n for i in range(len(self.trend_rule.w)):\n plt.scatter(x, np.array([0] * len(x)), color=cycle_tr[i], s\n =1, marker='D', label='trendrule:' + str(i))\n for id in range(len(self.app)):\n colorArr = []\n for i in self.app[id].trend_idx:\n colorArr.append(cycle_tr[i])\n plt.scatter(x, np.array([-id] * len(x)), color=cycle_app[id\n ], s=150, label='app:' + str(id))\n plt.scatter(x, np.array([-id] * len(x)), color='w', s=70)\n plt.scatter(x, np.array([-id] * len(x)), color=colorArr, s=\n 15, marker='D', alpha=0.5)\n plt.xlabel('シーズン')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n <mask token>\n\n def savefig_compare_prediction_ave(self, name):\n x = list(range(self.span))\n if self.diff_dir:\n prediction = []\n prediction_e = []\n prediction_ci = []\n for j in range(self.span - self.reference_steps):\n sum = 0\n sum_e = 0\n sum_ci = 0\n for i in range(len(self.app)):\n sum += (self.prediction[i][j] - self.app[i].trend[j +\n self.reference_steps]) ** 2\n if (j < self.span - self.reference_steps - self.\n reveal_trend):\n sum_e += (self.prediction_e[i][j] - self.app[i].\n trend[j + self.reference_steps + self.reveal_trend]\n ) ** 2\n sum_ci += (self.prediction_e[i][j] - self.app[i].\n trend[j + self.reference_steps + self.reveal_trend]\n ) ** 2\n prediction.append(sum / len(self.app))\n if j < self.span - self.reference_steps - self.reveal_trend:\n prediction_e.append(sum_e / len(self.app))\n prediction_ci.append(sum_ci / len(self.app))\n plt.figure(figsize=(len(x) / 10, 5.5))\n plt.xlabel('season')\n plt.ylabel('prediction loss average')\n plt.plot(x[self.reference_steps + self.reveal_trend:],\n prediction_ci, label='only CI loss', linestyle='dotted')\n plt.plot(x[self.reference_steps:], prediction, label=\n 'LSTM loss', linestyle='dotted')\n plt.plot(x[self.reference_steps + self.reveal_trend:],\n prediction_e, label='CIM loss')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n\n def savefig_rule_num(self, name):\n x = list(range(self.span))\n plt.figure(figsize=(len(x) / 10, 5.5))\n chart_num = 6\n width = 0.8 / chart_num\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n predfail_app_num, label='truth rule number')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n predfail_app_num, label='prediction fail app')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n cap_rule_num, label='captured rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n add_rule_num, label='add rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n lost_rule_num, label='lost rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n useless_rule_num, label='useless rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n merge_rule_num, label='merge rule')\n plt.xlabel('season')\n plt.ylabel('number')\n 
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n\n def save_config(self, name, cfg):\n import json\n setting = dict(APP_NUM=cfg.APP_NUM, SPAN=cfg.SPAN, REVEAL_TREND=cfg\n .REVEAL_TREND, FIRST_RULE_NUM=cfg.FIRST_RULE_NUM,\n SHIFT_TREND_RULE=cfg.SHIFT_TREND_RULE, APPEAR_RATE=cfg.\n APPEAR_RATE, DISAPPEAR_RATE=cfg.DISAPPEAR_RATE,\n EVALUATE_THRESHOLD_PRED_FAIL=cfg.EVALUATE_THRESHOLD_PRED_FAIL,\n SAMPLING=cfg.SAMPLING, EVALUATE_THRESHOLD_DELETE_RULE=cfg.\n EVALUATE_THRESHOLD_DELETE_RULE, EVALUATE_THRESHOLD_ADD_RULE=cfg\n .EVALUATE_THRESHOLD_ADD_RULE, EVALUATE_THRESHOLD_MERGE_RULE=cfg\n .EVALUATE_THRESHOLD_MERGE_RULE, THRESHOLD_APPNUM=cfg.\n THRESHOLD_APPNUM, TRY_NEWRULE_NUM=cfg.TRY_NEWRULE_NUM,\n LSTM_REFERENCE_STEPS=cfg.LSTM_REFERENCE_STEPS, LSTM_EPOCHS=cfg.\n LSTM_EPOCHS, NN_EPOCHS=cfg.NN_EPOCHS, DATATYPE=[dict(name=feat[\n 'name'], type=str(type(feat['data']))) for feat in cfg.DATATYPE\n ], FIRST_BIN=cfg.FIRST_BIN)\n fw = open(self.dire + name + '.json', 'w')\n json.dump(setting, fw, indent=4)\n return\n",
"step-3": "<mask token>\n\n\nclass figure:\n\n def __init__(self, dire, dpi, span, data, CIM, learn_loss=None,\n eval_loss=None, different_dir_app=True, reference_steps=0,\n reveal_trend=1):\n self.dire = self.new_num_directory(dire)\n self.app_dire = [self.make_num_directory('app', i) for i in range(\n data.app_num)]\n self.trend_dire = [self.make_num_directory('trend', i) for i in\n range(len(data.trend_rule.w))]\n self.dpi = dpi\n self.span = span\n self.app = data.apps\n self.trend_rule = data.trend_rule\n self.prediction = CIM.prediction\n self.prediction_e = CIM.prediction_est_rule\n self.prediction_only_ci = CIM.prediction_only_ci\n self.predfail_app_num = CIM.predfail_app_num\n self.cap_rule_num = CIM.cap_rule_num\n self.add_rule_num = CIM.add_rule_num\n self.lost_rule_num = CIM.lost_rule_num\n self.useless_rule_num = CIM.useless_rule_num\n self.merge_rule_num = CIM.merge_rule_num\n self.learn_loss = learn_loss\n self.eval_loss = eval_loss\n self.diff_dir = different_dir_app\n self.reference_steps = reference_steps\n self.reveal_trend = reveal_trend\n\n def new_num_directory(self, path):\n n = 1\n while True:\n if not os.path.exists(path + '_' + str(n)):\n os.mkdir(path + '_' + str(n))\n break\n else:\n n += 1\n return path + '_' + str(n) + '/'\n\n def make_num_directory(self, name, num):\n os.mkdir(self.dire + '/' + name + '_' + str(num))\n return self.dire + '/' + name + '_' + str(num) + '/'\n\n def find_min_max(self, data_list, length, standarize_zero=True):\n if standarize_zero:\n min = 0\n max = 0\n else:\n min = data_list[0][0]\n max = data_list[0][0]\n for data in data_list:\n for j in range(length):\n if j < len(data):\n if data[j] < min:\n min = data[j]\n if data[j] > max:\n max = data[j]\n return min, max\n\n def savefig_result(self, name):\n x = list(range(self.span))\n if self.diff_dir:\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n for i, app in enumerate(self.app):\n min, max = self.find_min_max([self.prediction[i], self.\n prediction_e[i]], self.span)\n plt.figure(figsize=(len(x) / 10, 5.5))\n for j in range(len(self.trend_rule.w)):\n plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1, \n max * 1.1 + 0.1], [min * 1.1 - 0.1, min * 1.1 - 0.1\n ], facecolor=cycle_tr[j], alpha=0.2, label=\n 'Chosenrule:' + str(j))\n for j in range(self.span):\n plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1, \n max * 1.1 + 0.1], [min * 1.1 - 0.1, min * 1.1 - 0.1\n ], facecolor=cycle_tr[self.app[i].trend_idx[j]],\n alpha=0.2)\n plt.plot(x, app.trend, label='trend', linestyle='dotted',\n color='black')\n plt.plot(x[self.reference_steps:], self.prediction[i],\n label='LSTM pred', linestyle='dotted', color='blue')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self\n .prediction_e[i], label='CIM pred', color='orange')\n if self.learn_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:\n ], self.learn_loss[i], alpha=0.3, label='learn loss')\n if self.eval_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:\n ], self.eval_loss[i], alpha=0.3, marker='X', label=\n 'eval loss')\n plt.xlabel('season')\n plt.ylabel('trend value')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.app_dire[i] + name + '.png', dpi=self.dpi)\n plt.clf()\n else:\n plt.figure(figsize=(len(x) / 10, 5.5))\n 
if len(self.app) <= 10:\n cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.app) <= 20:\n cycle_app = plt.cm.get_cmap('tab20').colors\n else:\n cycle_app = list(colors.XKCD_COLORS.items())[:100]\n for i, app in enumerate(self.app):\n plt.plot(x, self.app[i].trend, color=cycle_app[i], label=\n 'trend (app:' + str(i) + ')', linestyle='dotted')\n plt.plot(x[self.reference_steps:], self.prediction[i],\n color=cycle_app[i], label='pred (app:' + str(i) + ')')\n if self.learn_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:\n ], self.learn_loss[i], color=cycle_app[i], alpha=\n 0.3, label='learn loss (app:' + str(i) + ')')\n if self.eval_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:\n ], self.eval_loss[i], color=cycle_app[i], alpha=0.3,\n marker='X', label='evalu loss (app:' + str(i) + ')')\n plt.xlabel('season')\n plt.ylabel('trend value')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n\n def savefig_ruleweight(self, name):\n x = list(range(self.span))\n if self.diff_dir:\n if len(self.trend_rule.w[0]['value']) <= 10:\n cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w[0]['value']) <= 20:\n cycle_ft = plt.cm.get_cmap('tab20').colors\n else:\n cycle_ft = list(colors.XKCD_COLORS.items())[:100]\n for i in range(len(self.trend_rule.w)):\n plt.figure(figsize=(len(x) / 10, 5.5))\n for j in range(len(self.trend_rule.w[i]['value'])):\n plt.plot(x, self.trend_rule.w[i]['value'][j][:-1],\n color=cycle_ft[j], label='feature:' + str(j))\n plt.xlabel('season')\n plt.ylabel('weight of trend rule')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.trend_dire[i] + name + '.png', dpi=self.dpi)\n plt.clf()\n else:\n plt.figure(figsize=(len(x) / 10, 5.5))\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n if len(self.trend_rule.w[0]['value']) <= 10:\n cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w[0]['value']) <= 20:\n cycle_ft = plt.cm.get_cmap('tab20').colors\n else:\n cycle_ft = list(colors.XKCD_COLORS.items())[:100]\n width = 0.8 / len(self.trend_rule.w[0]['value'])\n for i in range(len(self.trend_rule.w)):\n bottom = np.array(-i * 2.0)\n for j in range(len(self.trend_rule.w[i]['value'])):\n if i == 0:\n plt.bar(x + np.array([width * float(j)] * len(x)),\n self.trend_rule.w[i][j][:-1], color=cycle_ft[j],\n align='edge', bottom=bottom, width=width, label\n ='feature:' + str(j))\n else:\n plt.bar(x + np.array([width * float(j)] * len(x)),\n self.trend_rule.w[i]['value'][j][:-1], color=\n cycle_ft[j], align='edge', bottom=bottom, width\n =width)\n plt.fill_between(list(range(self.span + 1)), [-i * 2.0 + 1] *\n (len(x) + 1), [-(i + 1) * 2.0 + 1] * (len(x) + 1),\n facecolor=cycle_tr[i], alpha=0.2, label='trendrule:' +\n str(i))\n plt.xlabel('season')\n plt.ylabel('weight of trend rule')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n\n def savefig_chosenrule(self, name):\n x = list(range(self.span))\n if self.diff_dir:\n pass\n else:\n plt.figure(figsize=(len(x) / 10, 
5.5))\n if len(self.app) <= 10:\n cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.app) <= 20:\n cycle_app = plt.cm.get_cmap('tab20').colors\n else:\n cycle_app = list(colors.XKCD_COLORS.items())[:100]\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n for i in range(len(self.trend_rule.w)):\n plt.scatter(x, np.array([0] * len(x)), color=cycle_tr[i], s\n =1, marker='D', label='trendrule:' + str(i))\n for id in range(len(self.app)):\n colorArr = []\n for i in self.app[id].trend_idx:\n colorArr.append(cycle_tr[i])\n plt.scatter(x, np.array([-id] * len(x)), color=cycle_app[id\n ], s=150, label='app:' + str(id))\n plt.scatter(x, np.array([-id] * len(x)), color='w', s=70)\n plt.scatter(x, np.array([-id] * len(x)), color=colorArr, s=\n 15, marker='D', alpha=0.5)\n plt.xlabel('シーズン')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n\n def savefig_compare_prediction(self, name):\n x = list(range(self.span))\n if self.diff_dir:\n for i in range(len(self.app)):\n plt.figure(figsize=(len(x) / 10, 5.5))\n plt.plot(x[self.reference_steps + self.reveal_trend:], np.\n abs(np.array(self.prediction_only_ci[i]) - np.array(\n self.app[i].trend[self.reference_steps + self.\n reveal_trend:])), label='only CI loss', linestyle=\n 'dotted', color='green')\n plt.plot(x[self.reference_steps:], np.abs(np.array(self.\n prediction[i]) - np.array(self.app[i].trend[self.\n reference_steps:])), label='LSTM loss', linestyle=\n 'dotted', color='blue')\n plt.plot(x[self.reference_steps + self.reveal_trend:], np.\n abs(np.array(self.prediction_e[i]) - np.array(self.app[\n i].trend[self.reference_steps + self.reveal_trend:])),\n label='CIM loss', color='orange')\n plt.xlabel('season')\n plt.ylabel('prediction loss')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.app_dire[i] + name + '.png', dpi=self.dpi)\n plt.clf()\n else:\n plt.figure(figsize=(len(x) / 10, 5.5))\n if len(self.app) <= 10:\n cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.app) <= 20:\n cycle_app = plt.cm.get_cmap('tab20').colors\n else:\n cycle_app = list(colors.XKCD_COLORS.items())[:100]\n for id in range(len(self.app)):\n plt.plot(x[self.reference_steps:], np.abs(np.array(self.\n prediction[id]) - np.array(self.app[id].trend[self.\n reference_steps:])), color=cycle_app[id], label=\n 'classify loss (app:' + str(id) + ')', linestyle='dotted')\n plt.plot(x[self.reference_steps + self.reveal_trend:], np.\n abs(np.array(self.prediction_e[id]) - np.array(self.app\n [id].trend[self.reference_steps + self.reveal_trend:])),\n color=cycle_app[id], label='analyse loss (app:' + str(\n id) + ')')\n plt.xlabel('season')\n plt.ylabel('prediction loss')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n\n def savefig_compare_prediction_ave(self, name):\n x = list(range(self.span))\n if self.diff_dir:\n prediction = []\n prediction_e = []\n prediction_ci = []\n for j in range(self.span - self.reference_steps):\n sum = 0\n sum_e = 0\n sum_ci = 0\n for i in range(len(self.app)):\n sum += (self.prediction[i][j] - self.app[i].trend[j +\n 
self.reference_steps]) ** 2\n if (j < self.span - self.reference_steps - self.\n reveal_trend):\n sum_e += (self.prediction_e[i][j] - self.app[i].\n trend[j + self.reference_steps + self.reveal_trend]\n ) ** 2\n sum_ci += (self.prediction_e[i][j] - self.app[i].\n trend[j + self.reference_steps + self.reveal_trend]\n ) ** 2\n prediction.append(sum / len(self.app))\n if j < self.span - self.reference_steps - self.reveal_trend:\n prediction_e.append(sum_e / len(self.app))\n prediction_ci.append(sum_ci / len(self.app))\n plt.figure(figsize=(len(x) / 10, 5.5))\n plt.xlabel('season')\n plt.ylabel('prediction loss average')\n plt.plot(x[self.reference_steps + self.reveal_trend:],\n prediction_ci, label='only CI loss', linestyle='dotted')\n plt.plot(x[self.reference_steps:], prediction, label=\n 'LSTM loss', linestyle='dotted')\n plt.plot(x[self.reference_steps + self.reveal_trend:],\n prediction_e, label='CIM loss')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n\n def savefig_rule_num(self, name):\n x = list(range(self.span))\n plt.figure(figsize=(len(x) / 10, 5.5))\n chart_num = 6\n width = 0.8 / chart_num\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n predfail_app_num, label='truth rule number')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n predfail_app_num, label='prediction fail app')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n cap_rule_num, label='captured rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n add_rule_num, label='add rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n lost_rule_num, label='lost rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n useless_rule_num, label='useless rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n merge_rule_num, label='merge rule')\n plt.xlabel('season')\n plt.ylabel('number')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n\n def save_config(self, name, cfg):\n import json\n setting = dict(APP_NUM=cfg.APP_NUM, SPAN=cfg.SPAN, REVEAL_TREND=cfg\n .REVEAL_TREND, FIRST_RULE_NUM=cfg.FIRST_RULE_NUM,\n SHIFT_TREND_RULE=cfg.SHIFT_TREND_RULE, APPEAR_RATE=cfg.\n APPEAR_RATE, DISAPPEAR_RATE=cfg.DISAPPEAR_RATE,\n EVALUATE_THRESHOLD_PRED_FAIL=cfg.EVALUATE_THRESHOLD_PRED_FAIL,\n SAMPLING=cfg.SAMPLING, EVALUATE_THRESHOLD_DELETE_RULE=cfg.\n EVALUATE_THRESHOLD_DELETE_RULE, EVALUATE_THRESHOLD_ADD_RULE=cfg\n .EVALUATE_THRESHOLD_ADD_RULE, EVALUATE_THRESHOLD_MERGE_RULE=cfg\n .EVALUATE_THRESHOLD_MERGE_RULE, THRESHOLD_APPNUM=cfg.\n THRESHOLD_APPNUM, TRY_NEWRULE_NUM=cfg.TRY_NEWRULE_NUM,\n LSTM_REFERENCE_STEPS=cfg.LSTM_REFERENCE_STEPS, LSTM_EPOCHS=cfg.\n LSTM_EPOCHS, NN_EPOCHS=cfg.NN_EPOCHS, DATATYPE=[dict(name=feat[\n 'name'], type=str(type(feat['data']))) for feat in cfg.DATATYPE\n ], FIRST_BIN=cfg.FIRST_BIN)\n fw = open(self.dire + name + '.json', 'w')\n json.dump(setting, fw, indent=4)\n return\n",
"step-4": "import os\nfrom matplotlib import pyplot as plt\nfrom matplotlib import colors\nimport numpy as np\n\n\nclass figure:\n\n def __init__(self, dire, dpi, span, data, CIM, learn_loss=None,\n eval_loss=None, different_dir_app=True, reference_steps=0,\n reveal_trend=1):\n self.dire = self.new_num_directory(dire)\n self.app_dire = [self.make_num_directory('app', i) for i in range(\n data.app_num)]\n self.trend_dire = [self.make_num_directory('trend', i) for i in\n range(len(data.trend_rule.w))]\n self.dpi = dpi\n self.span = span\n self.app = data.apps\n self.trend_rule = data.trend_rule\n self.prediction = CIM.prediction\n self.prediction_e = CIM.prediction_est_rule\n self.prediction_only_ci = CIM.prediction_only_ci\n self.predfail_app_num = CIM.predfail_app_num\n self.cap_rule_num = CIM.cap_rule_num\n self.add_rule_num = CIM.add_rule_num\n self.lost_rule_num = CIM.lost_rule_num\n self.useless_rule_num = CIM.useless_rule_num\n self.merge_rule_num = CIM.merge_rule_num\n self.learn_loss = learn_loss\n self.eval_loss = eval_loss\n self.diff_dir = different_dir_app\n self.reference_steps = reference_steps\n self.reveal_trend = reveal_trend\n\n def new_num_directory(self, path):\n n = 1\n while True:\n if not os.path.exists(path + '_' + str(n)):\n os.mkdir(path + '_' + str(n))\n break\n else:\n n += 1\n return path + '_' + str(n) + '/'\n\n def make_num_directory(self, name, num):\n os.mkdir(self.dire + '/' + name + '_' + str(num))\n return self.dire + '/' + name + '_' + str(num) + '/'\n\n def find_min_max(self, data_list, length, standarize_zero=True):\n if standarize_zero:\n min = 0\n max = 0\n else:\n min = data_list[0][0]\n max = data_list[0][0]\n for data in data_list:\n for j in range(length):\n if j < len(data):\n if data[j] < min:\n min = data[j]\n if data[j] > max:\n max = data[j]\n return min, max\n\n def savefig_result(self, name):\n x = list(range(self.span))\n if self.diff_dir:\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n for i, app in enumerate(self.app):\n min, max = self.find_min_max([self.prediction[i], self.\n prediction_e[i]], self.span)\n plt.figure(figsize=(len(x) / 10, 5.5))\n for j in range(len(self.trend_rule.w)):\n plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1, \n max * 1.1 + 0.1], [min * 1.1 - 0.1, min * 1.1 - 0.1\n ], facecolor=cycle_tr[j], alpha=0.2, label=\n 'Chosenrule:' + str(j))\n for j in range(self.span):\n plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1, \n max * 1.1 + 0.1], [min * 1.1 - 0.1, min * 1.1 - 0.1\n ], facecolor=cycle_tr[self.app[i].trend_idx[j]],\n alpha=0.2)\n plt.plot(x, app.trend, label='trend', linestyle='dotted',\n color='black')\n plt.plot(x[self.reference_steps:], self.prediction[i],\n label='LSTM pred', linestyle='dotted', color='blue')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self\n .prediction_e[i], label='CIM pred', color='orange')\n if self.learn_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:\n ], self.learn_loss[i], alpha=0.3, label='learn loss')\n if self.eval_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:\n ], self.eval_loss[i], alpha=0.3, marker='X', label=\n 'eval loss')\n plt.xlabel('season')\n plt.ylabel('trend value')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.app_dire[i] + name 
+ '.png', dpi=self.dpi)\n plt.clf()\n else:\n plt.figure(figsize=(len(x) / 10, 5.5))\n if len(self.app) <= 10:\n cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.app) <= 20:\n cycle_app = plt.cm.get_cmap('tab20').colors\n else:\n cycle_app = list(colors.XKCD_COLORS.items())[:100]\n for i, app in enumerate(self.app):\n plt.plot(x, self.app[i].trend, color=cycle_app[i], label=\n 'trend (app:' + str(i) + ')', linestyle='dotted')\n plt.plot(x[self.reference_steps:], self.prediction[i],\n color=cycle_app[i], label='pred (app:' + str(i) + ')')\n if self.learn_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:\n ], self.learn_loss[i], color=cycle_app[i], alpha=\n 0.3, label='learn loss (app:' + str(i) + ')')\n if self.eval_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:\n ], self.eval_loss[i], color=cycle_app[i], alpha=0.3,\n marker='X', label='evalu loss (app:' + str(i) + ')')\n plt.xlabel('season')\n plt.ylabel('trend value')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n\n def savefig_ruleweight(self, name):\n x = list(range(self.span))\n if self.diff_dir:\n if len(self.trend_rule.w[0]['value']) <= 10:\n cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w[0]['value']) <= 20:\n cycle_ft = plt.cm.get_cmap('tab20').colors\n else:\n cycle_ft = list(colors.XKCD_COLORS.items())[:100]\n for i in range(len(self.trend_rule.w)):\n plt.figure(figsize=(len(x) / 10, 5.5))\n for j in range(len(self.trend_rule.w[i]['value'])):\n plt.plot(x, self.trend_rule.w[i]['value'][j][:-1],\n color=cycle_ft[j], label='feature:' + str(j))\n plt.xlabel('season')\n plt.ylabel('weight of trend rule')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.trend_dire[i] + name + '.png', dpi=self.dpi)\n plt.clf()\n else:\n plt.figure(figsize=(len(x) / 10, 5.5))\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n if len(self.trend_rule.w[0]['value']) <= 10:\n cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w[0]['value']) <= 20:\n cycle_ft = plt.cm.get_cmap('tab20').colors\n else:\n cycle_ft = list(colors.XKCD_COLORS.items())[:100]\n width = 0.8 / len(self.trend_rule.w[0]['value'])\n for i in range(len(self.trend_rule.w)):\n bottom = np.array(-i * 2.0)\n for j in range(len(self.trend_rule.w[i]['value'])):\n if i == 0:\n plt.bar(x + np.array([width * float(j)] * len(x)),\n self.trend_rule.w[i][j][:-1], color=cycle_ft[j],\n align='edge', bottom=bottom, width=width, label\n ='feature:' + str(j))\n else:\n plt.bar(x + np.array([width * float(j)] * len(x)),\n self.trend_rule.w[i]['value'][j][:-1], color=\n cycle_ft[j], align='edge', bottom=bottom, width\n =width)\n plt.fill_between(list(range(self.span + 1)), [-i * 2.0 + 1] *\n (len(x) + 1), [-(i + 1) * 2.0 + 1] * (len(x) + 1),\n facecolor=cycle_tr[i], alpha=0.2, label='trendrule:' +\n str(i))\n plt.xlabel('season')\n plt.ylabel('weight of trend rule')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n\n def savefig_chosenrule(self, name):\n x = 
list(range(self.span))\n if self.diff_dir:\n pass\n else:\n plt.figure(figsize=(len(x) / 10, 5.5))\n if len(self.app) <= 10:\n cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.app) <= 20:\n cycle_app = plt.cm.get_cmap('tab20').colors\n else:\n cycle_app = list(colors.XKCD_COLORS.items())[:100]\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n for i in range(len(self.trend_rule.w)):\n plt.scatter(x, np.array([0] * len(x)), color=cycle_tr[i], s\n =1, marker='D', label='trendrule:' + str(i))\n for id in range(len(self.app)):\n colorArr = []\n for i in self.app[id].trend_idx:\n colorArr.append(cycle_tr[i])\n plt.scatter(x, np.array([-id] * len(x)), color=cycle_app[id\n ], s=150, label='app:' + str(id))\n plt.scatter(x, np.array([-id] * len(x)), color='w', s=70)\n plt.scatter(x, np.array([-id] * len(x)), color=colorArr, s=\n 15, marker='D', alpha=0.5)\n plt.xlabel('シーズン')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n\n def savefig_compare_prediction(self, name):\n x = list(range(self.span))\n if self.diff_dir:\n for i in range(len(self.app)):\n plt.figure(figsize=(len(x) / 10, 5.5))\n plt.plot(x[self.reference_steps + self.reveal_trend:], np.\n abs(np.array(self.prediction_only_ci[i]) - np.array(\n self.app[i].trend[self.reference_steps + self.\n reveal_trend:])), label='only CI loss', linestyle=\n 'dotted', color='green')\n plt.plot(x[self.reference_steps:], np.abs(np.array(self.\n prediction[i]) - np.array(self.app[i].trend[self.\n reference_steps:])), label='LSTM loss', linestyle=\n 'dotted', color='blue')\n plt.plot(x[self.reference_steps + self.reveal_trend:], np.\n abs(np.array(self.prediction_e[i]) - np.array(self.app[\n i].trend[self.reference_steps + self.reveal_trend:])),\n label='CIM loss', color='orange')\n plt.xlabel('season')\n plt.ylabel('prediction loss')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.app_dire[i] + name + '.png', dpi=self.dpi)\n plt.clf()\n else:\n plt.figure(figsize=(len(x) / 10, 5.5))\n if len(self.app) <= 10:\n cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.app) <= 20:\n cycle_app = plt.cm.get_cmap('tab20').colors\n else:\n cycle_app = list(colors.XKCD_COLORS.items())[:100]\n for id in range(len(self.app)):\n plt.plot(x[self.reference_steps:], np.abs(np.array(self.\n prediction[id]) - np.array(self.app[id].trend[self.\n reference_steps:])), color=cycle_app[id], label=\n 'classify loss (app:' + str(id) + ')', linestyle='dotted')\n plt.plot(x[self.reference_steps + self.reveal_trend:], np.\n abs(np.array(self.prediction_e[id]) - np.array(self.app\n [id].trend[self.reference_steps + self.reveal_trend:])),\n color=cycle_app[id], label='analyse loss (app:' + str(\n id) + ')')\n plt.xlabel('season')\n plt.ylabel('prediction loss')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n\n def savefig_compare_prediction_ave(self, name):\n x = list(range(self.span))\n if self.diff_dir:\n prediction = []\n prediction_e = []\n prediction_ci = []\n for j in range(self.span - self.reference_steps):\n sum = 0\n sum_e = 0\n sum_ci = 0\n 
for i in range(len(self.app)):\n sum += (self.prediction[i][j] - self.app[i].trend[j +\n self.reference_steps]) ** 2\n if (j < self.span - self.reference_steps - self.\n reveal_trend):\n sum_e += (self.prediction_e[i][j] - self.app[i].\n trend[j + self.reference_steps + self.reveal_trend]\n ) ** 2\n sum_ci += (self.prediction_e[i][j] - self.app[i].\n trend[j + self.reference_steps + self.reveal_trend]\n ) ** 2\n prediction.append(sum / len(self.app))\n if j < self.span - self.reference_steps - self.reveal_trend:\n prediction_e.append(sum_e / len(self.app))\n prediction_ci.append(sum_ci / len(self.app))\n plt.figure(figsize=(len(x) / 10, 5.5))\n plt.xlabel('season')\n plt.ylabel('prediction loss average')\n plt.plot(x[self.reference_steps + self.reveal_trend:],\n prediction_ci, label='only CI loss', linestyle='dotted')\n plt.plot(x[self.reference_steps:], prediction, label=\n 'LSTM loss', linestyle='dotted')\n plt.plot(x[self.reference_steps + self.reveal_trend:],\n prediction_e, label='CIM loss')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n\n def savefig_rule_num(self, name):\n x = list(range(self.span))\n plt.figure(figsize=(len(x) / 10, 5.5))\n chart_num = 6\n width = 0.8 / chart_num\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n predfail_app_num, label='truth rule number')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n predfail_app_num, label='prediction fail app')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n cap_rule_num, label='captured rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n add_rule_num, label='add rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n lost_rule_num, label='lost rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n useless_rule_num, label='useless rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n merge_rule_num, label='merge rule')\n plt.xlabel('season')\n plt.ylabel('number')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n\n def save_config(self, name, cfg):\n import json\n setting = dict(APP_NUM=cfg.APP_NUM, SPAN=cfg.SPAN, REVEAL_TREND=cfg\n .REVEAL_TREND, FIRST_RULE_NUM=cfg.FIRST_RULE_NUM,\n SHIFT_TREND_RULE=cfg.SHIFT_TREND_RULE, APPEAR_RATE=cfg.\n APPEAR_RATE, DISAPPEAR_RATE=cfg.DISAPPEAR_RATE,\n EVALUATE_THRESHOLD_PRED_FAIL=cfg.EVALUATE_THRESHOLD_PRED_FAIL,\n SAMPLING=cfg.SAMPLING, EVALUATE_THRESHOLD_DELETE_RULE=cfg.\n EVALUATE_THRESHOLD_DELETE_RULE, EVALUATE_THRESHOLD_ADD_RULE=cfg\n .EVALUATE_THRESHOLD_ADD_RULE, EVALUATE_THRESHOLD_MERGE_RULE=cfg\n .EVALUATE_THRESHOLD_MERGE_RULE, THRESHOLD_APPNUM=cfg.\n THRESHOLD_APPNUM, TRY_NEWRULE_NUM=cfg.TRY_NEWRULE_NUM,\n LSTM_REFERENCE_STEPS=cfg.LSTM_REFERENCE_STEPS, LSTM_EPOCHS=cfg.\n LSTM_EPOCHS, NN_EPOCHS=cfg.NN_EPOCHS, DATATYPE=[dict(name=feat[\n 'name'], type=str(type(feat['data']))) for feat in cfg.DATATYPE\n ], FIRST_BIN=cfg.FIRST_BIN)\n fw = open(self.dire + name + '.json', 'w')\n json.dump(setting, fw, indent=4)\n return\n",
"step-5": "import os\nfrom matplotlib import pyplot as plt\nfrom matplotlib import colors\nimport numpy as np\n\n\nclass figure:\n\n def __init__(self, dire, dpi, span, data, CIM,\n learn_loss=None, eval_loss=None, different_dir_app=True, reference_steps=0, reveal_trend=1):\n\n self.dire = self.new_num_directory(dire)\n self.app_dire = [self.make_num_directory(\"app\", i) for i in range(data.app_num)]\n self.trend_dire = [self.make_num_directory(\"trend\", i) for i in range(len(data.trend_rule.w))]\n self.dpi = dpi\n\n self.span = span\n self.app = data.apps\n self.trend_rule = data.trend_rule\n self.prediction = CIM.prediction\n self.prediction_e = CIM.prediction_est_rule\n\n self.prediction_only_ci = CIM.prediction_only_ci\n\n self.predfail_app_num = CIM.predfail_app_num\n self.cap_rule_num = CIM.cap_rule_num\n self.add_rule_num = CIM.add_rule_num\n self.lost_rule_num = CIM.lost_rule_num\n self.useless_rule_num = CIM.useless_rule_num\n self.merge_rule_num = CIM.merge_rule_num\n\n self.learn_loss = learn_loss\n self.eval_loss = eval_loss\n self.diff_dir = different_dir_app\n self.reference_steps = reference_steps\n self.reveal_trend = reveal_trend\n\n\n def new_num_directory(self, path):\n n = 1\n while True:\n if not os.path.exists(path + \"_\" + str(n)):\n os.mkdir(path + \"_\" + str(n))\n break\n else:\n n += 1\n return path + \"_\" + str(n) + \"/\"\n\n\n def make_num_directory(self, name, num):\n\n os.mkdir(self.dire + \"/\" + name + \"_\" + str(num))\n\n return self.dire + \"/\" + name + \"_\" + str(num) + \"/\"\n\n\n def find_min_max(self, data_list, length, standarize_zero=True):\n\n if standarize_zero:\n min = 0\n max = 0\n else:\n min = data_list[0][0]\n max = data_list[0][0]\n\n for data in data_list:\n\n for j in range(length):\n\n if j < len(data):\n if data[j] < min:\n min = data[j]\n if data[j] > max:\n max = data[j]\n\n return min, max\n\n\n def savefig_result(self, name):\n\n x = list(range(self.span))\n\n if self.diff_dir:\n\n # トレンドルールごとの色(chosenRuleより)\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n\n for i, app in enumerate(self.app):\n\n min, max = self.find_min_max([self.prediction[i], self.prediction_e[i]], self.span)\n\n plt.figure(figsize=(len(x) / 10, 5.5))\n\n # (chosenRuleより)\n for j in range(len(self.trend_rule.w)):\n plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1, max * 1.1 + 0.1],\n [min * 1.1 - 0.1, min * 1.1 - 0.1],\n facecolor=cycle_tr[j], alpha=0.2,\n label=\"Chosenrule:\" + str(j))\n for j in range(self.span):\n plt.fill_between([j - 0.5, j + 0.5], [max*1.1+0.1, max*1.1+0.1], [min*1.1-0.1, min*1.1-0.1],\n facecolor=cycle_tr[self.app[i].trend_idx[j]], alpha=0.2)\n\n\n plt.plot(x, app.trend, label=\"trend\", linestyle=\"dotted\", color=\"black\")\n plt.plot(x[self.reference_steps:], self.prediction[i],\n label=\"LSTM pred\", linestyle=\"dotted\", color=\"blue\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.prediction_e[i],\n label=\"CIM pred\", color=\"orange\")\n\n if self.learn_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:], self.learn_loss[i], alpha=0.3,\n label=\"learn loss\")\n if self.eval_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:], self.eval_loss[i], alpha=0.3, marker=\"X\",\n label=\"eval loss\")\n\n plt.xlabel('season')\n plt.ylabel('trend value')\n 
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.app_dire[i] + name + \".png\", dpi=self.dpi)\n plt.clf()\n\n else:\n\n plt.figure(figsize=(len(x)/10, 5.5))\n\n # アプリごとの色\n if len(self.app) <= 10:\n cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.app) <= 20:\n cycle_app = plt.cm.get_cmap('tab20').colors\n else:\n cycle_app = list(colors.XKCD_COLORS.items())[:100]\n\n for i, app in enumerate(self.app):\n plt.plot(x, self.app[i].trend, color=cycle_app[i], label=\"trend (app:\" + str(i) + \")\", linestyle=\"dotted\")\n plt.plot(x[self.reference_steps:], self.prediction[i], color=cycle_app[i], label=\"pred (app:\" + str(i) + \")\")\n\n if self.learn_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:], self.learn_loss[i], color=cycle_app[i], alpha=0.3,\n label=\"learn loss (app:\" + str(i) + \")\")\n if self.eval_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:], self.eval_loss[i], color=cycle_app[i], alpha=0.3, marker=\"X\",\n label=\"evalu loss (app:\" + str(i) + \")\")\n\n plt.xlabel('season')\n plt.ylabel('trend value')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + \".png\", dpi=self.dpi)\n plt.clf()\n\n return\n\n\n def savefig_ruleweight(self, name):\n\n x = list(range(self.span))\n\n if self.diff_dir:\n\n # 特徴ごとの色\n if len(self.trend_rule.w[0][\"value\"]) <= 10:\n cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w[0][\"value\"]) <= 20:\n cycle_ft = plt.cm.get_cmap('tab20').colors\n else:\n cycle_ft = list(colors.XKCD_COLORS.items())[:100]\n\n for i in range(len(self.trend_rule.w)):\n\n plt.figure(figsize=(len(x) / 10, 5.5))\n\n # 特徴毎に\n for j in range(len(self.trend_rule.w[i][\"value\"])):\n plt.plot(x, self.trend_rule.w[i][\"value\"][j][:-1], color=cycle_ft[j], label=\"feature:\" + str(j))\n\n plt.xlabel('season')\n plt.ylabel('weight of trend rule')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.trend_dire[i] + name + \".png\", dpi=self.dpi)\n\n plt.clf()\n\n\n else:\n\n plt.figure(figsize=(len(x)/10, 5.5))\n\n # トレンドルールごとの色\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n\n # 特徴ごとの色\n if len(self.trend_rule.w[0][\"value\"]) <= 10:\n cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w[0][\"value\"]) <= 20:\n cycle_ft = plt.cm.get_cmap('tab20').colors\n else:\n cycle_ft = list(colors.XKCD_COLORS.items())[:100]\n\n width = 0.8 / len(self.trend_rule.w[0][\"value\"])\n #トレンドルール毎に\n for i in range(len(self.trend_rule.w)):\n bottom = np.array(- i * 2.0)\n # 特徴毎に\n for j in range(len(self.trend_rule.w[i][\"value\"])):\n if i == 0:\n plt.bar(x + np.array([width * float(j)] * len(x)), self.trend_rule.w[i][j][:-1],\n color=cycle_ft[j], align='edge', bottom=bottom, width=width, label=\"feature:\" + str(j))\n else:\n plt.bar(x + np.array([width * float(j)] * len(x)), self.trend_rule.w[i][\"value\"][j][:-1],\n color=cycle_ft[j], align='edge', bottom=bottom, width=width)\n\n plt.fill_between(list(range(self.span+1)), [- i * 2.0 + 1] * (len(x)+1), [- (i+1) * 2.0 + 1] * (len(x)+1),\n facecolor=cycle_tr[i], alpha=0.2, label=\"trendrule:\" + str(i))\n\n 
plt.xlabel('season')\n plt.ylabel('weight of trend rule')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + \".png\", dpi=self.dpi)\n\n plt.clf()\n\n return\n\n\n def savefig_chosenrule(self, name):\n\n x = list(range(self.span))\n\n if self.diff_dir:\n\n pass # savefig_resultに統合\n\n else:\n\n plt.figure(figsize=(len(x)/10, 5.5))\n\n # アプリごとの色\n if len(self.app) <= 10:\n cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.app) <= 20:\n cycle_app = plt.cm.get_cmap('tab20').colors\n else:\n cycle_app = list(colors.XKCD_COLORS.items())[:100]\n\n # トレンドルールごとの色\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n\n # 凡例表示用\n for i in range(len(self.trend_rule.w)):\n plt.scatter(x, np.array([0] * len(x)), color=cycle_tr[i], s=1, marker=\"D\",\n label=\"trendrule:\" + str(i))\n\n for id in range(len(self.app)):\n colorArr = []\n for i in self.app[id].trend_idx:\n colorArr.append(cycle_tr[i])\n plt.scatter(x, np.array([- id] * len(x)), color=cycle_app[id], s=150, label=\"app:\" + str(id))\n plt.scatter(x, np.array([- id] * len(x)), color=\"w\", s=70)\n plt.scatter(x, np.array([- id] * len(x)), color=colorArr, s=15, marker=\"D\", alpha=0.5)\n\n plt.xlabel('シーズン')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + \".png\", dpi=self.dpi)\n\n plt.clf()\n\n return\n\n\n def savefig_compare_prediction(self, name):\n\n x = list(range(self.span))\n\n if self.diff_dir:\n\n for i in range(len(self.app)):\n\n plt.figure(figsize=(len(x) / 10, 5.5))\n\n # *************************(変更してください)\n plt.plot(x[self.reference_steps + self.reveal_trend:],\n np.abs(np.array(self.prediction_only_ci[i]) - np.array(self.app[i].trend[self.reference_steps + self.reveal_trend:])),\n label=\"only CI loss\", linestyle=\"dotted\", color=\"green\")\n\n plt.plot(x[self.reference_steps:],\n np.abs(np.array(self.prediction[i]) - np.array(self.app[i].trend[self.reference_steps:])),\n label=\"LSTM loss\", linestyle=\"dotted\", color=\"blue\")\n plt.plot(x[self.reference_steps + self.reveal_trend:],\n np.abs(np.array(self.prediction_e[i]) - np.array(self.app[i].trend[self.reference_steps + self.reveal_trend:])),\n label=\"CIM loss\", color=\"orange\")\n\n plt.xlabel('season')\n plt.ylabel('prediction loss')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.app_dire[i] + name + \".png\", dpi=self.dpi)\n\n plt.clf()\n\n else:\n\n plt.figure(figsize=(len(x)/10, 5.5))\n\n # アプリごとの色\n if len(self.app) <= 10:\n cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.app) <= 20:\n cycle_app = plt.cm.get_cmap('tab20').colors\n else:\n cycle_app = list(colors.XKCD_COLORS.items())[:100]\n\n for id in range(len(self.app)):\n\n plt.plot(x[self.reference_steps:], np.abs(np.array(self.prediction[id]) - np.array(self.app[id].trend[self.reference_steps:])),\n color=cycle_app[id], label=\"classify loss (app:\" + str(id) + \")\", linestyle=\"dotted\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], np.abs(np.array(self.prediction_e[id]) - np.array(self.app[id].trend[self.reference_steps + self.reveal_trend:])),\n color=cycle_app[id], label=\"analyse loss (app:\" + str(id) + \")\")\n\n plt.xlabel('season')\n 
plt.ylabel('prediction loss')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + \".png\", dpi=self.dpi)\n\n plt.clf()\n\n return\n\n\n def savefig_compare_prediction_ave(self, name):\n\n x = list(range(self.span))\n\n if self.diff_dir:\n\n prediction = []\n prediction_e = []\n prediction_ci = []\n\n # 各アプリに対して平均を算出\n for j in range(self.span - self.reference_steps):\n\n sum = 0\n sum_e = 0\n sum_ci = 0\n\n for i in range(len(self.app)):\n\n sum += (self.prediction[i][j] - self.app[i].trend[j + self.reference_steps])**2\n if j < self.span - self.reference_steps - self.reveal_trend:\n\n sum_e += (self.prediction_e[i][j] - self.app[i].trend[j + self.reference_steps + self.reveal_trend])**2\n sum_ci += (self.prediction_e[i][j] - self.app[i].trend[j + self.reference_steps + self.reveal_trend])**2\n\n prediction.append(sum / len(self.app))\n if j < self.span - self.reference_steps - self.reveal_trend:\n prediction_e.append(sum_e / len(self.app))\n prediction_ci.append(sum_ci / len(self.app))\n\n plt.figure(figsize=(len(x) / 10, 5.5))\n\n plt.xlabel('season')\n plt.ylabel('prediction loss average')\n\n # *************************(変更してください)\n plt.plot(x[self.reference_steps + self.reveal_trend:], prediction_ci,\n label=\"only CI loss\", linestyle=\"dotted\")\n\n plt.plot(x[self.reference_steps:], prediction, label=\"LSTM loss\", linestyle=\"dotted\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], prediction_e, label=\"CIM loss\")\n\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + \".png\", dpi=self.dpi)\n\n plt.clf()\n\n\n def savefig_rule_num(self, name):\n\n x = list(range(self.span))\n\n plt.figure(figsize=(len(x)/10, 5.5))\n\n chart_num = 6\n width = 0.8 / chart_num\n\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.predfail_app_num, label=\"truth rule number\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.predfail_app_num, label=\"prediction fail app\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.cap_rule_num, label=\"captured rule\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.add_rule_num, label=\"add rule\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.lost_rule_num, label=\"lost rule\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.useless_rule_num, label=\"useless rule\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.merge_rule_num, label=\"merge rule\")\n\n plt.xlabel('season')\n plt.ylabel('number')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + \".png\", dpi=self.dpi)\n\n plt.clf()\n\n return\n\n def save_config(self, name, cfg):\n\n import json\n\n setting = dict(\n APP_NUM = cfg.APP_NUM,\n SPAN = cfg.SPAN,\n REVEAL_TREND = cfg.REVEAL_TREND,\n FIRST_RULE_NUM=cfg.FIRST_RULE_NUM,\n SHIFT_TREND_RULE = cfg.SHIFT_TREND_RULE,\n APPEAR_RATE = cfg.APPEAR_RATE,\n DISAPPEAR_RATE = cfg.DISAPPEAR_RATE,\n EVALUATE_THRESHOLD_PRED_FAIL = cfg.EVALUATE_THRESHOLD_PRED_FAIL,\n SAMPLING = cfg.SAMPLING,\n EVALUATE_THRESHOLD_DELETE_RULE = cfg.EVALUATE_THRESHOLD_DELETE_RULE,\n EVALUATE_THRESHOLD_ADD_RULE = cfg.EVALUATE_THRESHOLD_ADD_RULE,\n EVALUATE_THRESHOLD_MERGE_RULE = cfg.EVALUATE_THRESHOLD_MERGE_RULE,\n THRESHOLD_APPNUM = cfg.THRESHOLD_APPNUM,\n TRY_NEWRULE_NUM = cfg.TRY_NEWRULE_NUM,\n LSTM_REFERENCE_STEPS = cfg.LSTM_REFERENCE_STEPS,\n LSTM_EPOCHS = 
cfg.LSTM_EPOCHS,\n NN_EPOCHS = cfg.NN_EPOCHS,\n DATATYPE = [dict(\n name = feat[\"name\"],\n type = str(type(feat[\"data\"]))\n ) for feat in cfg.DATATYPE],\n FIRST_BIN = cfg.FIRST_BIN\n )\n\n fw = open(self.dire + name + '.json', 'w')\n json.dump(setting, fw, indent=4)\n\n return",
"step-ids": [
4,
11,
12,
13,
14
]
}
|
[
4,
11,
12,
13,
14
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created with YooLiang Technology (侑良科技).
# Author: Qi-Liang Wen (温啓良)
# Web: http://www.yooliang.com/
# Date: 2015/7/12.
from monkey import BasicModel
from monkey import Fields
class WebInformationModel(BasicModel):
class Meta:
label_name = {
"title": u"通用名稱",
"name": u"識別碼",
"domain_registration": u"網域註冊地",
"domain_registration_price": u"網域註冊費用",
"domain_registration_date": u"網域註冊日",
"domain_expiration_date": u"網域到期日",
"space_rental_level": u"伺服器等級",
"space_rental_price": u"空間費用",
"space_rental_date": u"空間租借日",
"space_expiration_date": u"空間到期日",
"manager_company": u"管理公司",
"manager_website": u"公司網址",
"manager_person": u"管理人姓名",
"manager_telephone": u"管理人電話",
"manager_mobile": u"管理人手機",
"manager_email": u"管理人信箱",
"contact_person": u"聯絡人",
"contact_telephone": u"聯絡電話",
"contact_mobile": u"聯絡手機",
"contact_email": u"聯絡信箱",
"contact_address": u"聯絡地址",
"is_enable": u"顯示於前台",
}
title = Fields.StringProperty(required=True)
name = Fields.StringProperty()
domain_registration = Fields.StringProperty()
domain_registration_price = Fields.StringProperty()
domain_registration_date = Fields.DateProperty()
domain_expiration_date = Fields.DateProperty()
space_rental_level = Fields.StringProperty()
space_rental_price = Fields.StringProperty()
space_rental_date = Fields.DateProperty()
space_expiration_date = Fields.DateProperty()
manager_company = Fields.StringProperty(default=u"侑良科技")
manager_website = Fields.StringProperty(default="http://")
manager_person = Fields.StringProperty()
manager_telephone = Fields.StringProperty()
manager_mobile = Fields.StringProperty()
manager_email = Fields.StringProperty()
contact_person = Fields.StringProperty()
contact_telephone = Fields.StringProperty()
contact_mobile = Fields.StringProperty()
contact_email = Fields.StringProperty()
contact_address = Fields.StringProperty()
is_enable = Fields.BooleanProperty(default=True)
@classmethod
def get_by_name(cls, name):
return cls.query(cls.name==name).get()
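# Illustrative usage sketch (an assumption for documentation only: 'main-site'
# is a made-up identifier, and only get_by_name() above is guaranteed by this
# module; the wider BasicModel API is not shown here):
# info = WebInformationModel.get_by_name('main-site')
# if info and info.is_enable:
#     print(info.title, info.manager_email)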
|
normal
|
{
"blob_id": "3d55a5b4e332523025f65e5f5859f4633f4ee9a3",
"index": 7501,
"step-1": "<mask token>\n\n\nclass WebInformationModel(BasicModel):\n\n\n class Meta:\n label_name = {'title': u'通用名稱', 'name': u'識別碼',\n 'domain_registration': u'網域註冊地', 'domain_registration_price':\n u'網域註冊費用', 'domain_registration_date': u'網域註冊日',\n 'domain_expiration_date': u'網域到期日', 'space_rental_level':\n u'伺服器等級', 'space_rental_price': u'空間費用', 'space_rental_date':\n u'空間租借日', 'space_expiration_date': u'空間到期日', 'manager_company':\n u'管理公司', 'manager_website': u'公司網址', 'manager_person': u'管理人姓名',\n 'manager_telephone': u'管理人電話', 'manager_mobile': u'管理人手機',\n 'manager_email': u'管理人信箱', 'contact_person': u'聯絡人',\n 'contact_telephone': u'聯絡電話', 'contact_mobile': u'聯絡手機',\n 'contact_email': u'聯絡信箱', 'contact_address': u'聯絡地址',\n 'is_enable': u'顯示於前台'}\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass WebInformationModel(BasicModel):\n\n\n class Meta:\n label_name = {'title': u'通用名稱', 'name': u'識別碼',\n 'domain_registration': u'網域註冊地', 'domain_registration_price':\n u'網域註冊費用', 'domain_registration_date': u'網域註冊日',\n 'domain_expiration_date': u'網域到期日', 'space_rental_level':\n u'伺服器等級', 'space_rental_price': u'空間費用', 'space_rental_date':\n u'空間租借日', 'space_expiration_date': u'空間到期日', 'manager_company':\n u'管理公司', 'manager_website': u'公司網址', 'manager_person': u'管理人姓名',\n 'manager_telephone': u'管理人電話', 'manager_mobile': u'管理人手機',\n 'manager_email': u'管理人信箱', 'contact_person': u'聯絡人',\n 'contact_telephone': u'聯絡電話', 'contact_mobile': u'聯絡手機',\n 'contact_email': u'聯絡信箱', 'contact_address': u'聯絡地址',\n 'is_enable': u'顯示於前台'}\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def get_by_name(cls, name):\n return cls.query(cls.name == name).get()\n",
"step-3": "<mask token>\n\n\nclass WebInformationModel(BasicModel):\n\n\n class Meta:\n label_name = {'title': u'通用名稱', 'name': u'識別碼',\n 'domain_registration': u'網域註冊地', 'domain_registration_price':\n u'網域註冊費用', 'domain_registration_date': u'網域註冊日',\n 'domain_expiration_date': u'網域到期日', 'space_rental_level':\n u'伺服器等級', 'space_rental_price': u'空間費用', 'space_rental_date':\n u'空間租借日', 'space_expiration_date': u'空間到期日', 'manager_company':\n u'管理公司', 'manager_website': u'公司網址', 'manager_person': u'管理人姓名',\n 'manager_telephone': u'管理人電話', 'manager_mobile': u'管理人手機',\n 'manager_email': u'管理人信箱', 'contact_person': u'聯絡人',\n 'contact_telephone': u'聯絡電話', 'contact_mobile': u'聯絡手機',\n 'contact_email': u'聯絡信箱', 'contact_address': u'聯絡地址',\n 'is_enable': u'顯示於前台'}\n title = Fields.StringProperty(required=True)\n name = Fields.StringProperty()\n domain_registration = Fields.StringProperty()\n domain_registration_price = Fields.StringProperty()\n domain_registration_date = Fields.DateProperty()\n domain_expiration_date = Fields.DateProperty()\n space_rental_level = Fields.StringProperty()\n space_rental_price = Fields.StringProperty()\n space_rental_date = Fields.DateProperty()\n space_expiration_date = Fields.DateProperty()\n manager_company = Fields.StringProperty(default=u'侑良科技')\n manager_website = Fields.StringProperty(default='http://')\n manager_person = Fields.StringProperty()\n manager_telephone = Fields.StringProperty()\n manager_mobile = Fields.StringProperty()\n manager_email = Fields.StringProperty()\n contact_person = Fields.StringProperty()\n contact_telephone = Fields.StringProperty()\n contact_mobile = Fields.StringProperty()\n contact_email = Fields.StringProperty()\n contact_address = Fields.StringProperty()\n is_enable = Fields.BooleanProperty(default=True)\n\n @classmethod\n def get_by_name(cls, name):\n return cls.query(cls.name == name).get()\n",
"step-4": "from monkey import BasicModel\nfrom monkey import Fields\n\n\nclass WebInformationModel(BasicModel):\n\n\n class Meta:\n label_name = {'title': u'通用名稱', 'name': u'識別碼',\n 'domain_registration': u'網域註冊地', 'domain_registration_price':\n u'網域註冊費用', 'domain_registration_date': u'網域註冊日',\n 'domain_expiration_date': u'網域到期日', 'space_rental_level':\n u'伺服器等級', 'space_rental_price': u'空間費用', 'space_rental_date':\n u'空間租借日', 'space_expiration_date': u'空間到期日', 'manager_company':\n u'管理公司', 'manager_website': u'公司網址', 'manager_person': u'管理人姓名',\n 'manager_telephone': u'管理人電話', 'manager_mobile': u'管理人手機',\n 'manager_email': u'管理人信箱', 'contact_person': u'聯絡人',\n 'contact_telephone': u'聯絡電話', 'contact_mobile': u'聯絡手機',\n 'contact_email': u'聯絡信箱', 'contact_address': u'聯絡地址',\n 'is_enable': u'顯示於前台'}\n title = Fields.StringProperty(required=True)\n name = Fields.StringProperty()\n domain_registration = Fields.StringProperty()\n domain_registration_price = Fields.StringProperty()\n domain_registration_date = Fields.DateProperty()\n domain_expiration_date = Fields.DateProperty()\n space_rental_level = Fields.StringProperty()\n space_rental_price = Fields.StringProperty()\n space_rental_date = Fields.DateProperty()\n space_expiration_date = Fields.DateProperty()\n manager_company = Fields.StringProperty(default=u'侑良科技')\n manager_website = Fields.StringProperty(default='http://')\n manager_person = Fields.StringProperty()\n manager_telephone = Fields.StringProperty()\n manager_mobile = Fields.StringProperty()\n manager_email = Fields.StringProperty()\n contact_person = Fields.StringProperty()\n contact_telephone = Fields.StringProperty()\n contact_mobile = Fields.StringProperty()\n contact_email = Fields.StringProperty()\n contact_address = Fields.StringProperty()\n is_enable = Fields.BooleanProperty(default=True)\n\n @classmethod\n def get_by_name(cls, name):\n return cls.query(cls.name == name).get()\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Created with YooLiang Technology (侑良科技).\n# Author: Qi-Liang Wen (温啓良)\n# Web: http://www.yooliang.com/\n# Date: 2015/7/12.\n\nfrom monkey import BasicModel\nfrom monkey import Fields\n\n\nclass WebInformationModel(BasicModel):\n class Meta:\n label_name = {\n \"title\": u\"通用名稱\",\n \"name\": u\"識別碼\",\n \"domain_registration\": u\"網域註冊地\",\n \"domain_registration_price\": u\"網域註冊費用\",\n \"domain_registration_date\": u\"網域註冊日\",\n \"domain_expiration_date\": u\"網域到期日\",\n \"space_rental_level\": u\"伺服器等級\",\n \"space_rental_price\": u\"空間費用\",\n \"space_rental_date\": u\"空間租借日\",\n \"space_expiration_date\": u\"空間到期日\",\n \n \"manager_company\": u\"管理公司\",\n \"manager_website\": u\"公司網址\",\n \"manager_person\": u\"管理人姓名\",\n \"manager_telephone\": u\"管理人電話\",\n \"manager_mobile\": u\"管理人手機\",\n \"manager_email\": u\"管理人信箱\",\n\n \"contact_person\": u\"聯絡人\",\n \"contact_telephone\": u\"聯絡電話\",\n \"contact_mobile\": u\"聯絡手機\",\n \"contact_email\": u\"聯絡信箱\",\n \"contact_address\": u\"聯絡地址\",\n \"is_enable\": u\"顯示於前台\",\n }\n title = Fields.StringProperty(required=True)\n name = Fields.StringProperty()\n\n domain_registration = Fields.StringProperty()\n domain_registration_price = Fields.StringProperty()\n domain_registration_date = Fields.DateProperty()\n domain_expiration_date = Fields.DateProperty()\n space_rental_level = Fields.StringProperty()\n space_rental_price = Fields.StringProperty()\n space_rental_date = Fields.DateProperty()\n space_expiration_date = Fields.DateProperty()\n\n manager_company = Fields.StringProperty(default=u\"侑良科技\")\n manager_website = Fields.StringProperty(default=\"http://\")\n manager_person = Fields.StringProperty()\n manager_telephone = Fields.StringProperty()\n manager_mobile = Fields.StringProperty()\n manager_email = Fields.StringProperty()\n\n contact_person = Fields.StringProperty()\n contact_telephone = Fields.StringProperty()\n contact_mobile = Fields.StringProperty()\n contact_email = Fields.StringProperty()\n contact_address = Fields.StringProperty()\n is_enable = Fields.BooleanProperty(default=True)\n\n @classmethod\n def get_by_name(cls, name):\n return cls.query(cls.name==name).get()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import Numberjack as Nj
class Teachers(object):
"""Will be expanded to allow constraints for individual teachers"""
def __init__(self):
self.store = list()
def add(self, teachers):
if isinstance(teachers, (list, tuple)):
self.store.extend(teachers)
elif isinstance(teachers, str):
self.store.append(teachers)
else:
raise TypeError('only lists, tuples and strings '
'of teachers can be added')
class Subjects(object):
def __init__(self):
self.store = list()
def add(self, subjects):
if isinstance(subjects, (list, tuple)):
self.store.extend(subjects)
elif isinstance(subjects, str):
self.store.append(subjects)
else:
raise TypeError('only lists, tuples and strings '
'of subjects can be added')
class TimeSlots(object):
"""
Currently only takes # of timeslots until I can figure out a good
way to standardized time inputs
"""
def __init__(self, num_slots):
if isinstance(num_slots, int):
self.store = num_slots
else:
raise TypeError('only accepts number of timeslots as ints')
class Solver(object):
def __init__(self, teachers, subjects, timeslots):
if timeslots.store < len(teachers.store):
raise ValueError('unable to solve for more teachers '
'than timeslots')
self.teachers = teachers.store
self.subjects = subjects.store
self.timeslots = timeslots.store
self.matrix = None
self.model = None
self.solver = None
self.solution = None
def solve(self):
self.matrix = Nj.Matrix(len(self.subjects),
self.timeslots,
len(self.teachers)+1)
self.model = Nj.Model(
[Nj.AllDiffExcept0(row) for row in self.matrix.row],
[Nj.AllDiffExcept0(col) for col in self.matrix.col]
)
self.solver = self.model.load('Mistral')
self.solver.solve()
self.solution = self.matrix
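# Illustrative usage sketch (assumes Numberjack and its Mistral backend are
# installed; the teacher/subject names and slot count are invented for the
# example):
# teachers = Teachers()
# teachers.add(['Alice', 'Bob'])
# subjects = Subjects()
# subjects.add(['Math', 'History', 'Art'])
# scheduler = Solver(teachers, subjects, TimeSlots(5))
# scheduler.solve()
# print(scheduler.solution)  # Matrix of teacher indices; 0 marks an empty slot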
|
normal
|
{
"blob_id": "8787126e654808a5fec52283780d9b4f668fa50f",
"index": 8593,
"step-1": "<mask token>\n\n\nclass Subjects(object):\n\n def __init__(self):\n self.store = list()\n\n def add(self, subjects):\n if isinstance(subjects, (list, tuple)):\n self.store.extend(subjects)\n elif isinstance(subjects, str):\n self.store.append(subjects)\n else:\n raise TypeError(\n 'only lists, tuples and strings of subjects can be added')\n\n\nclass TimeSlots(object):\n \"\"\"\n Currently only takes # of timeslots until I can figure out a good\n way to standardized time inputs\n \"\"\"\n\n def __init__(self, num_slots):\n if isinstance(num_slots, int):\n self.store = num_slots\n else:\n raise TypeError('only accepts number of timeslots as ints')\n\n\nclass Solver(object):\n\n def __init__(self, teachers, subjects, timeslots):\n if timeslots.store < len(teachers.store):\n raise ValueError('unable to solve for more teachers than timeslots'\n )\n self.teachers = teachers.store\n self.subjects = subjects.store\n self.timeslots = timeslots.store\n self.matrix = None\n self.model = None\n self.solver = None\n self.solution = None\n\n def solve(self):\n self.matrix = Nj.Matrix(len(self.subjects), self.timeslots, len(\n self.teachers) + 1)\n self.model = Nj.Model([Nj.AllDiffExcept0(row) for row in self.\n matrix.row], [Nj.AllDiffExcept0(col) for col in self.matrix.col])\n self.solver = self.model.load('Mistral')\n self.solver.solve()\n self.solution = self.matrix\n",
"step-2": "<mask token>\n\n\nclass Teachers(object):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Subjects(object):\n\n def __init__(self):\n self.store = list()\n\n def add(self, subjects):\n if isinstance(subjects, (list, tuple)):\n self.store.extend(subjects)\n elif isinstance(subjects, str):\n self.store.append(subjects)\n else:\n raise TypeError(\n 'only lists, tuples and strings of subjects can be added')\n\n\nclass TimeSlots(object):\n \"\"\"\n Currently only takes # of timeslots until I can figure out a good\n way to standardized time inputs\n \"\"\"\n\n def __init__(self, num_slots):\n if isinstance(num_slots, int):\n self.store = num_slots\n else:\n raise TypeError('only accepts number of timeslots as ints')\n\n\nclass Solver(object):\n\n def __init__(self, teachers, subjects, timeslots):\n if timeslots.store < len(teachers.store):\n raise ValueError('unable to solve for more teachers than timeslots'\n )\n self.teachers = teachers.store\n self.subjects = subjects.store\n self.timeslots = timeslots.store\n self.matrix = None\n self.model = None\n self.solver = None\n self.solution = None\n\n def solve(self):\n self.matrix = Nj.Matrix(len(self.subjects), self.timeslots, len(\n self.teachers) + 1)\n self.model = Nj.Model([Nj.AllDiffExcept0(row) for row in self.\n matrix.row], [Nj.AllDiffExcept0(col) for col in self.matrix.col])\n self.solver = self.model.load('Mistral')\n self.solver.solve()\n self.solution = self.matrix\n",
"step-3": "<mask token>\n\n\nclass Teachers(object):\n <mask token>\n\n def __init__(self):\n self.store = list()\n\n def add(self, teachers):\n if isinstance(teachers, (list, tuple)):\n self.store.extend(teachers)\n elif isinstance(teachers, str):\n self.store.append(teachers)\n else:\n raise TypeError(\n 'only lists, tuples and strings of teachers can be added')\n\n\nclass Subjects(object):\n\n def __init__(self):\n self.store = list()\n\n def add(self, subjects):\n if isinstance(subjects, (list, tuple)):\n self.store.extend(subjects)\n elif isinstance(subjects, str):\n self.store.append(subjects)\n else:\n raise TypeError(\n 'only lists, tuples and strings of subjects can be added')\n\n\nclass TimeSlots(object):\n \"\"\"\n Currently only takes # of timeslots until I can figure out a good\n way to standardized time inputs\n \"\"\"\n\n def __init__(self, num_slots):\n if isinstance(num_slots, int):\n self.store = num_slots\n else:\n raise TypeError('only accepts number of timeslots as ints')\n\n\nclass Solver(object):\n\n def __init__(self, teachers, subjects, timeslots):\n if timeslots.store < len(teachers.store):\n raise ValueError('unable to solve for more teachers than timeslots'\n )\n self.teachers = teachers.store\n self.subjects = subjects.store\n self.timeslots = timeslots.store\n self.matrix = None\n self.model = None\n self.solver = None\n self.solution = None\n\n def solve(self):\n self.matrix = Nj.Matrix(len(self.subjects), self.timeslots, len(\n self.teachers) + 1)\n self.model = Nj.Model([Nj.AllDiffExcept0(row) for row in self.\n matrix.row], [Nj.AllDiffExcept0(col) for col in self.matrix.col])\n self.solver = self.model.load('Mistral')\n self.solver.solve()\n self.solution = self.matrix\n",
"step-4": "<mask token>\n\n\nclass Teachers(object):\n \"\"\"Will be expanded to allow constraints for individual teachers\"\"\"\n\n def __init__(self):\n self.store = list()\n\n def add(self, teachers):\n if isinstance(teachers, (list, tuple)):\n self.store.extend(teachers)\n elif isinstance(teachers, str):\n self.store.append(teachers)\n else:\n raise TypeError(\n 'only lists, tuples and strings of teachers can be added')\n\n\nclass Subjects(object):\n\n def __init__(self):\n self.store = list()\n\n def add(self, subjects):\n if isinstance(subjects, (list, tuple)):\n self.store.extend(subjects)\n elif isinstance(subjects, str):\n self.store.append(subjects)\n else:\n raise TypeError(\n 'only lists, tuples and strings of subjects can be added')\n\n\nclass TimeSlots(object):\n \"\"\"\n Currently only takes # of timeslots until I can figure out a good\n way to standardized time inputs\n \"\"\"\n\n def __init__(self, num_slots):\n if isinstance(num_slots, int):\n self.store = num_slots\n else:\n raise TypeError('only accepts number of timeslots as ints')\n\n\nclass Solver(object):\n\n def __init__(self, teachers, subjects, timeslots):\n if timeslots.store < len(teachers.store):\n raise ValueError('unable to solve for more teachers than timeslots'\n )\n self.teachers = teachers.store\n self.subjects = subjects.store\n self.timeslots = timeslots.store\n self.matrix = None\n self.model = None\n self.solver = None\n self.solution = None\n\n def solve(self):\n self.matrix = Nj.Matrix(len(self.subjects), self.timeslots, len(\n self.teachers) + 1)\n self.model = Nj.Model([Nj.AllDiffExcept0(row) for row in self.\n matrix.row], [Nj.AllDiffExcept0(col) for col in self.matrix.col])\n self.solver = self.model.load('Mistral')\n self.solver.solve()\n self.solution = self.matrix\n",
"step-5": "import Numberjack as Nj\n\n\nclass Teachers(object):\n \"\"\"Will be expanded to allow constraints for individual teachers\"\"\"\n def __init__(self):\n self.store = list()\n\n def add(self, teachers):\n if isinstance(teachers, (list, tuple)):\n self.store.extend(teachers)\n elif isinstance(teachers, str):\n self.store.append(teachers)\n else:\n raise TypeError('only lists, tuples and strings '\n 'of teachers can be added')\n\n\nclass Subjects(object):\n def __init__(self):\n self.store = list()\n\n def add(self, subjects):\n if isinstance(subjects, (list, tuple)):\n self.store.extend(subjects)\n elif isinstance(subjects, str):\n self.store.append(subjects)\n else:\n raise TypeError('only lists, tuples and strings '\n 'of subjects can be added')\n\n\nclass TimeSlots(object):\n \"\"\"\n Currently only takes # of timeslots until I can figure out a good\n way to standardized time inputs\n \"\"\"\n def __init__(self, num_slots):\n if isinstance(num_slots, int):\n self.store = num_slots\n else:\n raise TypeError('only accepts number of timeslots as ints')\n\n\nclass Solver(object):\n def __init__(self, teachers, subjects, timeslots):\n if timeslots.store < len(teachers.store):\n raise ValueError('unable to solve for more teachers '\n 'than timeslots')\n\n self.teachers = teachers.store\n self.subjects = subjects.store\n self.timeslots = timeslots.store\n self.matrix = None\n self.model = None\n self.solver = None\n self.solution = None\n\n def solve(self):\n self.matrix = Nj.Matrix(len(self.subjects),\n self.timeslots,\n len(self.teachers)+1)\n\n self.model = Nj.Model(\n [Nj.AllDiffExcept0(row) for row in self.matrix.row],\n [Nj.AllDiffExcept0(col) for col in self.matrix.col]\n )\n\n self.solver = self.model.load('Mistral')\n self.solver.solve()\n\n self.solution = self.matrix\n",
"step-ids": [
9,
10,
12,
13,
15
]
}
|
[
9,
10,
12,
13,
15
] |
<mask token>
<|reserved_special_token_1|>
<mask token>
def tools():
search = str(input('Please enter search: '))
    search = search.strip()
pulsesJSON = otx.search_pulses(search, 40)
for aPulse in pulsesJSON['results']:
name = aPulse.get('name')
description = aPulse.get('description')
modified = aPulse.get('modified')
pulseid = aPulse.get('id')
"""
If needed, add more categories to pull for each pulse here.
"""
raw_data = [{'Pulse ID': pulseid, 'Name': name, 'Description':
description, 'Modified': modified}]
        filename = 'shenzi_pulses.csv'
        file_exists = os.path.isfile(filename)
        with open(filename, 'a') as csv_file:
            csv_columns_headers = ['Pulse ID', 'Name', 'Description',
                'Modified']
            writer = csv.DictWriter(csv_file, delimiter=',',
                lineterminator='\n', fieldnames=csv_columns_headers)
            if not file_exists:
                writer.writeheader()
            for data in raw_data:
                writer.writerow(data)
option = input('1: To Email 2: To quit : ')
option = int(option)
if option == 1:
sendemail()
os.remove('pulseIdsList.csv')
elif option == 2:
        raise SystemExit()
<|reserved_special_token_1|>
from OTXv2 import OTXv2
from pandas.io.json import json_normalize
from datetime import datetime, timedelta
import getopt
import sys
from sendemail import sendemail
from main import otx
import csv
import pandas as pd
from pandas import read_csv
import os.path
def tools():
search = str(input('Please enter search: '))
    search = search.strip()
pulsesJSON = otx.search_pulses(search, 40)
for aPulse in pulsesJSON['results']:
name = aPulse.get('name')
description = aPulse.get('description')
modified = aPulse.get('modified')
pulseid = aPulse.get('id')
"""
If needed, add more categories to pull for each pulse here.
"""
raw_data = [{'Pulse ID': pulseid, 'Name': name, 'Description':
description, 'Modified': modified}]
        filename = 'shenzi_pulses.csv'
        file_exists = os.path.isfile(filename)
        with open(filename, 'a') as csv_file:
            csv_columns_headers = ['Pulse ID', 'Name', 'Description',
                'Modified']
            writer = csv.DictWriter(csv_file, delimiter=',',
                lineterminator='\n', fieldnames=csv_columns_headers)
            if not file_exists:
                writer.writeheader()
            for data in raw_data:
                writer.writerow(data)
option = input('1: To Email 2: To quit : ')
option = int(option)
if option == 1:
sendemail()
os.remove('pulseIdsList.csv')
elif option == 2:
        raise SystemExit()
<|reserved_special_token_1|>
from OTXv2 import OTXv2
from pandas.io.json import json_normalize
from datetime import datetime, timedelta
import getopt
import sys
from sendemail import sendemail
from main import otx
import csv
import pandas as pd
from pandas import read_csv
import os.path
def tools():
search = str(input('Please enter search: '))
    search = search.strip()
pulsesJSON = otx.search_pulses(search, 40) # Retrieves list (in json format) of top 40 pulses with tag "crypto"
# Loops through each individual pulse retrieved from OTX, and prints name & requested fields.
for aPulse in pulsesJSON["results"]:
name = aPulse.get('name')
description = aPulse.get('description')
modified = aPulse.get('modified')
pulseid = aPulse.get('id')
'''
If needed, add more categories to pull for each pulse here.
'''
#list with data to add to csv file
raw_data = [{'Pulse ID': pulseid, 'Name': name, 'Description': description, 'Modified': modified}]
#the path to the file
filename = 'shenzi_pulses.csv'
        #use to check for the file
        file_exists = os.path.isfile(filename)
        #opens the file to append ID, Name, Modified, Description
        with open(filename, "a") as csv_file:
            csv_columns_headers = ['Pulse ID','Name','Description','Modified']
            writer = csv.DictWriter(csv_file, delimiter=',',lineterminator='\n', fieldnames=csv_columns_headers)
            #if the file does not exist yet, write the headers first
            if not file_exists:
                writer.writeheader()
            #write the information from raw_data by rows
            for data in raw_data:
                writer.writerow(data)
#simple option to email or quit
option = input('1: To Email 2: To quit : ')
option = int(option)
if option == 1:
#uses the email function to send email
sendemail()
#delete file once email has sent
os.remove('pulseIdsList.csv')
elif option == 2:
#option to quit
        raise SystemExit()
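# Illustrative invocation sketch (an assumption, not part of the original
# script): main.py is expected to expose `otx` as an authenticated client,
# e.g. otx = OTXv2('<YOUR_API_KEY>'), before tools() is called.
# if __name__ == '__main__':
#     tools()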
|
flexible
|
{
"blob_id": "659f45d2c6c7138f26b4a8d15d1710ae60450b08",
"index": 6278,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef tools():\n search = str(input('Please enter search: '))\n search.strip()\n pulsesJSON = otx.search_pulses(search, 40)\n for aPulse in pulsesJSON['results']:\n name = aPulse.get('name')\n description = aPulse.get('description')\n modified = aPulse.get('modified')\n pulseid = aPulse.get('id')\n \"\"\"\n If needed, add more categories to pull for each pulse here.\n \"\"\"\n raw_data = [{'Pulse ID': pulseid, 'Name': name, 'Description':\n description, 'Modified': modified}]\n filename = 'shenzi_pulses.csv'\n with open(filename, 'w') as csv_file:\n csv_columns_headers = ['Pulse ID', 'Name', 'Description',\n 'Modified']\n writer = csv.DictWriter(csv_file, delimiter=',', lineterminator\n ='\\n', fieldnames=csv_columns_headers)\n if not file_exists:\n writer.writeheader()\n else:\n for data in raw_data:\n writer.writerow(data)\n option = input('1: To Email 2: To quit : ')\n option = int(option)\n if option == 1:\n sendemail()\n os.remove('pulseIdsList.csv')\n elif option == 2:\n SystemExit()\n",
"step-3": "from OTXv2 import OTXv2\nfrom pandas.io.json import json_normalize\nfrom datetime import datetime, timedelta\nimport getopt\nimport sys\nfrom sendemail import sendemail\nfrom main import otx\nimport csv\nimport pandas as pd\nfrom pandas import read_csv\nimport os.path\n\n\ndef tools():\n search = str(input('Please enter search: '))\n search.strip()\n pulsesJSON = otx.search_pulses(search, 40)\n for aPulse in pulsesJSON['results']:\n name = aPulse.get('name')\n description = aPulse.get('description')\n modified = aPulse.get('modified')\n pulseid = aPulse.get('id')\n \"\"\"\n If needed, add more categories to pull for each pulse here.\n \"\"\"\n raw_data = [{'Pulse ID': pulseid, 'Name': name, 'Description':\n description, 'Modified': modified}]\n filename = 'shenzi_pulses.csv'\n with open(filename, 'w') as csv_file:\n csv_columns_headers = ['Pulse ID', 'Name', 'Description',\n 'Modified']\n writer = csv.DictWriter(csv_file, delimiter=',', lineterminator\n ='\\n', fieldnames=csv_columns_headers)\n if not file_exists:\n writer.writeheader()\n else:\n for data in raw_data:\n writer.writerow(data)\n option = input('1: To Email 2: To quit : ')\n option = int(option)\n if option == 1:\n sendemail()\n os.remove('pulseIdsList.csv')\n elif option == 2:\n SystemExit()\n",
"step-4": "from OTXv2 import OTXv2\nfrom pandas.io.json import json_normalize\nfrom datetime import datetime, timedelta\nimport getopt\nimport sys\nfrom sendemail import sendemail\nfrom main import otx\nimport csv\nimport pandas as pd\nfrom pandas import read_csv\nimport os.path\n\ndef tools():\n\n search = str(input('Please enter search: '))\n search.strip()\n pulsesJSON = otx.search_pulses(search, 40) # Retrieves list (in json format) of top 40 pulses with tag \"crypto\"\n\n # Loops through each individual pulse retrieved from OTX, and prints name & requested fields.\n\n for aPulse in pulsesJSON[\"results\"]:\n \n name = aPulse.get('name')\n description = aPulse.get('description')\n modified = aPulse.get('modified') \n pulseid = aPulse.get('id')\n\n '''\n If needed, add more categories to pull for each pulse here.\n '''\n \n #list with data to add to csv file\n raw_data = [{'Pulse ID': pulseid, 'Name': name, 'Description': description, 'Modified': modified}]\n\n #the path to the file\n filename = 'shenzi_pulses.csv'\n \n #use to check for the file\n #file_exists = os.path.isfile(filename)\n \n #opens the file to append ID, Name, Modified, Description\n with open(filename, \"w\") as csv_file:\n csv_columns_headers = ['Pulse ID','Name','Description','Modified']\n writer = csv.DictWriter(csv_file, delimiter=',',lineterminator='\\n', fieldnames=csv_columns_headers)\n #if file does not exist write the headers\n if not file_exists:\n writer.writeheader()\n #write the information from raw_data by rows\n else:\n for data in raw_data:\n writer.writerow(data)\n\n #simple option to email or quit \n option = input('1: To Email 2: To quit : ')\n \n option = int(option)\n \n if option == 1:\n #uses the email function to send email\n sendemail()\n #delete file once email has sent\n os.remove('pulseIdsList.csv')\n elif option == 2:\n #option to quit\n SystemExit() \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
db = {
'host': "localhost",
'user': "root",
'passwd': "m74e71",
'database': "dw_toner"
}
data_inicial = '1990-01-01'
ano_final = 2018
feriados = "feriados.csv"
meses_de_ferias = (1, 2, 7, 12) #January, February, July, December (vacation months)

dias_final_semana = (1, 6, 7) #Sunday, Friday and Saturday (weekend days)
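# Illustrative consumer sketch (an assumption, not part of this config file;
# it presumes MySQL Connector/Python, which accepts these exact keys):
# import mysql.connector
# conn = mysql.connector.connect(**db)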
|
normal
|
{
"blob_id": "360881cecbad88ea5d150548fba6a39d8dc30681",
"index": 8598,
"step-1": "<mask token>\n",
"step-2": "db = {'host': 'localhost', 'user': 'root', 'passwd': 'm74e71', 'database':\n 'dw_toner'}\ndata_inicial = '1990-01-01'\nano_final = 2018\nferiados = 'feriados.csv'\nmeses_de_ferias = 1, 2, 7, 12\ndias_final_semana = 1, 6, 7\n",
"step-3": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\ndb = {\n 'host': \"localhost\",\n 'user': \"root\",\n 'passwd': \"m74e71\",\n 'database': \"dw_toner\"\n}\n\ndata_inicial = '1990-01-01'\nano_final = 2018\n\nferiados = \"feriados.csv\"\n\nmeses_de_ferias = (1, 2, 7, 12) #Janeiro, Fevereiro, Julho, Dezembro\n\ndias_final_semana = (1, 6, 7) #Domingo, sexta e sábado\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def geo_avg(x, lat, dim=2):
"""
geo_avg: to calculate weighting average according to latitude
input:
x: variable
lat: corresponding latittude
dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]
output:
result: 1d or 2d average result
"""
import numpy as np
s = x.shape
if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):
x = np.nanmean(x, axis=-1)
coslat = np.cos(lat / 180 * np.pi)
s = x.shape
if len(s) == 3:
result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1
) / np.nanmean(coslat)
if len(s) == 2:
result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(
coslat)
return result
<mask token>
def select_month(x, target_mon):
"""
select month or season from a monthly time series
input:
x: array, 1,2,3,4 dimension
target_mon:
1. number of month, from 1-12
2. name of month, e.g. Jan, Feb
3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5
4. phase name: dry: 1,2,3,12; wet: 6,7,8,9
output:
array with month selected or seasonal mean
"""
s = x.shape
n_mon = s[0]
if type(target_mon) != str:
i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]
return x[i_mon]
else:
import numpy as np
from datetime import datetime, timedelta
mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).
strftime('%b') for i in range(12)]
mon_dict = {mon_name_list[i]: i for i in range(12)}
season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9,
10], 'MAM': [2, 3, 4]}
phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}
if target_mon in mon_dict:
i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]
return x[i_mon]
elif target_mon in season_dict:
i_mon = [i for i in range(n_mon) if i % 12 in season_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'DJF':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)
else:
i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'dry':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)
def normalize(x):
"""
function to normalize data
"""
import numpy as np
return (x - np.nanmean(x)) / np.nanstd(x)
<mask token>
def moving_average(arr, n, method='nan'):
"""
calculate moving average values of 1-d array, and return an array with the same length
input:
arr: 1-d array
n: moving window length
method:
nan: fill in nan
avg: average from 0-1, 0-2, 0-3 ...
diff: only use this when calculate annual mean, n = 13
"""
import numpy as np
def moving_average_center(a, n):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
l1 = n // 2 - 1
l2 = n - l1
l = len(arr)
arr_new = np.zeros(l)
if method == 'nan':
arr_new[:l1] = np.nan
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
arr_new[l - l2 + 1:] = np.nan
if method == 'avg':
for i in range(l1):
arr_new[i] = np.nanmean(arr[:i + 1])
for i in range(l2):
arr_new[-i - 1] = np.nanmean(arr[-i - 1:])
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
if method == 'diff' and n == 13:
a2 = moving_average_center(arr, n)
diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]
).mean(axis=0)
a1 = arr[:6] - diff[6:]
a12 = np.append(a1, a2)
a3 = arr[-6:] - diff[:6]
arr_new = np.append(a12, a3)
return arr_new
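# Worked example (synthetic data, for illustration only): a 24-month series
# smoothed with a 13-point window keeps its length, and method='diff' patches
# both edges with the mean annual cycle of (raw - smoothed) instead of NaNs.
# >>> import numpy as np
# >>> arr = np.sin(np.arange(24) * 2 * np.pi / 12) + 0.1 * np.arange(24)
# >>> moving_average(arr, 13, method='diff').shape
# (24,)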
def convert_cftime_to_int(t):
"""
convert cftime to integer
input:
arr: 1-d array
n: moving window length
method:
nan: fill in nan
avg: average from 0-1, 0-2, 0-3 ...
diff: only use this when calculate annual mean, n = 13
"""
from datetime import datetime
return int(datetime.strftime(datetime.strptime(t.isoformat(),
'%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))
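# Illustrative check (assumes the cftime package for the input object):
# >>> import cftime
# >>> convert_cftime_to_int(cftime.DatetimeNoLeap(2000, 1, 31))
# 20000131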
<mask token>
<|reserved_special_token_1|>
def geo_avg(x, lat, dim=2):
"""
geo_avg: to calculate weighting average according to latitude
input:
x: variable
lat: corresponding latittude
dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]
output:
result: 1d or 2d average result
"""
import numpy as np
s = x.shape
if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):
x = np.nanmean(x, axis=-1)
coslat = np.cos(lat / 180 * np.pi)
s = x.shape
if len(s) == 3:
result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1
) / np.nanmean(coslat)
if len(s) == 2:
result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(
coslat)
return result
<mask token>
def select_month(x, target_mon):
"""
select month or season from a monthly time series
input:
x: array, 1,2,3,4 dimension
target_mon:
1. number of month, from 1-12
2. name of month, e.g. Jan, Feb
3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5
4. phase name: dry: 1,2,3,12; wet: 6,7,8,9
output:
array with month selected or seasonal mean
"""
s = x.shape
n_mon = s[0]
if type(target_mon) != str:
i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]
return x[i_mon]
else:
import numpy as np
from datetime import datetime, timedelta
mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).
strftime('%b') for i in range(12)]
mon_dict = {mon_name_list[i]: i for i in range(12)}
season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9,
10], 'MAM': [2, 3, 4]}
phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}
if target_mon in mon_dict:
i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]
return x[i_mon]
elif target_mon in season_dict:
i_mon = [i for i in range(n_mon) if i % 12 in season_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'DJF':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)
else:
i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'dry':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)
def normalize(x):
"""
function to normalize data
"""
import numpy as np
return (x - np.nanmean(x)) / np.nanstd(x)
<mask token>
def moving_average(arr, n, method='nan'):
"""
calculate moving average values of 1-d array, and return an array with the same length
input:
arr: 1-d array
n: moving window length
method:
nan: fill in nan
avg: average from 0-1, 0-2, 0-3 ...
diff: only use this when calculate annual mean, n = 13
"""
import numpy as np
def moving_average_center(a, n):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
l1 = n // 2 - 1
l2 = n - l1
l = len(arr)
arr_new = np.zeros(l)
if method == 'nan':
arr_new[:l1] = np.nan
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
arr_new[l - l2 + 1:] = np.nan
if method == 'avg':
for i in range(l1):
arr_new[i] = np.nanmean(arr[:i + 1])
for i in range(l2):
arr_new[-i - 1] = np.nanmean(arr[-i - 1:])
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
if method == 'diff' and n == 13:
a2 = moving_average_center(arr, n)
diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]
).mean(axis=0)
a1 = arr[:6] - diff[6:]
a12 = np.append(a1, a2)
a3 = arr[-6:] - diff[:6]
arr_new = np.append(a12, a3)
return arr_new
def convert_cftime_to_int(t):
"""
convert cftime to integer
input:
arr: 1-d array
n: moving window length
method:
nan: fill in nan
avg: average from 0-1, 0-2, 0-3 ...
diff: only use this when calculate annual mean, n = 13
"""
from datetime import datetime
return int(datetime.strftime(datetime.strptime(t.isoformat(),
'%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))
def get_lat_lim(lat, lat_min, lat_max):
"""
calculate a range of latitude, in both hemispheres
"""
import numpy as np
i_lat_n = np.where((lat >= lat_min) & (lat <= lat_max))[0]
i_lat_s = np.where((lat <= -lat_min) & (lat >= -lat_max))[0]
i_lats = [i_lat_s, i_lat_n]
return i_lats
<|reserved_special_token_1|>
def geo_avg(x, lat, dim=2):
"""
geo_avg: to calculate weighting average according to latitude
input:
x: variable
lat: corresponding latittude
dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]
output:
result: 1d or 2d average result
"""
import numpy as np
s = x.shape
if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):
x = np.nanmean(x, axis=-1)
coslat = np.cos(lat / 180 * np.pi)
s = x.shape
if len(s) == 3:
result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1
) / np.nanmean(coslat)
if len(s) == 2:
result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(
coslat)
return result
def cal_anomaly(x):
"""
calculate anomaly of a numpy array
input: x: 1-d,2-d,3-d or 4d numpy array, !!! the first dimension must be month
output: x with seasonal cycle removed
"""
import numpy as np
s = x.shape
n_time = s[0]
monthly_mean = np.nanmean(x.reshape([n_time // 12, 12, *s[1:]]), axis=0
).reshape([1, 12, *s[1:]]).repeat(len(x) // 12, axis=0).reshape(s)
return x - monthly_mean
def select_month(x, target_mon):
"""
select month or season from a monthly time series
input:
x: array, 1,2,3,4 dimension
target_mon:
1. number of month, from 1-12
2. name of month, e.g. Jan, Feb
3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5
4. phase name: dry: 1,2,3,12; wet: 6,7,8,9
output:
array with month selected or seasonal mean
"""
s = x.shape
n_mon = s[0]
if type(target_mon) != str:
i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]
return x[i_mon]
else:
import numpy as np
from datetime import datetime, timedelta
mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).
strftime('%b') for i in range(12)]
mon_dict = {mon_name_list[i]: i for i in range(12)}
season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9,
10], 'MAM': [2, 3, 4]}
phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}
if target_mon in mon_dict:
i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]
return x[i_mon]
elif target_mon in season_dict:
i_mon = [i for i in range(n_mon) if i % 12 in season_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'DJF':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)
else:
i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'dry':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)
def normalize(x):
"""
function to normalize data
"""
import numpy as np
return (x - np.nanmean(x)) / np.nanstd(x)
<|reserved_special_token_0|>
def moving_average(arr, n, method='nan'):
"""
calculate moving average values of 1-d array, and return an array with the same length
input:
arr: 1-d array
n: moving window length
method:
nan: fill in nan
avg: average from 0-1, 0-2, 0-3 ...
diff: only use this when calculate annual mean, n = 13
"""
import numpy as np
def moving_average_center(a, n):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
l1 = n // 2 - 1
l2 = n - l1
l = len(arr)
arr_new = np.zeros(l)
if method == 'nan':
arr_new[:l1] = np.nan
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
arr_new[l - l2 + 1:] = np.nan
if method == 'avg':
for i in range(l1):
arr_new[i] = np.nanmean(arr[:i + 1])
for i in range(l2):
arr_new[-i - 1] = np.nanmean(arr[-i - 1:])
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
if method == 'diff' and n == 13:
a2 = moving_average_center(arr, n)
diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]
).mean(axis=0)
a1 = arr[:6] - diff[6:]
a12 = np.append(a1, a2)
a3 = arr[-6:] - diff[:6]
arr_new = np.append(a12, a3)
return arr_new
def convert_cftime_to_int(t):
"""
convert cftime to integer
    input:
        t: a cftime datetime object
    output:
        the date as an integer in YYYYMMDD form, e.g. 20000131
"""
from datetime import datetime
return int(datetime.strftime(datetime.strptime(t.isoformat(),
'%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))
def get_lat_lim(lat, lat_min, lat_max):
"""
calculate a range of latitude, in both hemispheres
"""
import numpy as np
i_lat_n = np.where((lat >= lat_min) & (lat <= lat_max))[0]
i_lat_s = np.where((lat <= -lat_min) & (lat >= -lat_max))[0]
i_lats = [i_lat_s, i_lat_n]
return i_lats
<|reserved_special_token_1|>
def geo_avg(x, lat, dim=2):
"""
geo_avg: to calculate weighting average according to latitude
input:
x: variable
        lat: corresponding latitude
dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]
output:
result: 1d or 2d average result
"""
import numpy as np
s = x.shape
if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):
x = np.nanmean(x, axis=-1)
coslat = np.cos(lat / 180 * np.pi)
s = x.shape
if len(s) == 3:
result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1
) / np.nanmean(coslat)
if len(s) == 2:
result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(
coslat)
return result
def cal_anomaly(x):
"""
calculate anomaly of a numpy array
input: x: 1-d,2-d,3-d or 4d numpy array, !!! the first dimension must be month
output: x with seasonal cycle removed
"""
import numpy as np
s = x.shape
n_time = s[0]
monthly_mean = np.nanmean(x.reshape([n_time // 12, 12, *s[1:]]), axis=0
).reshape([1, 12, *s[1:]]).repeat(len(x) // 12, axis=0).reshape(s)
return x - monthly_mean
def select_month(x, target_mon):
"""
select month or season from a monthly time series
input:
x: array, 1,2,3,4 dimension
target_mon:
1. number of month, from 1-12
2. name of month, e.g. Jan, Feb
            3. season name: DJF: 1,2,12; JJA: 6,7,8; SON: 9,10,11; MAM: 3,4,5
4. phase name: dry: 1,2,3,12; wet: 6,7,8,9
output:
array with month selected or seasonal mean
"""
s = x.shape
n_mon = s[0]
if type(target_mon) != str:
i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]
return x[i_mon]
else:
import numpy as np
from datetime import datetime, timedelta
mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).
strftime('%b') for i in range(12)]
mon_dict = {mon_name_list[i]: i for i in range(12)}
season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9,
10], 'MAM': [2, 3, 4]}
phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}
if target_mon in mon_dict:
i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]
return x[i_mon]
elif target_mon in season_dict:
i_mon = [i for i in range(n_mon) if i % 12 in season_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'DJF':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)
else:
i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'dry':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)
def normalize(x):
"""
function to normalize data
"""
import numpy as np
return (x - np.nanmean(x)) / np.nanstd(x)
def find_index(arr, target, method='nearest'):
"""
    find the index of a target value in a monotonic 1-d array arr
"""
import numpy as np
if method == 'nearest':
return np.abs(arr - target).argmin()
else:
if arr[1] < arr[0]:
arr = arr[::-1]
if method == 'higher':
return np.where(arr >= target)[0][0]
if method == 'lower':
return np.where(arr <= target)[0][-1]
def moving_average(arr, n, method='nan'):
"""
calculate moving average values of 1-d array, and return an array with the same length
input:
arr: 1-d array
n: moving window length
method:
nan: fill in nan
avg: average from 0-1, 0-2, 0-3 ...
diff: only use this when calculate annual mean, n = 13
"""
import numpy as np
def moving_average_center(a, n):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
l1 = n // 2 - 1
l2 = n - l1
l = len(arr)
arr_new = np.zeros(l)
if method == 'nan':
arr_new[:l1] = np.nan
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
arr_new[l - l2 + 1:] = np.nan
if method == 'avg':
for i in range(l1):
arr_new[i] = np.nanmean(arr[:i + 1])
for i in range(l2):
arr_new[-i - 1] = np.nanmean(arr[-i - 1:])
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
if method == 'diff' and n == 13:
a2 = moving_average_center(arr, n)
diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]
).mean(axis=0)
a1 = arr[:6] - diff[6:]
a12 = np.append(a1, a2)
a3 = arr[-6:] - diff[:6]
arr_new = np.append(a12, a3)
return arr_new
def convert_cftime_to_int(t):
"""
convert cftime to integer
    input:
        t: a cftime datetime object
    output:
        the date as an integer in YYYYMMDD form, e.g. 20000131
"""
from datetime import datetime
return int(datetime.strftime(datetime.strptime(t.isoformat(),
'%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))
def get_lat_lim(lat, lat_min, lat_max):
"""
calculate a range of latitude, in both hemispheres
"""
import numpy as np
i_lat_n = np.where((lat >= lat_min) & (lat <= lat_max))[0]
i_lat_s = np.where((lat <= -lat_min) & (lat >= -lat_max))[0]
i_lats = [i_lat_s, i_lat_n]
return i_lats
<|reserved_special_token_1|>
def geo_avg(x,lat,dim=2):
'''
geo_avg: to calculate weighting average according to latitude
input:
x: variable
lat: corresponding latittude
dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]
output:
result: 1d or 2d average result
'''
import numpy as np
s = x.shape
if ((len(s)==4) & (dim==2)) or ((len(s)==3) & (dim==1)):
x = np.nanmean(x,axis=-1)
coslat = np.cos(lat/180*np.pi)
s = x.shape
if len(s)==3:
result = np.nanmean(x*coslat[np.newaxis,np.newaxis,:],axis=-1)/np.nanmean(coslat)
if len(s)==2:
result = np.nanmean(x*coslat[np.newaxis,:],axis=-1)/np.nanmean(coslat)
return result
def cal_anomaly(x):
'''
calculate anomaly of a numpy array
input: x: 1-d,2-d,3-d or 4d numpy array, !!! the first dimension must be month
output: x with seasonal cycle removed
'''
import numpy as np
s = x.shape
n_time = s[0]
monthly_mean = np.nanmean(x.reshape([n_time//12,12,*s[1:]]),axis=0).\
reshape([1,12,*s[1:]]).repeat(len(x)//12,axis=0).reshape(s)
return x-monthly_mean
def select_month(x,target_mon):
'''
select month or season from a monthly time series
input:
x: array, 1,2,3,4 dimension
target_mon:
1. number of month, from 1-12
2. name of month, e.g. Jan, Feb
            3. season name: DJF: 1,2,12; JJA: 6,7,8; SON: 9,10,11; MAM: 3,4,5
4. phase name: dry: 1,2,3,12; wet: 6,7,8,9
output:
array with month selected or seasonal mean
'''
s = x.shape
n_mon = s[0]
if type(target_mon) != str:
i_mon = [i for i in range(n_mon) if i%12 == target_mon-1]
return x[i_mon]
else:
import numpy as np
from datetime import datetime,timedelta
mon_name_list = [(datetime(2000,1,1)+timedelta(days=31*i)).strftime("%b") for i in range(12)]
mon_dict = {mon_name_list[i]:i for i in range(12)}
season_dict = {'DJF':[0,1,11],'JJA':[5,6,7],'SON':[8,9,10],'MAM':[2,3,4]}
phase_dict = {'dry':[0,1,2,11],'wet':[5,6,7,8]}
if target_mon in mon_dict:
i_mon = [i for i in range(n_mon) if i%12 == mon_dict[target_mon]]
return x[i_mon]
elif target_mon in season_dict:
i_mon = [i for i in range(n_mon) if i%12 in season_dict[target_mon]]
x_mon = x[i_mon]
if target_mon == 'DJF':
x_mon = np.append(np.nan,x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0]//12,3,*s[1:]]),axis=1)
else:
i_mon = [i for i in range(n_mon) if i%12 in phase_dict[target_mon]]
x_mon = x[i_mon]
if target_mon == 'dry':
x_mon = np.append(np.nan,x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0]//12,4,*s[1:]]),axis=1)
def normalize(x):
'''
function to normalize data
'''
import numpy as np
return (x-np.nanmean(x))/np.nanstd(x)
def find_index(arr,target,method='nearest'):
'''
    find the index of a target value in a monotonic 1-d array arr
'''
import numpy as np
if method == 'nearest':
return (np.abs(arr - target)).argmin()
else:
if arr[1]<arr[0]: ## if x is a decreasing array, reverse
arr = arr[::-1]
if method == 'higher':
return np.where(arr>=target)[0][0]
if method == 'lower':
return np.where(arr<=target)[0][-1]
def moving_average(arr,n,method = 'nan'):
'''
calculate moving average values of 1-d array, and return an array with the same length
input:
arr: 1-d array
n: moving window length
method:
nan: fill in nan
avg: average from 0-1, 0-2, 0-3 ...
diff: only use this when calculate annual mean, n = 13
'''
import numpy as np
def moving_average_center(a, n) :
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
l1 = n//2-1
l2 = n-l1
l = len(arr)
arr_new = np.zeros(l)
if method == 'nan':
arr_new[:l1] = np.nan
arr_new[l1:l-l2+1] = moving_average_center(arr, n)
arr_new[l-l2+1:] = np.nan
if method == 'avg':
for i in range(l1):
arr_new[i] = np.nanmean(arr[:i+1])
for i in range(l2):
arr_new[-i-1] = np.nanmean(arr[-i-1:])
arr_new[l1:l-l2+1] = moving_average_center(arr, n)
if method == 'diff' and n==13:
a2 = moving_average_center(arr, n)
diff = (arr[l1:l-l2+1]-a2).reshape([(len(arr)-n+1)//12,12]).mean(axis=0) # monthly mean difference between arr and running mean
a1 = arr[:6] - diff[6:]
a12 = np.append(a1,a2)
a3 = arr[-6:] - diff[:6]
arr_new = np.append(a12,a3)
return arr_new
def convert_cftime_to_int(t):
'''
convert cftime to integer
    input:
        t: a cftime datetime object
    output:
        the date as an integer in YYYYMMDD form, e.g. 20000131
'''
from datetime import datetime
return int(datetime.strftime(datetime.strptime(t.isoformat(),'%Y-%m-%dT%H:%M:%S'),
'%Y%m%d'))
def get_lat_lim(lat,lat_min,lat_max):
'''
calculate a range of latitude, in both hemispheres
'''
import numpy as np
i_lat_n = np.where((lat>=lat_min) & (lat<=lat_max))[0]
i_lat_s = np.where((lat<=-lat_min) & (lat>=-lat_max))[0]
i_lats = [i_lat_s,i_lat_n]
return i_lats
|
flexible
|
{
"blob_id": "a2871585ce36888cf89c4dc5a6a7de6b212412bb",
"index": 1153,
"step-1": "def geo_avg(x, lat, dim=2):\n \"\"\"\n geo_avg: to calculate weighting average according to latitude\n input: \n x: variable \n lat: corresponding latittude\n dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]\n output:\n result: 1d or 2d average result \n \"\"\"\n import numpy as np\n s = x.shape\n if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):\n x = np.nanmean(x, axis=-1)\n coslat = np.cos(lat / 180 * np.pi)\n s = x.shape\n if len(s) == 3:\n result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1\n ) / np.nanmean(coslat)\n if len(s) == 2:\n result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(\n coslat)\n return result\n\n\n<mask token>\n\n\ndef select_month(x, target_mon):\n \"\"\"\n select month or season from a monthly time series\n input: \n x: array, 1,2,3,4 dimension\n target_mon: \n 1. number of month, from 1-12 \n 2. name of month, e.g. Jan, Feb\n 3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5\n 4. phase name: dry: 1,2,3,12; wet: 6,7,8,9\n output: \n array with month selected or seasonal mean \n \"\"\"\n s = x.shape\n n_mon = s[0]\n if type(target_mon) != str:\n i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]\n return x[i_mon]\n else:\n import numpy as np\n from datetime import datetime, timedelta\n mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).\n strftime('%b') for i in range(12)]\n mon_dict = {mon_name_list[i]: i for i in range(12)}\n season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9, \n 10], 'MAM': [2, 3, 4]}\n phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}\n if target_mon in mon_dict:\n i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]\n return x[i_mon]\n elif target_mon in season_dict:\n i_mon = [i for i in range(n_mon) if i % 12 in season_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'DJF':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)\n else:\n i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'dry':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)\n\n\ndef normalize(x):\n \"\"\"\n function to normalize data \n \"\"\"\n import numpy as np\n return (x - np.nanmean(x)) / np.nanstd(x)\n\n\n<mask token>\n\n\ndef moving_average(arr, n, method='nan'):\n \"\"\"\n calculate moving average values of 1-d array, and return an array with the same length \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n \"\"\"\n import numpy as np\n\n def moving_average_center(a, n):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n l1 = n // 2 - 1\n l2 = n - l1\n l = len(arr)\n arr_new = np.zeros(l)\n if method == 'nan':\n arr_new[:l1] = np.nan\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n arr_new[l - l2 + 1:] = np.nan\n if method == 'avg':\n for i in range(l1):\n arr_new[i] = np.nanmean(arr[:i + 1])\n for i in range(l2):\n arr_new[-i - 1] = np.nanmean(arr[-i - 1:])\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n if method == 'diff' and n == 13:\n a2 = moving_average_center(arr, n)\n diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]\n ).mean(axis=0)\n a1 = arr[:6] - diff[6:]\n a12 = np.append(a1, a2)\n 
a3 = arr[-6:] - diff[:6]\n arr_new = np.append(a12, a3)\n return arr_new\n\n\ndef convert_cftime_to_int(t):\n \"\"\"\n convert cftime to integer \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n \"\"\"\n from datetime import datetime\n return int(datetime.strftime(datetime.strptime(t.isoformat(),\n '%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))\n\n\n<mask token>\n",
"step-2": "def geo_avg(x, lat, dim=2):\n \"\"\"\n geo_avg: to calculate weighting average according to latitude\n input: \n x: variable \n lat: corresponding latittude\n dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]\n output:\n result: 1d or 2d average result \n \"\"\"\n import numpy as np\n s = x.shape\n if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):\n x = np.nanmean(x, axis=-1)\n coslat = np.cos(lat / 180 * np.pi)\n s = x.shape\n if len(s) == 3:\n result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1\n ) / np.nanmean(coslat)\n if len(s) == 2:\n result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(\n coslat)\n return result\n\n\n<mask token>\n\n\ndef select_month(x, target_mon):\n \"\"\"\n select month or season from a monthly time series\n input: \n x: array, 1,2,3,4 dimension\n target_mon: \n 1. number of month, from 1-12 \n 2. name of month, e.g. Jan, Feb\n 3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5\n 4. phase name: dry: 1,2,3,12; wet: 6,7,8,9\n output: \n array with month selected or seasonal mean \n \"\"\"\n s = x.shape\n n_mon = s[0]\n if type(target_mon) != str:\n i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]\n return x[i_mon]\n else:\n import numpy as np\n from datetime import datetime, timedelta\n mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).\n strftime('%b') for i in range(12)]\n mon_dict = {mon_name_list[i]: i for i in range(12)}\n season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9, \n 10], 'MAM': [2, 3, 4]}\n phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}\n if target_mon in mon_dict:\n i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]\n return x[i_mon]\n elif target_mon in season_dict:\n i_mon = [i for i in range(n_mon) if i % 12 in season_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'DJF':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)\n else:\n i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'dry':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)\n\n\ndef normalize(x):\n \"\"\"\n function to normalize data \n \"\"\"\n import numpy as np\n return (x - np.nanmean(x)) / np.nanstd(x)\n\n\n<mask token>\n\n\ndef moving_average(arr, n, method='nan'):\n \"\"\"\n calculate moving average values of 1-d array, and return an array with the same length \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n \"\"\"\n import numpy as np\n\n def moving_average_center(a, n):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n l1 = n // 2 - 1\n l2 = n - l1\n l = len(arr)\n arr_new = np.zeros(l)\n if method == 'nan':\n arr_new[:l1] = np.nan\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n arr_new[l - l2 + 1:] = np.nan\n if method == 'avg':\n for i in range(l1):\n arr_new[i] = np.nanmean(arr[:i + 1])\n for i in range(l2):\n arr_new[-i - 1] = np.nanmean(arr[-i - 1:])\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n if method == 'diff' and n == 13:\n a2 = moving_average_center(arr, n)\n diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]\n ).mean(axis=0)\n a1 = arr[:6] - diff[6:]\n a12 = np.append(a1, a2)\n 
a3 = arr[-6:] - diff[:6]\n arr_new = np.append(a12, a3)\n return arr_new\n\n\ndef convert_cftime_to_int(t):\n \"\"\"\n convert cftime to integer \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n \"\"\"\n from datetime import datetime\n return int(datetime.strftime(datetime.strptime(t.isoformat(),\n '%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))\n\n\ndef get_lat_lim(lat, lat_min, lat_max):\n \"\"\"\n calculate a range of latitude, in both hemispheres\n \"\"\"\n import numpy as np\n i_lat_n = np.where((lat >= lat_min) & (lat <= lat_max))[0]\n i_lat_s = np.where((lat <= -lat_min) & (lat >= -lat_max))[0]\n i_lats = [i_lat_s, i_lat_n]\n return i_lats\n",
"step-3": "def geo_avg(x, lat, dim=2):\n \"\"\"\n geo_avg: to calculate weighting average according to latitude\n input: \n x: variable \n lat: corresponding latittude\n dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]\n output:\n result: 1d or 2d average result \n \"\"\"\n import numpy as np\n s = x.shape\n if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):\n x = np.nanmean(x, axis=-1)\n coslat = np.cos(lat / 180 * np.pi)\n s = x.shape\n if len(s) == 3:\n result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1\n ) / np.nanmean(coslat)\n if len(s) == 2:\n result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(\n coslat)\n return result\n\n\ndef cal_anomaly(x):\n \"\"\"\n calculate anomaly of a numpy array \n input: x: 1-d,2-d,3-d or 4d numpy array, !!! the first dimension must be month \n output: x with seasonal cycle removed \n \"\"\"\n import numpy as np\n s = x.shape\n n_time = s[0]\n monthly_mean = np.nanmean(x.reshape([n_time // 12, 12, *s[1:]]), axis=0\n ).reshape([1, 12, *s[1:]]).repeat(len(x) // 12, axis=0).reshape(s)\n return x - monthly_mean\n\n\ndef select_month(x, target_mon):\n \"\"\"\n select month or season from a monthly time series\n input: \n x: array, 1,2,3,4 dimension\n target_mon: \n 1. number of month, from 1-12 \n 2. name of month, e.g. Jan, Feb\n 3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5\n 4. phase name: dry: 1,2,3,12; wet: 6,7,8,9\n output: \n array with month selected or seasonal mean \n \"\"\"\n s = x.shape\n n_mon = s[0]\n if type(target_mon) != str:\n i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]\n return x[i_mon]\n else:\n import numpy as np\n from datetime import datetime, timedelta\n mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).\n strftime('%b') for i in range(12)]\n mon_dict = {mon_name_list[i]: i for i in range(12)}\n season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9, \n 10], 'MAM': [2, 3, 4]}\n phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}\n if target_mon in mon_dict:\n i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]\n return x[i_mon]\n elif target_mon in season_dict:\n i_mon = [i for i in range(n_mon) if i % 12 in season_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'DJF':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)\n else:\n i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'dry':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)\n\n\ndef normalize(x):\n \"\"\"\n function to normalize data \n \"\"\"\n import numpy as np\n return (x - np.nanmean(x)) / np.nanstd(x)\n\n\n<mask token>\n\n\ndef moving_average(arr, n, method='nan'):\n \"\"\"\n calculate moving average values of 1-d array, and return an array with the same length \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n \"\"\"\n import numpy as np\n\n def moving_average_center(a, n):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n l1 = n // 2 - 1\n l2 = n - l1\n l = len(arr)\n arr_new = np.zeros(l)\n if method == 'nan':\n arr_new[:l1] = np.nan\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n arr_new[l - l2 + 1:] = np.nan\n if method == 
'avg':\n for i in range(l1):\n arr_new[i] = np.nanmean(arr[:i + 1])\n for i in range(l2):\n arr_new[-i - 1] = np.nanmean(arr[-i - 1:])\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n if method == 'diff' and n == 13:\n a2 = moving_average_center(arr, n)\n diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]\n ).mean(axis=0)\n a1 = arr[:6] - diff[6:]\n a12 = np.append(a1, a2)\n a3 = arr[-6:] - diff[:6]\n arr_new = np.append(a12, a3)\n return arr_new\n\n\ndef convert_cftime_to_int(t):\n \"\"\"\n convert cftime to integer \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n \"\"\"\n from datetime import datetime\n return int(datetime.strftime(datetime.strptime(t.isoformat(),\n '%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))\n\n\ndef get_lat_lim(lat, lat_min, lat_max):\n \"\"\"\n calculate a range of latitude, in both hemispheres\n \"\"\"\n import numpy as np\n i_lat_n = np.where((lat >= lat_min) & (lat <= lat_max))[0]\n i_lat_s = np.where((lat <= -lat_min) & (lat >= -lat_max))[0]\n i_lats = [i_lat_s, i_lat_n]\n return i_lats\n",
"step-4": "def geo_avg(x, lat, dim=2):\n \"\"\"\n geo_avg: to calculate weighting average according to latitude\n input: \n x: variable \n lat: corresponding latittude\n dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]\n output:\n result: 1d or 2d average result \n \"\"\"\n import numpy as np\n s = x.shape\n if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):\n x = np.nanmean(x, axis=-1)\n coslat = np.cos(lat / 180 * np.pi)\n s = x.shape\n if len(s) == 3:\n result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1\n ) / np.nanmean(coslat)\n if len(s) == 2:\n result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(\n coslat)\n return result\n\n\ndef cal_anomaly(x):\n \"\"\"\n calculate anomaly of a numpy array \n input: x: 1-d,2-d,3-d or 4d numpy array, !!! the first dimension must be month \n output: x with seasonal cycle removed \n \"\"\"\n import numpy as np\n s = x.shape\n n_time = s[0]\n monthly_mean = np.nanmean(x.reshape([n_time // 12, 12, *s[1:]]), axis=0\n ).reshape([1, 12, *s[1:]]).repeat(len(x) // 12, axis=0).reshape(s)\n return x - monthly_mean\n\n\ndef select_month(x, target_mon):\n \"\"\"\n select month or season from a monthly time series\n input: \n x: array, 1,2,3,4 dimension\n target_mon: \n 1. number of month, from 1-12 \n 2. name of month, e.g. Jan, Feb\n 3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5\n 4. phase name: dry: 1,2,3,12; wet: 6,7,8,9\n output: \n array with month selected or seasonal mean \n \"\"\"\n s = x.shape\n n_mon = s[0]\n if type(target_mon) != str:\n i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]\n return x[i_mon]\n else:\n import numpy as np\n from datetime import datetime, timedelta\n mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).\n strftime('%b') for i in range(12)]\n mon_dict = {mon_name_list[i]: i for i in range(12)}\n season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9, \n 10], 'MAM': [2, 3, 4]}\n phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}\n if target_mon in mon_dict:\n i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]\n return x[i_mon]\n elif target_mon in season_dict:\n i_mon = [i for i in range(n_mon) if i % 12 in season_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'DJF':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)\n else:\n i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[\n target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'dry':\n x_mon = np.append(np.nan, x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)\n\n\ndef normalize(x):\n \"\"\"\n function to normalize data \n \"\"\"\n import numpy as np\n return (x - np.nanmean(x)) / np.nanstd(x)\n\n\ndef find_index(arr, target, method='nearest'):\n \"\"\"\n find an index of target value from amonotonous 1-d array arr\n \"\"\"\n import numpy as np\n if method == 'nearest':\n return np.abs(arr - target).argmin()\n else:\n if arr[1] < arr[0]:\n arr = arr[::-1]\n if method == 'higher':\n return np.where(arr >= target)[0][0]\n if method == 'lower':\n return np.where(arr <= target)[0][-1]\n\n\ndef moving_average(arr, n, method='nan'):\n \"\"\"\n calculate moving average values of 1-d array, and return an array with the same length \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n 
\"\"\"\n import numpy as np\n\n def moving_average_center(a, n):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n l1 = n // 2 - 1\n l2 = n - l1\n l = len(arr)\n arr_new = np.zeros(l)\n if method == 'nan':\n arr_new[:l1] = np.nan\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n arr_new[l - l2 + 1:] = np.nan\n if method == 'avg':\n for i in range(l1):\n arr_new[i] = np.nanmean(arr[:i + 1])\n for i in range(l2):\n arr_new[-i - 1] = np.nanmean(arr[-i - 1:])\n arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)\n if method == 'diff' and n == 13:\n a2 = moving_average_center(arr, n)\n diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]\n ).mean(axis=0)\n a1 = arr[:6] - diff[6:]\n a12 = np.append(a1, a2)\n a3 = arr[-6:] - diff[:6]\n arr_new = np.append(a12, a3)\n return arr_new\n\n\ndef convert_cftime_to_int(t):\n \"\"\"\n convert cftime to integer \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n \"\"\"\n from datetime import datetime\n return int(datetime.strftime(datetime.strptime(t.isoformat(),\n '%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))\n\n\ndef get_lat_lim(lat, lat_min, lat_max):\n \"\"\"\n calculate a range of latitude, in both hemispheres\n \"\"\"\n import numpy as np\n i_lat_n = np.where((lat >= lat_min) & (lat <= lat_max))[0]\n i_lat_s = np.where((lat <= -lat_min) & (lat >= -lat_max))[0]\n i_lats = [i_lat_s, i_lat_n]\n return i_lats\n",
"step-5": "def geo_avg(x,lat,dim=2):\n '''\n geo_avg: to calculate weighting average according to latitude\n input: \n x: variable \n lat: corresponding latittude\n dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]\n output:\n result: 1d or 2d average result \n '''\n import numpy as np\n s = x.shape\n if ((len(s)==4) & (dim==2)) or ((len(s)==3) & (dim==1)):\n x = np.nanmean(x,axis=-1)\n coslat = np.cos(lat/180*np.pi)\n s = x.shape\n if len(s)==3:\n result = np.nanmean(x*coslat[np.newaxis,np.newaxis,:],axis=-1)/np.nanmean(coslat)\n if len(s)==2:\n result = np.nanmean(x*coslat[np.newaxis,:],axis=-1)/np.nanmean(coslat)\n return result\n\ndef cal_anomaly(x):\n '''\n calculate anomaly of a numpy array \n input: x: 1-d,2-d,3-d or 4d numpy array, !!! the first dimension must be month \n output: x with seasonal cycle removed \n '''\n import numpy as np\n s = x.shape\n n_time = s[0]\n monthly_mean = np.nanmean(x.reshape([n_time//12,12,*s[1:]]),axis=0).\\\n reshape([1,12,*s[1:]]).repeat(len(x)//12,axis=0).reshape(s)\n return x-monthly_mean\n\ndef select_month(x,target_mon):\n '''\n select month or season from a monthly time series\n input: \n x: array, 1,2,3,4 dimension\n target_mon: \n 1. number of month, from 1-12 \n 2. name of month, e.g. Jan, Feb\n 3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5\n 4. phase name: dry: 1,2,3,12; wet: 6,7,8,9\n output: \n array with month selected or seasonal mean \n '''\n s = x.shape\n n_mon = s[0]\n if type(target_mon) != str:\n i_mon = [i for i in range(n_mon) if i%12 == target_mon-1]\n return x[i_mon]\n else:\n import numpy as np\n from datetime import datetime,timedelta\n mon_name_list = [(datetime(2000,1,1)+timedelta(days=31*i)).strftime(\"%b\") for i in range(12)]\n mon_dict = {mon_name_list[i]:i for i in range(12)}\n season_dict = {'DJF':[0,1,11],'JJA':[5,6,7],'SON':[8,9,10],'MAM':[2,3,4]}\n phase_dict = {'dry':[0,1,2,11],'wet':[5,6,7,8]}\n \n if target_mon in mon_dict:\n i_mon = [i for i in range(n_mon) if i%12 == mon_dict[target_mon]]\n return x[i_mon]\n elif target_mon in season_dict:\n i_mon = [i for i in range(n_mon) if i%12 in season_dict[target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'DJF':\n x_mon = np.append(np.nan,x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0]//12,3,*s[1:]]),axis=1)\n else:\n i_mon = [i for i in range(n_mon) if i%12 in phase_dict[target_mon]]\n x_mon = x[i_mon]\n if target_mon == 'dry':\n x_mon = np.append(np.nan,x_mon[:-1])\n return np.nanmean(x_mon.reshape([s[0]//12,4,*s[1:]]),axis=1)\n\ndef normalize(x):\n '''\n function to normalize data \n '''\n import numpy as np\n return (x-np.nanmean(x))/np.nanstd(x)\n\ndef find_index(arr,target,method='nearest'):\n '''\n find an index of target value from amonotonous 1-d array arr\n '''\n import numpy as np\n if method == 'nearest':\n return (np.abs(arr - target)).argmin()\n else:\n if arr[1]<arr[0]: ## if x is a decreasing array, reverse \n arr = arr[::-1] \n if method == 'higher':\n return np.where(arr>=target)[0][0]\n if method == 'lower':\n return np.where(arr<=target)[0][-1]\n \n \ndef moving_average(arr,n,method = 'nan'):\n '''\n calculate moving average values of 1-d array, and return an array with the same length \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n '''\n import numpy as np\n def moving_average_center(a, n) :\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - 
ret[:-n]\n return ret[n - 1:] / n\n l1 = n//2-1\n l2 = n-l1\n l = len(arr)\n arr_new = np.zeros(l)\n if method == 'nan':\n arr_new[:l1] = np.nan\n arr_new[l1:l-l2+1] = moving_average_center(arr, n)\n arr_new[l-l2+1:] = np.nan\n if method == 'avg':\n for i in range(l1):\n arr_new[i] = np.nanmean(arr[:i+1])\n for i in range(l2):\n arr_new[-i-1] = np.nanmean(arr[-i-1:])\n arr_new[l1:l-l2+1] = moving_average_center(arr, n)\n if method == 'diff' and n==13:\n a2 = moving_average_center(arr, n)\n diff = (arr[l1:l-l2+1]-a2).reshape([(len(arr)-n+1)//12,12]).mean(axis=0) # monthly mean difference between arr and running mean\n a1 = arr[:6] - diff[6:]\n a12 = np.append(a1,a2)\n a3 = arr[-6:] - diff[:6]\n arr_new = np.append(a12,a3)\n return arr_new\n\ndef convert_cftime_to_int(t):\n '''\n convert cftime to integer \n input:\n arr: 1-d array \n n: moving window length \n method:\n nan: fill in nan \n avg: average from 0-1, 0-2, 0-3 ...\n diff: only use this when calculate annual mean, n = 13\n '''\n from datetime import datetime\n return int(datetime.strftime(datetime.strptime(t.isoformat(),'%Y-%m-%dT%H:%M:%S'),\n '%Y%m%d'))\n\ndef get_lat_lim(lat,lat_min,lat_max):\n '''\n calculate a range of latitude, in both hemispheres\n '''\n import numpy as np\n i_lat_n = np.where((lat>=lat_min) & (lat<=lat_max))[0]\n i_lat_s = np.where((lat<=-lat_min) & (lat>=-lat_max))[0]\n i_lats = [i_lat_s,i_lat_n]\n return i_lats\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
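# A minimal usage sketch of the utilities above (geo_avg, cal_anomaly,
# select_month, moving_average). Assumes those functions are in scope; the
# synthetic shapes and values below are illustrative assumptions, not part of
# the original record.
import numpy as np

rng = np.random.default_rng(0)
lat = np.linspace(-90, 90, 37)
x = rng.standard_normal((120, 37, 72))            # 10 years of monthly [time, lat, lon] data

zonal = geo_avg(x, lat, dim=1)                    # area-weighted global-mean series, shape (120,)
anom = cal_anomaly(x)                             # field with the mean seasonal cycle removed
djf = select_month(zonal, 'DJF')                  # one DJF mean per year, shape (10,)
smooth = moving_average(zonal, 13, method='nan')  # 13-month running mean, same length as input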
lista = []
z = 0
j = 9
for i in range(0, 10):
lista.append(int(input()))
while z < j:
c = lista[z]
lista[z] = lista[j]
lista[j] = c
z += 1
j -= 1
print(lista)
|
normal
|
{
"blob_id": "01ede703e36268dc9b3331b21726c24674a43817",
"index": 1338,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, 10):\n lista.append(int(input()))\nwhile z < j:\n c = lista[z]\n lista[z] = lista[j]\n lista[j] = c\n z += 1\n j -= 1\nprint(lista)\n",
"step-3": "lista = []\nz = 0\nj = 9\nfor i in range(0, 10):\n lista.append(int(input()))\nwhile z < j:\n c = lista[z]\n lista[z] = lista[j]\n lista[j] = c\n z += 1\n j -= 1\nprint(lista)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
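# For comparison only: the swap loop above reverses the list in place; the
# same effect, assuming the same ten-integer input, via the built-ins:
lista = [int(input()) for _ in range(10)]
lista.reverse()        # in-place reversal; lista[::-1] would build a reversed copy instead
print(lista)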
<|reserved_special_token_0|>
def avg(x):
return [(sum(x[i]) / row) for i in range(col)]
<|reserved_special_token_0|>
def cov(x, md_x):
cov_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
for k in range(row):
                cov_xy[i][j] += (x[i][k] - md_x[i]) * (x[j][k] - md_x[j]
                    ) / row
return cov_xy
def cor(cov, sd_x):
cor_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])
print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],
'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))
return cor_xy
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def avg(x):
return [(sum(x[i]) / row) for i in range(col)]
def sd(x):
return [np.std(x[i]) for i in range(col)]
def cov(x, md_x):
cov_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
for k in range(row):
                cov_xy[i][j] += (x[i][k] - md_x[i]) * (x[j][k] - md_x[j]
                    ) / row
return cov_xy
def cor(cov, sd_x):
cor_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])
print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],
'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))
return cor_xy
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def avg(x):
return [(sum(x[i]) / row) for i in range(col)]
def sd(x):
return [np.std(x[i]) for i in range(col)]
def cov(x, md_x):
cov_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
for k in range(row):
                cov_xy[i][j] += (x[i][k] - md_x[i]) * (x[j][k] - md_x[j]
                    ) / row
return cov_xy
def cor(cov, sd_x):
cor_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])
print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],
'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))
return cor_xy
if __name__ == '__main__':
argv = sys.argv[:]
if len(argv) < 2:
print('1 argument required. Provide data file name')
sys.exit(0)
data = pd.read_csv(argv[1], header=None)
row = data.shape[0]
col = data.shape[1]
print('** dataset dimensions **')
print(row)
print(col)
mean = avg(data)
stdev = sd(data)
print(stdev)
covar = cov(data, mean)
correl = cor(covar, stdev)
print('---------CORRELATION MATRIX---------')
print(correl)
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import sys
def avg(x):
return [(sum(x[i]) / row) for i in range(col)]
def sd(x):
return [np.std(x[i]) for i in range(col)]
def cov(x, md_x):
cov_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
for k in range(row):
                cov_xy[i][j] += (x[i][k] - md_x[i]) * (x[j][k] - md_x[j]
                    ) / row
return cov_xy
def cor(cov, sd_x):
cor_xy = [[(0) for r in range(col)] for c in range(col)]
for i in range(col):
for j in range(col):
cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])
print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],
'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))
return cor_xy
if __name__ == '__main__':
argv = sys.argv[:]
if len(argv) < 2:
print('1 argument required. Provide data file name')
sys.exit(0)
data = pd.read_csv(argv[1], header=None)
row = data.shape[0]
col = data.shape[1]
print('** dataset dimensions **')
print(row)
print(col)
mean = avg(data)
stdev = sd(data)
print(stdev)
covar = cov(data, mean)
correl = cor(covar, stdev)
print('---------CORRELATION MATRIX---------')
print(correl)
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import sys
def avg (x):
return [sum(x[i])/row for i in range(col)]
def sd (x):
return [np.std(x[i]) for i in range(col)]
def cov (x, md_x):
cov_xy=[[0 for r in range(col)] for c in range(col)]
for i in range(col):
for j in range (col):
for k in range (row):
                cov_xy[i][j]+=((x[i][k]-md_x[i])*(x[j][k]-md_x[j]))/(row)
return(cov_xy)
def cor (cov, sd_x):
cor_xy=[[0 for r in range(col)] for c in range(col)]
for i in range(col):
for j in range (col):
cor_xy[i][j] = cov[i][j]/(sd_x[i]*sd_x[j])
print("cov= ",cov[i][j],"sd i", sd_x[i], " sd k", sd_x[j],"cov/sd", cov[i][j]/(sd_x[i]*sd_x[j]))
return(cor_xy)
if __name__ == "__main__":
argv=sys.argv[:]
if len(argv)<2:
print("1 argument required. Provide data file name")
sys.exit(0)
data=pd.read_csv(argv[1],header= None)
row=data.shape[0]
col=data.shape[1]
print("** dataset dimensions **")
print(row)
print(col)
mean=avg(data)
stdev=sd(data)
print(stdev)
covar=cov(data, mean)
correl=cor(covar, stdev)
print("---------CORRELATION MATRIX---------")
print(correl)
|
flexible
|
{
"blob_id": "ad3c5ed3d6a9aa83e69f53d3fec845e8e2b1c9c6",
"index": 883,
"step-1": "<mask token>\n\n\ndef avg(x):\n return [(sum(x[i]) / row) for i in range(col)]\n\n\n<mask token>\n\n\ndef cov(x, md_x):\n cov_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n for k in range(row):\n cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]\n ) / row\n return cov_xy\n\n\ndef cor(cov, sd_x):\n cor_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])\n print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],\n 'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))\n return cor_xy\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef avg(x):\n return [(sum(x[i]) / row) for i in range(col)]\n\n\ndef sd(x):\n return [np.std(x[i]) for i in range(col)]\n\n\ndef cov(x, md_x):\n cov_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n for k in range(row):\n cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]\n ) / row\n return cov_xy\n\n\ndef cor(cov, sd_x):\n cor_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])\n print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],\n 'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))\n return cor_xy\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef avg(x):\n return [(sum(x[i]) / row) for i in range(col)]\n\n\ndef sd(x):\n return [np.std(x[i]) for i in range(col)]\n\n\ndef cov(x, md_x):\n cov_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n for k in range(row):\n cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]\n ) / row\n return cov_xy\n\n\ndef cor(cov, sd_x):\n cor_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])\n print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],\n 'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))\n return cor_xy\n\n\nif __name__ == '__main__':\n argv = sys.argv[:]\n if len(argv) < 2:\n print('1 argument required. Provide data file name')\n sys.exit(0)\n data = pd.read_csv(argv[1], header=None)\n row = data.shape[0]\n col = data.shape[1]\n print('** dataset dimensions **')\n print(row)\n print(col)\n mean = avg(data)\n stdev = sd(data)\n print(stdev)\n covar = cov(data, mean)\n correl = cor(covar, stdev)\n print('---------CORRELATION MATRIX---------')\n print(correl)\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport sys\n\n\ndef avg(x):\n return [(sum(x[i]) / row) for i in range(col)]\n\n\ndef sd(x):\n return [np.std(x[i]) for i in range(col)]\n\n\ndef cov(x, md_x):\n cov_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n for k in range(row):\n cov_xy[i][j] += (data[i][k] - md_x[i]) * (data[j][k] - md_x[j]\n ) / row\n return cov_xy\n\n\ndef cor(cov, sd_x):\n cor_xy = [[(0) for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range(col):\n cor_xy[i][j] = cov[i][j] / (sd_x[i] * sd_x[j])\n print('cov= ', cov[i][j], 'sd i', sd_x[i], ' sd k', sd_x[j],\n 'cov/sd', cov[i][j] / (sd_x[i] * sd_x[j]))\n return cor_xy\n\n\nif __name__ == '__main__':\n argv = sys.argv[:]\n if len(argv) < 2:\n print('1 argument required. Provide data file name')\n sys.exit(0)\n data = pd.read_csv(argv[1], header=None)\n row = data.shape[0]\n col = data.shape[1]\n print('** dataset dimensions **')\n print(row)\n print(col)\n mean = avg(data)\n stdev = sd(data)\n print(stdev)\n covar = cov(data, mean)\n correl = cor(covar, stdev)\n print('---------CORRELATION MATRIX---------')\n print(correl)\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport sys\n\ndef avg (x):\n return [sum(x[i])/row for i in range(col)]\n\ndef sd (x):\n return [np.std(x[i]) for i in range(col)]\n\ndef cov (x, md_x):\n cov_xy=[[0 for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range (col):\n for k in range (row):\n cov_xy[i][j]+=((data[i][k]-md_x[i])*(data[j][k]-md_x[j]))/(row)\n return(cov_xy)\n\ndef cor (cov, sd_x):\n cor_xy=[[0 for r in range(col)] for c in range(col)]\n for i in range(col):\n for j in range (col):\n cor_xy[i][j] = cov[i][j]/(sd_x[i]*sd_x[j])\n print(\"cov= \",cov[i][j],\"sd i\", sd_x[i], \" sd k\", sd_x[j],\"cov/sd\", cov[i][j]/(sd_x[i]*sd_x[j]))\n return(cor_xy)\n\n\nif __name__ == \"__main__\":\n \n argv=sys.argv[:]\n \n if len(argv)<2:\n print(\"1 argument required. Provide data file name\")\n sys.exit(0)\n \n data=pd.read_csv(argv[1],header= None)\n row=data.shape[0]\n col=data.shape[1]\n print(\"** dataset dimensions **\")\n print(row)\n print(col)\n mean=avg(data)\n stdev=sd(data)\n print(stdev)\n \n covar=cov(data, mean)\n correl=cor(covar, stdev)\n print(\"---------CORRELATION MATRIX---------\")\n print(correl)\n \n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
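# A quick cross-check of the hand-rolled covariance/correlation above against
# numpy. 'data.csv' is a stand-in for the file passed as argv[1] (an
# assumption); columns are treated as variables, matching the script. The
# script uses population statistics (divide by row, np.std with ddof=0), and
# that normalization cancels in the correlation, so the results should agree.
import numpy as np
import pandas as pd

data = pd.read_csv('data.csv', header=None)
print(np.corrcoef(data.values, rowvar=False))  # should match the printed CORRELATION MATRIX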
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(
"""
███████████████████████████████
█ █
█═╬═════════════════════════╬═█
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░Wi-fi Fucker Tool░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░coded by arda6░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█═╬═════════════════════════╬═█
█ █
███████████████████████████████
"""
)
<|reserved_special_token_0|>
if pla == 'win32':
win = 'Windows'
print(' [!] Your Platform is ' + win + '\n')
elif pla == 'darwin':
mac = 'MacOs'
print(' [+] Your Platform is ' + mac + '\n')
elif pla == 'linux':
mac = 'Linux'
print(' [+] Your Platform is ' + mac + '\n')
if pla == 'win32':
print(' [!] Not Suitable For Tool Windows \n')
time.sleep(3)
exit(' [#] https://www.github/arda6')
print('')
print("""
1) Wep Cracking
2) Wpa2 Cracking
3) Deauth Attack
""")
<|reserved_special_token_0|>
if soru == '1':
os.system('python3 main.py')
exit()
elif soru == '2':
os.system('python3 wpa2.py')
elif soru == '3':
os.system('python3 attack.py')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(
"""
███████████████████████████████
█ █
█═╬═════════════════════════╬═█
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░Wi-fi Fucker Tool░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░coded by arda6░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█═╬═════════════════════════╬═█
█ █
███████████████████████████████
"""
)
pla = sys.platform
if pla == 'win32':
win = 'Windows'
print(' [!] Your Platform is ' + win + '\n')
elif pla == 'darwin':
mac = 'MacOs'
print(' [+] Your Platform is ' + mac + '\n')
elif pla == 'linux':
mac = 'Linux'
print(' [+] Your Platform is ' + mac + '\n')
if pla == 'win32':
print(' [!] Not Suitable For Tool Windows \n')
time.sleep(3)
exit(' [#] https://www.github/arda6')
print('')
print("""
1) Wep Cracking
2) Wpa2 Cracking
3) Deauth Attack
""")
soru = input('root@eyll:~# ')
if soru == '1':
os.system('python3 main.py')
exit()
elif soru == '2':
os.system('python3 wpa2.py')
elif soru == '3':
os.system('python3 attack.py')
<|reserved_special_token_1|>
import os, sys, time
print(
"""
███████████████████████████████
█ █
█═╬═════════════════════════╬═█
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░Wi-fi Fucker Tool░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░coded by arda6░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█═╬═════════════════════════╬═█
█ █
███████████████████████████████
"""
)
pla = sys.platform
if pla == 'win32':
win = 'Windows'
print(' [!] Your Platform is ' + win + '\n')
elif pla == 'darwin':
mac = 'MacOs'
print(' [+] Your Platform is ' + mac + '\n')
elif pla == 'linux':
mac = 'Linux'
print(' [+] Your Platform is ' + mac + '\n')
if pla == 'win32':
print(' [!] Not Suitable For Tool Windows \n')
time.sleep(3)
exit(' [#] https://www.github/arda6')
print('')
print("""
1) Wep Cracking
2) Wpa2 Cracking
3) Deauth Attack
""")
soru = input('root@eyll:~# ')
if soru == '1':
os.system('python3 main.py')
exit()
elif soru == '2':
os.system('python3 wpa2.py')
elif soru == '3':
os.system('python3 attack.py')
<|reserved_special_token_1|>
import os , sys , time
print("""
███████████████████████████████
█ █
█═╬═════════════════════════╬═█
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░Wi-fi Fucker Tool░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░coded by arda6░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█═╬═════════════════════════╬═█
█ █
███████████████████████████████
""")
pla = sys.platform
if pla == "win32":
win = "Windows"
print(" [!] Your Platform is " +win+ "\n")
elif pla == "darwin":
mac = "MacOs"
print(" [+] Your Platform is " +mac+ "\n")
elif pla == "linux":
mac = "Linux"
print(" [+] Your Platform is " +mac+"\n")
if pla == "win32":
print(" [!] Not Suitable For Tool Windows \n")
time.sleep(3)
exit(" [#] https://www.github/arda6")
print("")
print("""
1) Wep Cracking
2) Wpa2 Cracking
3) Deauth Attack
""")
soru = input("root@eyll:~# ")
if soru == '1':
os.system("python3 main.py")
exit()
elif soru == '2':
os.system("python3 wpa2.py")
elif soru == '3':
os.system("python3 attack.py")
|
flexible
|
{
"blob_id": "15eb205e6bd36844fdfc8c05efbc3a3d584c122d",
"index": 7238,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n \"\"\"\n\n ███████████████████████████████\n █ █\n █═╬═════════════════════════╬═█\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░Wi-fi Fucker Tool░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░coded by arda6░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █═╬═════════════════════════╬═█\n █ █\n ███████████████████████████████\n\n\n\"\"\"\n )\n<mask token>\nif pla == 'win32':\n win = 'Windows'\n print(' [!] Your Platform is ' + win + '\\n')\nelif pla == 'darwin':\n mac = 'MacOs'\n print(' [+] Your Platform is ' + mac + '\\n')\nelif pla == 'linux':\n mac = 'Linux'\n print(' [+] Your Platform is ' + mac + '\\n')\nif pla == 'win32':\n print(' [!] Not Suitable For Tool Windows \\n')\n time.sleep(3)\n exit(' [#] https://www.github/arda6')\nprint('')\nprint(\"\"\"\n\n 1) Wep Cracking\n 2) Wpa2 Cracking\n 3) Deauth Attack\n \n\"\"\")\n<mask token>\nif soru == '1':\n os.system('python3 main.py')\n exit()\nelif soru == '2':\n os.system('python3 wpa2.py')\nelif soru == '3':\n os.system('python3 attack.py')\n",
"step-3": "<mask token>\nprint(\n \"\"\"\n\n ███████████████████████████████\n █ █\n █═╬═════════════════════════╬═█\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░Wi-fi Fucker Tool░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░coded by arda6░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █═╬═════════════════════════╬═█\n █ █\n ███████████████████████████████\n\n\n\"\"\"\n )\npla = sys.platform\nif pla == 'win32':\n win = 'Windows'\n print(' [!] Your Platform is ' + win + '\\n')\nelif pla == 'darwin':\n mac = 'MacOs'\n print(' [+] Your Platform is ' + mac + '\\n')\nelif pla == 'linux':\n mac = 'Linux'\n print(' [+] Your Platform is ' + mac + '\\n')\nif pla == 'win32':\n print(' [!] Not Suitable For Tool Windows \\n')\n time.sleep(3)\n exit(' [#] https://www.github/arda6')\nprint('')\nprint(\"\"\"\n\n 1) Wep Cracking\n 2) Wpa2 Cracking\n 3) Deauth Attack\n \n\"\"\")\nsoru = input('root@eyll:~# ')\nif soru == '1':\n os.system('python3 main.py')\n exit()\nelif soru == '2':\n os.system('python3 wpa2.py')\nelif soru == '3':\n os.system('python3 attack.py')\n",
"step-4": "import os, sys, time\nprint(\n \"\"\"\n\n ███████████████████████████████\n █ █\n █═╬═════════════════════════╬═█\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░Wi-fi Fucker Tool░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░coded by arda6░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █═╬═════════════════════════╬═█\n █ █\n ███████████████████████████████\n\n\n\"\"\"\n )\npla = sys.platform\nif pla == 'win32':\n win = 'Windows'\n print(' [!] Your Platform is ' + win + '\\n')\nelif pla == 'darwin':\n mac = 'MacOs'\n print(' [+] Your Platform is ' + mac + '\\n')\nelif pla == 'linux':\n mac = 'Linux'\n print(' [+] Your Platform is ' + mac + '\\n')\nif pla == 'win32':\n print(' [!] Not Suitable For Tool Windows \\n')\n time.sleep(3)\n exit(' [#] https://www.github/arda6')\nprint('')\nprint(\"\"\"\n\n 1) Wep Cracking\n 2) Wpa2 Cracking\n 3) Deauth Attack\n \n\"\"\")\nsoru = input('root@eyll:~# ')\nif soru == '1':\n os.system('python3 main.py')\n exit()\nelif soru == '2':\n os.system('python3 wpa2.py')\nelif soru == '3':\n os.system('python3 attack.py')\n",
"step-5": "import os , sys , time\r\nprint(\"\"\"\r\n\r\n ███████████████████████████████\r\n █ █\r\n █═╬═════════════════════════╬═█\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░Wi-fi Fucker Tool░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░░coded by arda6░░░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █═╬═════════════════════════╬═█\r\n █ █\r\n ███████████████████████████████\r\n\r\n\r\n\"\"\")\r\npla = sys.platform\r\nif pla == \"win32\":\r\n win = \"Windows\"\r\n print(\" [!] Your Platform is \" +win+ \"\\n\")\r\nelif pla == \"darwin\":\r\n mac = \"MacOs\"\r\n print(\" [+] Your Platform is \" +mac+ \"\\n\")\r\nelif pla == \"linux\":\r\n mac = \"Linux\"\r\n print(\" [+] Your Platform is \" +mac+\"\\n\")\r\nif pla == \"win32\":\r\n print(\" [!] Not Suitable For Tool Windows \\n\")\r\n time.sleep(3)\r\n exit(\" [#] https://www.github/arda6\")\r\nprint(\"\")\r\nprint(\"\"\"\r\n\r\n 1) Wep Cracking\r\n 2) Wpa2 Cracking\r\n 3) Deauth Attack\r\n \r\n\"\"\")\r\n\r\nsoru = input(\"root@eyll:~# \")\r\nif soru == '1':\r\n os.system(\"python3 main.py\")\r\n exit()\r\nelif soru == '2':\r\n os.system(\"python3 wpa2.py\")\r\nelif soru == '3':\r\n os.system(\"python3 attack.py\")\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Bisection recursion algo for sqrt of 2
def bisectionSqrt(x, epsilon = 0.01, low = None, high = None):
"""
Performs a recursive bisection search to find the
square root of x, within epsilon
"""
	if low is None:
		low = 0.0
	if high is None:
		# max(x, 1.0) keeps sqrt(x) inside the search interval when x < 1
		high = max(x, 1.0)
midPoint = (high + low)/2.0
	# If the difference of the midpoint squared and x is
	# within the epsilon tolerance, we stop and give the answer
	if abs(midPoint**2 - x) < epsilon:
return midPoint
else:
# Otherwise check if the midPoint is too big or small
if midPoint ** 2 < x:
# If too small, recurse on the upper half
return bisectionSqrt(x,epsilon,midPoint,high)
else :
# If too big, recurse on the lower half
return bisectionSqrt(x,epsilon,low,midPoint)
print "bisectionSqrt(25): ", bisectionSqrt(25)
|
normal
|
{
"blob_id": "d332ddd6c66bb22d60190ab8f94931eac6fd2394",
"index": 8482,
"step-1": "# Bisection recursion algo for sqrt of 2\n\ndef bisectionSqrt(x, epsilon = 0.01, low = None, high = None):\n\t\"\"\" \n\t\tPerforms a recursive bisection search to find the\n\t\tsquare root of x, within epsilon\n\t\"\"\"\n\n\tif low == None:\n\t\tlow = 0.0\n\tif high == None:\n\t\thigh = x\n\n\tmidPoint = (high + low)/2.0\n\t# If the difference of the midpoint squared and x is\n\t# within the epsilon tolerance, OR is the midpoint is\n\t# greater than X, we stop and give answer\n\tif abs(midPoint**2 - x) < epsilon or midPoint > x:\n\t\treturn midPoint\n\telse:\n\t\t# Otherwise check if the midPoint is too big or small\n\t\tif midPoint ** 2 < x:\n\t\t\t# If too small, recurse on the upper half\n\t\t\treturn bisectionSqrt(x,epsilon,midPoint,high)\n\t\telse :\n\t\t\t# If too big, recurse on the lower half\n\t\t\treturn bisectionSqrt(x,epsilon,low,midPoint)\n\n\nprint \"bisectionSqrt(25): \", bisectionSqrt(25)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
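
As a quick sanity check of the bisection record above (which is Python 2), here is a minimal Python 3 sketch of the same epsilon-based search. The widened starting bracket `max(x, 1.0)` is an added assumption, not in the original, so that inputs below 1 (where sqrt(x) > x) still converge.

def bisection_sqrt(x, epsilon=0.01, low=0.0, high=None):
    # Recursive bisection for sqrt(x); assumes x >= 0.
    if high is None:
        high = max(x, 1.0)  # assumption: widen bracket so 0 < x < 1 works
    mid = (low + high) / 2.0
    if abs(mid * mid - x) < epsilon:
        return mid
    if mid * mid < x:
        return bisection_sqrt(x, epsilon, mid, high)  # root is in upper half
    return bisection_sqrt(x, epsilon, low, mid)       # root is in lower half

print("bisection_sqrt(25):", bisection_sqrt(25))  # ~5.0
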
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2017-05-29 04:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nomenclature', '0002_saloon_default'),
]
operations = [
migrations.AlterField(
model_name='supplier',
name='description',
field=models.CharField(blank=True, max_length=500, null=True, verbose_name='Описание'),
),
]
|
normal
|
{
"blob_id": "7817a42e5aee1786cfb3e8018bd7ca0a5e74749d",
"index": 8447,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('nomenclature', '0002_saloon_default')]\n operations = [migrations.AlterField(model_name='supplier', name=\n 'description', field=models.CharField(blank=True, max_length=500,\n null=True, verbose_name='Описание'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('nomenclature', '0002_saloon_default')]\n operations = [migrations.AlterField(model_name='supplier', name=\n 'description', field=models.CharField(blank=True, max_length=500,\n null=True, verbose_name='Описание'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.6 on 2017-05-29 04:28\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('nomenclature', '0002_saloon_default'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='supplier',\n name='description',\n field=models.CharField(blank=True, max_length=500, null=True, verbose_name='Описание'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
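
For orientation, the `AlterField` above implies a `Supplier` model whose field ends up as below; this is a hypothetical reconstruction from the migration alone, not code taken from the source app.

from django.db import models

class Supplier(models.Model):
    # State after this migration: optional text column, max 500 chars,
    # verbose_name 'Описание' ("Description").
    description = models.CharField(blank=True, max_length=500, null=True,
                                   verbose_name='Описание')
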
from parser import read_expression_line, read_expression_lines, read_assignment_line, read_import_line, Import
def test_expression():
lines = ['a % b']
expression, left = read_expression_lines(lines)
assert expression is not None and len(left) == 0, left
print "test_expression 0: {} {}".format(expression, left)
lines = ['[a+b]']
expression, left = read_expression_lines(lines)
assert expression is not None
print "{} {}".format(expression, left)
lines = [
'get_name({',
'"first":"mike",',
'"last":"yu"',
'}):'
]
    expression, left = read_expression_lines(lines)
assert expression is not None
print "{} {}".format(expression, left)
lines = [
'[a[0]*b[1]]',
]
expression, left = read_expression_lines(lines)
assert expression is not None
print "{} {}".format(expression, left)
lines = [
'[a[0]*b[1] - c[2]*d[3],'
'e]',
]
expression, left = read_expression_lines(lines)
assert expression is not None
print "{} {}".format(expression, left)
lines = [
'(vector[i] * vector[i])'
]
expression, left = read_expression_lines(lines)
assert expression is not None
print "{} {}".format(expression, left)
lines = [
#'if value >= 0 && value < lengths[axis]:'
'value >= 0 && value < lengths[axis]'
#'value >= 0 && value < lengths[axis]'
#'value < 0'
]
expression, left = read_expression_lines(lines)
print "test_expression {} {}".format(expression, left)
assert expression is not None and len(left) == 0
lines = [
'assert(matrix == [[1,2,3],[4,5,6]])'
]
expression, left = read_expression_lines(lines)
print "test_expression assert {} {}".format(expression, left)
assert expression is not None and len(left) == 0
def test_assignment():
print "Testing assignments"
expression = read_assignment_line('a = 5')
assert expression is not None
print "{}".format(expression)
line = 'text = null'
expression = read_assignment_line(line)
assert expression is not None
print "test assignment 0: {}".format(expression)
expression = read_assignment_line('sum += 5')
assert expression is not None
print "{}".format(expression)
expression = read_assignment_line('some[axis] += value')
assert expression is not None
print "{}".format(expression)
expression = read_assignment_line('sum_indices = [indices[0], indices[1], indices[2]]')
assert expression is not None
print "{}".format(expression)
text = 'faces[0][0] = true'
expression = read_assignment_line(text)
assert expression is not None
print "{}\n {}".format(text, expression)
text = 'face.arm = true'
expression = read_assignment_line(text)
assert expression is not None
print "test asignment {}\n {}".format(text, expression)
text = '(a, b, c) = bob()'
expression = read_assignment_line(text)
assert expression is not None
print "test asignment 2 {}\n {}".format(text, expression)
text = 'c = bob(a - 6)'
assignment, tokens = read_assignment_line(text)
assert assignment is not None and len(tokens) == 0
print "test asignment 3 {}\n {}\n {}".format(text, assignment, tokens)
def test_parser():
expression, left = read_import_line("from shared import translate")
assert expression is not None
assert isinstance(expression, Import)
print "test_parser: {}".format(expression)
expression, left = read_import_line("from shared import (translate, bob)")
assert expression is not None
assert isinstance(expression, Import)
print "test_parser 2 : {}".format(expression)
lines = ['"john"']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['a + b']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['0']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['length(c)']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['length(c)[0][1][2]']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['(length(c))[0][1][2]']
expression, left = read_expression_line(lines[0])
assert expression is not None
print "test parser: {}".format(expression)
assert expression is not None
lines = ['d[0]']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['[e, f]']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['[g, str(h)]']
expression, left = read_expression_line(lines[0])
assert expression is not None
print "starting dict test 1"
lines = ['{"name":"mike"}']
expression, left = read_expression_line(lines[0])
assert expression is not None
lines = ['{"first":"alex", "last":"oh"}']
expression, left = read_expression_line(lines[0])
assert expression is not None
line = '((position[0] - middleX)/middleX)*width'
expression, left = read_expression_line(line)
assert expression is not None
line = 'keyboard.key_state.bob'
expression, left = read_expression_line(line)
assert expression is not None
print "test parser 3: {}".format(expression)
line = 'mouse.button[2]'
expression, left = read_expression_line(line)
assert expression is not None
print "test parser 4: {}".format(expression)
line = '{ "position": [0,0,0], "bob": "dole", "nice": "brother" }'
expression, left = read_expression_line(line)
assert expression is not None
print "test parser 5: {}".format(expression)
line = 'file_read(join([state.things_dir, "/", state.thing_name]), text)'
expression, left = read_expression_line(line)
assert expression is not None
print "test parser 6: {}".format(expression)
if __name__ == '__main__':
test_parser()
test_expression()
test_assignment()
|
normal
|
{
"blob_id": "657866affd653a99eb7d9a9a82b2f7d6503ec21a",
"index": 2468,
"step-1": "from parser import read_expression_line, read_expression_lines, read_assignment_line, read_import_line, Import\n\ndef test_expression():\n lines = ['a % b']\n expression, left = read_expression_lines(lines)\n assert expression is not None and len(left) == 0, left\n print \"test_expression 0: {} {}\".format(expression, left)\n lines = ['[a+b]']\n expression, left = read_expression_lines(lines)\n assert expression is not None\n print \"{} {}\".format(expression, left)\n\n lines = [\n 'get_name({',\n '\"first\":\"mike\",',\n '\"last\":\"yu\"',\n '}):'\n ]\n expression, leftt = read_expression_lines(lines)\n assert expression is not None\n print \"{} {}\".format(expression, left)\n\n lines = [\n '[a[0]*b[1]]',\n ]\n expression, left = read_expression_lines(lines)\n assert expression is not None\n print \"{} {}\".format(expression, left)\n\n lines = [\n '[a[0]*b[1] - c[2]*d[3],'\n 'e]',\n ]\n expression, left = read_expression_lines(lines)\n assert expression is not None\n print \"{} {}\".format(expression, left)\n\n lines = [\n '(vector[i] * vector[i])'\n ]\n expression, left = read_expression_lines(lines)\n assert expression is not None\n print \"{} {}\".format(expression, left)\n lines = [\n #'if value >= 0 && value < lengths[axis]:'\n 'value >= 0 && value < lengths[axis]'\n #'value >= 0 && value < lengths[axis]'\n #'value < 0'\n ]\n expression, left = read_expression_lines(lines)\n print \"test_expression {} {}\".format(expression, left)\n assert expression is not None and len(left) == 0\n\n lines = [\n 'assert(matrix == [[1,2,3],[4,5,6]])'\n ]\n expression, left = read_expression_lines(lines)\n print \"test_expression assert {} {}\".format(expression, left)\n assert expression is not None and len(left) == 0\n\ndef test_assignment():\n print \"Testing assignments\"\n expression = read_assignment_line('a = 5')\n assert expression is not None\n print \"{}\".format(expression)\n\n line = 'text = null'\n expression = read_assignment_line(line)\n assert expression is not None\n print \"test assignment 0: {}\".format(expression)\n\n expression = read_assignment_line('sum += 5')\n assert expression is not None\n print \"{}\".format(expression)\n\n expression = read_assignment_line('some[axis] += value')\n assert expression is not None\n print \"{}\".format(expression)\n\n expression = read_assignment_line('sum_indices = [indices[0], indices[1], indices[2]]')\n assert expression is not None\n print \"{}\".format(expression)\n text = 'faces[0][0] = true'\n expression = read_assignment_line(text)\n assert expression is not None\n print \"{}\\n {}\".format(text, expression)\n text = 'face.arm = true'\n expression = read_assignment_line(text)\n assert expression is not None\n print \"test asignment {}\\n {}\".format(text, expression)\n text = '(a, b, c) = bob()'\n expression = read_assignment_line(text)\n assert expression is not None\n print \"test asignment 2 {}\\n {}\".format(text, expression)\n text = 'c = bob(a - 6)'\n assignment, tokens = read_assignment_line(text)\n assert assignment is not None and len(tokens) == 0\n print \"test asignment 3 {}\\n {}\\n {}\".format(text, assignment, tokens)\n\ndef test_parser():\n expression, left = read_import_line(\"from shared import translate\")\n assert expression is not None\n assert isinstance(expression, Import)\n print \"test_parser: {}\".format(expression)\n\n expression, left = read_import_line(\"from shared import (translate, bob)\")\n assert expression is not None\n assert isinstance(expression, Import)\n print \"test_parser 2 : 
{}\".format(expression)\n\n lines = ['\"john\"']\n expression, left = read_expression_line(lines[0])\n assert expression is not None\n lines = ['a + b']\n expression, left = read_expression_line(lines[0])\n assert expression is not None\n lines = ['0']\n expression, left = read_expression_line(lines[0])\n assert expression is not None\n lines = ['length(c)']\n expression, left = read_expression_line(lines[0])\n assert expression is not None\n lines = ['length(c)[0][1][2]']\n expression, left = read_expression_line(lines[0])\n assert expression is not None\n lines = ['(length(c))[0][1][2]']\n expression, left = read_expression_line(lines[0])\n assert expression is not None\n print \"test parser: {}\".format(expression)\n assert expression is not None\n lines = ['d[0]']\n expression, left = read_expression_line(lines[0])\n assert expression is not None\n lines = ['[e, f]']\n expression, left = read_expression_line(lines[0])\n assert expression is not None\n lines = ['[g, str(h)]']\n expression, left = read_expression_line(lines[0])\n assert expression is not None\n print \"starting dict test 1\"\n lines = ['{\"name\":\"mike\"}']\n expression, left = read_expression_line(lines[0])\n assert expression is not None\n lines = ['{\"first\":\"alex\", \"last\":\"oh\"}']\n expression, left = read_expression_line(lines[0])\n assert expression is not None\n line = '((position[0] - middleX)/middleX)*width'\n expression, left = read_expression_line(line)\n assert expression is not None\n line = 'keyboard.key_state.bob'\n expression, left = read_expression_line(line)\n assert expression is not None\n print \"test parser 3: {}\".format(expression)\n\n line = 'mouse.button[2]'\n expression, left = read_expression_line(line)\n assert expression is not None\n print \"test parser 4: {}\".format(expression)\n\n line = '{ \"position\": [0,0,0], \"bob\": \"dole\", \"nice\": \"brother\" }'\n expression, left = read_expression_line(line)\n assert expression is not None\n print \"test parser 5: {}\".format(expression)\n\n line = 'file_read(join([state.things_dir, \"/\", state.thing_name]), text)'\n expression, left = read_expression_line(line)\n assert expression is not None\n print \"test parser 6: {}\".format(expression)\n\n\nif __name__ == '__main__':\n test_parser()\n test_expression()\n test_assignment()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
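
The test file above is Python 2 (bare `print` statements). A minimal Python 3 / pytest rendering of its first case, assuming `read_expression_lines` keeps the `(expression, leftover_tokens)` contract the asserts rely on, would be the following hypothetical port (not part of the dataset):

from parser import read_expression_lines  # the record's local module

def test_modulo_expression():
    expression, left = read_expression_lines(['a % b'])
    assert expression is not None
    assert len(left) == 0, "unconsumed tokens: {}".format(left)
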
import pyodbc
from configuration.config import Configuration
from models.entities import Entities
from models.columns import Columns
from models.relationships import Relationship
from models.synonyms import Synonyms
from spacy.lemmatizer import Lemmatizer
from spacy.lookups import Lookups
class DBModel(object):
def __init__(self):
self.entities = []
self.columns = []
self.relationships = []
self.synonyms_col = []
self.synonyms_tab = []
self.entity_graph = []
self.loaded_entities = []
self.config = Configuration()
self.conn = pyodbc.connect(self.config.get_sql_connection_string())
lookups = Lookups()
self.lemmatizer = Lemmatizer(lookups)
self.load_db_model()
def load_db_model(self):
# loading the database from sql server
cursor = self.conn.cursor()
cursor.execute(self.config.get_tables_sql_query())
for row in cursor:
self.entities.append(Entities(row.table_name, self.config.get_default_column(row.table_name)))
cursor.execute(self.config.get_columns_sql_query())
current_entity = None
current_entity_name = ""
for row in cursor:
if current_entity_name != row.table_name:
current_entity_name = row.table_name
current_entity = next(en for en in self.entities if en.name == current_entity_name)
col_type = row.type_name
if col_type == "varchar" or col_type == "nvarchar":
col_type = "string"
current_entity.columns.append(Columns(row.column_name, col_type))
current_entity = None
current_entity_name = ""
cursor.execute(self.config.get_FK_sql_query())
for row in cursor:
self.relationships.append(Relationship(row.parent_table, row.refrenced_table, row.parent_table_col, row.referenced_table_col))
if len([en for en in self.entity_graph if en[0] == row.parent_table]) > 0:
current_entity = next(en for en in self.entity_graph if en[0] == row.parent_table)
current_entity[1].append(row.refrenced_table)
else:
self.entity_graph.append((row.parent_table, [row.refrenced_table]))
if len([en for en in self.entity_graph if en[0] == row.refrenced_table]) > 0:
current_entity = next(en for en in self.entity_graph if en[0] == row.refrenced_table)
current_entity[1].append(row.parent_table)
else:
self.entity_graph.append((row.refrenced_table, [row.parent_table]))
current_entity = None
current_entity_name = ""
cursor.execute(self.config.get_PK_sql_query())
for row in cursor:
if len([en for en in self.entity_graph if en[0] == row.table_name]) == 1:
current_entity = next(en for en in self.entities if en.name == row.table_name)
current_entity.primaryKey = row.primary_key
for entity_to_load in self.config.get_entitites_to_load():
entity_load_query = "select distinct " + entity_to_load["column"] + " from " + entity_to_load["entity"]
cursor.execute(entity_load_query)
entity_data = (entity_to_load["entity"], [])
for row in cursor:
entity_data[1].append(row[0])
# add lemma strings
lemmas = self.lemmatizer(str(row[0]), u'NOUN')
for lemma in lemmas:
entity_data[1].append(str(lemma))
self.loaded_entities.append(entity_data)
# load synonyms from declarative file
        # table synonyms
for table_synonym in self.config.get_synonyms()["table"]:
orginal_val = table_synonym["original"]
synonyms_vals = table_synonym["synonyms"]
for synonyms_val in synonyms_vals:
self.synonyms_tab.append(Synonyms(orginal_val, synonyms_val))
        # column synonyms
for column_synonym in self.config.get_synonyms()["column"]:
orginal_val = column_synonym["original"]
synonyms_vals = column_synonym["synonyms"]
for synonyms_val in synonyms_vals:
self.synonyms_col.append(Synonyms(orginal_val, synonyms_val))
# make a single array
self.columns = [column for entity in self.entities for column in entity.columns]
# might have to write a custom matcher TODO
# build the matcher based upon the original value and domain synonyms defined
def get_matcher(self, matcher, nlp):
for entity in self.entities:
matcher.add(entity.name.upper() + "_TABLE", None, nlp(entity.name.lower()))
for column in entity.columns:
matcher.add(column.name.upper() + "_COLUMN", None, nlp(column.name.lower()))
# add table synonyms to matcher
for synonym in self.synonyms_tab:
for entity in self.entities:
if synonym.column.lower() == entity.name.lower():
matcher.add(entity.name.upper() + "_TABLE", None, nlp(synonym.synonym.lower()))
# add column synonyms to matcher
for synonym in self.synonyms_col:
for column in self.columns:
if synonym.column.lower() == column.name.lower():
matcher.add(column.name.upper() + "_COLUMN", None, nlp(synonym.synonym.lower()))
return matcher
def get_custom_matcher(self, matcher, nlp):
for entity in self.entities:
matcher.add(entity.name.upper() + "_TABLE", nlp(entity.name.lower()))
for column in entity.columns:
matcher.add(column.name.upper() + "_COLUMN", nlp(column.name.lower()))
# add table synonyms to matcher
for synonym in self.synonyms_tab:
for entity in self.entities:
if synonym.column.lower() == entity.name.lower():
matcher.add(entity.name.upper() + "_TABLE", nlp(synonym.synonym.lower()))
# add column synonyms to matcher
for synonym in self.synonyms_col:
for column in self.columns:
if synonym.column.lower() == column.name.lower():
matcher.add(column.name.upper() + "_COLUMN", nlp(synonym.synonym.lower()))
return matcher
|
normal
|
{
"blob_id": "76ebab93441676f9f00b2c2d63435e72c2d5d1ba",
"index": 9936,
"step-1": "<mask token>\n\n\nclass DBModel(object):\n <mask token>\n <mask token>\n\n def get_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(entity.\n name.lower()))\n for column in entity.columns:\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n column.name.lower()))\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(\n synonym.synonym.lower()))\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() == column.name.lower():\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n synonym.synonym.lower()))\n return matcher\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DBModel(object):\n <mask token>\n\n def load_db_model(self):\n cursor = self.conn.cursor()\n cursor.execute(self.config.get_tables_sql_query())\n for row in cursor:\n self.entities.append(Entities(row.table_name, self.config.\n get_default_column(row.table_name)))\n cursor.execute(self.config.get_columns_sql_query())\n current_entity = None\n current_entity_name = ''\n for row in cursor:\n if current_entity_name != row.table_name:\n current_entity_name = row.table_name\n current_entity = next(en for en in self.entities if en.name ==\n current_entity_name)\n col_type = row.type_name\n if col_type == 'varchar' or col_type == 'nvarchar':\n col_type = 'string'\n current_entity.columns.append(Columns(row.column_name, col_type))\n current_entity = None\n current_entity_name = ''\n cursor.execute(self.config.get_FK_sql_query())\n for row in cursor:\n self.relationships.append(Relationship(row.parent_table, row.\n refrenced_table, row.parent_table_col, row.\n referenced_table_col))\n if len([en for en in self.entity_graph if en[0] == row.\n parent_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[\n 0] == row.parent_table)\n current_entity[1].append(row.refrenced_table)\n else:\n self.entity_graph.append((row.parent_table, [row.\n refrenced_table]))\n if len([en for en in self.entity_graph if en[0] == row.\n refrenced_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[\n 0] == row.refrenced_table)\n current_entity[1].append(row.parent_table)\n else:\n self.entity_graph.append((row.refrenced_table, [row.\n parent_table]))\n current_entity = None\n current_entity_name = ''\n cursor.execute(self.config.get_PK_sql_query())\n for row in cursor:\n if len([en for en in self.entity_graph if en[0] == row.table_name]\n ) == 1:\n current_entity = next(en for en in self.entities if en.name ==\n row.table_name)\n current_entity.primaryKey = row.primary_key\n for entity_to_load in self.config.get_entitites_to_load():\n entity_load_query = 'select distinct ' + entity_to_load['column'\n ] + ' from ' + entity_to_load['entity']\n cursor.execute(entity_load_query)\n entity_data = entity_to_load['entity'], []\n for row in cursor:\n entity_data[1].append(row[0])\n lemmas = self.lemmatizer(str(row[0]), u'NOUN')\n for lemma in lemmas:\n entity_data[1].append(str(lemma))\n self.loaded_entities.append(entity_data)\n for table_synonym in self.config.get_synonyms()['table']:\n orginal_val = table_synonym['original']\n synonyms_vals = table_synonym['synonyms']\n for synonyms_val in synonyms_vals:\n self.synonyms_tab.append(Synonyms(orginal_val, synonyms_val))\n for column_synonym in self.config.get_synonyms()['column']:\n orginal_val = column_synonym['original']\n synonyms_vals = column_synonym['synonyms']\n for synonyms_val in synonyms_vals:\n self.synonyms_col.append(Synonyms(orginal_val, synonyms_val))\n self.columns = [column for entity in self.entities for column in\n entity.columns]\n\n def get_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(entity.\n name.lower()))\n for column in entity.columns:\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n column.name.lower()))\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(\n synonym.synonym.lower()))\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() 
== column.name.lower():\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n synonym.synonym.lower()))\n return matcher\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass DBModel(object):\n <mask token>\n\n def load_db_model(self):\n cursor = self.conn.cursor()\n cursor.execute(self.config.get_tables_sql_query())\n for row in cursor:\n self.entities.append(Entities(row.table_name, self.config.\n get_default_column(row.table_name)))\n cursor.execute(self.config.get_columns_sql_query())\n current_entity = None\n current_entity_name = ''\n for row in cursor:\n if current_entity_name != row.table_name:\n current_entity_name = row.table_name\n current_entity = next(en for en in self.entities if en.name ==\n current_entity_name)\n col_type = row.type_name\n if col_type == 'varchar' or col_type == 'nvarchar':\n col_type = 'string'\n current_entity.columns.append(Columns(row.column_name, col_type))\n current_entity = None\n current_entity_name = ''\n cursor.execute(self.config.get_FK_sql_query())\n for row in cursor:\n self.relationships.append(Relationship(row.parent_table, row.\n refrenced_table, row.parent_table_col, row.\n referenced_table_col))\n if len([en for en in self.entity_graph if en[0] == row.\n parent_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[\n 0] == row.parent_table)\n current_entity[1].append(row.refrenced_table)\n else:\n self.entity_graph.append((row.parent_table, [row.\n refrenced_table]))\n if len([en for en in self.entity_graph if en[0] == row.\n refrenced_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[\n 0] == row.refrenced_table)\n current_entity[1].append(row.parent_table)\n else:\n self.entity_graph.append((row.refrenced_table, [row.\n parent_table]))\n current_entity = None\n current_entity_name = ''\n cursor.execute(self.config.get_PK_sql_query())\n for row in cursor:\n if len([en for en in self.entity_graph if en[0] == row.table_name]\n ) == 1:\n current_entity = next(en for en in self.entities if en.name ==\n row.table_name)\n current_entity.primaryKey = row.primary_key\n for entity_to_load in self.config.get_entitites_to_load():\n entity_load_query = 'select distinct ' + entity_to_load['column'\n ] + ' from ' + entity_to_load['entity']\n cursor.execute(entity_load_query)\n entity_data = entity_to_load['entity'], []\n for row in cursor:\n entity_data[1].append(row[0])\n lemmas = self.lemmatizer(str(row[0]), u'NOUN')\n for lemma in lemmas:\n entity_data[1].append(str(lemma))\n self.loaded_entities.append(entity_data)\n for table_synonym in self.config.get_synonyms()['table']:\n orginal_val = table_synonym['original']\n synonyms_vals = table_synonym['synonyms']\n for synonyms_val in synonyms_vals:\n self.synonyms_tab.append(Synonyms(orginal_val, synonyms_val))\n for column_synonym in self.config.get_synonyms()['column']:\n orginal_val = column_synonym['original']\n synonyms_vals = column_synonym['synonyms']\n for synonyms_val in synonyms_vals:\n self.synonyms_col.append(Synonyms(orginal_val, synonyms_val))\n self.columns = [column for entity in self.entities for column in\n entity.columns]\n\n def get_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(entity.\n name.lower()))\n for column in entity.columns:\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n column.name.lower()))\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(\n synonym.synonym.lower()))\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() 
== column.name.lower():\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n synonym.synonym.lower()))\n return matcher\n\n def get_custom_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + '_TABLE', nlp(entity.name.\n lower()))\n for column in entity.columns:\n matcher.add(column.name.upper() + '_COLUMN', nlp(column.\n name.lower()))\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + '_TABLE', nlp(synonym\n .synonym.lower()))\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() == column.name.lower():\n matcher.add(column.name.upper() + '_COLUMN', nlp(\n synonym.synonym.lower()))\n return matcher\n",
"step-4": "import pyodbc\nfrom configuration.config import Configuration\nfrom models.entities import Entities\nfrom models.columns import Columns\nfrom models.relationships import Relationship\nfrom models.synonyms import Synonyms\nfrom spacy.lemmatizer import Lemmatizer\nfrom spacy.lookups import Lookups\n\n\nclass DBModel(object):\n\n def __init__(self):\n self.entities = []\n self.columns = []\n self.relationships = []\n self.synonyms_col = []\n self.synonyms_tab = []\n self.entity_graph = []\n self.loaded_entities = []\n self.config = Configuration()\n self.conn = pyodbc.connect(self.config.get_sql_connection_string())\n lookups = Lookups()\n self.lemmatizer = Lemmatizer(lookups)\n self.load_db_model()\n\n def load_db_model(self):\n cursor = self.conn.cursor()\n cursor.execute(self.config.get_tables_sql_query())\n for row in cursor:\n self.entities.append(Entities(row.table_name, self.config.\n get_default_column(row.table_name)))\n cursor.execute(self.config.get_columns_sql_query())\n current_entity = None\n current_entity_name = ''\n for row in cursor:\n if current_entity_name != row.table_name:\n current_entity_name = row.table_name\n current_entity = next(en for en in self.entities if en.name ==\n current_entity_name)\n col_type = row.type_name\n if col_type == 'varchar' or col_type == 'nvarchar':\n col_type = 'string'\n current_entity.columns.append(Columns(row.column_name, col_type))\n current_entity = None\n current_entity_name = ''\n cursor.execute(self.config.get_FK_sql_query())\n for row in cursor:\n self.relationships.append(Relationship(row.parent_table, row.\n refrenced_table, row.parent_table_col, row.\n referenced_table_col))\n if len([en for en in self.entity_graph if en[0] == row.\n parent_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[\n 0] == row.parent_table)\n current_entity[1].append(row.refrenced_table)\n else:\n self.entity_graph.append((row.parent_table, [row.\n refrenced_table]))\n if len([en for en in self.entity_graph if en[0] == row.\n refrenced_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[\n 0] == row.refrenced_table)\n current_entity[1].append(row.parent_table)\n else:\n self.entity_graph.append((row.refrenced_table, [row.\n parent_table]))\n current_entity = None\n current_entity_name = ''\n cursor.execute(self.config.get_PK_sql_query())\n for row in cursor:\n if len([en for en in self.entity_graph if en[0] == row.table_name]\n ) == 1:\n current_entity = next(en for en in self.entities if en.name ==\n row.table_name)\n current_entity.primaryKey = row.primary_key\n for entity_to_load in self.config.get_entitites_to_load():\n entity_load_query = 'select distinct ' + entity_to_load['column'\n ] + ' from ' + entity_to_load['entity']\n cursor.execute(entity_load_query)\n entity_data = entity_to_load['entity'], []\n for row in cursor:\n entity_data[1].append(row[0])\n lemmas = self.lemmatizer(str(row[0]), u'NOUN')\n for lemma in lemmas:\n entity_data[1].append(str(lemma))\n self.loaded_entities.append(entity_data)\n for table_synonym in self.config.get_synonyms()['table']:\n orginal_val = table_synonym['original']\n synonyms_vals = table_synonym['synonyms']\n for synonyms_val in synonyms_vals:\n self.synonyms_tab.append(Synonyms(orginal_val, synonyms_val))\n for column_synonym in self.config.get_synonyms()['column']:\n orginal_val = column_synonym['original']\n synonyms_vals = column_synonym['synonyms']\n for synonyms_val in synonyms_vals:\n self.synonyms_col.append(Synonyms(orginal_val, synonyms_val))\n 
self.columns = [column for entity in self.entities for column in\n entity.columns]\n\n def get_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(entity.\n name.lower()))\n for column in entity.columns:\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n column.name.lower()))\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(\n synonym.synonym.lower()))\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() == column.name.lower():\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n synonym.synonym.lower()))\n return matcher\n\n def get_custom_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + '_TABLE', nlp(entity.name.\n lower()))\n for column in entity.columns:\n matcher.add(column.name.upper() + '_COLUMN', nlp(column.\n name.lower()))\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + '_TABLE', nlp(synonym\n .synonym.lower()))\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() == column.name.lower():\n matcher.add(column.name.upper() + '_COLUMN', nlp(\n synonym.synonym.lower()))\n return matcher\n",
"step-5": "import pyodbc\n\nfrom configuration.config import Configuration\nfrom models.entities import Entities\nfrom models.columns import Columns\nfrom models.relationships import Relationship\nfrom models.synonyms import Synonyms\n\nfrom spacy.lemmatizer import Lemmatizer\nfrom spacy.lookups import Lookups\n\n\nclass DBModel(object):\n def __init__(self):\n self.entities = []\n self.columns = []\n self.relationships = []\n self.synonyms_col = []\n self.synonyms_tab = []\n self.entity_graph = []\n self.loaded_entities = []\n self.config = Configuration()\n self.conn = pyodbc.connect(self.config.get_sql_connection_string())\n lookups = Lookups()\n self.lemmatizer = Lemmatizer(lookups)\n self.load_db_model()\n\n def load_db_model(self):\n # loading the database from sql server\n cursor = self.conn.cursor()\n cursor.execute(self.config.get_tables_sql_query())\n for row in cursor:\n self.entities.append(Entities(row.table_name, self.config.get_default_column(row.table_name)))\n\n cursor.execute(self.config.get_columns_sql_query())\n current_entity = None\n current_entity_name = \"\"\n for row in cursor:\n if current_entity_name != row.table_name:\n current_entity_name = row.table_name\n current_entity = next(en for en in self.entities if en.name == current_entity_name)\n\n col_type = row.type_name\n if col_type == \"varchar\" or col_type == \"nvarchar\":\n col_type = \"string\"\n current_entity.columns.append(Columns(row.column_name, col_type))\n\n current_entity = None\n current_entity_name = \"\"\n cursor.execute(self.config.get_FK_sql_query())\n for row in cursor:\n self.relationships.append(Relationship(row.parent_table, row.refrenced_table, row.parent_table_col, row.referenced_table_col))\n if len([en for en in self.entity_graph if en[0] == row.parent_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[0] == row.parent_table)\n current_entity[1].append(row.refrenced_table)\n else:\n self.entity_graph.append((row.parent_table, [row.refrenced_table]))\n \n if len([en for en in self.entity_graph if en[0] == row.refrenced_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[0] == row.refrenced_table)\n current_entity[1].append(row.parent_table)\n else:\n self.entity_graph.append((row.refrenced_table, [row.parent_table]))\n\n current_entity = None\n current_entity_name = \"\"\n cursor.execute(self.config.get_PK_sql_query())\n for row in cursor:\n if len([en for en in self.entity_graph if en[0] == row.table_name]) == 1:\n current_entity = next(en for en in self.entities if en.name == row.table_name)\n current_entity.primaryKey = row.primary_key\n\n for entity_to_load in self.config.get_entitites_to_load():\n entity_load_query = \"select distinct \" + entity_to_load[\"column\"] + \" from \" + entity_to_load[\"entity\"]\n cursor.execute(entity_load_query)\n entity_data = (entity_to_load[\"entity\"], [])\n for row in cursor:\n entity_data[1].append(row[0])\n # add lemma strings\n lemmas = self.lemmatizer(str(row[0]), u'NOUN')\n for lemma in lemmas:\n entity_data[1].append(str(lemma))\n self.loaded_entities.append(entity_data)\n \n # load synonyms from declarative file\n # table sysnonyms\n for table_synonym in self.config.get_synonyms()[\"table\"]:\n orginal_val = table_synonym[\"original\"]\n synonyms_vals = table_synonym[\"synonyms\"]\n for synonyms_val in synonyms_vals:\n self.synonyms_tab.append(Synonyms(orginal_val, synonyms_val))\n\n # column sysnonyms\n for column_synonym in self.config.get_synonyms()[\"column\"]:\n orginal_val = 
column_synonym[\"original\"]\n synonyms_vals = column_synonym[\"synonyms\"]\n for synonyms_val in synonyms_vals:\n self.synonyms_col.append(Synonyms(orginal_val, synonyms_val))\n\n\n # make a single array\n self.columns = [column for entity in self.entities for column in entity.columns]\n \n\n # might have to write a custom matcher TODO\n # build the matcher based upon the original value and domain synonyms defined\n def get_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + \"_TABLE\", None, nlp(entity.name.lower())) \n for column in entity.columns:\n matcher.add(column.name.upper() + \"_COLUMN\", None, nlp(column.name.lower()))\n\n # add table synonyms to matcher\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + \"_TABLE\", None, nlp(synonym.synonym.lower())) \n\n # add column synonyms to matcher\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() == column.name.lower():\n matcher.add(column.name.upper() + \"_COLUMN\", None, nlp(synonym.synonym.lower())) \n \n\n return matcher\n\n def get_custom_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + \"_TABLE\", nlp(entity.name.lower())) \n for column in entity.columns:\n matcher.add(column.name.upper() + \"_COLUMN\", nlp(column.name.lower()))\n\n # add table synonyms to matcher\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + \"_TABLE\", nlp(synonym.synonym.lower())) \n\n # add column synonyms to matcher\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() == column.name.lower():\n matcher.add(column.name.upper() + \"_COLUMN\", nlp(synonym.synonym.lower())) \n \n\n return matcher\n",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
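
Because `get_matcher` above adds `Doc` objects (e.g. `nlp(entity.name.lower())`) rather than token-pattern dicts, the matcher it expects is spaCy v2's `PhraseMatcher`, whose `add(key, on_match, *docs)` signature matches those calls. A small standalone driver, with hypothetical table and synonym strings, might look like:

import spacy
from spacy.matcher import PhraseMatcher

nlp = spacy.load('en_core_web_sm')  # assumption: small English model installed
matcher = PhraseMatcher(nlp.vocab)
# One table name plus a domain synonym, mirroring how get_matcher registers both.
matcher.add('CUSTOMER_TABLE', None, nlp('customer'), nlp('client'))

doc = nlp('show every client with an open order')
for match_id, start, end in matcher(doc):
    print(nlp.vocab.strings[match_id], '->', doc[start:end].text)
# expected: CUSTOMER_TABLE -> client
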
"step-1":
<mask token>
class Gui:
    <mask token>
def insert_data(self):
self.id = e.get()
self.name1 = e1.get()
self.fathername = e2.get()
self.mothername = e3.get()
self.cont = e4.get()
self.email = e5.get()
self.cursor.execute(
"insert into user values('{}','{}','{}','{}','{}','{}')".format
(self.id, self.name1, self.fathername, self.mothername, self.
cont, self.email))
self.connection.commit()
    <mask token>
    <mask token>
def update(self):
global e
global e2
global e3
self.top1 = Toplevel(self.scr)
self.top1.geometry('400x400')
l1 = Label(self.top1, text='USER_ID', font=('times', 25, 'bold'),
bg='green2', fg='white')
l1.pack()
self.Id = StringVar()
e = Entry(self.top1, relief='sunken', textvariable=self.Id, font=(
'times', 25, 'bold'))
e.pack()
self.col_name = StringVar()
l2 = Label(self.top1, text='col_name', font=('times', 25, 'bold'),
bg='green2', fg='white')
l2.pack()
e2 = Entry(self.top1, relief='sunken', textvariable=self.col_name,
font=('times', 25, 'bold'))
e2.pack()
self.value = StringVar()
l3 = Label(self.top1, text='VALUE', font=('times', 25, 'bold'), bg=
'green2', fg='white')
l3.pack()
e3 = Entry(self.top1, relief='sunken', textvariable=self.value,
font=('times', 25, 'bold'))
e3.pack()
b = Button(self.top1, text='UPDATE', command=self.update_data, font
=('times', 25, 'bold'), bg='white', fg='black')
b.pack()
self.top1.mainloop()
def delete_data(self):
self.cursor.execute("Delete from user where id ='{}'".format(e.get()))
self.list.delete(0, END)
self.connection.commit()
self.show_data()
def del_rec(self):
global e
self.top2 = Toplevel(self.scr)
self.top2.geometry('400x400')
l1 = Label(self.top2, text='USER_ID', font=('times', 25, 'bold'),
bg='green2', fg='white')
l1.pack()
self.Id = StringVar()
e = Entry(self.top2, relief='sunken', textvariable=self.Id, font=(
'times', 25, 'bold'))
e.pack()
b = Button(self.top2, text='delete records', command=self.
delete_data, font=('times', 25, 'bold'), bg='white', fg='black')
b.pack()
self.top2.mainloop()
def sample(self):
s = '{}'.format(self.en3.get())
a = self.cursor.execute('{}'.format(self.en3.get()))
r = self.cursor.fetchall()
for row in r:
self.list.insert(0, row)
self.connection.commit()
    <mask token>
def add_record(self):
global e
global e1
global e2
global e3
global e4
global e5
self.e = StringVar()
self.e1 = StringVar()
self.e2 = StringVar()
self.e3 = StringVar()
self.e4 = StringVar()
self.e5 = StringVar()
self.top = Toplevel(self.scr)
self.top.geometry('400x800')
l = Label(self.top, text='USER_ID', font=('times', 25, 'bold'), bg=
'green2', fg='white')
l.pack()
e = Entry(self.top, relief='sunken', textvariable=self.e, font=(
'times', 25, 'bold'))
e.pack()
l1 = Label(self.top, text='USERNAME', font=('times', 25, 'bold'),
bg='green2', fg='white')
l1.pack()
e1 = Entry(self.top, relief='sunken', textvariable=self.e1, font=(
'times', 25, 'bold'))
e1.pack()
l2 = Label(self.top, text='FATHERS NAME', font=('times', 25, 'bold'
), bg='green2', fg='white')
l2.pack()
e2 = Entry(self.top, relief='sunken', textvariable=self.e2, font=(
'times', 25, 'bold'))
e2.pack()
l3 = Label(self.top, text='MOTHERS NAME', font=('times', 25, 'bold'
), bg='green2', fg='white')
l3.pack()
e3 = Entry(self.top, relief='sunken', textvariable=self.e3, font=(
'times', 25, 'bold'))
e3.pack()
l4 = Label(self.top, text='CONTACT NO', font=('times', 25, 'bold'),
bg='green2', fg='white')
l4.pack()
e4 = Entry(self.top, relief='sunken', textvariable=self.e4, font=(
'times', 25, 'bold'))
e4.pack()
l5 = Label(self.top, text='E-MAIL ID', font=('times', 25, 'bold'),
bg='green2', fg='white')
l5.pack()
e5 = Entry(self.top, relief='sunken', textvariable=self.e5, font=(
'times', 25, 'bold'))
e5.pack()
varchk = IntVar()
b = Button(self.top, text='SUBMIT', command=self.insert_data, font=
('times', 25, 'bold'), bg='white', fg='black')
b.pack()
self.top.mainloop()
    <mask token>
def selection(self):
lb = self.list_box.curselection()
print(lb)
for i in list(lb):
self.show_data()
    <mask token>
global table_name
    <mask token>
def show_entered_data(self):
global en1
global en2
global list1
global tbl_name
self.tbl_name = self.table_name.get()
self.en1 = self.entry1.get()
self.en2 = self.entry2.get()
sent = 'Create table ' + str(self.tbl_name) + "('" + str(self.en1
) + ' ' + str(self.en2) + "')"
list1 = Text(self.top, width=41, height=8, font=('times', 25, 'bold'))
list1.place(x=0, y=0)
list1.insert(0.0, sent)
print(self.tbl_name, self.en1, self.en2)
self.cursor.execute(sent)
self.list.insert(0, sent)
self.connection.commit()
    <mask token>
<mask token>
"step-2":
<mask token>
class Gui:
    <mask token>
def insert_data(self):
self.id = e.get()
self.name1 = e1.get()
self.fathername = e2.get()
self.mothername = e3.get()
self.cont = e4.get()
self.email = e5.get()
self.cursor.execute(
"insert into user values('{}','{}','{}','{}','{}','{}')".format
(self.id, self.name1, self.fathername, self.mothername, self.
cont, self.email))
self.connection.commit()
def show_data(self):
self.connection = sqlite3.connect('student_details.db')
self.cursor = self.connection.cursor()
self.cursor.execute('Select * from user')
rows = self.cursor.fetchall()
for row in rows:
l1 = self.list.insert(END, row)
self.connection.commit()
    <mask token>
def update(self):
global e
global e2
global e3
self.top1 = Toplevel(self.scr)
self.top1.geometry('400x400')
l1 = Label(self.top1, text='USER_ID', font=('times', 25, 'bold'),
bg='green2', fg='white')
l1.pack()
self.Id = StringVar()
e = Entry(self.top1, relief='sunken', textvariable=self.Id, font=(
'times', 25, 'bold'))
e.pack()
self.col_name = StringVar()
l2 = Label(self.top1, text='col_name', font=('times', 25, 'bold'),
bg='green2', fg='white')
l2.pack()
e2 = Entry(self.top1, relief='sunken', textvariable=self.col_name,
font=('times', 25, 'bold'))
e2.pack()
self.value = StringVar()
l3 = Label(self.top1, text='VALUE', font=('times', 25, 'bold'), bg=
'green2', fg='white')
l3.pack()
e3 = Entry(self.top1, relief='sunken', textvariable=self.value,
font=('times', 25, 'bold'))
e3.pack()
b = Button(self.top1, text='UPDATE', command=self.update_data, font
=('times', 25, 'bold'), bg='white', fg='black')
b.pack()
self.top1.mainloop()
def delete_data(self):
self.cursor.execute("Delete from user where id ='{}'".format(e.get()))
self.list.delete(0, END)
self.connection.commit()
self.show_data()
def del_rec(self):
global e
self.top2 = Toplevel(self.scr)
self.top2.geometry('400x400')
l1 = Label(self.top2, text='USER_ID', font=('times', 25, 'bold'),
bg='green2', fg='white')
l1.pack()
self.Id = StringVar()
e = Entry(self.top2, relief='sunken', textvariable=self.Id, font=(
'times', 25, 'bold'))
e.pack()
b = Button(self.top2, text='delete records', command=self.
delete_data, font=('times', 25, 'bold'), bg='white', fg='black')
b.pack()
self.top2.mainloop()
def sample(self):
s = '{}'.format(self.en3.get())
a = self.cursor.execute('{}'.format(self.en3.get()))
r = self.cursor.fetchall()
for row in r:
self.list.insert(0, row)
self.connection.commit()
def file(self):
self.f1.filename = filedialog.askopenfilename(title='Select file')
p = self.f1.filename
self.list.insert(0, self.f1.filename)
def add_record(self):
global e
global e1
global e2
global e3
global e4
global e5
self.e = StringVar()
self.e1 = StringVar()
self.e2 = StringVar()
self.e3 = StringVar()
self.e4 = StringVar()
self.e5 = StringVar()
self.top = Toplevel(self.scr)
self.top.geometry('400x800')
l = Label(self.top, text='USER_ID', font=('times', 25, 'bold'), bg=
'green2', fg='white')
l.pack()
e = Entry(self.top, relief='sunken', textvariable=self.e, font=(
'times', 25, 'bold'))
e.pack()
l1 = Label(self.top, text='USERNAME', font=('times', 25, 'bold'),
bg='green2', fg='white')
l1.pack()
e1 = Entry(self.top, relief='sunken', textvariable=self.e1, font=(
'times', 25, 'bold'))
e1.pack()
l2 = Label(self.top, text='FATHERS NAME', font=('times', 25, 'bold'
), bg='green2', fg='white')
l2.pack()
e2 = Entry(self.top, relief='sunken', textvariable=self.e2, font=(
'times', 25, 'bold'))
e2.pack()
l3 = Label(self.top, text='MOTHERS NAME', font=('times', 25, 'bold'
), bg='green2', fg='white')
l3.pack()
e3 = Entry(self.top, relief='sunken', textvariable=self.e3, font=(
'times', 25, 'bold'))
e3.pack()
l4 = Label(self.top, text='CONTACT NO', font=('times', 25, 'bold'),
bg='green2', fg='white')
l4.pack()
e4 = Entry(self.top, relief='sunken', textvariable=self.e4, font=(
'times', 25, 'bold'))
e4.pack()
l5 = Label(self.top, text='E-MAIL ID', font=('times', 25, 'bold'),
bg='green2', fg='white')
l5.pack()
e5 = Entry(self.top, relief='sunken', textvariable=self.e5, font=(
'times', 25, 'bold'))
e5.pack()
varchk = IntVar()
b = Button(self.top, text='SUBMIT', command=self.insert_data, font=
('times', 25, 'bold'), bg='white', fg='black')
b.pack()
self.top.mainloop()
    <mask token>
def selection(self):
lb = self.list_box.curselection()
print(lb)
for i in list(lb):
self.show_data()
    <mask token>
global table_name
def create_table(self):
self.top = Toplevel(self.scr)
self.top.geometry('400x800')
self.table_name = StringVar()
l = Label(self.top, text='Table', font=('times', 20, 'bold'), bg=
'white', fg='black')
l.pack()
e = Entry(self.top, textvariable=self.table_name, font=('times', 20,
'bold'))
e.pack()
b = Button(self.top, text='Add field', command=self.fun_show, font=
('times', 20, 'bold'), bg='white', fg='black')
b.pack()
b = Button(self.top, text='OK', font=('times', 20, 'bold'), command
=self.show_entered_data, bg='white', fg='black')
b.pack(side=RIGHT)
def show_entered_data(self):
global en1
global en2
global list1
global tbl_name
self.tbl_name = self.table_name.get()
self.en1 = self.entry1.get()
self.en2 = self.entry2.get()
sent = 'Create table ' + str(self.tbl_name) + "('" + str(self.en1
) + ' ' + str(self.en2) + "')"
list1 = Text(self.top, width=41, height=8, font=('times', 25, 'bold'))
list1.place(x=0, y=0)
list1.insert(0.0, sent)
print(self.tbl_name, self.en1, self.en2)
self.cursor.execute(sent)
self.list.insert(0, sent)
self.connection.commit()
    <mask token>
<mask token>
"step-3":
<mask token>
class Gui:
def __init__(self):
global en3
self.scr = Tk()
self.scr.geometry('2000x3000')
self.scr.title('VIEWING DATABASE')
self.connection = sqlite3.connect('student_details.db')
self.cursor = self.connection.cursor()
self.id = StringVar()
self.name1 = StringVar()
self.fathername = StringVar()
self.mothername = StringVar()
self.cont = StringVar()
self.email = StringVar()
self.f1 = Frame(self.scr, bg='brown1')
self.f1.pack(side=TOP)
self.left_frame = Frame(self.scr, bg='red')
self.left_frame.pack(side=LEFT, fill=Y)
self.right_frame = Frame(self.scr, width=3000, bg='yellow')
self.right_frame.pack(side=LEFT, fill=Y)
l = Label(self.right_frame, text=
'***************SHOW TABLE RECORDS IN A DATABASE******************'
, font=('times', 25, 'bold'), bg='black', fg='white')
l.pack(side=TOP, fill=X)
scrollbar = Scrollbar(self.right_frame)
scrollbar.pack(side=RIGHT, fill=Y)
self.list = Listbox(self.right_frame, width=61, height=12, font=(
'times', 25, 'bold'), yscrollcommand=scrollbar.set)
self.list.bind('student_list', self.show_records)
self.list.pack(side=TOP, fill=Y)
scrollbar.config(command=self.list.yview)
self.querry_frame = Frame(self.right_frame, width=81, height=5, bg=
'white')
self.querry_frame.pack(side=BOTTOM, fill=X)
self.en3 = Entry(self.querry_frame, font=('times', 25, 'bold'))
self.en3.pack(side=BOTTOM, fill=X)
b = Button(self.querry_frame, text='Enter', command=self.sample,
font=('times', 25, 'bold'), bg='white', fg='black')
b.pack(side=RIGHT)
b1 = Button(self.querry_frame, text='Save', command=self.show_data,
font=('times', 25, 'bold'), bg='white', fg='black')
b1.pack(side=RIGHT)
b = Button(self.f1, text='OPEN', command=self.file, font=('times',
25, 'bold'), bg='white', fg='black')
b.pack(side=LEFT)
b = Button(self.f1, text='CREATE', command=self.create_table, font=
('times', 25, 'bold'), bg='white', fg='black')
b.pack(side=LEFT)
b1 = Button(self.f1, text='INSERT', command=self.add_record, font=(
'times', 25, 'bold'), bg='white', fg='black')
b1.pack(side=LEFT)
b2 = Button(self.f1, text='DELETE', command=self.del_rec, font=(
'times', 25, 'bold'), bg='white', fg='black')
b2.pack(side=LEFT)
b3 = Button(self.f1, text='UPDATE', command=self.update, font=(
'times', 25, 'bold'), bg='white', fg='black')
b3.pack(side=RIGHT)
b4 = Button(self.f1, text='VIEW', command=lambda : self.view_table(
), font=('times', 25, 'bold'), bg='white', fg='black')
b4.pack(side=RIGHT)
b4 = Button(self.f1, text='BROWSE', command=self.show_data, font=(
'times', 25, 'bold'), bg='white', fg='black')
b4.pack(side=RIGHT)
l = Label(self.left_frame, text='View Table in Database', font=(
'times', 25, 'bold'), bg='blue', fg='white')
l.pack(side=TOP, fill=X)
self.scr.mainloop()
try:
self.cursor.execute(
'create table user(Id varchar(10),Name varchar(30),FathersName varchar(20),MothersName varchar(20),Contact varchar(10),Email varchar(30))'
)
self.connection.commit()
except:
pass
def insert_data(self):
self.id = e.get()
self.name1 = e1.get()
self.fathername = e2.get()
self.mothername = e3.get()
self.cont = e4.get()
self.email = e5.get()
self.cursor.execute(
"insert into user values('{}','{}','{}','{}','{}','{}')".format
(self.id, self.name1, self.fathername, self.mothername, self.
cont, self.email))
self.connection.commit()
def show_data(self):
self.connection = sqlite3.connect('student_details.db')
self.cursor = self.connection.cursor()
self.cursor.execute('Select * from user')
rows = self.cursor.fetchall()
for row in rows:
l1 = self.list.insert(END, row)
self.connection.commit()
def update_data(self):
self.cursor.execute("Update user set {} = '{}' where id ='{}'".
format(e2.get(), e3.get(), e.get()))
self.connection.commit()
self.list.delete(0, END)
self.show_data()
def update(self):
global e
global e2
global e3
self.top1 = Toplevel(self.scr)
self.top1.geometry('400x400')
l1 = Label(self.top1, text='USER_ID', font=('times', 25, 'bold'),
bg='green2', fg='white')
l1.pack()
self.Id = StringVar()
e = Entry(self.top1, relief='sunken', textvariable=self.Id, font=(
'times', 25, 'bold'))
e.pack()
self.col_name = StringVar()
l2 = Label(self.top1, text='col_name', font=('times', 25, 'bold'),
bg='green2', fg='white')
l2.pack()
e2 = Entry(self.top1, relief='sunken', textvariable=self.col_name,
font=('times', 25, 'bold'))
e2.pack()
self.value = StringVar()
l3 = Label(self.top1, text='VALUE', font=('times', 25, 'bold'), bg=
'green2', fg='white')
l3.pack()
e3 = Entry(self.top1, relief='sunken', textvariable=self.value,
font=('times', 25, 'bold'))
e3.pack()
b = Button(self.top1, text='UPDATE', command=self.update_data, font
=('times', 25, 'bold'), bg='white', fg='black')
b.pack()
self.top1.mainloop()
def delete_data(self):
self.cursor.execute("Delete from user where id ='{}'".format(e.get()))
self.list.delete(0, END)
self.connection.commit()
self.show_data()
def del_rec(self):
global e
self.top2 = Toplevel(self.scr)
self.top2.geometry('400x400')
l1 = Label(self.top2, text='USER_ID', font=('times', 25, 'bold'),
bg='green2', fg='white')
l1.pack()
self.Id = StringVar()
e = Entry(self.top2, relief='sunken', textvariable=self.Id, font=(
'times', 25, 'bold'))
e.pack()
b = Button(self.top2, text='delete records', command=self.
delete_data, font=('times', 25, 'bold'), bg='white', fg='black')
b.pack()
self.top2.mainloop()
def sample(self):
s = '{}'.format(self.en3.get())
a = self.cursor.execute('{}'.format(self.en3.get()))
r = self.cursor.fetchall()
for row in r:
self.list.insert(0, row)
self.connection.commit()
def file(self):
self.f1.filename = filedialog.askopenfilename(title='Select file')
p = self.f1.filename
self.list.insert(0, self.f1.filename)
def add_record(self):
global e
global e1
global e2
global e3
global e4
global e5
self.e = StringVar()
self.e1 = StringVar()
self.e2 = StringVar()
self.e3 = StringVar()
self.e4 = StringVar()
self.e5 = StringVar()
self.top = Toplevel(self.scr)
self.top.geometry('400x800')
l = Label(self.top, text='USER_ID', font=('times', 25, 'bold'), bg=
'green2', fg='white')
l.pack()
e = Entry(self.top, relief='sunken', textvariable=self.e, font=(
'times', 25, 'bold'))
e.pack()
l1 = Label(self.top, text='USERNAME', font=('times', 25, 'bold'),
bg='green2', fg='white')
l1.pack()
e1 = Entry(self.top, relief='sunken', textvariable=self.e1, font=(
'times', 25, 'bold'))
e1.pack()
l2 = Label(self.top, text='FATHERS NAME', font=('times', 25, 'bold'
), bg='green2', fg='white')
l2.pack()
e2 = Entry(self.top, relief='sunken', textvariable=self.e2, font=(
'times', 25, 'bold'))
e2.pack()
l3 = Label(self.top, text='MOTHERS NAME', font=('times', 25, 'bold'
), bg='green2', fg='white')
l3.pack()
e3 = Entry(self.top, relief='sunken', textvariable=self.e3, font=(
'times', 25, 'bold'))
e3.pack()
l4 = Label(self.top, text='CONTACT NO', font=('times', 25, 'bold'),
bg='green2', fg='white')
l4.pack()
e4 = Entry(self.top, relief='sunken', textvariable=self.e4, font=(
'times', 25, 'bold'))
e4.pack()
l5 = Label(self.top, text='E-MAIL ID', font=('times', 25, 'bold'),
bg='green2', fg='white')
l5.pack()
e5 = Entry(self.top, relief='sunken', textvariable=self.e5, font=(
'times', 25, 'bold'))
e5.pack()
varchk = IntVar()
b = Button(self.top, text='SUBMIT', command=self.insert_data, font=
('times', 25, 'bold'), bg='white', fg='black')
b.pack()
self.top.mainloop()
def view_table(self):
global list_box
self.list_box = Listbox(self.left_frame, font=('times', 20, 'bold'))
try:
self.list_box.insert(1, 'user')
self.list_box.insert(2, self.tbl_name)
except:
pass
b = Button(self.left_frame, text='Click', font=('times', 20, 'bold'
), command=self.selection, bg='white', fg='black')
b.place(x=100, y=400)
self.list_box.place(x=10, y=50)
def selection(self):
lb = self.list_box.curselection()
print(lb)
for i in list(lb):
self.show_data()
def show_records(self):
global m
m = self.list.curselection()
m = self.list.get(m)
self.id.delete(0, END)
self.id.insert(END, self.add_record())
global table_name
def create_table(self):
self.top = Toplevel(self.scr)
self.top.geometry('400x800')
self.table_name = StringVar()
l = Label(self.top, text='Table', font=('times', 20, 'bold'), bg=
'white', fg='black')
l.pack()
e = Entry(self.top, textvariable=self.table_name, font=('times', 20,
'bold'))
e.pack()
b = Button(self.top, text='Add field', command=self.fun_show, font=
('times', 20, 'bold'), bg='white', fg='black')
b.pack()
b = Button(self.top, text='OK', font=('times', 20, 'bold'), command
=self.show_entered_data, bg='white', fg='black')
b.pack(side=RIGHT)
def show_entered_data(self):
global en1
global en2
global list1
global tbl_name
self.tbl_name = self.table_name.get()
self.en1 = self.entry1.get()
self.en2 = self.entry2.get()
        # no quotes around the column spec: "('name type')" would make SQLite
        # treat it as a single quoted column name
        sent = 'Create table {}({} {})'.format(self.tbl_name, self.en1,
            self.en2)
list1 = Text(self.top, width=41, height=8, font=('times', 25, 'bold'))
list1.place(x=0, y=0)
list1.insert(0.0, sent)
print(self.tbl_name, self.en1, self.en2)
self.cursor.execute(sent)
self.list.insert(0, sent)
self.connection.commit()
def fun_show(self):
l = Label(self.top, text='Name', font=('times', 20, 'bold'), bg=
'white', fg='black')
l.pack(side=TOP)
self.entry1 = StringVar()
e1 = Entry(self.top, textvariable=self.entry1, font=('times', 20,
'bold'))
e1.pack()
l = Label(self.top, text='type', font=('times', 20, 'bold'), bg=
'white', fg='black')
l.pack(side=TOP)
self.entry2 = StringVar()
e1 = Entry(self.top, textvariable=self.entry2, font=('times', 20,
'bold'))
e1.pack()
class Gui:
def __init__(self):
global en3
self.scr = Tk()
self.scr.geometry('2000x3000')
self.scr.title('VIEWING DATABASE')
self.connection = sqlite3.connect('student_details.db')
self.cursor = self.connection.cursor()
self.id = StringVar()
self.name1 = StringVar()
self.fathername = StringVar()
self.mothername = StringVar()
self.cont = StringVar()
self.email = StringVar()
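        # window layout: a top button bar (f1), a left panel listing tables,
        # and a right panel with the records listbox and a free-form SQL entry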
self.f1 = Frame(self.scr, bg='brown1')
self.f1.pack(side=TOP)
self.left_frame = Frame(self.scr, bg='red')
self.left_frame.pack(side=LEFT, fill=Y)
self.right_frame = Frame(self.scr, width=3000, bg='yellow')
self.right_frame.pack(side=LEFT, fill=Y)
l = Label(self.right_frame, text=
'***************SHOW TABLE RECORDS IN A DATABASE******************'
, font=('times', 25, 'bold'), bg='black', fg='white')
l.pack(side=TOP, fill=X)
scrollbar = Scrollbar(self.right_frame)
scrollbar.pack(side=RIGHT, fill=Y)
self.list = Listbox(self.right_frame, width=61, height=12, font=(
'times', 25, 'bold'), yscrollcommand=scrollbar.set)
        self.list.bind('<<ListboxSelect>>', self.show_records)
self.list.pack(side=TOP, fill=Y)
scrollbar.config(command=self.list.yview)
self.querry_frame = Frame(self.right_frame, width=81, height=5, bg=
'white')
self.querry_frame.pack(side=BOTTOM, fill=X)
self.en3 = Entry(self.querry_frame, font=('times', 25, 'bold'))
self.en3.pack(side=BOTTOM, fill=X)
b = Button(self.querry_frame, text='Enter', command=self.sample,
font=('times', 25, 'bold'), bg='white', fg='black')
b.pack(side=RIGHT)
b1 = Button(self.querry_frame, text='Save', command=self.show_data,
font=('times', 25, 'bold'), bg='white', fg='black')
b1.pack(side=RIGHT)
b = Button(self.f1, text='OPEN', command=self.file, font=('times',
25, 'bold'), bg='white', fg='black')
b.pack(side=LEFT)
b = Button(self.f1, text='CREATE', command=self.create_table, font=
('times', 25, 'bold'), bg='white', fg='black')
b.pack(side=LEFT)
b1 = Button(self.f1, text='INSERT', command=self.add_record, font=(
'times', 25, 'bold'), bg='white', fg='black')
b1.pack(side=LEFT)
b2 = Button(self.f1, text='DELETE', command=self.del_rec, font=(
'times', 25, 'bold'), bg='white', fg='black')
b2.pack(side=LEFT)
b3 = Button(self.f1, text='UPDATE', command=self.update, font=(
'times', 25, 'bold'), bg='white', fg='black')
b3.pack(side=RIGHT)
b4 = Button(self.f1, text='VIEW', command=lambda : self.view_table(
), font=('times', 25, 'bold'), bg='white', fg='black')
b4.pack(side=RIGHT)
b4 = Button(self.f1, text='BROWSE', command=self.show_data, font=(
'times', 25, 'bold'), bg='white', fg='black')
b4.pack(side=RIGHT)
l = Label(self.left_frame, text='View Table in Database', font=(
'times', 25, 'bold'), bg='blue', fg='white')
l.pack(side=TOP, fill=X)
        # create the table before mainloop(): mainloop() blocks until the
        # window closes, so statements placed after it run far too late
        try:
            self.cursor.execute(
                'create table user(Id varchar(10),Name varchar(30),FathersName varchar(20),MothersName varchar(20),Contact varchar(10),Email varchar(30))'
                )
            self.connection.commit()
        except sqlite3.OperationalError:
            pass
        self.scr.mainloop()
def insert_data(self):
self.id = e.get()
self.name1 = e1.get()
self.fathername = e2.get()
self.mothername = e3.get()
self.cont = e4.get()
self.email = e5.get()
self.cursor.execute(
"insert into user values('{}','{}','{}','{}','{}','{}')".format
(self.id, self.name1, self.fathername, self.mothername, self.
cont, self.email))
self.connection.commit()
def show_data(self):
self.connection = sqlite3.connect('student_details.db')
self.cursor = self.connection.cursor()
self.cursor.execute('Select * from user')
rows = self.cursor.fetchall()
for row in rows:
l1 = self.list.insert(END, row)
self.connection.commit()
def update_data(self):
self.cursor.execute("Update user set {} = '{}' where id ='{}'".
format(e2.get(), e3.get(), e.get()))
self.connection.commit()
self.list.delete(0, END)
self.show_data()
def update(self):
global e
global e2
global e3
self.top1 = Toplevel(self.scr)
self.top1.geometry('400x400')
l1 = Label(self.top1, text='USER_ID', font=('times', 25, 'bold'),
bg='green2', fg='white')
l1.pack()
self.Id = StringVar()
e = Entry(self.top1, relief='sunken', textvariable=self.Id, font=(
'times', 25, 'bold'))
e.pack()
self.col_name = StringVar()
l2 = Label(self.top1, text='col_name', font=('times', 25, 'bold'),
bg='green2', fg='white')
l2.pack()
e2 = Entry(self.top1, relief='sunken', textvariable=self.col_name,
font=('times', 25, 'bold'))
e2.pack()
self.value = StringVar()
l3 = Label(self.top1, text='VALUE', font=('times', 25, 'bold'), bg=
'green2', fg='white')
l3.pack()
e3 = Entry(self.top1, relief='sunken', textvariable=self.value,
font=('times', 25, 'bold'))
e3.pack()
b = Button(self.top1, text='UPDATE', command=self.update_data, font
=('times', 25, 'bold'), bg='white', fg='black')
b.pack()
self.top1.mainloop()
def delete_data(self):
self.cursor.execute("Delete from user where id ='{}'".format(e.get()))
self.list.delete(0, END)
self.connection.commit()
self.show_data()
def del_rec(self):
global e
self.top2 = Toplevel(self.scr)
self.top2.geometry('400x400')
l1 = Label(self.top2, text='USER_ID', font=('times', 25, 'bold'),
bg='green2', fg='white')
l1.pack()
self.Id = StringVar()
e = Entry(self.top2, relief='sunken', textvariable=self.Id, font=(
'times', 25, 'bold'))
e.pack()
b = Button(self.top2, text='delete records', command=self.
delete_data, font=('times', 25, 'bold'), bg='white', fg='black')
b.pack()
self.top2.mainloop()
def sample(self):
s = '{}'.format(self.en3.get())
a = self.cursor.execute('{}'.format(self.en3.get()))
r = self.cursor.fetchall()
for row in r:
self.list.insert(0, row)
self.connection.commit()
def file(self):
self.f1.filename = filedialog.askopenfilename(title='Select file')
p = self.f1.filename
self.list.insert(0, self.f1.filename)
def add_record(self):
global e
global e1
global e2
global e3
global e4
global e5
self.e = StringVar()
self.e1 = StringVar()
self.e2 = StringVar()
self.e3 = StringVar()
self.e4 = StringVar()
self.e5 = StringVar()
self.top = Toplevel(self.scr)
self.top.geometry('400x800')
l = Label(self.top, text='USER_ID', font=('times', 25, 'bold'), bg=
'green2', fg='white')
l.pack()
e = Entry(self.top, relief='sunken', textvariable=self.e, font=(
'times', 25, 'bold'))
e.pack()
l1 = Label(self.top, text='USERNAME', font=('times', 25, 'bold'),
bg='green2', fg='white')
l1.pack()
e1 = Entry(self.top, relief='sunken', textvariable=self.e1, font=(
'times', 25, 'bold'))
e1.pack()
l2 = Label(self.top, text='FATHERS NAME', font=('times', 25, 'bold'
), bg='green2', fg='white')
l2.pack()
e2 = Entry(self.top, relief='sunken', textvariable=self.e2, font=(
'times', 25, 'bold'))
e2.pack()
l3 = Label(self.top, text='MOTHERS NAME', font=('times', 25, 'bold'
), bg='green2', fg='white')
l3.pack()
e3 = Entry(self.top, relief='sunken', textvariable=self.e3, font=(
'times', 25, 'bold'))
e3.pack()
l4 = Label(self.top, text='CONTACT NO', font=('times', 25, 'bold'),
bg='green2', fg='white')
l4.pack()
e4 = Entry(self.top, relief='sunken', textvariable=self.e4, font=(
'times', 25, 'bold'))
e4.pack()
l5 = Label(self.top, text='E-MAIL ID', font=('times', 25, 'bold'),
bg='green2', fg='white')
l5.pack()
e5 = Entry(self.top, relief='sunken', textvariable=self.e5, font=(
'times', 25, 'bold'))
e5.pack()
varchk = IntVar()
b = Button(self.top, text='SUBMIT', command=self.insert_data, font=
('times', 25, 'bold'), bg='white', fg='black')
b.pack()
self.top.mainloop()
def view_table(self):
global list_box
self.list_box = Listbox(self.left_frame, font=('times', 20, 'bold'))
try:
self.list_box.insert(1, 'user')
self.list_box.insert(2, self.tbl_name)
except:
pass
b = Button(self.left_frame, text='Click', font=('times', 20, 'bold'
), command=self.selection, bg='white', fg='black')
b.place(x=100, y=400)
self.list_box.place(x=10, y=50)
def selection(self):
lb = self.list_box.curselection()
print(lb)
for i in list(lb):
self.show_data()
def show_records(self):
        global m
        m = self.list.curselection()
        m = self.list.get(m)
        # self.id is a StringVar, so update it with set() rather than the
        # Entry methods delete()/insert()
        self.id.set(m)
global table_name
def create_table(self):
self.top = Toplevel(self.scr)
self.top.geometry('400x800')
self.table_name = StringVar()
l = Label(self.top, text='Table', font=('times', 20, 'bold'), bg=
'white', fg='black')
l.pack()
e = Entry(self.top, textvariable=self.table_name, font=('times', 20,
'bold'))
e.pack()
b = Button(self.top, text='Add field', command=self.fun_show, font=
('times', 20, 'bold'), bg='white', fg='black')
b.pack()
b = Button(self.top, text='OK', font=('times', 20, 'bold'), command
=self.show_entered_data, bg='white', fg='black')
b.pack(side=RIGHT)
def show_entered_data(self):
global en1
global en2
global list1
global tbl_name
self.tbl_name = self.table_name.get()
self.en1 = self.entry1.get()
self.en2 = self.entry2.get()
        # no quotes around the column spec: "('name type')" would make SQLite
        # treat it as a single quoted column name
        sent = 'Create table {}({} {})'.format(self.tbl_name, self.en1,
            self.en2)
list1 = Text(self.top, width=41, height=8, font=('times', 25, 'bold'))
list1.place(x=0, y=0)
list1.insert(0.0, sent)
print(self.tbl_name, self.en1, self.en2)
self.cursor.execute(sent)
self.list.insert(0, sent)
self.connection.commit()
def fun_show(self):
l = Label(self.top, text='Name', font=('times', 20, 'bold'), bg=
'white', fg='black')
l.pack(side=TOP)
self.entry1 = StringVar()
e1 = Entry(self.top, textvariable=self.entry1, font=('times', 20,
'bold'))
e1.pack()
l = Label(self.top, text='type', font=('times', 20, 'bold'), bg=
'white', fg='black')
l.pack(side=TOP)
self.entry2 = StringVar()
e1 = Entry(self.top, textvariable=self.entry2, font=('times', 20,
'bold'))
e1.pack()
Gui()
from tkinter import *
from tkinter import filedialog
import sqlite3
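
# Minimal tkinter front end for a SQLite "user" table: the top button bar
# drives the CRUD actions, the right-hand listbox shows rows and query
# results, and the entry at the bottom accepts free-form SQL.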
class Gui:
def __init__(self):
global en3
self.scr = Tk()
self.scr.geometry("2000x3000")
self.scr.title("VIEWING DATABASE")
self.connection = sqlite3.connect("student_details.db")
self.cursor = self.connection.cursor()
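        # per-column Tk variables for the user table; show_records() writes
        # the selected row into self.id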
self.id = StringVar()
self.name1 = StringVar()
self.fathername = StringVar()
self.mothername = StringVar()
self.cont = StringVar()
self.email = StringVar()
self.f1 = Frame(self.scr, bg='brown1')
self.f1.pack(side=TOP)
self.left_frame = Frame(self.scr, bg='red')
self.left_frame.pack(side=LEFT, fill=Y)
self.right_frame = Frame(self.scr, width=3000, bg='yellow')
self.right_frame.pack(side=LEFT, fill=Y)
l = Label(self.right_frame, text="***************SHOW TABLE RECORDS IN A DATABASE******************",
font=('times', 25, 'bold'), bg="black", fg="white")
l.pack(side=TOP, fill=X)
scrollbar = Scrollbar(self.right_frame)
scrollbar.pack(side=RIGHT, fill=Y)
self.list = Listbox(self.right_frame, width=61, height=12, font=('times', 25, 'bold'),
yscrollcommand=scrollbar.set)
self.list.bind("student_list", self.show_records)
self.list.pack(side=TOP, fill=Y)
scrollbar.config(command=self.list.yview)
self.querry_frame = Frame(self.right_frame, width=81, height=5, bg="white")
self.querry_frame.pack(side=BOTTOM, fill=X)
self.en3 = Entry(self.querry_frame, font=('times', 25, 'bold'))
self.en3.pack(side=BOTTOM, fill=X)
b = Button(self.querry_frame, text="Enter",command=self.sample, font=('times', 25, 'bold'), bg="white", fg="black")
b.pack(side=RIGHT)
b1 = Button(self.querry_frame, text="Save", command=self.show_data, font=('times', 25, 'bold'), bg="white",
fg="black")
b1.pack(side=RIGHT)
b = Button(self.f1, text="OPEN", command=self.file, font=('times', 25, 'bold'), bg="white", fg="black")
b.pack(side=LEFT)
b = Button(self.f1, text="CREATE", command=self.create_table, font=('times', 25, 'bold'), bg="white",
fg="black")
b.pack(side=LEFT)
b1 = Button(self.f1, text="INSERT", command=self.add_record, font=('times', 25, 'bold'), bg="white",
fg="black")
b1.pack(side=LEFT)
b2 = Button(self.f1, text="DELETE", command=self.del_rec, font=('times', 25, 'bold'), bg="white",
fg="black")
b2.pack(side=LEFT)
b3 = Button(self.f1, text="UPDATE", command=self.update, font=('times', 25, 'bold'), bg="white",
fg="black")
b3.pack(side=RIGHT)
b4 = Button(self.f1, text="VIEW", command=lambda: self.view_table(), font=('times', 25, 'bold'), bg="white",
fg="black")
b4.pack(side=RIGHT)
b4 = Button(self.f1, text="BROWSE", command=self.show_data, font=('times', 25, 'bold'), bg="white",
fg="black")
b4.pack(side=RIGHT)
l = Label(self.left_frame, text="View Table in Database", font=('times', 25, 'bold'), bg='blue', fg='white')
l.pack(side=TOP, fill=X)
        # create the table before entering the event loop; mainloop() blocks
        # until the window is closed, so anything placed after it runs too late
        try:
            self.cursor.execute("create table user(Id varchar(10),Name varchar(30),FathersName varchar(20),MothersName varchar(20),Contact varchar(10),Email varchar(30))")
            self.connection.commit()
        except sqlite3.OperationalError:
            pass  # table already exists from a previous run
        self.scr.mainloop()
def insert_data(self):
        # read the Entry widgets created by add_record(); locals avoid
        # clobbering the StringVar attributes set up in __init__
        uid = e.get()
        name = e1.get()
        father = e2.get()
        mother = e3.get()
        contact = e4.get()
        email = e5.get()
        # parameterized query: quotes in the input can no longer break the SQL
        self.cursor.execute("insert into user values(?,?,?,?,?,?)",
                            (uid, name, father, mother, contact, email))
        self.connection.commit()
def show_data(self):
self.connection = sqlite3.connect("student_details.db")
self.cursor = self.connection.cursor()
self.cursor.execute("Select * from user")
rows = self.cursor.fetchall()
for row in rows:
l1 = self.list.insert(END, row)
self.connection.commit()
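
    # NOTE: SQL parameters cannot stand in for identifiers, so the column
    # name below is interpolated with format(); treat this form as trusted.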
def update_data(self):
self.cursor.execute("Update user set {} = '{}' where id ='{}'".format(e2.get(),e3.get(),e.get()))
self.connection.commit()
self.list.delete(0, END)
self.show_data()
def update(self):
global e
global e2
global e3
self.top1 = Toplevel(self.scr)
self.top1.geometry("400x400")
l1 = Label(self.top1, text="USER_ID", font=('times', 25, 'bold'), bg="green2", fg="white")
l1.pack()
self.Id=StringVar()
e = Entry(self.top1, relief="sunken", textvariable=self.Id, font=('times', 25, 'bold'))
e.pack()
self.col_name=StringVar()
l2 = Label(self.top1, text="col_name", font=('times', 25, 'bold'), bg="green2", fg="white")
l2.pack()
e2 = Entry(self.top1, relief="sunken", textvariable=self.col_name, font=('times', 25, 'bold'))
e2.pack()
self.value=StringVar()
l3 = Label(self.top1, text="VALUE", font=('times', 25, 'bold'), bg="green2", fg="white")
l3.pack()
e3 = Entry(self.top1, relief="sunken", textvariable=self.value, font=('times', 25, 'bold'))
e3.pack()
b = Button(self.top1, text="UPDATE", command=self.update_data, font=('times', 25, 'bold'), bg="white",
fg="black")
b.pack()
self.top1.mainloop()
def delete_data(self):
self.cursor.execute("Delete from user where id ='{}'".format(e.get()))
self.list.delete(0,END)
self.connection.commit()
self.show_data()
def del_rec(self):
global e
self.top2 = Toplevel(self.scr)
self.top2.geometry("400x400")
l1 = Label(self.top2, text="USER_ID", font=('times', 25, 'bold'), bg="green2", fg="white")
l1.pack()
self.Id = StringVar()
e = Entry(self.top2, relief="sunken", textvariable=self.Id, font=('times', 25, 'bold'))
e.pack()
b = Button(self.top2, text="delete records", command=self.delete_data, font=('times', 25, 'bold'), bg="white",
fg="black")
b.pack()
self.top2.mainloop()
def sample(self):
        # run whatever SQL was typed into the bottom query box and show rows
        self.cursor.execute(self.en3.get())
        r = self.cursor.fetchall()
        for row in r:
            self.list.insert(0, row)
        self.connection.commit()
def file(self):
        # let the user pick a file and echo the chosen path into the listbox
        self.f1.filename = filedialog.askopenfilename(title="Select file")
        self.list.insert(0, self.f1.filename)
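
    # Build the "insert" form in a Toplevel window; the widgets are published
    # as globals so insert_data() can read them when SUBMIT is pressed.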
def add_record(self):
global e
global e1
global e2
global e3
global e4
global e5
self.e = StringVar()
self.e1 = StringVar()
self.e2 = StringVar()
self.e3 = StringVar()
self.e4 = StringVar()
self.e5 = StringVar()
self.top=Toplevel(self.scr)
self.top.geometry("400x800")
l=Label(self.top,text="USER_ID",font=('times',25,'bold'),bg="green2",fg="white")
l.pack()
e=Entry(self.top,relief="sunken",textvariable=self.e,font=('times',25,'bold'))
e.pack()
l1 = Label(self.top, text="USERNAME", font=('times', 25, 'bold'), bg="green2", fg="white")
l1.pack()
e1 = Entry(self.top, relief="sunken",textvariable=self.e1, font=('times', 25, 'bold'))
e1.pack()
l2 = Label(self.top, text="FATHERS NAME", font=('times', 25, 'bold'), bg="green2", fg="white")
l2.pack()
e2 = Entry(self.top, relief="sunken",textvariable=self.e2, font=('times', 25, 'bold'))
e2.pack()
l3 = Label(self.top, text="MOTHERS NAME", font=('times', 25, 'bold'), bg="green2", fg="white")
l3.pack()
e3 = Entry(self.top, relief="sunken",textvariable=self.e3, font=('times', 25, 'bold'))
e3.pack()
l4 = Label(self.top, text="CONTACT NO", font=('times', 25, 'bold'), bg="green2", fg="white")
l4.pack()
e4 = Entry(self.top, relief="sunken",textvariable=self.e4, font=('times', 25, 'bold'))
e4.pack()
l5 = Label(self.top, text="E-MAIL ID", font=('times', 25, 'bold'), bg="green2", fg="white")
l5.pack()
e5 = Entry(self.top, relief="sunken",textvariable=self.e5, font=('times', 25, 'bold'))
e5.pack()
varchk=IntVar()
b = Button(self.top, text="SUBMIT", command=self.insert_data,font=('times', 25, 'bold'), bg="white",fg="black")
b.pack()
self.top.mainloop()
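
    # Populate the left-hand panel with the known table names.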
def view_table(self):
global list_box
self.list_box = Listbox(self.left_frame, font=('times', 20, 'bold'))
try:
self.list_box.insert(1,"user")
self.list_box.insert(2,self.tbl_name)
except:
pass
b=Button(self.left_frame,text="Click",font=('times', 20, 'bold'),command=self.selection,bg="white",fg="black")
b.place(x=100,y=400)
self.list_box.place(x=10,y=50)
def selection(self):
lb = self.list_box.curselection()
print(lb)
for i in list(lb):
self.show_data()
def show_records(self):
        global m
        m = self.list.curselection()
        m = self.list.get(m)
        # self.id is a StringVar, so it has no delete()/insert(); push the
        # selected row into it with set()
        self.id.set(m)
global table_name
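
    # create_table() collects a table name plus one (name, type) column pair
    # and show_entered_data() issues the corresponding CREATE TABLE statement.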
def create_table(self):
self.top = Toplevel(self.scr)
self.top.geometry("400x800")
self.table_name=StringVar()
l=Label(self.top,text="Table",font=('times', 20, 'bold'),bg="white",fg="black")
l.pack()
e=Entry(self.top,textvariable=self.table_name,font=('times', 20, 'bold'))
e.pack()
b=Button(self.top,text="Add field",command=self.fun_show , font=('times', 20, 'bold'),bg="white",fg="black")
b.pack()
b=Button(self.top,text="OK",font=('times', 20, 'bold'),command=self.show_entered_data,bg="white",fg="black")
b.pack(side=RIGHT)
def show_entered_data(self):
global en1
global en2
global list1
global tbl_name
self.tbl_name=self.table_name.get()
self.en1=self.entry1.get()
self.en2=self.entry2.get()
sent="Create table "+str(self.tbl_name)+"('"+str(self.en1)+ " "+ str(self.en2)+"')"
list1 = Text(self.top, width=41, height=8, font=('times', 25, 'bold'))
list1.place(x=0,y=0)
list1.insert(0.0,sent)
print(self.tbl_name,self.en1,self.en2)
self.cursor.execute(sent)
self.list.insert(0,sent)
self.connection.commit()
def fun_show(self):
l = Label(self.top, text="Name", font=('times', 20, 'bold'), bg="white", fg="black")
l.pack(side=TOP)
self.entry1 = StringVar()
e1 = Entry(self.top, textvariable=self.entry1, font=('times', 20, 'bold'))
e1.pack()
l = Label(self.top, text="type", font=('times', 20, 'bold'), bg="white", fg="black")
l.pack(side=TOP)
self.entry2 = StringVar()
e1 = Entry(self.top, textvariable=self.entry2, font=('times', 20, 'bold'))
e1.pack()
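
# Construct the app; __init__ builds the whole UI and enters mainloop().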
Gui()
|
flexible
|
{
"blob_id": "4c6b04716f41c3413896f0d59f2cc9b1475d7f64",
"index": 5164,
"step-1": "<mask token>\n\n\nclass Gui:\n <mask token>\n\n def insert_data(self):\n self.id = e.get()\n self.name1 = e1.get()\n self.fathername = e2.get()\n self.mothername = e3.get()\n self.cont = e4.get()\n self.email = e5.get()\n self.cursor.execute(\n \"insert into user values('{}','{}','{}','{}','{}','{}')\".format\n (self.id, self.name1, self.fathername, self.mothername, self.\n cont, self.email))\n self.connection.commit()\n <mask token>\n <mask token>\n\n def update(self):\n global e\n global e2\n global e3\n self.top1 = Toplevel(self.scr)\n self.top1.geometry('400x400')\n l1 = Label(self.top1, text='USER_ID', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l1.pack()\n self.Id = StringVar()\n e = Entry(self.top1, relief='sunken', textvariable=self.Id, font=(\n 'times', 25, 'bold'))\n e.pack()\n self.col_name = StringVar()\n l2 = Label(self.top1, text='col_name', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l2.pack()\n e2 = Entry(self.top1, relief='sunken', textvariable=self.col_name,\n font=('times', 25, 'bold'))\n e2.pack()\n self.value = StringVar()\n l3 = Label(self.top1, text='VALUE', font=('times', 25, 'bold'), bg=\n 'green2', fg='white')\n l3.pack()\n e3 = Entry(self.top1, relief='sunken', textvariable=self.value,\n font=('times', 25, 'bold'))\n e3.pack()\n b = Button(self.top1, text='UPDATE', command=self.update_data, font\n =('times', 25, 'bold'), bg='white', fg='black')\n b.pack()\n self.top1.mainloop()\n\n def delete_data(self):\n self.cursor.execute(\"Delete from user where id ='{}'\".format(e.get()))\n self.list.delete(0, END)\n self.connection.commit()\n self.show_data()\n\n def del_rec(self):\n global e\n self.top2 = Toplevel(self.scr)\n self.top2.geometry('400x400')\n l1 = Label(self.top2, text='USER_ID', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l1.pack()\n self.Id = StringVar()\n e = Entry(self.top2, relief='sunken', textvariable=self.Id, font=(\n 'times', 25, 'bold'))\n e.pack()\n b = Button(self.top2, text='delete records', command=self.\n delete_data, font=('times', 25, 'bold'), bg='white', fg='black')\n b.pack()\n self.top2.mainloop()\n\n def sample(self):\n s = '{}'.format(self.en3.get())\n a = self.cursor.execute('{}'.format(self.en3.get()))\n r = self.cursor.fetchall()\n for row in r:\n self.list.insert(0, row)\n self.connection.commit()\n <mask token>\n\n def add_record(self):\n global e\n global e1\n global e2\n global e3\n global e4\n global e5\n self.e = StringVar()\n self.e1 = StringVar()\n self.e2 = StringVar()\n self.e3 = StringVar()\n self.e4 = StringVar()\n self.e5 = StringVar()\n self.top = Toplevel(self.scr)\n self.top.geometry('400x800')\n l = Label(self.top, text='USER_ID', font=('times', 25, 'bold'), bg=\n 'green2', fg='white')\n l.pack()\n e = Entry(self.top, relief='sunken', textvariable=self.e, font=(\n 'times', 25, 'bold'))\n e.pack()\n l1 = Label(self.top, text='USERNAME', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l1.pack()\n e1 = Entry(self.top, relief='sunken', textvariable=self.e1, font=(\n 'times', 25, 'bold'))\n e1.pack()\n l2 = Label(self.top, text='FATHERS NAME', font=('times', 25, 'bold'\n ), bg='green2', fg='white')\n l2.pack()\n e2 = Entry(self.top, relief='sunken', textvariable=self.e2, font=(\n 'times', 25, 'bold'))\n e2.pack()\n l3 = Label(self.top, text='MOTHERS NAME', font=('times', 25, 'bold'\n ), bg='green2', fg='white')\n l3.pack()\n e3 = Entry(self.top, relief='sunken', textvariable=self.e3, font=(\n 'times', 25, 'bold'))\n e3.pack()\n l4 = Label(self.top, text='CONTACT 
NO', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l4.pack()\n e4 = Entry(self.top, relief='sunken', textvariable=self.e4, font=(\n 'times', 25, 'bold'))\n e4.pack()\n l5 = Label(self.top, text='E-MAIL ID', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l5.pack()\n e5 = Entry(self.top, relief='sunken', textvariable=self.e5, font=(\n 'times', 25, 'bold'))\n e5.pack()\n varchk = IntVar()\n b = Button(self.top, text='SUBMIT', command=self.insert_data, font=\n ('times', 25, 'bold'), bg='white', fg='black')\n b.pack()\n self.top.mainloop()\n <mask token>\n\n def selection(self):\n lb = self.list_box.curselection()\n print(lb)\n for i in list(lb):\n self.show_data()\n <mask token>\n global table_name\n <mask token>\n\n def show_entered_data(self):\n global en1\n global en2\n global list1\n global tbl_name\n self.tbl_name = self.table_name.get()\n self.en1 = self.entry1.get()\n self.en2 = self.entry2.get()\n sent = 'Create table ' + str(self.tbl_name) + \"('\" + str(self.en1\n ) + ' ' + str(self.en2) + \"')\"\n list1 = Text(self.top, width=41, height=8, font=('times', 25, 'bold'))\n list1.place(x=0, y=0)\n list1.insert(0.0, sent)\n print(self.tbl_name, self.en1, self.en2)\n self.cursor.execute(sent)\n self.list.insert(0, sent)\n self.connection.commit()\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Gui:\n <mask token>\n\n def insert_data(self):\n self.id = e.get()\n self.name1 = e1.get()\n self.fathername = e2.get()\n self.mothername = e3.get()\n self.cont = e4.get()\n self.email = e5.get()\n self.cursor.execute(\n \"insert into user values('{}','{}','{}','{}','{}','{}')\".format\n (self.id, self.name1, self.fathername, self.mothername, self.\n cont, self.email))\n self.connection.commit()\n\n def show_data(self):\n self.connection = sqlite3.connect('student_details.db')\n self.cursor = self.connection.cursor()\n self.cursor.execute('Select * from user')\n rows = self.cursor.fetchall()\n for row in rows:\n l1 = self.list.insert(END, row)\n self.connection.commit()\n <mask token>\n\n def update(self):\n global e\n global e2\n global e3\n self.top1 = Toplevel(self.scr)\n self.top1.geometry('400x400')\n l1 = Label(self.top1, text='USER_ID', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l1.pack()\n self.Id = StringVar()\n e = Entry(self.top1, relief='sunken', textvariable=self.Id, font=(\n 'times', 25, 'bold'))\n e.pack()\n self.col_name = StringVar()\n l2 = Label(self.top1, text='col_name', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l2.pack()\n e2 = Entry(self.top1, relief='sunken', textvariable=self.col_name,\n font=('times', 25, 'bold'))\n e2.pack()\n self.value = StringVar()\n l3 = Label(self.top1, text='VALUE', font=('times', 25, 'bold'), bg=\n 'green2', fg='white')\n l3.pack()\n e3 = Entry(self.top1, relief='sunken', textvariable=self.value,\n font=('times', 25, 'bold'))\n e3.pack()\n b = Button(self.top1, text='UPDATE', command=self.update_data, font\n =('times', 25, 'bold'), bg='white', fg='black')\n b.pack()\n self.top1.mainloop()\n\n def delete_data(self):\n self.cursor.execute(\"Delete from user where id ='{}'\".format(e.get()))\n self.list.delete(0, END)\n self.connection.commit()\n self.show_data()\n\n def del_rec(self):\n global e\n self.top2 = Toplevel(self.scr)\n self.top2.geometry('400x400')\n l1 = Label(self.top2, text='USER_ID', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l1.pack()\n self.Id = StringVar()\n e = Entry(self.top2, relief='sunken', textvariable=self.Id, font=(\n 'times', 25, 'bold'))\n e.pack()\n b = Button(self.top2, text='delete records', command=self.\n delete_data, font=('times', 25, 'bold'), bg='white', fg='black')\n b.pack()\n self.top2.mainloop()\n\n def sample(self):\n s = '{}'.format(self.en3.get())\n a = self.cursor.execute('{}'.format(self.en3.get()))\n r = self.cursor.fetchall()\n for row in r:\n self.list.insert(0, row)\n self.connection.commit()\n\n def file(self):\n self.f1.filename = filedialog.askopenfilename(title='Select file')\n p = self.f1.filename\n self.list.insert(0, self.f1.filename)\n\n def add_record(self):\n global e\n global e1\n global e2\n global e3\n global e4\n global e5\n self.e = StringVar()\n self.e1 = StringVar()\n self.e2 = StringVar()\n self.e3 = StringVar()\n self.e4 = StringVar()\n self.e5 = StringVar()\n self.top = Toplevel(self.scr)\n self.top.geometry('400x800')\n l = Label(self.top, text='USER_ID', font=('times', 25, 'bold'), bg=\n 'green2', fg='white')\n l.pack()\n e = Entry(self.top, relief='sunken', textvariable=self.e, font=(\n 'times', 25, 'bold'))\n e.pack()\n l1 = Label(self.top, text='USERNAME', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l1.pack()\n e1 = Entry(self.top, relief='sunken', textvariable=self.e1, font=(\n 'times', 25, 'bold'))\n e1.pack()\n l2 = Label(self.top, text='FATHERS NAME', font=('times', 25, 
'bold'\n ), bg='green2', fg='white')\n l2.pack()\n e2 = Entry(self.top, relief='sunken', textvariable=self.e2, font=(\n 'times', 25, 'bold'))\n e2.pack()\n l3 = Label(self.top, text='MOTHERS NAME', font=('times', 25, 'bold'\n ), bg='green2', fg='white')\n l3.pack()\n e3 = Entry(self.top, relief='sunken', textvariable=self.e3, font=(\n 'times', 25, 'bold'))\n e3.pack()\n l4 = Label(self.top, text='CONTACT NO', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l4.pack()\n e4 = Entry(self.top, relief='sunken', textvariable=self.e4, font=(\n 'times', 25, 'bold'))\n e4.pack()\n l5 = Label(self.top, text='E-MAIL ID', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l5.pack()\n e5 = Entry(self.top, relief='sunken', textvariable=self.e5, font=(\n 'times', 25, 'bold'))\n e5.pack()\n varchk = IntVar()\n b = Button(self.top, text='SUBMIT', command=self.insert_data, font=\n ('times', 25, 'bold'), bg='white', fg='black')\n b.pack()\n self.top.mainloop()\n <mask token>\n\n def selection(self):\n lb = self.list_box.curselection()\n print(lb)\n for i in list(lb):\n self.show_data()\n <mask token>\n global table_name\n\n def create_table(self):\n self.top = Toplevel(self.scr)\n self.top.geometry('400x800')\n self.table_name = StringVar()\n l = Label(self.top, text='Table', font=('times', 20, 'bold'), bg=\n 'white', fg='black')\n l.pack()\n e = Entry(self.top, textvariable=self.table_name, font=('times', 20,\n 'bold'))\n e.pack()\n b = Button(self.top, text='Add field', command=self.fun_show, font=\n ('times', 20, 'bold'), bg='white', fg='black')\n b.pack()\n b = Button(self.top, text='OK', font=('times', 20, 'bold'), command\n =self.show_entered_data, bg='white', fg='black')\n b.pack(side=RIGHT)\n\n def show_entered_data(self):\n global en1\n global en2\n global list1\n global tbl_name\n self.tbl_name = self.table_name.get()\n self.en1 = self.entry1.get()\n self.en2 = self.entry2.get()\n sent = 'Create table ' + str(self.tbl_name) + \"('\" + str(self.en1\n ) + ' ' + str(self.en2) + \"')\"\n list1 = Text(self.top, width=41, height=8, font=('times', 25, 'bold'))\n list1.place(x=0, y=0)\n list1.insert(0.0, sent)\n print(self.tbl_name, self.en1, self.en2)\n self.cursor.execute(sent)\n self.list.insert(0, sent)\n self.connection.commit()\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Gui:\n\n def __init__(self):\n global en3\n self.scr = Tk()\n self.scr.geometry('2000x3000')\n self.scr.title('VIEWING DATABASE')\n self.connection = sqlite3.connect('student_details.db')\n self.cursor = self.connection.cursor()\n self.id = StringVar()\n self.name1 = StringVar()\n self.fathername = StringVar()\n self.mothername = StringVar()\n self.cont = StringVar()\n self.email = StringVar()\n self.f1 = Frame(self.scr, bg='brown1')\n self.f1.pack(side=TOP)\n self.left_frame = Frame(self.scr, bg='red')\n self.left_frame.pack(side=LEFT, fill=Y)\n self.right_frame = Frame(self.scr, width=3000, bg='yellow')\n self.right_frame.pack(side=LEFT, fill=Y)\n l = Label(self.right_frame, text=\n '***************SHOW TABLE RECORDS IN A DATABASE******************'\n , font=('times', 25, 'bold'), bg='black', fg='white')\n l.pack(side=TOP, fill=X)\n scrollbar = Scrollbar(self.right_frame)\n scrollbar.pack(side=RIGHT, fill=Y)\n self.list = Listbox(self.right_frame, width=61, height=12, font=(\n 'times', 25, 'bold'), yscrollcommand=scrollbar.set)\n self.list.bind('student_list', self.show_records)\n self.list.pack(side=TOP, fill=Y)\n scrollbar.config(command=self.list.yview)\n self.querry_frame = Frame(self.right_frame, width=81, height=5, bg=\n 'white')\n self.querry_frame.pack(side=BOTTOM, fill=X)\n self.en3 = Entry(self.querry_frame, font=('times', 25, 'bold'))\n self.en3.pack(side=BOTTOM, fill=X)\n b = Button(self.querry_frame, text='Enter', command=self.sample,\n font=('times', 25, 'bold'), bg='white', fg='black')\n b.pack(side=RIGHT)\n b1 = Button(self.querry_frame, text='Save', command=self.show_data,\n font=('times', 25, 'bold'), bg='white', fg='black')\n b1.pack(side=RIGHT)\n b = Button(self.f1, text='OPEN', command=self.file, font=('times', \n 25, 'bold'), bg='white', fg='black')\n b.pack(side=LEFT)\n b = Button(self.f1, text='CREATE', command=self.create_table, font=\n ('times', 25, 'bold'), bg='white', fg='black')\n b.pack(side=LEFT)\n b1 = Button(self.f1, text='INSERT', command=self.add_record, font=(\n 'times', 25, 'bold'), bg='white', fg='black')\n b1.pack(side=LEFT)\n b2 = Button(self.f1, text='DELETE', command=self.del_rec, font=(\n 'times', 25, 'bold'), bg='white', fg='black')\n b2.pack(side=LEFT)\n b3 = Button(self.f1, text='UPDATE', command=self.update, font=(\n 'times', 25, 'bold'), bg='white', fg='black')\n b3.pack(side=RIGHT)\n b4 = Button(self.f1, text='VIEW', command=lambda : self.view_table(\n ), font=('times', 25, 'bold'), bg='white', fg='black')\n b4.pack(side=RIGHT)\n b4 = Button(self.f1, text='BROWSE', command=self.show_data, font=(\n 'times', 25, 'bold'), bg='white', fg='black')\n b4.pack(side=RIGHT)\n l = Label(self.left_frame, text='View Table in Database', font=(\n 'times', 25, 'bold'), bg='blue', fg='white')\n l.pack(side=TOP, fill=X)\n self.scr.mainloop()\n try:\n self.cursor.execute(\n 'create table user(Id varchar(10),Name varchar(30),FathersName varchar(20),MothersName varchar(20),Contact varchar(10),Email varchar(30))'\n )\n self.connection.commit()\n except:\n pass\n\n def insert_data(self):\n self.id = e.get()\n self.name1 = e1.get()\n self.fathername = e2.get()\n self.mothername = e3.get()\n self.cont = e4.get()\n self.email = e5.get()\n self.cursor.execute(\n \"insert into user values('{}','{}','{}','{}','{}','{}')\".format\n (self.id, self.name1, self.fathername, self.mothername, self.\n cont, self.email))\n self.connection.commit()\n\n def show_data(self):\n self.connection = sqlite3.connect('student_details.db')\n self.cursor 
= self.connection.cursor()\n self.cursor.execute('Select * from user')\n rows = self.cursor.fetchall()\n for row in rows:\n l1 = self.list.insert(END, row)\n self.connection.commit()\n\n def update_data(self):\n self.cursor.execute(\"Update user set {} = '{}' where id ='{}'\".\n format(e2.get(), e3.get(), e.get()))\n self.connection.commit()\n self.list.delete(0, END)\n self.show_data()\n\n def update(self):\n global e\n global e2\n global e3\n self.top1 = Toplevel(self.scr)\n self.top1.geometry('400x400')\n l1 = Label(self.top1, text='USER_ID', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l1.pack()\n self.Id = StringVar()\n e = Entry(self.top1, relief='sunken', textvariable=self.Id, font=(\n 'times', 25, 'bold'))\n e.pack()\n self.col_name = StringVar()\n l2 = Label(self.top1, text='col_name', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l2.pack()\n e2 = Entry(self.top1, relief='sunken', textvariable=self.col_name,\n font=('times', 25, 'bold'))\n e2.pack()\n self.value = StringVar()\n l3 = Label(self.top1, text='VALUE', font=('times', 25, 'bold'), bg=\n 'green2', fg='white')\n l3.pack()\n e3 = Entry(self.top1, relief='sunken', textvariable=self.value,\n font=('times', 25, 'bold'))\n e3.pack()\n b = Button(self.top1, text='UPDATE', command=self.update_data, font\n =('times', 25, 'bold'), bg='white', fg='black')\n b.pack()\n self.top1.mainloop()\n\n def delete_data(self):\n self.cursor.execute(\"Delete from user where id ='{}'\".format(e.get()))\n self.list.delete(0, END)\n self.connection.commit()\n self.show_data()\n\n def del_rec(self):\n global e\n self.top2 = Toplevel(self.scr)\n self.top2.geometry('400x400')\n l1 = Label(self.top2, text='USER_ID', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l1.pack()\n self.Id = StringVar()\n e = Entry(self.top2, relief='sunken', textvariable=self.Id, font=(\n 'times', 25, 'bold'))\n e.pack()\n b = Button(self.top2, text='delete records', command=self.\n delete_data, font=('times', 25, 'bold'), bg='white', fg='black')\n b.pack()\n self.top2.mainloop()\n\n def sample(self):\n s = '{}'.format(self.en3.get())\n a = self.cursor.execute('{}'.format(self.en3.get()))\n r = self.cursor.fetchall()\n for row in r:\n self.list.insert(0, row)\n self.connection.commit()\n\n def file(self):\n self.f1.filename = filedialog.askopenfilename(title='Select file')\n p = self.f1.filename\n self.list.insert(0, self.f1.filename)\n\n def add_record(self):\n global e\n global e1\n global e2\n global e3\n global e4\n global e5\n self.e = StringVar()\n self.e1 = StringVar()\n self.e2 = StringVar()\n self.e3 = StringVar()\n self.e4 = StringVar()\n self.e5 = StringVar()\n self.top = Toplevel(self.scr)\n self.top.geometry('400x800')\n l = Label(self.top, text='USER_ID', font=('times', 25, 'bold'), bg=\n 'green2', fg='white')\n l.pack()\n e = Entry(self.top, relief='sunken', textvariable=self.e, font=(\n 'times', 25, 'bold'))\n e.pack()\n l1 = Label(self.top, text='USERNAME', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l1.pack()\n e1 = Entry(self.top, relief='sunken', textvariable=self.e1, font=(\n 'times', 25, 'bold'))\n e1.pack()\n l2 = Label(self.top, text='FATHERS NAME', font=('times', 25, 'bold'\n ), bg='green2', fg='white')\n l2.pack()\n e2 = Entry(self.top, relief='sunken', textvariable=self.e2, font=(\n 'times', 25, 'bold'))\n e2.pack()\n l3 = Label(self.top, text='MOTHERS NAME', font=('times', 25, 'bold'\n ), bg='green2', fg='white')\n l3.pack()\n e3 = Entry(self.top, relief='sunken', textvariable=self.e3, font=(\n 'times', 
25, 'bold'))\n e3.pack()\n l4 = Label(self.top, text='CONTACT NO', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l4.pack()\n e4 = Entry(self.top, relief='sunken', textvariable=self.e4, font=(\n 'times', 25, 'bold'))\n e4.pack()\n l5 = Label(self.top, text='E-MAIL ID', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l5.pack()\n e5 = Entry(self.top, relief='sunken', textvariable=self.e5, font=(\n 'times', 25, 'bold'))\n e5.pack()\n varchk = IntVar()\n b = Button(self.top, text='SUBMIT', command=self.insert_data, font=\n ('times', 25, 'bold'), bg='white', fg='black')\n b.pack()\n self.top.mainloop()\n\n def view_table(self):\n global list_box\n self.list_box = Listbox(self.left_frame, font=('times', 20, 'bold'))\n try:\n self.list_box.insert(1, 'user')\n self.list_box.insert(2, self.tbl_name)\n except:\n pass\n b = Button(self.left_frame, text='Click', font=('times', 20, 'bold'\n ), command=self.selection, bg='white', fg='black')\n b.place(x=100, y=400)\n self.list_box.place(x=10, y=50)\n\n def selection(self):\n lb = self.list_box.curselection()\n print(lb)\n for i in list(lb):\n self.show_data()\n\n def show_records(self):\n global m\n m = self.list.curselection()\n m = self.list.get(m)\n self.id.delete(0, END)\n self.id.insert(END, self.add_record())\n global table_name\n\n def create_table(self):\n self.top = Toplevel(self.scr)\n self.top.geometry('400x800')\n self.table_name = StringVar()\n l = Label(self.top, text='Table', font=('times', 20, 'bold'), bg=\n 'white', fg='black')\n l.pack()\n e = Entry(self.top, textvariable=self.table_name, font=('times', 20,\n 'bold'))\n e.pack()\n b = Button(self.top, text='Add field', command=self.fun_show, font=\n ('times', 20, 'bold'), bg='white', fg='black')\n b.pack()\n b = Button(self.top, text='OK', font=('times', 20, 'bold'), command\n =self.show_entered_data, bg='white', fg='black')\n b.pack(side=RIGHT)\n\n def show_entered_data(self):\n global en1\n global en2\n global list1\n global tbl_name\n self.tbl_name = self.table_name.get()\n self.en1 = self.entry1.get()\n self.en2 = self.entry2.get()\n sent = 'Create table ' + str(self.tbl_name) + \"('\" + str(self.en1\n ) + ' ' + str(self.en2) + \"')\"\n list1 = Text(self.top, width=41, height=8, font=('times', 25, 'bold'))\n list1.place(x=0, y=0)\n list1.insert(0.0, sent)\n print(self.tbl_name, self.en1, self.en2)\n self.cursor.execute(sent)\n self.list.insert(0, sent)\n self.connection.commit()\n\n def fun_show(self):\n l = Label(self.top, text='Name', font=('times', 20, 'bold'), bg=\n 'white', fg='black')\n l.pack(side=TOP)\n self.entry1 = StringVar()\n e1 = Entry(self.top, textvariable=self.entry1, font=('times', 20,\n 'bold'))\n e1.pack()\n l = Label(self.top, text='type', font=('times', 20, 'bold'), bg=\n 'white', fg='black')\n l.pack(side=TOP)\n self.entry2 = StringVar()\n e1 = Entry(self.top, textvariable=self.entry2, font=('times', 20,\n 'bold'))\n e1.pack()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Gui:\n\n def __init__(self):\n global en3\n self.scr = Tk()\n self.scr.geometry('2000x3000')\n self.scr.title('VIEWING DATABASE')\n self.connection = sqlite3.connect('student_details.db')\n self.cursor = self.connection.cursor()\n self.id = StringVar()\n self.name1 = StringVar()\n self.fathername = StringVar()\n self.mothername = StringVar()\n self.cont = StringVar()\n self.email = StringVar()\n self.f1 = Frame(self.scr, bg='brown1')\n self.f1.pack(side=TOP)\n self.left_frame = Frame(self.scr, bg='red')\n self.left_frame.pack(side=LEFT, fill=Y)\n self.right_frame = Frame(self.scr, width=3000, bg='yellow')\n self.right_frame.pack(side=LEFT, fill=Y)\n l = Label(self.right_frame, text=\n '***************SHOW TABLE RECORDS IN A DATABASE******************'\n , font=('times', 25, 'bold'), bg='black', fg='white')\n l.pack(side=TOP, fill=X)\n scrollbar = Scrollbar(self.right_frame)\n scrollbar.pack(side=RIGHT, fill=Y)\n self.list = Listbox(self.right_frame, width=61, height=12, font=(\n 'times', 25, 'bold'), yscrollcommand=scrollbar.set)\n self.list.bind('student_list', self.show_records)\n self.list.pack(side=TOP, fill=Y)\n scrollbar.config(command=self.list.yview)\n self.querry_frame = Frame(self.right_frame, width=81, height=5, bg=\n 'white')\n self.querry_frame.pack(side=BOTTOM, fill=X)\n self.en3 = Entry(self.querry_frame, font=('times', 25, 'bold'))\n self.en3.pack(side=BOTTOM, fill=X)\n b = Button(self.querry_frame, text='Enter', command=self.sample,\n font=('times', 25, 'bold'), bg='white', fg='black')\n b.pack(side=RIGHT)\n b1 = Button(self.querry_frame, text='Save', command=self.show_data,\n font=('times', 25, 'bold'), bg='white', fg='black')\n b1.pack(side=RIGHT)\n b = Button(self.f1, text='OPEN', command=self.file, font=('times', \n 25, 'bold'), bg='white', fg='black')\n b.pack(side=LEFT)\n b = Button(self.f1, text='CREATE', command=self.create_table, font=\n ('times', 25, 'bold'), bg='white', fg='black')\n b.pack(side=LEFT)\n b1 = Button(self.f1, text='INSERT', command=self.add_record, font=(\n 'times', 25, 'bold'), bg='white', fg='black')\n b1.pack(side=LEFT)\n b2 = Button(self.f1, text='DELETE', command=self.del_rec, font=(\n 'times', 25, 'bold'), bg='white', fg='black')\n b2.pack(side=LEFT)\n b3 = Button(self.f1, text='UPDATE', command=self.update, font=(\n 'times', 25, 'bold'), bg='white', fg='black')\n b3.pack(side=RIGHT)\n b4 = Button(self.f1, text='VIEW', command=lambda : self.view_table(\n ), font=('times', 25, 'bold'), bg='white', fg='black')\n b4.pack(side=RIGHT)\n b4 = Button(self.f1, text='BROWSE', command=self.show_data, font=(\n 'times', 25, 'bold'), bg='white', fg='black')\n b4.pack(side=RIGHT)\n l = Label(self.left_frame, text='View Table in Database', font=(\n 'times', 25, 'bold'), bg='blue', fg='white')\n l.pack(side=TOP, fill=X)\n self.scr.mainloop()\n try:\n self.cursor.execute(\n 'create table user(Id varchar(10),Name varchar(30),FathersName varchar(20),MothersName varchar(20),Contact varchar(10),Email varchar(30))'\n )\n self.connection.commit()\n except:\n pass\n\n def insert_data(self):\n self.id = e.get()\n self.name1 = e1.get()\n self.fathername = e2.get()\n self.mothername = e3.get()\n self.cont = e4.get()\n self.email = e5.get()\n self.cursor.execute(\n \"insert into user values('{}','{}','{}','{}','{}','{}')\".format\n (self.id, self.name1, self.fathername, self.mothername, self.\n cont, self.email))\n self.connection.commit()\n\n def show_data(self):\n self.connection = sqlite3.connect('student_details.db')\n self.cursor 
= self.connection.cursor()\n self.cursor.execute('Select * from user')\n rows = self.cursor.fetchall()\n for row in rows:\n l1 = self.list.insert(END, row)\n self.connection.commit()\n\n def update_data(self):\n self.cursor.execute(\"Update user set {} = '{}' where id ='{}'\".\n format(e2.get(), e3.get(), e.get()))\n self.connection.commit()\n self.list.delete(0, END)\n self.show_data()\n\n def update(self):\n global e\n global e2\n global e3\n self.top1 = Toplevel(self.scr)\n self.top1.geometry('400x400')\n l1 = Label(self.top1, text='USER_ID', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l1.pack()\n self.Id = StringVar()\n e = Entry(self.top1, relief='sunken', textvariable=self.Id, font=(\n 'times', 25, 'bold'))\n e.pack()\n self.col_name = StringVar()\n l2 = Label(self.top1, text='col_name', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l2.pack()\n e2 = Entry(self.top1, relief='sunken', textvariable=self.col_name,\n font=('times', 25, 'bold'))\n e2.pack()\n self.value = StringVar()\n l3 = Label(self.top1, text='VALUE', font=('times', 25, 'bold'), bg=\n 'green2', fg='white')\n l3.pack()\n e3 = Entry(self.top1, relief='sunken', textvariable=self.value,\n font=('times', 25, 'bold'))\n e3.pack()\n b = Button(self.top1, text='UPDATE', command=self.update_data, font\n =('times', 25, 'bold'), bg='white', fg='black')\n b.pack()\n self.top1.mainloop()\n\n def delete_data(self):\n self.cursor.execute(\"Delete from user where id ='{}'\".format(e.get()))\n self.list.delete(0, END)\n self.connection.commit()\n self.show_data()\n\n def del_rec(self):\n global e\n self.top2 = Toplevel(self.scr)\n self.top2.geometry('400x400')\n l1 = Label(self.top2, text='USER_ID', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l1.pack()\n self.Id = StringVar()\n e = Entry(self.top2, relief='sunken', textvariable=self.Id, font=(\n 'times', 25, 'bold'))\n e.pack()\n b = Button(self.top2, text='delete records', command=self.\n delete_data, font=('times', 25, 'bold'), bg='white', fg='black')\n b.pack()\n self.top2.mainloop()\n\n def sample(self):\n s = '{}'.format(self.en3.get())\n a = self.cursor.execute('{}'.format(self.en3.get()))\n r = self.cursor.fetchall()\n for row in r:\n self.list.insert(0, row)\n self.connection.commit()\n\n def file(self):\n self.f1.filename = filedialog.askopenfilename(title='Select file')\n p = self.f1.filename\n self.list.insert(0, self.f1.filename)\n\n def add_record(self):\n global e\n global e1\n global e2\n global e3\n global e4\n global e5\n self.e = StringVar()\n self.e1 = StringVar()\n self.e2 = StringVar()\n self.e3 = StringVar()\n self.e4 = StringVar()\n self.e5 = StringVar()\n self.top = Toplevel(self.scr)\n self.top.geometry('400x800')\n l = Label(self.top, text='USER_ID', font=('times', 25, 'bold'), bg=\n 'green2', fg='white')\n l.pack()\n e = Entry(self.top, relief='sunken', textvariable=self.e, font=(\n 'times', 25, 'bold'))\n e.pack()\n l1 = Label(self.top, text='USERNAME', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l1.pack()\n e1 = Entry(self.top, relief='sunken', textvariable=self.e1, font=(\n 'times', 25, 'bold'))\n e1.pack()\n l2 = Label(self.top, text='FATHERS NAME', font=('times', 25, 'bold'\n ), bg='green2', fg='white')\n l2.pack()\n e2 = Entry(self.top, relief='sunken', textvariable=self.e2, font=(\n 'times', 25, 'bold'))\n e2.pack()\n l3 = Label(self.top, text='MOTHERS NAME', font=('times', 25, 'bold'\n ), bg='green2', fg='white')\n l3.pack()\n e3 = Entry(self.top, relief='sunken', textvariable=self.e3, font=(\n 'times', 
25, 'bold'))\n e3.pack()\n l4 = Label(self.top, text='CONTACT NO', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l4.pack()\n e4 = Entry(self.top, relief='sunken', textvariable=self.e4, font=(\n 'times', 25, 'bold'))\n e4.pack()\n l5 = Label(self.top, text='E-MAIL ID', font=('times', 25, 'bold'),\n bg='green2', fg='white')\n l5.pack()\n e5 = Entry(self.top, relief='sunken', textvariable=self.e5, font=(\n 'times', 25, 'bold'))\n e5.pack()\n varchk = IntVar()\n b = Button(self.top, text='SUBMIT', command=self.insert_data, font=\n ('times', 25, 'bold'), bg='white', fg='black')\n b.pack()\n self.top.mainloop()\n\n def view_table(self):\n global list_box\n self.list_box = Listbox(self.left_frame, font=('times', 20, 'bold'))\n try:\n self.list_box.insert(1, 'user')\n self.list_box.insert(2, self.tbl_name)\n except:\n pass\n b = Button(self.left_frame, text='Click', font=('times', 20, 'bold'\n ), command=self.selection, bg='white', fg='black')\n b.place(x=100, y=400)\n self.list_box.place(x=10, y=50)\n\n def selection(self):\n lb = self.list_box.curselection()\n print(lb)\n for i in list(lb):\n self.show_data()\n\n def show_records(self):\n global m\n m = self.list.curselection()\n m = self.list.get(m)\n self.id.delete(0, END)\n self.id.insert(END, self.add_record())\n global table_name\n\n def create_table(self):\n self.top = Toplevel(self.scr)\n self.top.geometry('400x800')\n self.table_name = StringVar()\n l = Label(self.top, text='Table', font=('times', 20, 'bold'), bg=\n 'white', fg='black')\n l.pack()\n e = Entry(self.top, textvariable=self.table_name, font=('times', 20,\n 'bold'))\n e.pack()\n b = Button(self.top, text='Add field', command=self.fun_show, font=\n ('times', 20, 'bold'), bg='white', fg='black')\n b.pack()\n b = Button(self.top, text='OK', font=('times', 20, 'bold'), command\n =self.show_entered_data, bg='white', fg='black')\n b.pack(side=RIGHT)\n\n def show_entered_data(self):\n global en1\n global en2\n global list1\n global tbl_name\n self.tbl_name = self.table_name.get()\n self.en1 = self.entry1.get()\n self.en2 = self.entry2.get()\n sent = 'Create table ' + str(self.tbl_name) + \"('\" + str(self.en1\n ) + ' ' + str(self.en2) + \"')\"\n list1 = Text(self.top, width=41, height=8, font=('times', 25, 'bold'))\n list1.place(x=0, y=0)\n list1.insert(0.0, sent)\n print(self.tbl_name, self.en1, self.en2)\n self.cursor.execute(sent)\n self.list.insert(0, sent)\n self.connection.commit()\n\n def fun_show(self):\n l = Label(self.top, text='Name', font=('times', 20, 'bold'), bg=\n 'white', fg='black')\n l.pack(side=TOP)\n self.entry1 = StringVar()\n e1 = Entry(self.top, textvariable=self.entry1, font=('times', 20,\n 'bold'))\n e1.pack()\n l = Label(self.top, text='type', font=('times', 20, 'bold'), bg=\n 'white', fg='black')\n l.pack(side=TOP)\n self.entry2 = StringVar()\n e1 = Entry(self.top, textvariable=self.entry2, font=('times', 20,\n 'bold'))\n e1.pack()\n\n\nGui()\n",
"step-5": "from tkinter import*\r\nfrom tkinter import filedialog\r\nimport sqlite3\r\n\r\nclass Gui:\r\n def __init__(self):\r\n global en3\r\n self.scr = Tk()\r\n self.scr.geometry(\"2000x3000\")\r\n self.scr.title(\"VIEWING DATABASE\")\r\n self.connection = sqlite3.connect(\"student_details.db\")\r\n self.cursor = self.connection.cursor()\r\n self.id = StringVar()\r\n self.name1 = StringVar()\r\n self.fathername = StringVar()\r\n self.mothername = StringVar()\r\n self.cont = StringVar()\r\n self.email = StringVar()\r\n self.f1 = Frame(self.scr, bg='brown1')\r\n self.f1.pack(side=TOP)\r\n self.left_frame = Frame(self.scr, bg='red')\r\n self.left_frame.pack(side=LEFT, fill=Y)\r\n self.right_frame = Frame(self.scr, width=3000, bg='yellow')\r\n self.right_frame.pack(side=LEFT, fill=Y)\r\n l = Label(self.right_frame, text=\"***************SHOW TABLE RECORDS IN A DATABASE******************\",\r\n font=('times', 25, 'bold'), bg=\"black\", fg=\"white\")\r\n l.pack(side=TOP, fill=X)\r\n scrollbar = Scrollbar(self.right_frame)\r\n scrollbar.pack(side=RIGHT, fill=Y)\r\n self.list = Listbox(self.right_frame, width=61, height=12, font=('times', 25, 'bold'),\r\n yscrollcommand=scrollbar.set)\r\n self.list.bind(\"student_list\", self.show_records)\r\n self.list.pack(side=TOP, fill=Y)\r\n scrollbar.config(command=self.list.yview)\r\n self.querry_frame = Frame(self.right_frame, width=81, height=5, bg=\"white\")\r\n self.querry_frame.pack(side=BOTTOM, fill=X)\r\n self.en3 = Entry(self.querry_frame, font=('times', 25, 'bold'))\r\n self.en3.pack(side=BOTTOM, fill=X)\r\n b = Button(self.querry_frame, text=\"Enter\",command=self.sample, font=('times', 25, 'bold'), bg=\"white\", fg=\"black\")\r\n b.pack(side=RIGHT)\r\n b1 = Button(self.querry_frame, text=\"Save\", command=self.show_data, font=('times', 25, 'bold'), bg=\"white\",\r\n fg=\"black\")\r\n b1.pack(side=RIGHT)\r\n b = Button(self.f1, text=\"OPEN\", command=self.file, font=('times', 25, 'bold'), bg=\"white\", fg=\"black\")\r\n b.pack(side=LEFT)\r\n b = Button(self.f1, text=\"CREATE\", command=self.create_table, font=('times', 25, 'bold'), bg=\"white\",\r\n fg=\"black\")\r\n b.pack(side=LEFT)\r\n b1 = Button(self.f1, text=\"INSERT\", command=self.add_record, font=('times', 25, 'bold'), bg=\"white\",\r\n fg=\"black\")\r\n b1.pack(side=LEFT)\r\n b2 = Button(self.f1, text=\"DELETE\", command=self.del_rec, font=('times', 25, 'bold'), bg=\"white\",\r\n fg=\"black\")\r\n b2.pack(side=LEFT)\r\n b3 = Button(self.f1, text=\"UPDATE\", command=self.update, font=('times', 25, 'bold'), bg=\"white\",\r\n fg=\"black\")\r\n b3.pack(side=RIGHT)\r\n b4 = Button(self.f1, text=\"VIEW\", command=lambda: self.view_table(), font=('times', 25, 'bold'), bg=\"white\",\r\n fg=\"black\")\r\n b4.pack(side=RIGHT)\r\n b4 = Button(self.f1, text=\"BROWSE\", command=self.show_data, font=('times', 25, 'bold'), bg=\"white\",\r\n fg=\"black\")\r\n b4.pack(side=RIGHT)\r\n l = Label(self.left_frame, text=\"View Table in Database\", font=('times', 25, 'bold'), bg='blue', fg='white')\r\n l.pack(side=TOP, fill=X)\r\n\r\n self.scr.mainloop()\r\n\r\n try:\r\n self.cursor.execute(\"create table user(Id varchar(10),Name varchar(30),FathersName varchar(20),MothersName varchar(20),Contact varchar(10),Email varchar(30))\")\r\n self.connection.commit()\r\n except:\r\n pass\r\n\r\n def insert_data(self):\r\n self.id = e.get()\r\n self.name1 = e1.get()\r\n self.fathername=e2.get()\r\n self.mothername = e3.get()\r\n self.cont = e4.get()\r\n self.email = e5.get()\r\n self.cursor.execute(\"insert into 
user values('{}','{}','{}','{}','{}','{}')\".format(self.id,self.name1, self.fathername,self.mothername,self.cont , self.email))\r\n self.connection.commit()\r\n\r\n\r\n def show_data(self):\r\n self.connection = sqlite3.connect(\"student_details.db\")\r\n self.cursor = self.connection.cursor()\r\n self.cursor.execute(\"Select * from user\")\r\n rows = self.cursor.fetchall()\r\n for row in rows:\r\n l1 = self.list.insert(END, row)\r\n self.connection.commit()\r\n\r\n def update_data(self):\r\n self.cursor.execute(\"Update user set {} = '{}' where id ='{}'\".format(e2.get(),e3.get(),e.get()))\r\n self.connection.commit()\r\n self.list.delete(0, END)\r\n self.show_data()\r\n\r\n def update(self):\r\n global e\r\n global e2\r\n global e3\r\n self.top1 = Toplevel(self.scr)\r\n self.top1.geometry(\"400x400\")\r\n l1 = Label(self.top1, text=\"USER_ID\", font=('times', 25, 'bold'), bg=\"green2\", fg=\"white\")\r\n l1.pack()\r\n self.Id=StringVar()\r\n e = Entry(self.top1, relief=\"sunken\", textvariable=self.Id, font=('times', 25, 'bold'))\r\n e.pack()\r\n self.col_name=StringVar()\r\n l2 = Label(self.top1, text=\"col_name\", font=('times', 25, 'bold'), bg=\"green2\", fg=\"white\")\r\n l2.pack()\r\n e2 = Entry(self.top1, relief=\"sunken\", textvariable=self.col_name, font=('times', 25, 'bold'))\r\n e2.pack()\r\n self.value=StringVar()\r\n l3 = Label(self.top1, text=\"VALUE\", font=('times', 25, 'bold'), bg=\"green2\", fg=\"white\")\r\n l3.pack()\r\n e3 = Entry(self.top1, relief=\"sunken\", textvariable=self.value, font=('times', 25, 'bold'))\r\n e3.pack()\r\n b = Button(self.top1, text=\"UPDATE\", command=self.update_data, font=('times', 25, 'bold'), bg=\"white\",\r\n fg=\"black\")\r\n b.pack()\r\n\r\n self.top1.mainloop()\r\n\r\n def delete_data(self):\r\n self.cursor.execute(\"Delete from user where id ='{}'\".format(e.get()))\r\n self.list.delete(0,END)\r\n self.connection.commit()\r\n self.show_data()\r\n\r\n def del_rec(self):\r\n global e\r\n self.top2 = Toplevel(self.scr)\r\n self.top2.geometry(\"400x400\")\r\n l1 = Label(self.top2, text=\"USER_ID\", font=('times', 25, 'bold'), bg=\"green2\", fg=\"white\")\r\n l1.pack()\r\n self.Id = StringVar()\r\n e = Entry(self.top2, relief=\"sunken\", textvariable=self.Id, font=('times', 25, 'bold'))\r\n e.pack()\r\n b = Button(self.top2, text=\"delete records\", command=self.delete_data, font=('times', 25, 'bold'), bg=\"white\",\r\n fg=\"black\")\r\n b.pack()\r\n self.top2.mainloop()\r\n\r\n def sample(self):\r\n s=('{}'.format(self.en3.get()))\r\n a=self.cursor.execute(\"{}\".format(self.en3.get()))\r\n r=self.cursor.fetchall()\r\n for row in r:\r\n self.list.insert(0,row)\r\n self.connection.commit()\r\n\r\n\r\n\r\n def file(self):\r\n self.f1.filename = filedialog.askopenfilename( title=\"Select file\")\r\n p=self.f1.filename\r\n self.list.insert(0,self.f1.filename)\r\n\r\n def add_record(self):\r\n global e\r\n global e1\r\n global e2\r\n global e3\r\n global e4\r\n global e5\r\n self.e = StringVar()\r\n self.e1 = StringVar()\r\n self.e2 = StringVar()\r\n self.e3 = StringVar()\r\n self.e4 = StringVar()\r\n self.e5 = StringVar()\r\n self.top=Toplevel(self.scr)\r\n self.top.geometry(\"400x800\")\r\n l=Label(self.top,text=\"USER_ID\",font=('times',25,'bold'),bg=\"green2\",fg=\"white\")\r\n l.pack()\r\n e=Entry(self.top,relief=\"sunken\",textvariable=self.e,font=('times',25,'bold'))\r\n e.pack()\r\n l1 = Label(self.top, text=\"USERNAME\", font=('times', 25, 'bold'), bg=\"green2\", fg=\"white\")\r\n l1.pack()\r\n e1 = Entry(self.top, 
relief=\"sunken\",textvariable=self.e1, font=('times', 25, 'bold'))\r\n e1.pack()\r\n l2 = Label(self.top, text=\"FATHERS NAME\", font=('times', 25, 'bold'), bg=\"green2\", fg=\"white\")\r\n l2.pack()\r\n e2 = Entry(self.top, relief=\"sunken\",textvariable=self.e2, font=('times', 25, 'bold'))\r\n e2.pack()\r\n l3 = Label(self.top, text=\"MOTHERS NAME\", font=('times', 25, 'bold'), bg=\"green2\", fg=\"white\")\r\n l3.pack()\r\n e3 = Entry(self.top, relief=\"sunken\",textvariable=self.e3, font=('times', 25, 'bold'))\r\n e3.pack()\r\n l4 = Label(self.top, text=\"CONTACT NO\", font=('times', 25, 'bold'), bg=\"green2\", fg=\"white\")\r\n l4.pack()\r\n e4 = Entry(self.top, relief=\"sunken\",textvariable=self.e4, font=('times', 25, 'bold'))\r\n e4.pack()\r\n l5 = Label(self.top, text=\"E-MAIL ID\", font=('times', 25, 'bold'), bg=\"green2\", fg=\"white\")\r\n l5.pack()\r\n e5 = Entry(self.top, relief=\"sunken\",textvariable=self.e5, font=('times', 25, 'bold'))\r\n e5.pack()\r\n varchk=IntVar()\r\n b = Button(self.top, text=\"SUBMIT\", command=self.insert_data,font=('times', 25, 'bold'), bg=\"white\",fg=\"black\")\r\n b.pack()\r\n self.top.mainloop()\r\n\r\n\r\n def view_table(self):\r\n global list_box\r\n self.list_box = Listbox(self.left_frame, font=('times', 20, 'bold'))\r\n\r\n try:\r\n\r\n self.list_box.insert(1,\"user\")\r\n self.list_box.insert(2,self.tbl_name)\r\n except:\r\n pass\r\n b=Button(self.left_frame,text=\"Click\",font=('times', 20, 'bold'),command=self.selection,bg=\"white\",fg=\"black\")\r\n b.place(x=100,y=400)\r\n self.list_box.place(x=10,y=50)\r\n\r\n def selection(self):\r\n lb = self.list_box.curselection()\r\n print(lb)\r\n for i in list(lb):\r\n self.show_data()\r\n\r\n def show_records(self):\r\n global m\r\n m=self.list.curselection()\r\n m=self.list.get(m)\r\n self.id.delete(0,END)\r\n self.id.insert(END,self.add_record())\r\n\r\n global table_name\r\n\r\n def create_table(self):\r\n self.top = Toplevel(self.scr)\r\n self.top.geometry(\"400x800\")\r\n self.table_name=StringVar()\r\n l=Label(self.top,text=\"Table\",font=('times', 20, 'bold'),bg=\"white\",fg=\"black\")\r\n l.pack()\r\n e=Entry(self.top,textvariable=self.table_name,font=('times', 20, 'bold'))\r\n e.pack()\r\n b=Button(self.top,text=\"Add field\",command=self.fun_show , font=('times', 20, 'bold'),bg=\"white\",fg=\"black\")\r\n b.pack()\r\n b=Button(self.top,text=\"OK\",font=('times', 20, 'bold'),command=self.show_entered_data,bg=\"white\",fg=\"black\")\r\n b.pack(side=RIGHT)\r\n\r\n\r\n def show_entered_data(self):\r\n global en1\r\n global en2\r\n global list1\r\n global tbl_name\r\n self.tbl_name=self.table_name.get()\r\n self.en1=self.entry1.get()\r\n self.en2=self.entry2.get()\r\n sent=\"Create table \"+str(self.tbl_name)+\"('\"+str(self.en1)+ \" \"+ str(self.en2)+\"')\"\r\n list1 = Text(self.top, width=41, height=8, font=('times', 25, 'bold'))\r\n list1.place(x=0,y=0)\r\n list1.insert(0.0,sent)\r\n print(self.tbl_name,self.en1,self.en2)\r\n self.cursor.execute(sent)\r\n self.list.insert(0,sent)\r\n self.connection.commit()\r\n\r\n\r\n def fun_show(self):\r\n l = Label(self.top, text=\"Name\", font=('times', 20, 'bold'), bg=\"white\", fg=\"black\")\r\n l.pack(side=TOP)\r\n self.entry1 = StringVar()\r\n e1 = Entry(self.top, textvariable=self.entry1, font=('times', 20, 'bold'))\r\n e1.pack()\r\n l = Label(self.top, text=\"type\", font=('times', 20, 'bold'), bg=\"white\", fg=\"black\")\r\n l.pack(side=TOP)\r\n self.entry2 = StringVar()\r\n e1 = Entry(self.top, textvariable=self.entry2, font=('times', 20, 
'bold'))\r\n e1.pack()\r\n\r\n\r\nGui()",
"step-ids": [
9,
12,
17,
18,
20
]
}
|
[
9,
12,
17,
18,
20
] |
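The GUI record above splices user input straight into its SQL strings with str.format. For contrast, a minimal parameterized sqlite3 sketch (same table shape as the record; the row values are hypothetical):

import sqlite3

conn = sqlite3.connect("student_details.db")
cur = conn.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS user(Id, Name, FathersName, MothersName, Contact, Email)")
# "?" placeholders let sqlite3 bind the values itself, closing the injection
# hole that building SQL text via str.format leaves open.
cur.execute("INSERT INTO user VALUES (?, ?, ?, ?, ?, ?)",
            ("1", "Ada", "Father", "Mother", "5550100", "ada@example.com"))
cur.execute("DELETE FROM user WHERE Id = ?", ("1",))
conn.commit()
conn.close()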
#from tkinter import Tk, Text, INSERT
import mnemonicos as mne
class Ensambler(object):
def __init__(self, fileName):
		# File name
self.fileName = fileName
		# File lines
self.fileLines = []
		# Location counter
self.cl = 0
		# Size
self.size = 0
#Opcode
self.code = ""
		# Instruction
self.instruction = ""
		# Operand counter
self.num_ope = 0
		# Operands
self.operands = []
		# Symbol table
self.TS = {}
		# Object code
self.CO = []
#Aux
self.x = 0
#self.window = Tk()
#self.window.geometry('400x50')
def leerArchivo(self):
file = open(self.fileName, "r")
for line in file:
line = line.replace("\n", "")
line = line.replace("\t", "")
self.fileLines.append(line)
file.close()
	# First pass
def first_pass(self):
for line in self.fileLines:
self.clean_line(line)
self.get_label()
self.get_operands()
if self.num_ope == 1:
if self.instruction in mne.v_jump:
if self.instruction == "JP":
						self.x = self.TS[self.operands[0]]
print("l")
print(self.x)
if self.operands[0] in mne.v_jump:
self.instruction = self.instruction + " " + self.operands[0]+","+self.operands[1]
if self.operands[0][1:-1].isnumeric():
self.instruction = self.instruction + " " + self.operands[0]+","+self.operands[1]
if self.num_ope == 1:
if self.instruction in mne.v_jump:
self.operands[0] = "nn"
self.instruction = self.instruction + " " + self.operands[0]
code, size = mne.map_mnem.get(self.instruction,"Error")("0000")
self.cl += size
else:
			# Flag anything that is not a valid opcode
print(self.instruction)
#code, size = mne.map_mnem.get(self.instruction,"Error")()
#lst = "CL: " + str(self.cl) + " Code: " + code
#self.CO.append(code)
print(self.CO)
print(self.cl)
print(self.TS)
def Second_pass(self):
for line in self.fileLines:
self.clean_line(line)
self.get_label()
self.get_operands()
if self.instruction in mne.v_jump:
if len(self.operands) == 2:
aux = self.operands[1]
else:
aux = self.operands[0]
if aux in self.TS.keys():
self.x = self.TS[aux]
self.instruction = self.instruction + " " + "nn"
code, size = mne.map_mnem.get(self.instruction,"Error")(str(self.x))
self.CO.append(code)
else:
print("Error")
else:
if self.num_ope == 2:
self.instruction = self.instruction + " " + self.operands[0]+","+self.operands[1]
if self.num_ope == 1:
self.instruction = self.instruction + " " + self.operands[0]
code, size = mne.map_mnem.get(self.instruction,"Error")()
self.CO.append(code)
print(self.CO)
	# Strip comments
def clean_line(self,line):
line = line.split(";")
self.instruction = line[0].upper().replace(",","")
	# Get and store the label if one exists
def get_label(self):
label = self.instruction.split(":")
if len(label) > 1:
if label[0] in mne.v_ops or label[0] in mne.map_mnem:
print("Error etiqueta invalida")
			# Strip leading whitespace
self.TS[label[0].strip()] = self.cl
del label[0]
self.instruction = label[0]
	# Get the operands and the instruction
def get_operands(self):
line = self.instruction.split()
self.operands = [operand for operand in line]
self.instruction = self.operands[0]
del self.operands[0]
self.num_ope = len(self.operands)
aux = Ensambler("1.txt")
aux.leerArchivo()
aux.first_pass()
aux.Second_pass()
|
normal
|
{
"blob_id": "3bc009271c7dd34ad09bcef81214387b63dfac59",
"index": 2549,
"step-1": "<mask token>\n\n\nclass Ensambler(object):\n\n def __init__(self, fileName):\n self.fileName = fileName\n self.fileLines = []\n self.cl = 0\n self.size = 0\n self.code = ''\n self.instruction = ''\n self.num_ope = 0\n self.operands = []\n self.TS = {}\n self.CO = []\n self.x = 0\n\n def leerArchivo(self):\n file = open(self.fileName, 'r')\n for line in file:\n line = line.replace('\\n', '')\n line = line.replace('\\t', '')\n self.fileLines.append(line)\n file.close()\n\n def first_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n if self.instruction == 'JP':\n self.x = self.TS[operands[0]]\n print('l')\n print(self.x)\n if self.operands[0] in mne.v_jump:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.operands[0][1:-1].isnumeric():\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n self.operands[0] = 'nn'\n self.instruction = (self.instruction + ' ' + self.\n operands[0])\n code, size = mne.map_mnem.get(self.instruction, 'Error'\n )('0000')\n self.cl += size\n else:\n print(self.instruction)\n print(self.CO)\n print(self.cl)\n print(self.TS)\n\n def Second_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.instruction in mne.v_jump:\n if len(self.operands) == 2:\n aux = self.operands[1]\n else:\n aux = self.operands[0]\n if aux in self.TS.keys():\n self.x = self.TS[aux]\n self.instruction = self.instruction + ' ' + 'nn'\n code, size = mne.map_mnem.get(self.instruction, 'Error')(\n str(self.x))\n self.CO.append(code)\n else:\n print('Error')\n else:\n if self.num_ope == 2:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n self.instruction = self.instruction + ' ' + self.operands[0\n ]\n code, size = mne.map_mnem.get(self.instruction, 'Error')()\n self.CO.append(code)\n print(self.CO)\n <mask token>\n\n def get_label(self):\n label = self.instruction.split(':')\n if len(label) > 1:\n if label[0] in mne.v_ops or label[0] in mne.map_mnem:\n print('Error etiqueta invalida')\n self.TS[label[0].strip()] = self.cl\n del label[0]\n self.instruction = label[0]\n\n def get_operands(self):\n line = self.instruction.split()\n self.operands = [operand for operand in line]\n self.instruction = self.operands[0]\n del self.operands[0]\n self.num_ope = len(self.operands)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Ensambler(object):\n\n def __init__(self, fileName):\n self.fileName = fileName\n self.fileLines = []\n self.cl = 0\n self.size = 0\n self.code = ''\n self.instruction = ''\n self.num_ope = 0\n self.operands = []\n self.TS = {}\n self.CO = []\n self.x = 0\n\n def leerArchivo(self):\n file = open(self.fileName, 'r')\n for line in file:\n line = line.replace('\\n', '')\n line = line.replace('\\t', '')\n self.fileLines.append(line)\n file.close()\n\n def first_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n if self.instruction == 'JP':\n self.x = self.TS[operands[0]]\n print('l')\n print(self.x)\n if self.operands[0] in mne.v_jump:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.operands[0][1:-1].isnumeric():\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n self.operands[0] = 'nn'\n self.instruction = (self.instruction + ' ' + self.\n operands[0])\n code, size = mne.map_mnem.get(self.instruction, 'Error'\n )('0000')\n self.cl += size\n else:\n print(self.instruction)\n print(self.CO)\n print(self.cl)\n print(self.TS)\n\n def Second_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.instruction in mne.v_jump:\n if len(self.operands) == 2:\n aux = self.operands[1]\n else:\n aux = self.operands[0]\n if aux in self.TS.keys():\n self.x = self.TS[aux]\n self.instruction = self.instruction + ' ' + 'nn'\n code, size = mne.map_mnem.get(self.instruction, 'Error')(\n str(self.x))\n self.CO.append(code)\n else:\n print('Error')\n else:\n if self.num_ope == 2:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n self.instruction = self.instruction + ' ' + self.operands[0\n ]\n code, size = mne.map_mnem.get(self.instruction, 'Error')()\n self.CO.append(code)\n print(self.CO)\n\n def clean_line(self, line):\n line = line.split(';')\n self.instruction = line[0].upper().replace(',', '')\n\n def get_label(self):\n label = self.instruction.split(':')\n if len(label) > 1:\n if label[0] in mne.v_ops or label[0] in mne.map_mnem:\n print('Error etiqueta invalida')\n self.TS[label[0].strip()] = self.cl\n del label[0]\n self.instruction = label[0]\n\n def get_operands(self):\n line = self.instruction.split()\n self.operands = [operand for operand in line]\n self.instruction = self.operands[0]\n del self.operands[0]\n self.num_ope = len(self.operands)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Ensambler(object):\n\n def __init__(self, fileName):\n self.fileName = fileName\n self.fileLines = []\n self.cl = 0\n self.size = 0\n self.code = ''\n self.instruction = ''\n self.num_ope = 0\n self.operands = []\n self.TS = {}\n self.CO = []\n self.x = 0\n\n def leerArchivo(self):\n file = open(self.fileName, 'r')\n for line in file:\n line = line.replace('\\n', '')\n line = line.replace('\\t', '')\n self.fileLines.append(line)\n file.close()\n\n def first_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n if self.instruction == 'JP':\n self.x = self.TS[operands[0]]\n print('l')\n print(self.x)\n if self.operands[0] in mne.v_jump:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.operands[0][1:-1].isnumeric():\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n self.operands[0] = 'nn'\n self.instruction = (self.instruction + ' ' + self.\n operands[0])\n code, size = mne.map_mnem.get(self.instruction, 'Error'\n )('0000')\n self.cl += size\n else:\n print(self.instruction)\n print(self.CO)\n print(self.cl)\n print(self.TS)\n\n def Second_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.instruction in mne.v_jump:\n if len(self.operands) == 2:\n aux = self.operands[1]\n else:\n aux = self.operands[0]\n if aux in self.TS.keys():\n self.x = self.TS[aux]\n self.instruction = self.instruction + ' ' + 'nn'\n code, size = mne.map_mnem.get(self.instruction, 'Error')(\n str(self.x))\n self.CO.append(code)\n else:\n print('Error')\n else:\n if self.num_ope == 2:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n self.instruction = self.instruction + ' ' + self.operands[0\n ]\n code, size = mne.map_mnem.get(self.instruction, 'Error')()\n self.CO.append(code)\n print(self.CO)\n\n def clean_line(self, line):\n line = line.split(';')\n self.instruction = line[0].upper().replace(',', '')\n\n def get_label(self):\n label = self.instruction.split(':')\n if len(label) > 1:\n if label[0] in mne.v_ops or label[0] in mne.map_mnem:\n print('Error etiqueta invalida')\n self.TS[label[0].strip()] = self.cl\n del label[0]\n self.instruction = label[0]\n\n def get_operands(self):\n line = self.instruction.split()\n self.operands = [operand for operand in line]\n self.instruction = self.operands[0]\n del self.operands[0]\n self.num_ope = len(self.operands)\n\n\n<mask token>\naux.leerArchivo()\naux.first_pass()\naux.Second_pass()\n",
"step-4": "<mask token>\n\n\nclass Ensambler(object):\n\n def __init__(self, fileName):\n self.fileName = fileName\n self.fileLines = []\n self.cl = 0\n self.size = 0\n self.code = ''\n self.instruction = ''\n self.num_ope = 0\n self.operands = []\n self.TS = {}\n self.CO = []\n self.x = 0\n\n def leerArchivo(self):\n file = open(self.fileName, 'r')\n for line in file:\n line = line.replace('\\n', '')\n line = line.replace('\\t', '')\n self.fileLines.append(line)\n file.close()\n\n def first_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n if self.instruction == 'JP':\n self.x = self.TS[operands[0]]\n print('l')\n print(self.x)\n if self.operands[0] in mne.v_jump:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.operands[0][1:-1].isnumeric():\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n if self.instruction in mne.v_jump:\n self.operands[0] = 'nn'\n self.instruction = (self.instruction + ' ' + self.\n operands[0])\n code, size = mne.map_mnem.get(self.instruction, 'Error'\n )('0000')\n self.cl += size\n else:\n print(self.instruction)\n print(self.CO)\n print(self.cl)\n print(self.TS)\n\n def Second_pass(self):\n for line in self.fileLines:\n self.clean_line(line)\n self.get_label()\n self.get_operands()\n if self.instruction in mne.v_jump:\n if len(self.operands) == 2:\n aux = self.operands[1]\n else:\n aux = self.operands[0]\n if aux in self.TS.keys():\n self.x = self.TS[aux]\n self.instruction = self.instruction + ' ' + 'nn'\n code, size = mne.map_mnem.get(self.instruction, 'Error')(\n str(self.x))\n self.CO.append(code)\n else:\n print('Error')\n else:\n if self.num_ope == 2:\n self.instruction = self.instruction + ' ' + self.operands[0\n ] + ',' + self.operands[1]\n if self.num_ope == 1:\n self.instruction = self.instruction + ' ' + self.operands[0\n ]\n code, size = mne.map_mnem.get(self.instruction, 'Error')()\n self.CO.append(code)\n print(self.CO)\n\n def clean_line(self, line):\n line = line.split(';')\n self.instruction = line[0].upper().replace(',', '')\n\n def get_label(self):\n label = self.instruction.split(':')\n if len(label) > 1:\n if label[0] in mne.v_ops or label[0] in mne.map_mnem:\n print('Error etiqueta invalida')\n self.TS[label[0].strip()] = self.cl\n del label[0]\n self.instruction = label[0]\n\n def get_operands(self):\n line = self.instruction.split()\n self.operands = [operand for operand in line]\n self.instruction = self.operands[0]\n del self.operands[0]\n self.num_ope = len(self.operands)\n\n\naux = Ensambler('1.txt')\naux.leerArchivo()\naux.first_pass()\naux.Second_pass()\n",
"step-5": "\n#from tkinter import Tk, Text, INSERT\nimport mnemonicos as mne\n\n\nclass Ensambler(object):\n\n\n\tdef __init__(self, fileName):\n\t\n\t\t#Nombre del archivo\n\t\tself.fileName = fileName\n\t\t#Lineas del Archivo\n\t\tself.fileLines = []\n\t\t#Contador de Localidades\n\t\tself.cl = 0\n\t\t#Tamaño\n\t\tself.size = 0\n\t\t#Opcode\n\t\tself.code = \"\"\n\t\t#Intruccion\n\t\tself.instruction = \"\"\n\t\t#Contador de operadores\n\t\tself.num_ope = 0\n\t\t#Operandos\n\t\tself.operands = []\n\t\t# Tabla de simbolos\n\t\tself.TS = {}\n\t\t# Codigo Objeto\n\t\tself.CO = []\n\t\t#Aux\n\t\tself.x = 0\n\n\t\t#self.window = Tk()\n\t\t#self.window.geometry('400x50')\n\n\tdef leerArchivo(self):\n\t\tfile = open(self.fileName, \"r\")\n\t\tfor line in file:\n\t\t\tline = line.replace(\"\\n\", \"\")\n\t\t\tline = line.replace(\"\\t\", \"\")\n\t\t\tself.fileLines.append(line)\n\t\tfile.close()\n\n\t#Primera Pasada\n\tdef first_pass(self):\n\t\tfor line in self.fileLines:\n\t\t\tself.clean_line(line)\n\t\t\tself.get_label()\n\t\t\tself.get_operands()\n\t\t\tif self.num_ope == 1:\n\t\t\t\tif self.instruction in mne.v_jump:\n\t\t\t\t\tif self.instruction == \"JP\":\n\t\t\t\t\t\tself.x = self.TS[operands[0]]\n\t\t\t\t\t\tprint(\"l\")\n\t\t\t\t\t\tprint(self.x)\n\n\n\t\t\t\tif self.operands[0] in mne.v_jump:\n\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]+\",\"+self.operands[1]\n\n\t\t\t\tif self.operands[0][1:-1].isnumeric():\n\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]+\",\"+self.operands[1]\n\n\n\t\t\t\tif self.num_ope == 1:\n\t\t\t\t\tif self.instruction in mne.v_jump:\n\t\t\t\t\t\tself.operands[0] = \"nn\"\n\t\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]\n\t\t\t\t\t\tcode, size = mne.map_mnem.get(self.instruction,\"Error\")(\"0000\")\n\t\t\t\t\t\tself.cl += size \n\t\t\telse:\n\t\t\t\t\n\t\t\t#Valida si no es opcode valido\n\t\t\t\tprint(self.instruction)\n\t\t\t#code, size = mne.map_mnem.get(self.instruction,\"Error\")()\n\t\t\t\n\t\t\t#lst = \"CL: \" + str(self.cl) + \" Code: \" + code\n\t\t\t#self.CO.append(code)\n\t\tprint(self.CO)\n\t\tprint(self.cl)\n\t\tprint(self.TS)\n\n\n\tdef Second_pass(self):\n\t\tfor line in self.fileLines:\n\t\t\tself.clean_line(line)\n\t\t\tself.get_label()\n\t\t\tself.get_operands()\n\t\t\t\n\t\t\tif self.instruction in mne.v_jump:\n\n\t\t\t\tif len(self.operands) == 2:\n\t\t\t\t\taux = self.operands[1]\n\t\t\t\telse:\n\t\t\t\t\taux = self.operands[0]\n\n\t\t\t\tif aux in self.TS.keys():\n\t\t\t\t\tself.x = self.TS[aux]\n\t\t\t\t\tself.instruction = self.instruction + \" \" + \"nn\"\n\t\t\t\t\tcode, size = mne.map_mnem.get(self.instruction,\"Error\")(str(self.x))\n\t\t\t\t\tself.CO.append(code)\n\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Error\")\n\t\t\telse:\n\t\t\t\tif self.num_ope == 2:\n\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]+\",\"+self.operands[1]\n\t\t\t\tif self.num_ope == 1:\n\t\t\t\t\tself.instruction = self.instruction + \" \" + self.operands[0]\n\t\t\t\tcode, size = mne.map_mnem.get(self.instruction,\"Error\")()\n\t\t\t\tself.CO.append(code)\n\t\tprint(self.CO)\n\n\n\t#Quitar Comentarios\n\tdef clean_line(self,line):\n\t\tline = line.split(\";\")\n\t\tself.instruction = line[0].upper().replace(\",\",\"\")\n\n\t# Obtener y guardar etiqueta si existe\n\tdef get_label(self):\n\n\t\tlabel = self.instruction.split(\":\")\n\n\t\tif len(label) > 1:\n\n\t\t\tif label[0] in mne.v_ops or label[0] in mne.map_mnem:\n\t\t\t\tprint(\"Error etiqueta 
invalida\")\n\t\t\t#Quitar espacio al inicio\n\t\t\tself.TS[label[0].strip()] = self.cl\n\n\t\t\tdel label[0]\n\n\n\t\tself.instruction = label[0]\n\n\t#Obtener los operandos y la instruccion\n\tdef get_operands(self):\n\t\tline = self.instruction.split()\n\t\tself.operands = [operand for operand in line]\n\t\tself.instruction = self.operands[0]\n\t\tdel self.operands[0]\n\t\tself.num_ope = len(self.operands)\n\n\t\t\n\t\naux = Ensambler(\"1.txt\")\naux.leerArchivo()\naux.first_pass()\naux.Second_pass()\n\n",
"step-ids": [
7,
8,
9,
10,
12
]
}
|
[
7,
8,
9,
10,
12
] |
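The record above is a classic two-pass assembler: the first pass builds the symbol table while advancing a location counter, the second resolves labels and emits object code. A compressed sketch of the same pattern over a hypothetical two-mnemonic ISA (the real opcode tables live in the mnemonicos module, which is not shown):

OPCODES = {"NOP": ("00", 1), "JP nn": ("C3{0:04X}", 3)}  # mnemonic -> (encoding, size)

def assemble(lines):
    symbols, cl = {}, 0
    for line in lines:                      # pass 1: record label -> address
        if line.endswith(":"):
            symbols[line[:-1]] = cl
        else:
            cl += OPCODES["JP nn" if line.startswith("JP") else line][1]
    out = []
    for line in lines:                      # pass 2: emit code, resolving labels
        if line.endswith(":"):
            continue
        if line.startswith("JP"):
            out.append(OPCODES["JP nn"][0].format(symbols[line.split()[1]]))
        else:
            out.append(OPCODES[line][0])
    return out

print(assemble(["START:", "NOP", "JP START"]))  # ['00', 'C30000']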
<|reserved_special_token_0|>
class Downloader:
<|reserved_special_token_0|>
def _worker(self, download_range: tuple, counter: AtomicCounter):
start, end = download_range
header = {'Range': 'bytes=' + str(start) + '-' + str(end)}
r = requests.get(self.url, headers=header, stream=True, timeout=30)
binary_content = r.content
counter.increment_by_value(end - start + 1)
print(counter.get_value() / self.file_size)
with open(self.file_name, 'wb') as f:
f.seek(start)
f.write(binary_content)
def download(self) ->None:
download_ranges = []
for i in range(self.num_threads):
start = i * self.chunk_size
if i == self.num_threads - 1:
end = self.file_size
else:
end = start + self.chunk_size - 1
download_ranges.append((start, end))
atomic_counter = AtomicCounter()
process_pool = [Process(target=self._worker, args=(download_ranges[
i], atomic_counter)) for i in range(self.num_threads)]
for p in process_pool:
p.start()
for p in process_pool:
p.join()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Downloader:
def __init__(self, src_url, num_threads):
try:
header = requests.head(src_url).headers
self.url = src_url
self.file_size = int(header.get('content-length'))
self.file_name = src_url.split('/')[-1]
self.num_threads = num_threads
self.chunk_size = self.file_size // self.num_threads
with open(self.file_name, 'wb') as f:
f.write(b'\x00' * self.file_size)
except requests.exceptions.ConnectionError:
print('Connection error, please check your internet connection.')
def _worker(self, download_range: tuple, counter: AtomicCounter):
start, end = download_range
header = {'Range': 'bytes=' + str(start) + '-' + str(end)}
r = requests.get(self.url, headers=header, stream=True, timeout=30)
binary_content = r.content
counter.increment_by_value(end - start + 1)
print(counter.get_value() / self.file_size)
with open(self.file_name, 'wb') as f:
f.seek(start)
f.write(binary_content)
def download(self) ->None:
download_ranges = []
for i in range(self.num_threads):
start = i * self.chunk_size
if i == self.num_threads - 1:
end = self.file_size
else:
end = start + self.chunk_size - 1
download_ranges.append((start, end))
atomic_counter = AtomicCounter()
process_pool = [Process(target=self._worker, args=(download_ranges[
i], atomic_counter)) for i in range(self.num_threads)]
for p in process_pool:
p.start()
for p in process_pool:
p.join()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Downloader:
def __init__(self, src_url, num_threads):
try:
header = requests.head(src_url).headers
self.url = src_url
self.file_size = int(header.get('content-length'))
self.file_name = src_url.split('/')[-1]
self.num_threads = num_threads
self.chunk_size = self.file_size // self.num_threads
with open(self.file_name, 'wb') as f:
f.write(b'\x00' * self.file_size)
except requests.exceptions.ConnectionError:
print('Connection error, please check your internet connection.')
def _worker(self, download_range: tuple, counter: AtomicCounter):
start, end = download_range
header = {'Range': 'bytes=' + str(start) + '-' + str(end)}
r = requests.get(self.url, headers=header, stream=True, timeout=30)
binary_content = r.content
counter.increment_by_value(end - start + 1)
print(counter.get_value() / self.file_size)
with open(self.file_name, 'wb') as f:
f.seek(start)
f.write(binary_content)
def download(self) ->None:
download_ranges = []
for i in range(self.num_threads):
start = i * self.chunk_size
if i == self.num_threads - 1:
end = self.file_size
else:
end = start + self.chunk_size - 1
download_ranges.append((start, end))
atomic_counter = AtomicCounter()
process_pool = [Process(target=self._worker, args=(download_ranges[
i], atomic_counter)) for i in range(self.num_threads)]
for p in process_pool:
p.start()
for p in process_pool:
p.join()
if __name__ == '__main__':
downloader = Downloader(
'https://download-cf.jetbrains.com/idea/ideaIC-2019.3.3.tar.gz', 4)
downloader.download()
<|reserved_special_token_1|>
import requests
from multiprocessing import Process
from atomic_counter import AtomicCounter
class Downloader:
def __init__(self, src_url, num_threads):
try:
header = requests.head(src_url).headers
self.url = src_url
self.file_size = int(header.get('content-length'))
self.file_name = src_url.split('/')[-1]
self.num_threads = num_threads
self.chunk_size = self.file_size // self.num_threads
with open(self.file_name, 'wb') as f:
f.write(b'\x00' * self.file_size)
except requests.exceptions.ConnectionError:
print('Connection error, please check your internet connection.')
def _worker(self, download_range: tuple, counter: AtomicCounter):
start, end = download_range
header = {'Range': 'bytes=' + str(start) + '-' + str(end)}
r = requests.get(self.url, headers=header, stream=True, timeout=30)
binary_content = r.content
counter.increment_by_value(end - start + 1)
print(counter.get_value() / self.file_size)
with open(self.file_name, 'wb') as f:
f.seek(start)
f.write(binary_content)
def download(self) ->None:
download_ranges = []
for i in range(self.num_threads):
start = i * self.chunk_size
if i == self.num_threads - 1:
end = self.file_size
else:
end = start + self.chunk_size - 1
download_ranges.append((start, end))
atomic_counter = AtomicCounter()
process_pool = [Process(target=self._worker, args=(download_ranges[
i], atomic_counter)) for i in range(self.num_threads)]
for p in process_pool:
p.start()
for p in process_pool:
p.join()
if __name__ == '__main__':
downloader = Downloader(
'https://download-cf.jetbrains.com/idea/ideaIC-2019.3.3.tar.gz', 4)
downloader.download()
<|reserved_special_token_1|>
import requests
from multiprocessing import Process
from atomic_counter import AtomicCounter
class Downloader:
def __init__(self, src_url, num_threads):
try:
header = requests.head(src_url).headers
self.url = src_url
self.file_size = int(header.get('content-length'))
self.file_name = src_url.split('/')[-1]
self.num_threads = num_threads
self.chunk_size = self.file_size // self.num_threads
with open(self.file_name, 'wb') as f:
f.write(b'\x00' * self.file_size)
except requests.exceptions.ConnectionError:
print('Connection error, please check your internet connection.')
def _worker(self, download_range: tuple, counter: AtomicCounter):
start, end = download_range
header = {'Range': 'bytes=' + str(start) + '-' + str(end)}
r = requests.get(self.url, headers=header, stream=True, timeout=30)
binary_content = r.content
counter.increment_by_value(end - start + 1)
print(counter.get_value() / self.file_size)
with open(self.file_name, 'wb') as f:
f.seek(start)
f.write(binary_content)
def download(self) -> None:
download_ranges = []
for i in range(self.num_threads):
start = i * self.chunk_size
if i == self.num_threads - 1:
end = self.file_size
else:
end = start + self.chunk_size - 1
download_ranges.append((start, end))
atomic_counter = AtomicCounter()
process_pool = [Process(target=self._worker,
args=(download_ranges[i], atomic_counter))
for i in range(self.num_threads)]
for p in process_pool:
p.start()
for p in process_pool:
p.join()
if __name__ == "__main__":
downloader = Downloader(
'https://download-cf.jetbrains.com/idea/ideaIC-2019.3.3.tar.gz', 4)
downloader.download()
|
flexible
|
{
"blob_id": "3dc3bbd00f9c2d00093bf8669963d96f5019b2da",
"index": 4648,
"step-1": "<mask token>\n\n\nclass Downloader:\n <mask token>\n\n def _worker(self, download_range: tuple, counter: AtomicCounter):\n start, end = download_range\n header = {'Range': 'bytes=' + str(start) + '-' + str(end)}\n r = requests.get(self.url, headers=header, stream=True, timeout=30)\n binary_content = r.content\n counter.increment_by_value(end - start + 1)\n print(counter.get_value() / self.file_size)\n with open(self.file_name, 'wb') as f:\n f.seek(start)\n f.write(binary_content)\n\n def download(self) ->None:\n download_ranges = []\n for i in range(self.num_threads):\n start = i * self.chunk_size\n if i == self.num_threads - 1:\n end = self.file_size\n else:\n end = start + self.chunk_size - 1\n download_ranges.append((start, end))\n atomic_counter = AtomicCounter()\n process_pool = [Process(target=self._worker, args=(download_ranges[\n i], atomic_counter)) for i in range(self.num_threads)]\n for p in process_pool:\n p.start()\n for p in process_pool:\n p.join()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Downloader:\n\n def __init__(self, src_url, num_threads):\n try:\n header = requests.head(src_url).headers\n self.url = src_url\n self.file_size = int(header.get('content-length'))\n self.file_name = src_url.split('/')[-1]\n self.num_threads = num_threads\n self.chunk_size = self.file_size // self.num_threads\n with open(self.file_name, 'wb') as f:\n f.write(b'\\x00' * self.file_size)\n except requests.exceptions.ConnectionError:\n print('Connection error, please check your internet connection.')\n\n def _worker(self, download_range: tuple, counter: AtomicCounter):\n start, end = download_range\n header = {'Range': 'bytes=' + str(start) + '-' + str(end)}\n r = requests.get(self.url, headers=header, stream=True, timeout=30)\n binary_content = r.content\n counter.increment_by_value(end - start + 1)\n print(counter.get_value() / self.file_size)\n with open(self.file_name, 'wb') as f:\n f.seek(start)\n f.write(binary_content)\n\n def download(self) ->None:\n download_ranges = []\n for i in range(self.num_threads):\n start = i * self.chunk_size\n if i == self.num_threads - 1:\n end = self.file_size\n else:\n end = start + self.chunk_size - 1\n download_ranges.append((start, end))\n atomic_counter = AtomicCounter()\n process_pool = [Process(target=self._worker, args=(download_ranges[\n i], atomic_counter)) for i in range(self.num_threads)]\n for p in process_pool:\n p.start()\n for p in process_pool:\n p.join()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Downloader:\n\n def __init__(self, src_url, num_threads):\n try:\n header = requests.head(src_url).headers\n self.url = src_url\n self.file_size = int(header.get('content-length'))\n self.file_name = src_url.split('/')[-1]\n self.num_threads = num_threads\n self.chunk_size = self.file_size // self.num_threads\n with open(self.file_name, 'wb') as f:\n f.write(b'\\x00' * self.file_size)\n except requests.exceptions.ConnectionError:\n print('Connection error, please check your internet connection.')\n\n def _worker(self, download_range: tuple, counter: AtomicCounter):\n start, end = download_range\n header = {'Range': 'bytes=' + str(start) + '-' + str(end)}\n r = requests.get(self.url, headers=header, stream=True, timeout=30)\n binary_content = r.content\n counter.increment_by_value(end - start + 1)\n print(counter.get_value() / self.file_size)\n with open(self.file_name, 'wb') as f:\n f.seek(start)\n f.write(binary_content)\n\n def download(self) ->None:\n download_ranges = []\n for i in range(self.num_threads):\n start = i * self.chunk_size\n if i == self.num_threads - 1:\n end = self.file_size\n else:\n end = start + self.chunk_size - 1\n download_ranges.append((start, end))\n atomic_counter = AtomicCounter()\n process_pool = [Process(target=self._worker, args=(download_ranges[\n i], atomic_counter)) for i in range(self.num_threads)]\n for p in process_pool:\n p.start()\n for p in process_pool:\n p.join()\n\n\nif __name__ == '__main__':\n downloader = Downloader(\n 'https://download-cf.jetbrains.com/idea/ideaIC-2019.3.3.tar.gz', 4)\n downloader.download()\n",
"step-4": "import requests\nfrom multiprocessing import Process\nfrom atomic_counter import AtomicCounter\n\n\nclass Downloader:\n\n def __init__(self, src_url, num_threads):\n try:\n header = requests.head(src_url).headers\n self.url = src_url\n self.file_size = int(header.get('content-length'))\n self.file_name = src_url.split('/')[-1]\n self.num_threads = num_threads\n self.chunk_size = self.file_size // self.num_threads\n with open(self.file_name, 'wb') as f:\n f.write(b'\\x00' * self.file_size)\n except requests.exceptions.ConnectionError:\n print('Connection error, please check your internet connection.')\n\n def _worker(self, download_range: tuple, counter: AtomicCounter):\n start, end = download_range\n header = {'Range': 'bytes=' + str(start) + '-' + str(end)}\n r = requests.get(self.url, headers=header, stream=True, timeout=30)\n binary_content = r.content\n counter.increment_by_value(end - start + 1)\n print(counter.get_value() / self.file_size)\n with open(self.file_name, 'wb') as f:\n f.seek(start)\n f.write(binary_content)\n\n def download(self) ->None:\n download_ranges = []\n for i in range(self.num_threads):\n start = i * self.chunk_size\n if i == self.num_threads - 1:\n end = self.file_size\n else:\n end = start + self.chunk_size - 1\n download_ranges.append((start, end))\n atomic_counter = AtomicCounter()\n process_pool = [Process(target=self._worker, args=(download_ranges[\n i], atomic_counter)) for i in range(self.num_threads)]\n for p in process_pool:\n p.start()\n for p in process_pool:\n p.join()\n\n\nif __name__ == '__main__':\n downloader = Downloader(\n 'https://download-cf.jetbrains.com/idea/ideaIC-2019.3.3.tar.gz', 4)\n downloader.download()\n",
"step-5": "import requests\nfrom multiprocessing import Process\nfrom atomic_counter import AtomicCounter\n\n\nclass Downloader:\n def __init__(self, src_url, num_threads):\n try:\n header = requests.head(src_url).headers\n self.url = src_url\n self.file_size = int(header.get('content-length'))\n self.file_name = src_url.split('/')[-1]\n self.num_threads = num_threads\n self.chunk_size = self.file_size // self.num_threads\n\n with open(self.file_name, 'wb') as f:\n f.write(b'\\x00' * self.file_size)\n\n except requests.exceptions.ConnectionError:\n print('Connection error, please check your internet connection.')\n\n def _worker(self, download_range: tuple, counter: AtomicCounter):\n start, end = download_range\n header = {'Range': 'bytes=' + str(start) + '-' + str(end)}\n\n r = requests.get(self.url, headers=header, stream=True, timeout=30)\n binary_content = r.content\n counter.increment_by_value(end - start + 1)\n print(counter.get_value() / self.file_size)\n\n with open(self.file_name, 'wb') as f:\n f.seek(start)\n f.write(binary_content)\n\n def download(self) -> None:\n download_ranges = []\n\n for i in range(self.num_threads):\n start = i * self.chunk_size\n if i == self.num_threads - 1:\n end = self.file_size\n else:\n end = start + self.chunk_size - 1\n\n download_ranges.append((start, end))\n\n atomic_counter = AtomicCounter()\n process_pool = [Process(target=self._worker,\n args=(download_ranges[i], atomic_counter))\n for i in range(self.num_threads)]\n\n for p in process_pool:\n p.start()\n\n for p in process_pool:\n p.join()\n\n\nif __name__ == \"__main__\":\n downloader = Downloader(\n 'https://download-cf.jetbrains.com/idea/ideaIC-2019.3.3.tar.gz', 4)\n downloader.download()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
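One subtlety in the downloader record above: every worker reopens the file with mode 'wb', which truncates whatever the other workers already wrote and defeats the zero-fill done in __init__. A standalone sketch of the write path with the mode it most likely intends (fetch_range is a hypothetical helper, not part of the record):

import requests

def fetch_range(url, file_name, start, end):
    # Ask the server for just the byte slice this worker owns.
    r = requests.get(url, headers={"Range": f"bytes={start}-{end}"}, timeout=30)
    # "r+b" opens for update without truncating, so concurrent workers can each
    # seek to their own offset and patch their slice of the preallocated file.
    with open(file_name, "r+b") as f:
        f.seek(start)
        f.write(r.content)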
""" Url router for the federated search application
"""
from django.conf.urls import include
from django.urls import re_path
urlpatterns = [
re_path(r"^rest/", include("core_federated_search_app.rest.urls")),
]
|
normal
|
{
"blob_id": "6903584b27c0720cebf42ed39968b18f0f67f796",
"index": 6167,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [re_path('^rest/', include(\n 'core_federated_search_app.rest.urls'))]\n",
"step-3": "<mask token>\nfrom django.conf.urls import include\nfrom django.urls import re_path\nurlpatterns = [re_path('^rest/', include(\n 'core_federated_search_app.rest.urls'))]\n",
"step-4": "\"\"\" Url router for the federated search application\n\"\"\"\nfrom django.conf.urls import include\nfrom django.urls import re_path\n\nurlpatterns = [\n re_path(r\"^rest/\", include(\"core_federated_search_app.rest.urls\")),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from meross_iot.model.http.exception import HttpApiError
from logger import get_logger
from typing import Dict
from flask import Blueprint
from authentication import _user_login
from decorator import meross_http_api
from messaging import make_api_response
auth_blueprint = Blueprint('auth', __name__)
_LOGGER = get_logger(__name__)
@auth_blueprint.route('/Login', methods=['POST'])
@meross_http_api(login_required=False)
def login(api_payload: Dict, *args, **kwargs):
email = api_payload.get("email")
password = api_payload.get("password")
if email is None:
raise HttpApiError("Missing email parameter")
if password is None:
raise HttpApiError("Missing password parameter")
user, token = _user_login(email, password)
_LOGGER.info("User: %s successfully logged in" % email)
data = {
"token": str(token.token),
"key": str(user.mqtt_key),
"userid": str(user.user_id),
"email": str(user.email)
}
return make_api_response(data=data)
|
normal
|
{
"blob_id": "afccd33e4c6bc5b7907a6af4ab698489fc9ea70d",
"index": 5299,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@auth_blueprint.route('/Login', methods=['POST'])\n@meross_http_api(login_required=False)\ndef login(api_payload: Dict, *args, **kwargs):\n email = api_payload.get('email')\n password = api_payload.get('password')\n if email is None:\n raise HttpApiError('Missing email parameter')\n if password is None:\n raise HttpApiError('Missing password parameter')\n user, token = _user_login(email, password)\n _LOGGER.info('User: %s successfully logged in' % email)\n data = {'token': str(token.token), 'key': str(user.mqtt_key), 'userid':\n str(user.user_id), 'email': str(user.email)}\n return make_api_response(data=data)\n",
"step-3": "<mask token>\nauth_blueprint = Blueprint('auth', __name__)\n_LOGGER = get_logger(__name__)\n\n\n@auth_blueprint.route('/Login', methods=['POST'])\n@meross_http_api(login_required=False)\ndef login(api_payload: Dict, *args, **kwargs):\n email = api_payload.get('email')\n password = api_payload.get('password')\n if email is None:\n raise HttpApiError('Missing email parameter')\n if password is None:\n raise HttpApiError('Missing password parameter')\n user, token = _user_login(email, password)\n _LOGGER.info('User: %s successfully logged in' % email)\n data = {'token': str(token.token), 'key': str(user.mqtt_key), 'userid':\n str(user.user_id), 'email': str(user.email)}\n return make_api_response(data=data)\n",
"step-4": "from meross_iot.model.http.exception import HttpApiError\nfrom logger import get_logger\nfrom typing import Dict\nfrom flask import Blueprint\nfrom authentication import _user_login\nfrom decorator import meross_http_api\nfrom messaging import make_api_response\nauth_blueprint = Blueprint('auth', __name__)\n_LOGGER = get_logger(__name__)\n\n\n@auth_blueprint.route('/Login', methods=['POST'])\n@meross_http_api(login_required=False)\ndef login(api_payload: Dict, *args, **kwargs):\n email = api_payload.get('email')\n password = api_payload.get('password')\n if email is None:\n raise HttpApiError('Missing email parameter')\n if password is None:\n raise HttpApiError('Missing password parameter')\n user, token = _user_login(email, password)\n _LOGGER.info('User: %s successfully logged in' % email)\n data = {'token': str(token.token), 'key': str(user.mqtt_key), 'userid':\n str(user.user_id), 'email': str(user.email)}\n return make_api_response(data=data)\n",
"step-5": "from meross_iot.model.http.exception import HttpApiError\n\nfrom logger import get_logger\nfrom typing import Dict\n\nfrom flask import Blueprint\n\nfrom authentication import _user_login\nfrom decorator import meross_http_api\nfrom messaging import make_api_response\n\n\nauth_blueprint = Blueprint('auth', __name__)\n_LOGGER = get_logger(__name__)\n\n\n@auth_blueprint.route('/Login', methods=['POST'])\n@meross_http_api(login_required=False)\ndef login(api_payload: Dict, *args, **kwargs):\n email = api_payload.get(\"email\")\n password = api_payload.get(\"password\")\n\n if email is None:\n raise HttpApiError(\"Missing email parameter\")\n if password is None:\n raise HttpApiError(\"Missing password parameter\")\n\n user, token = _user_login(email, password)\n _LOGGER.info(\"User: %s successfully logged in\" % email)\n data = {\n \"token\": str(token.token),\n \"key\": str(user.mqtt_key),\n \"userid\": str(user.user_id),\n \"email\": str(user.email)\n }\n return make_api_response(data=data)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
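A possible client-side call for the /Login route above — note that both the base URL and the body shape are assumptions, since they depend on how the meross_http_api decorator builds api_payload:

import requests

resp = requests.post(
    "http://localhost:8080/Login",  # host and port are placeholders
    json={"email": "user@example.com", "password": "secret"},
)
print(resp.json())  # on success the data carries token, key, userid, email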
"""AWS CDK application.
See https://docs.aws.amazon.com/cdk/ for details.
"""
from ias_pmi_cdk_common import PMIApp
from stacks import MainStack
APP_NAME = 'etl-pm-pipeline-be'
# create CDK application
app = PMIApp(APP_NAME)
# add stacks
MainStack(app, app, 'main')
# synthesize application assembly
app.synth()
|
normal
|
{
"blob_id": "dfbbbaf6b5f02c60ca48f7864068d59349c547d1",
"index": 5484,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nMainStack(app, app, 'main')\napp.synth()\n",
"step-3": "<mask token>\nAPP_NAME = 'etl-pm-pipeline-be'\napp = PMIApp(APP_NAME)\nMainStack(app, app, 'main')\napp.synth()\n",
"step-4": "<mask token>\nfrom ias_pmi_cdk_common import PMIApp\nfrom stacks import MainStack\nAPP_NAME = 'etl-pm-pipeline-be'\napp = PMIApp(APP_NAME)\nMainStack(app, app, 'main')\napp.synth()\n",
"step-5": "\"\"\"AWS CDK application.\n\nSee https://docs.aws.amazon.com/cdk/ for details.\n\n\"\"\"\n\nfrom ias_pmi_cdk_common import PMIApp\n\nfrom stacks import MainStack\n\n\nAPP_NAME = 'etl-pm-pipeline-be'\n\n\n# create CDK application\napp = PMIApp(APP_NAME)\n\n# add stacks\nMainStack(app, app, 'main')\n\n# synthesize application assembly\napp.synth()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
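PMIApp and MainStack above are project-internal wrappers; the same three-step shape (create app, attach stacks, synthesize) in plain aws-cdk-lib v2 looks roughly like this sketch:

import aws_cdk as cdk

class MainStack(cdk.Stack):
    def __init__(self, scope, construct_id, **kwargs):
        super().__init__(scope, construct_id, **kwargs)
        # resource constructs would be declared here

app = cdk.App()
MainStack(app, "main")
app.synth()  # writes the CloudFormation assembly to cdk.out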
from . import by_trips
from . import by_slope
|
normal
|
{
"blob_id": "74fae3636b1c1b0b79d0c6bec8698581b063eb9c",
"index": 8944,
"step-1": "<mask token>\n",
"step-2": "from . import by_trips\nfrom . import by_slope\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath('.')
return os.path.join(base_path, relative_path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath('.')
return os.path.join(base_path, relative_path)
def numStrip(n):
striped = []
if n == 0:
return [0]
while n > 0:
striped.append(n % 10)
n //= 10
return striped
<|reserved_special_token_1|>
import sys, os
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath('.')
return os.path.join(base_path, relative_path)
def numStrip(n):
striped = []
if n == 0:
return [0]
while n > 0:
striped.append(n % 10)
n //= 10
return striped
<|reserved_special_token_1|>
import sys, os
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
def numStrip(n):
striped = []
if n == 0:
return [0]
while n > 0:
striped.append(n % 10)
n //= 10
return striped
|
flexible
|
{
"blob_id": "5fb3905abf958f0a8be41cd6ad07efb2a0cf6c66",
"index": 7542,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath('.')\n return os.path.join(base_path, relative_path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath('.')\n return os.path.join(base_path, relative_path)\n\n\ndef numStrip(n):\n striped = []\n if n == 0:\n return [0]\n while n > 0:\n striped.append(n % 10)\n n //= 10\n return striped\n",
"step-4": "import sys, os\n\n\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath('.')\n return os.path.join(base_path, relative_path)\n\n\ndef numStrip(n):\n striped = []\n if n == 0:\n return [0]\n while n > 0:\n striped.append(n % 10)\n n //= 10\n return striped\n",
"step-5": "import sys, os\n\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)\n\ndef numStrip(n):\n striped = []\n if n == 0:\n return [0]\n while n > 0:\n striped.append(n % 10)\n n //= 10\n \n return striped\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
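A quick note on numStrip above: it peels digits off with % 10, so the result lists digits least-significant first. A short sanity check, assuming the function above is in scope:

print(numStrip(2024))             # [4, 2, 0, 2]
assert numStrip(120) == [0, 2, 1]
assert numStrip(0) == [0]         # zero is special-cased to avoid an empty list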
animals = ['bear', 'python', 'peacock', 'kangaroo', 'whale', 'platypus']
The animal at 1.
The third (3rd) animal.
The first (1st) animal.
The animal at 3.
The fifth (5th) animal.
The animal at 2.
The sixth (6th) animal.
The animal at 4.
|
normal
|
{
"blob_id": "a319ebb05e9034f19aef39bd46830c8a607ed121",
"index": 1013,
"step-1": "animals = ['bear', 'python', 'peacock', 'kangaroo', 'whale', 'platypus']\nThe animal at 1.\nThe third (3rd) animal.\nThe first (1st) animal.\nThe animal at 3.\nThe fifth (5th) animal.\nThe animal at 2.\nThe sixth (6th) animal.\nThe animal at 4.\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
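The prompts in the record above are an indexing drill rather than runnable Python. One possible answer key, assuming "at N" means 0-based indexing and the ordinals are 1-based:

animals = ['bear', 'python', 'peacock', 'kangaroo', 'whale', 'platypus']
print(animals[1])  # the animal at 1        -> 'python'
print(animals[2])  # the third (3rd) animal -> 'peacock'
print(animals[0])  # the first (1st) animal -> 'bear'
print(animals[3])  # the animal at 3        -> 'kangaroo'
print(animals[4])  # the fifth (5th) animal -> 'whale'
print(animals[2])  # the animal at 2        -> 'peacock'
print(animals[5])  # the sixth (6th) animal -> 'platypus'
print(animals[4])  # the animal at 4        -> 'whale'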
from .core import S3FileSystem, S3File
from .mapping import S3Map
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
normal
|
{
"blob_id": "32e60c672d6e73600d442c4344743deccaed6796",
"index": 8819,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndel get_versions\n",
"step-3": "<mask token>\n__version__ = get_versions()['version']\ndel get_versions\n",
"step-4": "from .core import S3FileSystem, S3File\nfrom .mapping import S3Map\nfrom ._version import get_versions\n__version__ = get_versions()['version']\ndel get_versions\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.utils import timezone
from timesheets.models import TimeSheet
from channels import Group
class ProjectTS(models.Model):
class Meta:
permissions = (
("approve_project_ts", "Can approve timesheet"),
)
pay_period_begin = models.DateField()
pay_period_end = models.DateField()
ambassador = models.ForeignKey(
User, related_name='project_ts_member',
limit_choices_to={'is_staff' : True})
ambassador_finalized = models.BooleanField(default=False)
final_approval = models.BooleanField(default=False)
date_submitted = models.DateTimeField(auto_now_add=True)
date_approved = models.DateTimeField(auto_now_add=True)
class ProjectTSEntry(models.Model):
description = models.CharField(max_length=150, default="")
project_time_sheet = models.ForeignKey(ProjectTS, related_name="project_time_sheet")
project_leader = models.ForeignKey(User, related_name="pl",
limit_choices_to={'is_staff' : True, 'groups__name' : 'Team Leader'})
project_leader_verification = models.BooleanField(default=False)
title = models.CharField(max_length=16, default="")
total_time = models.IntegerField(default=0)
start_time = models.TimeField(default=timezone.now)
end_time = models.TimeField(default=timezone.now)
day = models.DateField(default=timezone.now)
def save(self, *args, **kwargs):
self.total_time = self.end_time.hour - self.start_time.hour
result = super(ProjectTSEntry, self).save(*args, **kwargs)
return result
|
normal
|
{
"blob_id": "df39a97db25f03aca8ebd501283fd6a7c486db8c",
"index": 1243,
"step-1": "<mask token>\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default='')\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\n 'project_time_sheet')\n project_leader = models.ForeignKey(User, related_name='pl',\n limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default='')\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-2": "<mask token>\n\n\nclass ProjectTS(models.Model):\n\n\n class Meta:\n permissions = ('approve_project_ts', 'Can approve timesheet'),\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default='')\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\n 'project_time_sheet')\n project_leader = models.ForeignKey(User, related_name='pl',\n limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default='')\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-3": "<mask token>\n\n\nclass ProjectTS(models.Model):\n\n\n class Meta:\n permissions = ('approve_project_ts', 'Can approve timesheet'),\n pay_period_begin = models.DateField()\n pay_period_end = models.DateField()\n ambassador = models.ForeignKey(User, related_name='project_ts_member',\n limit_choices_to={'is_staff': True})\n ambassador_finalized = models.BooleanField(default=False)\n final_approval = models.BooleanField(default=False)\n date_submitted = models.DateTimeField(auto_now_add=True)\n date_approved = models.DateTimeField(auto_now_add=True)\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default='')\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\n 'project_time_sheet')\n project_leader = models.ForeignKey(User, related_name='pl',\n limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default='')\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.utils import timezone\nfrom timesheets.models import TimeSheet\nfrom channels import Group\n\n\nclass ProjectTS(models.Model):\n\n\n class Meta:\n permissions = ('approve_project_ts', 'Can approve timesheet'),\n pay_period_begin = models.DateField()\n pay_period_end = models.DateField()\n ambassador = models.ForeignKey(User, related_name='project_ts_member',\n limit_choices_to={'is_staff': True})\n ambassador_finalized = models.BooleanField(default=False)\n final_approval = models.BooleanField(default=False)\n date_submitted = models.DateTimeField(auto_now_add=True)\n date_approved = models.DateTimeField(auto_now_add=True)\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default='')\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\n 'project_time_sheet')\n project_leader = models.ForeignKey(User, related_name='pl',\n limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default='')\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-5": "from __future__ import unicode_literals\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.utils import timezone\nfrom timesheets.models import TimeSheet\nfrom channels import Group\n\n\nclass ProjectTS(models.Model):\n class Meta:\n permissions = (\n (\"approve_project_ts\", \"Can approve timesheet\"),\n )\n\n pay_period_begin = models.DateField()\n pay_period_end = models.DateField()\n ambassador = models.ForeignKey(\n User, related_name='project_ts_member',\n limit_choices_to={'is_staff' : True})\n ambassador_finalized = models.BooleanField(default=False)\n final_approval = models.BooleanField(default=False)\n date_submitted = models.DateTimeField(auto_now_add=True)\n date_approved = models.DateTimeField(auto_now_add=True)\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default=\"\")\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\"project_time_sheet\")\n project_leader = models.ForeignKey(User, related_name=\"pl\",\n limit_choices_to={'is_staff' : True, 'groups__name' : 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default=\"\")\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
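The save() override in the record above computes total_time as end_time.hour - start_time.hour, which silently drops minutes. A minute-accurate sketch (elapsed_hours is a hypothetical helper, not part of the model):

from datetime import date, datetime, time

def elapsed_hours(start_time, end_time):
    # Anchor both times to the same dummy date so the subtraction keeps minutes.
    delta = datetime.combine(date.min, end_time) - datetime.combine(date.min, start_time)
    return delta.total_seconds() / 3600

print(elapsed_hours(time(9, 30), time(11, 0)))  # 1.5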
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def smallest_divisible(nmax=20):
smallest = 1
for i in range(1, nmax + 1):
if smallest % i:
smallest *= i / gcd(i, smallest)
return smallest
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from fractions import gcd
def smallest_divisible(nmax=20):
smallest = 1
for i in range(1, nmax + 1):
if smallest % i:
smallest *= i / gcd(i, smallest)
return smallest
<|reserved_special_token_1|>
"""2520 is the smallest number that can be divided by each of the
numbers from 1 to 10 without any remainder.
What is the smallest positive number that is evenly divisible by all
of the numbers from 1 to 20?
"""
from fractions import gcd
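# NOTE: this is Python 2-era code; fractions.gcd was removed in Python 3.9
# (use math.gcd), and on Python 3 the "/" below would yield a float ("//"
# keeps the result an exact integer).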
def smallest_divisible(nmax=20):
smallest = 1
for i in range(1, nmax+1):
if smallest % i:
smallest *= i/gcd(i, smallest)
return smallest
|
flexible
|
{
"blob_id": "1cc696410a5d2eaf294d032c04a96974d5ef5db0",
"index": 2831,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef smallest_divisible(nmax=20):\n smallest = 1\n for i in range(1, nmax + 1):\n if smallest % i:\n smallest *= i / gcd(i, smallest)\n return smallest\n",
"step-3": "<mask token>\nfrom fractions import gcd\n\n\ndef smallest_divisible(nmax=20):\n smallest = 1\n for i in range(1, nmax + 1):\n if smallest % i:\n smallest *= i / gcd(i, smallest)\n return smallest\n",
"step-4": "\"\"\"2520 is the smallest number that can be divided by each of the\nnumbers from 1 to 10 without any remainder.\n\nWhat is the smallest positive number that is evenly divisible by all\nof the numbers from 1 to 20?\n\"\"\"\nfrom fractions import gcd\n\ndef smallest_divisible(nmax=20):\n smallest = 1\n for i in range(1, nmax+1):\n if smallest % i:\n smallest *= i/gcd(i, smallest)\n\n return smallest\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pickle
import pytest
from reader import EntryError
from reader import FeedError
from reader import SingleUpdateHookError
from reader import TagError
from reader.exceptions import _FancyExceptionBase
def test_fancy_exception_base():
exc = _FancyExceptionBase('message')
assert str(exc) == 'message'
exc = _FancyExceptionBase(message='message')
assert str(exc) == 'message'
cause = Exception('cause')
exc = _FancyExceptionBase('message')
exc.__cause__ = cause
pickled_exc = pickle.dumps(exc)
assert str(exc) == 'message: builtins.Exception: cause'
assert str(exc) == str(pickle.loads(pickled_exc))
class WithURL(_FancyExceptionBase):
message = 'default message'
def __init__(self, url, **kwargs):
super().__init__(**kwargs)
self.url = url
@property
def _str(self):
return self.url.upper()
exc = WithURL('url')
assert str(exc) == 'default message: URL'
exc = WithURL('url', message='another message')
exc.__cause__ = cause
assert str(exc) == 'another message: URL: builtins.Exception: cause'
def _all_classes(cls):
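    # recursively yield cls and every transitive subclass (pre-order walk)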
yield cls
for subclass in cls.__subclasses__():
yield from _all_classes(subclass)
def all_classes(*args, **kwargs):
return list(_all_classes(*args, **kwargs))
@pytest.mark.parametrize('exc_type', all_classes(FeedError))
def test_feed_error_str(exc_type):
exc = exc_type('url')
assert repr('url') in str(exc)
@pytest.mark.parametrize('exc_type', all_classes(EntryError))
def test_entry_error_str(exc_type):
exc = exc_type('url', 'id')
assert repr(('url', 'id')) in str(exc)
@pytest.mark.parametrize('exc_type', all_classes(TagError))
def test_tag_error_str(exc_type):
exc = exc_type(('object',), 'key')
assert "'object': 'key'" in str(exc)
@pytest.mark.parametrize(
'args, expected',
[
(
('before_feeds_update', 'myhook'),
"unexpected hook error: before_feeds_update: 'myhook'",
),
(
('before_feeds_update', 'myhook', ()),
"unexpected hook error: before_feeds_update: 'myhook': ()",
),
(
('before_feed_update', 'myhook', ('feed',)),
"unexpected hook error: before_feed_update: 'myhook': 'feed'",
),
(
('after_entry_update', 'myhook', ('feed', 'entry')),
"unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')",
),
],
)
def test_single_update_hook_error_str(args, expected):
exc = SingleUpdateHookError(*args)
assert str(exc) == expected
exc = SingleUpdateHookError(*args)
exc.__cause__ = Exception('cause')
assert str(exc) == expected + ": builtins.Exception: cause"
|
normal
|
{
"blob_id": "6fd4df7370de2343fe7723a2d8f5aacffa333835",
"index": 3105,
"step-1": "<mask token>\n\n\ndef test_fancy_exception_base():\n exc = _FancyExceptionBase('message')\n assert str(exc) == 'message'\n exc = _FancyExceptionBase(message='message')\n assert str(exc) == 'message'\n cause = Exception('cause')\n exc = _FancyExceptionBase('message')\n exc.__cause__ = cause\n pickled_exc = pickle.dumps(exc)\n assert str(exc) == 'message: builtins.Exception: cause'\n assert str(exc) == str(pickle.loads(pickled_exc))\n\n\n class WithURL(_FancyExceptionBase):\n message = 'default message'\n\n def __init__(self, url, **kwargs):\n super().__init__(**kwargs)\n self.url = url\n\n @property\n def _str(self):\n return self.url.upper()\n exc = WithURL('url')\n assert str(exc) == 'default message: URL'\n exc = WithURL('url', message='another message')\n exc.__cause__ = cause\n assert str(exc) == 'another message: URL: builtins.Exception: cause'\n\n\ndef _all_classes(cls):\n yield cls\n for subclass in cls.__subclasses__():\n yield from _all_classes(subclass)\n\n\n<mask token>\n\n\[email protected]('exc_type', all_classes(FeedError))\ndef test_feed_error_str(exc_type):\n exc = exc_type('url')\n assert repr('url') in str(exc)\n\n\n<mask token>\n\n\[email protected]('exc_type', all_classes(TagError))\ndef test_tag_error_str(exc_type):\n exc = exc_type(('object',), 'key')\n assert \"'object': 'key'\" in str(exc)\n\n\[email protected]('args, expected', [(('before_feeds_update',\n 'myhook'), \"unexpected hook error: before_feeds_update: 'myhook'\"), ((\n 'before_feeds_update', 'myhook', ()),\n \"unexpected hook error: before_feeds_update: 'myhook': ()\"), ((\n 'before_feed_update', 'myhook', ('feed',)),\n \"unexpected hook error: before_feed_update: 'myhook': 'feed'\"), ((\n 'after_entry_update', 'myhook', ('feed', 'entry')),\n \"unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')\")])\ndef test_single_update_hook_error_str(args, expected):\n exc = SingleUpdateHookError(*args)\n assert str(exc) == expected\n exc = SingleUpdateHookError(*args)\n exc.__cause__ = Exception('cause')\n assert str(exc) == expected + ': builtins.Exception: cause'\n",
"step-2": "<mask token>\n\n\ndef test_fancy_exception_base():\n exc = _FancyExceptionBase('message')\n assert str(exc) == 'message'\n exc = _FancyExceptionBase(message='message')\n assert str(exc) == 'message'\n cause = Exception('cause')\n exc = _FancyExceptionBase('message')\n exc.__cause__ = cause\n pickled_exc = pickle.dumps(exc)\n assert str(exc) == 'message: builtins.Exception: cause'\n assert str(exc) == str(pickle.loads(pickled_exc))\n\n\n class WithURL(_FancyExceptionBase):\n message = 'default message'\n\n def __init__(self, url, **kwargs):\n super().__init__(**kwargs)\n self.url = url\n\n @property\n def _str(self):\n return self.url.upper()\n exc = WithURL('url')\n assert str(exc) == 'default message: URL'\n exc = WithURL('url', message='another message')\n exc.__cause__ = cause\n assert str(exc) == 'another message: URL: builtins.Exception: cause'\n\n\ndef _all_classes(cls):\n yield cls\n for subclass in cls.__subclasses__():\n yield from _all_classes(subclass)\n\n\ndef all_classes(*args, **kwargs):\n return list(_all_classes(*args, **kwargs))\n\n\[email protected]('exc_type', all_classes(FeedError))\ndef test_feed_error_str(exc_type):\n exc = exc_type('url')\n assert repr('url') in str(exc)\n\n\n<mask token>\n\n\[email protected]('exc_type', all_classes(TagError))\ndef test_tag_error_str(exc_type):\n exc = exc_type(('object',), 'key')\n assert \"'object': 'key'\" in str(exc)\n\n\[email protected]('args, expected', [(('before_feeds_update',\n 'myhook'), \"unexpected hook error: before_feeds_update: 'myhook'\"), ((\n 'before_feeds_update', 'myhook', ()),\n \"unexpected hook error: before_feeds_update: 'myhook': ()\"), ((\n 'before_feed_update', 'myhook', ('feed',)),\n \"unexpected hook error: before_feed_update: 'myhook': 'feed'\"), ((\n 'after_entry_update', 'myhook', ('feed', 'entry')),\n \"unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')\")])\ndef test_single_update_hook_error_str(args, expected):\n exc = SingleUpdateHookError(*args)\n assert str(exc) == expected\n exc = SingleUpdateHookError(*args)\n exc.__cause__ = Exception('cause')\n assert str(exc) == expected + ': builtins.Exception: cause'\n",
"step-3": "<mask token>\n\n\ndef test_fancy_exception_base():\n exc = _FancyExceptionBase('message')\n assert str(exc) == 'message'\n exc = _FancyExceptionBase(message='message')\n assert str(exc) == 'message'\n cause = Exception('cause')\n exc = _FancyExceptionBase('message')\n exc.__cause__ = cause\n pickled_exc = pickle.dumps(exc)\n assert str(exc) == 'message: builtins.Exception: cause'\n assert str(exc) == str(pickle.loads(pickled_exc))\n\n\n class WithURL(_FancyExceptionBase):\n message = 'default message'\n\n def __init__(self, url, **kwargs):\n super().__init__(**kwargs)\n self.url = url\n\n @property\n def _str(self):\n return self.url.upper()\n exc = WithURL('url')\n assert str(exc) == 'default message: URL'\n exc = WithURL('url', message='another message')\n exc.__cause__ = cause\n assert str(exc) == 'another message: URL: builtins.Exception: cause'\n\n\ndef _all_classes(cls):\n yield cls\n for subclass in cls.__subclasses__():\n yield from _all_classes(subclass)\n\n\ndef all_classes(*args, **kwargs):\n return list(_all_classes(*args, **kwargs))\n\n\[email protected]('exc_type', all_classes(FeedError))\ndef test_feed_error_str(exc_type):\n exc = exc_type('url')\n assert repr('url') in str(exc)\n\n\[email protected]('exc_type', all_classes(EntryError))\ndef test_entry_error_str(exc_type):\n exc = exc_type('url', 'id')\n assert repr(('url', 'id')) in str(exc)\n\n\[email protected]('exc_type', all_classes(TagError))\ndef test_tag_error_str(exc_type):\n exc = exc_type(('object',), 'key')\n assert \"'object': 'key'\" in str(exc)\n\n\[email protected]('args, expected', [(('before_feeds_update',\n 'myhook'), \"unexpected hook error: before_feeds_update: 'myhook'\"), ((\n 'before_feeds_update', 'myhook', ()),\n \"unexpected hook error: before_feeds_update: 'myhook': ()\"), ((\n 'before_feed_update', 'myhook', ('feed',)),\n \"unexpected hook error: before_feed_update: 'myhook': 'feed'\"), ((\n 'after_entry_update', 'myhook', ('feed', 'entry')),\n \"unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')\")])\ndef test_single_update_hook_error_str(args, expected):\n exc = SingleUpdateHookError(*args)\n assert str(exc) == expected\n exc = SingleUpdateHookError(*args)\n exc.__cause__ = Exception('cause')\n assert str(exc) == expected + ': builtins.Exception: cause'\n",
"step-4": "import pickle\nimport pytest\nfrom reader import EntryError\nfrom reader import FeedError\nfrom reader import SingleUpdateHookError\nfrom reader import TagError\nfrom reader.exceptions import _FancyExceptionBase\n\n\ndef test_fancy_exception_base():\n exc = _FancyExceptionBase('message')\n assert str(exc) == 'message'\n exc = _FancyExceptionBase(message='message')\n assert str(exc) == 'message'\n cause = Exception('cause')\n exc = _FancyExceptionBase('message')\n exc.__cause__ = cause\n pickled_exc = pickle.dumps(exc)\n assert str(exc) == 'message: builtins.Exception: cause'\n assert str(exc) == str(pickle.loads(pickled_exc))\n\n\n class WithURL(_FancyExceptionBase):\n message = 'default message'\n\n def __init__(self, url, **kwargs):\n super().__init__(**kwargs)\n self.url = url\n\n @property\n def _str(self):\n return self.url.upper()\n exc = WithURL('url')\n assert str(exc) == 'default message: URL'\n exc = WithURL('url', message='another message')\n exc.__cause__ = cause\n assert str(exc) == 'another message: URL: builtins.Exception: cause'\n\n\ndef _all_classes(cls):\n yield cls\n for subclass in cls.__subclasses__():\n yield from _all_classes(subclass)\n\n\ndef all_classes(*args, **kwargs):\n return list(_all_classes(*args, **kwargs))\n\n\[email protected]('exc_type', all_classes(FeedError))\ndef test_feed_error_str(exc_type):\n exc = exc_type('url')\n assert repr('url') in str(exc)\n\n\[email protected]('exc_type', all_classes(EntryError))\ndef test_entry_error_str(exc_type):\n exc = exc_type('url', 'id')\n assert repr(('url', 'id')) in str(exc)\n\n\[email protected]('exc_type', all_classes(TagError))\ndef test_tag_error_str(exc_type):\n exc = exc_type(('object',), 'key')\n assert \"'object': 'key'\" in str(exc)\n\n\[email protected]('args, expected', [(('before_feeds_update',\n 'myhook'), \"unexpected hook error: before_feeds_update: 'myhook'\"), ((\n 'before_feeds_update', 'myhook', ()),\n \"unexpected hook error: before_feeds_update: 'myhook': ()\"), ((\n 'before_feed_update', 'myhook', ('feed',)),\n \"unexpected hook error: before_feed_update: 'myhook': 'feed'\"), ((\n 'after_entry_update', 'myhook', ('feed', 'entry')),\n \"unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')\")])\ndef test_single_update_hook_error_str(args, expected):\n exc = SingleUpdateHookError(*args)\n assert str(exc) == expected\n exc = SingleUpdateHookError(*args)\n exc.__cause__ = Exception('cause')\n assert str(exc) == expected + ': builtins.Exception: cause'\n",
"step-5": "import pickle\n\nimport pytest\n\nfrom reader import EntryError\nfrom reader import FeedError\nfrom reader import SingleUpdateHookError\nfrom reader import TagError\nfrom reader.exceptions import _FancyExceptionBase\n\n\ndef test_fancy_exception_base():\n exc = _FancyExceptionBase('message')\n assert str(exc) == 'message'\n\n exc = _FancyExceptionBase(message='message')\n assert str(exc) == 'message'\n\n cause = Exception('cause')\n\n exc = _FancyExceptionBase('message')\n exc.__cause__ = cause\n pickled_exc = pickle.dumps(exc)\n assert str(exc) == 'message: builtins.Exception: cause'\n assert str(exc) == str(pickle.loads(pickled_exc))\n\n class WithURL(_FancyExceptionBase):\n message = 'default message'\n\n def __init__(self, url, **kwargs):\n super().__init__(**kwargs)\n self.url = url\n\n @property\n def _str(self):\n return self.url.upper()\n\n exc = WithURL('url')\n assert str(exc) == 'default message: URL'\n\n exc = WithURL('url', message='another message')\n exc.__cause__ = cause\n assert str(exc) == 'another message: URL: builtins.Exception: cause'\n\n\ndef _all_classes(cls):\n yield cls\n for subclass in cls.__subclasses__():\n yield from _all_classes(subclass)\n\n\ndef all_classes(*args, **kwargs):\n return list(_all_classes(*args, **kwargs))\n\n\[email protected]('exc_type', all_classes(FeedError))\ndef test_feed_error_str(exc_type):\n exc = exc_type('url')\n assert repr('url') in str(exc)\n\n\[email protected]('exc_type', all_classes(EntryError))\ndef test_entry_error_str(exc_type):\n exc = exc_type('url', 'id')\n assert repr(('url', 'id')) in str(exc)\n\n\[email protected]('exc_type', all_classes(TagError))\ndef test_tag_error_str(exc_type):\n exc = exc_type(('object',), 'key')\n assert \"'object': 'key'\" in str(exc)\n\n\[email protected](\n 'args, expected',\n [\n (\n ('before_feeds_update', 'myhook'),\n \"unexpected hook error: before_feeds_update: 'myhook'\",\n ),\n (\n ('before_feeds_update', 'myhook', ()),\n \"unexpected hook error: before_feeds_update: 'myhook': ()\",\n ),\n (\n ('before_feed_update', 'myhook', ('feed',)),\n \"unexpected hook error: before_feed_update: 'myhook': 'feed'\",\n ),\n (\n ('after_entry_update', 'myhook', ('feed', 'entry')),\n \"unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')\",\n ),\n ],\n)\ndef test_single_update_hook_error_str(args, expected):\n exc = SingleUpdateHookError(*args)\n assert str(exc) == expected\n exc = SingleUpdateHookError(*args)\n exc.__cause__ = Exception('cause')\n assert str(exc) == expected + \": builtins.Exception: cause\"\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def login_name(request):
if request.method == 'POST':
form = Login(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
password = form.cleaned_data['password']
return render(request, 'login/new_index.html', {'form': form})
else:
form = Login()
return render(request, 'login/new_index.html', {'form': form})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request):
return render(request, 'index.html')
<|reserved_special_token_0|>
def login_name(request):
if request.method == 'POST':
form = Login(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
password = form.cleaned_data['password']
return render(request, 'login/new_index.html', {'form': form})
else:
form = Login()
return render(request, 'login/new_index.html', {'form': form})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request):
return render(request, 'index.html')
def get_name(request):
if request.method == 'POST':
form = Sign_Up(request.POST)
if form.is_valid():
firstName = form.cleaned_data['first_name']
lastName = form.cleaned_data['last_name']
email = form.cleaned_data['email']
password = form.cleaned_data['password']
details = Student(first_name=firstName, last_name=lastName,
email=email, password=password)
details.save()
return render(request, 'login/new_index.html', {'form': form})
else:
form = Sign_Up()
return render(request, 'login/new_index.html', {'form': form})
def login_name(request):
if request.method == 'POST':
form = Login(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
password = form.cleaned_data['password']
return render(request, 'login/new_index.html', {'form': form})
else:
form = Login()
return render(request, 'login/new_index.html', {'form': form})
<|reserved_special_token_1|>
from django.http import HttpResponse
from django.shortcuts import render
from .forms import Sign_Up, Login
from .models import Student
def index(request):
return render(request, 'index.html')
def get_name(request):
if request.method == 'POST':
form = Sign_Up(request.POST)
if form.is_valid():
firstName = form.cleaned_data['first_name']
lastName = form.cleaned_data['last_name']
email = form.cleaned_data['email']
password = form.cleaned_data['password']
details = Student(first_name=firstName, last_name=lastName,
email=email, password=password)
details.save()
return render(request, 'login/new_index.html', {'form': form})
else:
form = Sign_Up()
return render(request, 'login/new_index.html', {'form': form})
def login_name(request):
if request.method == 'POST':
form = Login(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
password = form.cleaned_data['password']
return render(request, 'login/new_index.html', {'form': form})
else:
form = Login()
return render(request, 'login/new_index.html', {'form': form})
<|reserved_special_token_1|>
# i have created this file-hitu
from django.http import HttpResponse
from django.shortcuts import render
from .forms import Sign_Up, Login
from .models import Student
# render is used to create and import the templates
# render takes first arg = request, 2nd arg = name of the file you want to import, 3rd arg = parameters or variable name
def index(request):
return render(request, 'index.html')
def get_name(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = Sign_Up(request.POST)
# check whether it's valid:
if form.is_valid():
firstName = form.cleaned_data['first_name']
lastName = form.cleaned_data['last_name']
email = form.cleaned_data['email']
password = form.cleaned_data['password']
            details = Student(first_name=firstName, last_name=lastName, email=email,
                              password=password) # these keyword arguments map to the Student model fields
# process the data in form.cleaned_data as required
details.save() # this is used to save all the details
# ...
# redirect to a new URL:
return render(request, 'login/new_index.html', {'form': form})
# if a GET (or any other method) we'll create a blank form
else:
form = Sign_Up()
return render(request, 'login/new_index.html', {'form': form})
def login_name(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = Login(request.POST)
# check whether it's valid:
if form.is_valid():
email = form.cleaned_data['email']
password = form.cleaned_data['password']
return render(request, 'login/new_index.html', {'form': form})
# if a GET (or any other method) we'll create a blank form
else:
form = Login()
return render(request, 'login/new_index.html', {'form': form})
|
flexible
|
{
"blob_id": "cbbb314a3262713f6cb2bb2dd90709d7bf1ca8eb",
"index": 6095,
"step-1": "<mask token>\n\n\ndef login_name(request):\n if request.method == 'POST':\n form = Login(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n return render(request, 'login/new_index.html', {'form': form})\n else:\n form = Login()\n return render(request, 'login/new_index.html', {'form': form})\n",
"step-2": "<mask token>\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\n<mask token>\n\n\ndef login_name(request):\n if request.method == 'POST':\n form = Login(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n return render(request, 'login/new_index.html', {'form': form})\n else:\n form = Login()\n return render(request, 'login/new_index.html', {'form': form})\n",
"step-3": "<mask token>\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\ndef get_name(request):\n if request.method == 'POST':\n form = Sign_Up(request.POST)\n if form.is_valid():\n firstName = form.cleaned_data['first_name']\n lastName = form.cleaned_data['last_name']\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n details = Student(first_name=firstName, last_name=lastName,\n email=email, password=password)\n details.save()\n return render(request, 'login/new_index.html', {'form': form})\n else:\n form = Sign_Up()\n return render(request, 'login/new_index.html', {'form': form})\n\n\ndef login_name(request):\n if request.method == 'POST':\n form = Login(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n return render(request, 'login/new_index.html', {'form': form})\n else:\n form = Login()\n return render(request, 'login/new_index.html', {'form': form})\n",
"step-4": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom .forms import Sign_Up, Login\nfrom .models import Student\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\ndef get_name(request):\n if request.method == 'POST':\n form = Sign_Up(request.POST)\n if form.is_valid():\n firstName = form.cleaned_data['first_name']\n lastName = form.cleaned_data['last_name']\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n details = Student(first_name=firstName, last_name=lastName,\n email=email, password=password)\n details.save()\n return render(request, 'login/new_index.html', {'form': form})\n else:\n form = Sign_Up()\n return render(request, 'login/new_index.html', {'form': form})\n\n\ndef login_name(request):\n if request.method == 'POST':\n form = Login(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n return render(request, 'login/new_index.html', {'form': form})\n else:\n form = Login()\n return render(request, 'login/new_index.html', {'form': form})\n",
"step-5": "# i have created this file-hitu\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom .forms import Sign_Up, Login\nfrom .models import Student\n\n\n# render is used to create and impot the templates\n# render takes first arg = request, 2nd arg = name of the file you want to import, 3rd arg = parameters or variable name\ndef index(request):\n return render(request, 'index.html')\n\n\ndef get_name(request):\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = Sign_Up(request.POST)\n # check whether it's valid:\n if form.is_valid():\n firstName = form.cleaned_data['first_name']\n lastName = form.cleaned_data['last_name']\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n details = Student(first_name=firstName, last_name=lastName, email=email,\n password=password) # these are models variable in red\n # process the data in form.cleaned_data as required\n details.save() # this is used to save all the details\n # ...\n # redirect to a new URL:\n return render(request, 'login/new_index.html', {'form': form})\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = Sign_Up()\n\n return render(request, 'login/new_index.html', {'form': form})\n\n\ndef login_name(request):\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = Login(request.POST)\n # check whether it's valid:\n if form.is_valid():\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n\n return render(request, 'login/new_index.html', {'form': form})\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = Login()\n\n return render(request, 'login/new_index.html', {'form': form})\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def StringToList(input_string):
word_list = []
word = ''
for i in range(0, len(input_string)):
if input_string[i] == ' ':
word_list.append(word)
word = ''
elif i == len(input_string) - 1:
word = word + input_string[i]
word_list.append(word)
word = ''
else:
word = word + input_string[i]
return word_list
<|reserved_special_token_0|>
def LongestWord(word_list):
length = 0
for i in word_list:
temp = len(i)
if temp > length:
length = temp
return length
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def StringToList(input_string):
word_list = []
word = ''
for i in range(0, len(input_string)):
if input_string[i] == ' ':
word_list.append(word)
word = ''
elif i == len(input_string) - 1:
word = word + input_string[i]
word_list.append(word)
word = ''
else:
word = word + input_string[i]
return word_list
<|reserved_special_token_0|>
def LongestWord(word_list):
length = 0
for i in word_list:
temp = len(i)
if temp > length:
length = temp
return length
<|reserved_special_token_0|>
def return_vertically(input_string):
word_list = StringToList(input_string)
longest_word = LongestWord(word_list)
print(longest_word)
print(word_list)
vertical_list = []
"""initializing empty list"""
for i in range(0, longest_word):
vertical_list.append('')
for word in word_list:
for i in range(0, longest_word):
if i < len(word):
vertical_list[i] = vertical_list[i] + word[i]
else:
vertical_list[i] = vertical_list[i] + ' '
"""deleting trailing spaces"""
for i in range(0, len(vertical_list)):
vertical_list[i] = vertical_list[i].rstrip()
return vertical_list
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def StringToList(input_string):
word_list = []
word = ''
for i in range(0, len(input_string)):
if input_string[i] == ' ':
word_list.append(word)
word = ''
elif i == len(input_string) - 1:
word = word + input_string[i]
word_list.append(word)
word = ''
else:
word = word + input_string[i]
return word_list
<|reserved_special_token_0|>
def LongestWord(word_list):
length = 0
for i in word_list:
temp = len(i)
if temp > length:
length = temp
return length
<|reserved_special_token_0|>
def return_vertically(input_string):
word_list = StringToList(input_string)
longest_word = LongestWord(word_list)
print(longest_word)
print(word_list)
vertical_list = []
"""initializing empty list"""
for i in range(0, longest_word):
vertical_list.append('')
for word in word_list:
for i in range(0, longest_word):
if i < len(word):
vertical_list[i] = vertical_list[i] + word[i]
else:
vertical_list[i] = vertical_list[i] + ' '
"""deleting trailing spaces"""
for i in range(0, len(vertical_list)):
vertical_list[i] = vertical_list[i].rstrip()
return vertical_list
if __name__ == '__main__':
input_string = 'TO BE OR NOT TO BE'
print(return_vertically(input_string))
<|reserved_special_token_1|>
"""
Given a string s. Return all the words vertically in the same
order in which they appear in s.
    Words are returned as a list of strings, complete with spaces
    when necessary. (Trailing spaces are not allowed.)
    Each word is put in only one column, and each column
    contains only one word.
Example 1:
Input: s = "HOW ARE YOU"
Output: ["HAY","ORO","WEU"]
Explanation: Each word is printed vertically.
"HAY"
"ORO"
"WEU"
Example 2:
Input: s = "TO BE OR NOT TO BE"
Output: ["TBONTB","OEROOE"," T"]
Explanation: Trailing spaces are not allowed.
"TBONTB"
"OEROOE"
" T"
"""
"""converting string to list of words"""
def StringToList(input_string):
word_list=[]
word=""
for i in range(0,len(input_string)):
if input_string[i]==" ":
word_list.append(word)
word=""
elif i==len(input_string)-1:
word=word+input_string[i]
word_list.append(word)
word=""
else:
word=word+input_string[i]
return word_list
"""find length of longest word"""
def LongestWord(word_list):
length=0
for i in word_list:
temp=len(i)
if temp>length:
length=temp
return length
"""converting list to word to vertical list"""
def return_vertically(input_string):
word_list=StringToList(input_string)
longest_word = LongestWord(word_list)
print(longest_word)
print(word_list)
vertical_list=[]
"""initializing empty list"""
for i in range(0,longest_word):
vertical_list.append("")
for word in word_list:
for i in range(0,longest_word):
if i<len(word):
vertical_list[i]=vertical_list[i]+word[i]
else:
vertical_list[i]=vertical_list[i]+" "
"""deleting trailing spaces"""
for i in range(0, len(vertical_list)):
vertical_list[i]=vertical_list[i].rstrip()
return vertical_list
if __name__ == "__main__":
input_string = "TO BE OR NOT TO BE"
print(return_vertically(input_string))
|
flexible
|
{
"blob_id": "7c2897dcb732e75d7328e8c0484d5bd7f3b56e6f",
"index": 9190,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef StringToList(input_string):\n word_list = []\n word = ''\n for i in range(0, len(input_string)):\n if input_string[i] == ' ':\n word_list.append(word)\n word = ''\n elif i == len(input_string) - 1:\n word = word + input_string[i]\n word_list.append(word)\n word = ''\n else:\n word = word + input_string[i]\n return word_list\n\n\n<mask token>\n\n\ndef LongestWord(word_list):\n length = 0\n for i in word_list:\n temp = len(i)\n if temp > length:\n length = temp\n return length\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef StringToList(input_string):\n word_list = []\n word = ''\n for i in range(0, len(input_string)):\n if input_string[i] == ' ':\n word_list.append(word)\n word = ''\n elif i == len(input_string) - 1:\n word = word + input_string[i]\n word_list.append(word)\n word = ''\n else:\n word = word + input_string[i]\n return word_list\n\n\n<mask token>\n\n\ndef LongestWord(word_list):\n length = 0\n for i in word_list:\n temp = len(i)\n if temp > length:\n length = temp\n return length\n\n\n<mask token>\n\n\ndef return_vertically(input_string):\n word_list = StringToList(input_string)\n longest_word = LongestWord(word_list)\n print(longest_word)\n print(word_list)\n vertical_list = []\n \"\"\"initializing empty list\"\"\"\n for i in range(0, longest_word):\n vertical_list.append('')\n for word in word_list:\n for i in range(0, longest_word):\n if i < len(word):\n vertical_list[i] = vertical_list[i] + word[i]\n else:\n vertical_list[i] = vertical_list[i] + ' '\n \"\"\"deleting trailing spaces\"\"\"\n for i in range(0, len(vertical_list)):\n vertical_list[i] = vertical_list[i].rstrip()\n return vertical_list\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef StringToList(input_string):\n word_list = []\n word = ''\n for i in range(0, len(input_string)):\n if input_string[i] == ' ':\n word_list.append(word)\n word = ''\n elif i == len(input_string) - 1:\n word = word + input_string[i]\n word_list.append(word)\n word = ''\n else:\n word = word + input_string[i]\n return word_list\n\n\n<mask token>\n\n\ndef LongestWord(word_list):\n length = 0\n for i in word_list:\n temp = len(i)\n if temp > length:\n length = temp\n return length\n\n\n<mask token>\n\n\ndef return_vertically(input_string):\n word_list = StringToList(input_string)\n longest_word = LongestWord(word_list)\n print(longest_word)\n print(word_list)\n vertical_list = []\n \"\"\"initializing empty list\"\"\"\n for i in range(0, longest_word):\n vertical_list.append('')\n for word in word_list:\n for i in range(0, longest_word):\n if i < len(word):\n vertical_list[i] = vertical_list[i] + word[i]\n else:\n vertical_list[i] = vertical_list[i] + ' '\n \"\"\"deleting trailing spaces\"\"\"\n for i in range(0, len(vertical_list)):\n vertical_list[i] = vertical_list[i].rstrip()\n return vertical_list\n\n\nif __name__ == '__main__':\n input_string = 'TO BE OR NOT TO BE'\n print(return_vertically(input_string))\n",
"step-5": "\"\"\"\r\n Given a string s. Return all the words vertically in the same\r\n order in which they appear in s.\r\n Words are returned as a list of strings, complete with spaces\r\n when is necessary. (Trailing spaces are not allowed).\r\n Each word would be put on only one column and that in one column\r\n there will be only one word.\r\n\r\nExample 1:\r\n\r\nInput: s = \"HOW ARE YOU\"\r\nOutput: [\"HAY\",\"ORO\",\"WEU\"]\r\nExplanation: Each word is printed vertically. \r\n \"HAY\"\r\n \"ORO\"\r\n \"WEU\"\r\n\r\nExample 2:\r\n\r\nInput: s = \"TO BE OR NOT TO BE\"\r\nOutput: [\"TBONTB\",\"OEROOE\",\" T\"]\r\nExplanation: Trailing spaces is not allowed. \r\n\"TBONTB\"\r\n\"OEROOE\"\r\n\" T\"\r\n\"\"\"\r\n\"\"\"converting string to list of words\"\"\"\r\ndef StringToList(input_string):\r\n word_list=[]\r\n word=\"\"\r\n \r\n for i in range(0,len(input_string)):\r\n if input_string[i]==\" \":\r\n word_list.append(word)\r\n word=\"\"\r\n elif i==len(input_string)-1:\r\n word=word+input_string[i]\r\n word_list.append(word)\r\n word=\"\"\r\n else:\r\n word=word+input_string[i]\r\n return word_list\r\n\r\n\"\"\"find length of longest word\"\"\"\r\ndef LongestWord(word_list):\r\n length=0\r\n for i in word_list:\r\n temp=len(i)\r\n if temp>length:\r\n length=temp\r\n return length\r\n \r\n \r\n \r\n\"\"\"converting list to word to vertical list\"\"\"\r\ndef return_vertically(input_string):\r\n word_list=StringToList(input_string)\r\n longest_word = LongestWord(word_list)\r\n print(longest_word)\r\n print(word_list)\r\n vertical_list=[]\r\n \r\n \"\"\"initializing empty list\"\"\"\r\n for i in range(0,longest_word):\r\n vertical_list.append(\"\")\r\n \r\n \r\n \r\n for word in word_list:\r\n for i in range(0,longest_word):\r\n if i<len(word):\r\n vertical_list[i]=vertical_list[i]+word[i]\r\n else:\r\n vertical_list[i]=vertical_list[i]+\" \"\r\n \r\n \"\"\"deleting trailing spaces\"\"\"\r\n for i in range(0, len(vertical_list)):\r\n vertical_list[i]=vertical_list[i].rstrip()\r\n \r\n \r\n return vertical_list\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\nif __name__ == \"__main__\":\r\n input_string = \"TO BE OR NOT TO BE\"\r\n \r\n print(return_vertically(input_string))\r\n\r\n\r\n\r\n\r\n\r\n\r\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class SSDigitDecoder(Elaboratable):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Blinky(Elaboratable):
def __init__(self):
self.dd0 = SSDigitDecoder()
self.dd1 = SSDigitDecoder()
def elaborate(self, platform):
m = Module()
m.submodules.dd0 = self.dd0
m.submodules.dd1 = self.dd1
timer = Signal(20)
led = platform.request('led', 0)
btn = platform.request('button', 0)
btn1 = platform.request('button', 1)
dig_sel = platform.request('ss_dig_sel', 0)
disp = platform.request('ss_disp', 0)
m.d.sync += timer.eq(timer + 1)
m.d.comb += led.o.eq(timer[-1] & ~btn)
running = Signal(1)
"""
# naive btn
last_btn1 = Signal(1)
m.d.sync += last_btn1.eq(btn1.i)
with m.If(btn1.i & ~last_btn1):
m.d.sync += running.eq(~running)
"""
btn1_pipe1 = Signal(1)
btn1_pipe2 = Signal(1)
btn1_db = Signal(range(0, 65535))
m.d.sync += [btn1_pipe1.eq(btn1.i), btn1_pipe2.eq(btn1_pipe1)]
with m.If(btn1_pipe2):
m.d.sync += btn1_db.eq(65535)
with m.Else():
with m.If(btn1_db > 0):
m.d.sync += btn1_db.eq(btn1_db - 1)
with m.If(btn1_pipe2 & (btn1_db == 0)):
m.d.sync += running.eq(~running)
with m.If(running & (timer == 0)):
with m.If(self.dd0.i_num == 9):
m.d.sync += self.dd0.i_num.eq(0)
with m.If(self.dd1.i_num == 9):
m.d.sync += self.dd1.i_num.eq(0)
with m.Else():
m.d.sync += self.dd1.incr()
with m.Else():
m.d.sync += self.dd0.incr()
with m.If(timer[8]):
m.d.comb += [dig_sel.o.eq(0), disp.o.eq(self.dd1.o_disp)]
with m.Else():
m.d.comb += [dig_sel.o.eq(1), disp.o.eq(self.dd0.o_disp)]
return m
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SSDigitDecoder(Elaboratable):
def __init__(self):
self.i_num = Signal(4)
self.o_disp = Signal(7)
self.lut = {(0): 63, (1): 6, (2): 91, (3): 79, (4): 102, (5): 109,
(6): 125, (7): 7, (8): 127, (9): 103}
def incr(self):
return self.i_num.eq(self.i_num + 1)
<|reserved_special_token_0|>
class Blinky(Elaboratable):
def __init__(self):
self.dd0 = SSDigitDecoder()
self.dd1 = SSDigitDecoder()
def elaborate(self, platform):
m = Module()
m.submodules.dd0 = self.dd0
m.submodules.dd1 = self.dd1
timer = Signal(20)
led = platform.request('led', 0)
btn = platform.request('button', 0)
btn1 = platform.request('button', 1)
dig_sel = platform.request('ss_dig_sel', 0)
disp = platform.request('ss_disp', 0)
m.d.sync += timer.eq(timer + 1)
m.d.comb += led.o.eq(timer[-1] & ~btn)
running = Signal(1)
"""
# naive btn
last_btn1 = Signal(1)
m.d.sync += last_btn1.eq(btn1.i)
with m.If(btn1.i & ~last_btn1):
m.d.sync += running.eq(~running)
"""
btn1_pipe1 = Signal(1)
btn1_pipe2 = Signal(1)
btn1_db = Signal(range(0, 65535))
m.d.sync += [btn1_pipe1.eq(btn1.i), btn1_pipe2.eq(btn1_pipe1)]
with m.If(btn1_pipe2):
m.d.sync += btn1_db.eq(65535)
with m.Else():
with m.If(btn1_db > 0):
m.d.sync += btn1_db.eq(btn1_db - 1)
with m.If(btn1_pipe2 & (btn1_db == 0)):
m.d.sync += running.eq(~running)
with m.If(running & (timer == 0)):
with m.If(self.dd0.i_num == 9):
m.d.sync += self.dd0.i_num.eq(0)
with m.If(self.dd1.i_num == 9):
m.d.sync += self.dd1.i_num.eq(0)
with m.Else():
m.d.sync += self.dd1.incr()
with m.Else():
m.d.sync += self.dd0.incr()
with m.If(timer[8]):
m.d.comb += [dig_sel.o.eq(0), disp.o.eq(self.dd1.o_disp)]
with m.Else():
m.d.comb += [dig_sel.o.eq(1), disp.o.eq(self.dd0.o_disp)]
return m
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SSDigitDecoder(Elaboratable):
def __init__(self):
self.i_num = Signal(4)
self.o_disp = Signal(7)
self.lut = {(0): 63, (1): 6, (2): 91, (3): 79, (4): 102, (5): 109,
(6): 125, (7): 7, (8): 127, (9): 103}
def incr(self):
return self.i_num.eq(self.i_num + 1)
def elaborate(self, platform):
m = Module()
with m.Switch(self.i_num):
for a, b in self.lut.items():
with m.Case(a):
m.d.comb += self.o_disp.eq(b)
return m
class Blinky(Elaboratable):
def __init__(self):
self.dd0 = SSDigitDecoder()
self.dd1 = SSDigitDecoder()
def elaborate(self, platform):
m = Module()
m.submodules.dd0 = self.dd0
m.submodules.dd1 = self.dd1
timer = Signal(20)
led = platform.request('led', 0)
btn = platform.request('button', 0)
btn1 = platform.request('button', 1)
dig_sel = platform.request('ss_dig_sel', 0)
disp = platform.request('ss_disp', 0)
m.d.sync += timer.eq(timer + 1)
m.d.comb += led.o.eq(timer[-1] & ~btn)
running = Signal(1)
"""
# naive btn
last_btn1 = Signal(1)
m.d.sync += last_btn1.eq(btn1.i)
with m.If(btn1.i & ~last_btn1):
m.d.sync += running.eq(~running)
"""
btn1_pipe1 = Signal(1)
btn1_pipe2 = Signal(1)
btn1_db = Signal(range(0, 65535))
m.d.sync += [btn1_pipe1.eq(btn1.i), btn1_pipe2.eq(btn1_pipe1)]
with m.If(btn1_pipe2):
m.d.sync += btn1_db.eq(65535)
with m.Else():
with m.If(btn1_db > 0):
m.d.sync += btn1_db.eq(btn1_db - 1)
with m.If(btn1_pipe2 & (btn1_db == 0)):
m.d.sync += running.eq(~running)
with m.If(running & (timer == 0)):
with m.If(self.dd0.i_num == 9):
m.d.sync += self.dd0.i_num.eq(0)
with m.If(self.dd1.i_num == 9):
m.d.sync += self.dd1.i_num.eq(0)
with m.Else():
m.d.sync += self.dd1.incr()
with m.Else():
m.d.sync += self.dd0.incr()
with m.If(timer[8]):
m.d.comb += [dig_sel.o.eq(0), disp.o.eq(self.dd1.o_disp)]
with m.Else():
m.d.comb += [dig_sel.o.eq(1), disp.o.eq(self.dd0.o_disp)]
return m
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SSDigitDecoder(Elaboratable):
def __init__(self):
self.i_num = Signal(4)
self.o_disp = Signal(7)
self.lut = {(0): 63, (1): 6, (2): 91, (3): 79, (4): 102, (5): 109,
(6): 125, (7): 7, (8): 127, (9): 103}
def incr(self):
return self.i_num.eq(self.i_num + 1)
def elaborate(self, platform):
m = Module()
with m.Switch(self.i_num):
for a, b in self.lut.items():
with m.Case(a):
m.d.comb += self.o_disp.eq(b)
return m
class Blinky(Elaboratable):
def __init__(self):
self.dd0 = SSDigitDecoder()
self.dd1 = SSDigitDecoder()
def elaborate(self, platform):
m = Module()
m.submodules.dd0 = self.dd0
m.submodules.dd1 = self.dd1
timer = Signal(20)
led = platform.request('led', 0)
btn = platform.request('button', 0)
btn1 = platform.request('button', 1)
dig_sel = platform.request('ss_dig_sel', 0)
disp = platform.request('ss_disp', 0)
m.d.sync += timer.eq(timer + 1)
m.d.comb += led.o.eq(timer[-1] & ~btn)
running = Signal(1)
"""
# naive btn
last_btn1 = Signal(1)
m.d.sync += last_btn1.eq(btn1.i)
with m.If(btn1.i & ~last_btn1):
m.d.sync += running.eq(~running)
"""
btn1_pipe1 = Signal(1)
btn1_pipe2 = Signal(1)
btn1_db = Signal(range(0, 65535))
m.d.sync += [btn1_pipe1.eq(btn1.i), btn1_pipe2.eq(btn1_pipe1)]
with m.If(btn1_pipe2):
m.d.sync += btn1_db.eq(65535)
with m.Else():
with m.If(btn1_db > 0):
m.d.sync += btn1_db.eq(btn1_db - 1)
with m.If(btn1_pipe2 & (btn1_db == 0)):
m.d.sync += running.eq(~running)
with m.If(running & (timer == 0)):
with m.If(self.dd0.i_num == 9):
m.d.sync += self.dd0.i_num.eq(0)
with m.If(self.dd1.i_num == 9):
m.d.sync += self.dd1.i_num.eq(0)
with m.Else():
m.d.sync += self.dd1.incr()
with m.Else():
m.d.sync += self.dd0.incr()
with m.If(timer[8]):
m.d.comb += [dig_sel.o.eq(0), disp.o.eq(self.dd1.o_disp)]
with m.Else():
m.d.comb += [dig_sel.o.eq(1), disp.o.eq(self.dd0.o_disp)]
return m
if __name__ == '__main__':
p = ICEBreakerPlatform()
p.add_resources(p.break_off_pmod)
p.add_resources([Resource('ss_dig_sel', 0, Pins('10', dir='o', conn=(
'pmod', 0)), Attrs(IO_STANDARD='SB_LVCMOS')), Resource('ss_disp', 0,
PinsN('1 2 3 4 7 8 9', dir='o', conn=('pmod', 0)), Attrs(
IO_STANDARD='SB_LVCMOS'))])
for r in p.resources:
print('r:', r)
p.build(Blinky(), do_program=False)
<|reserved_special_token_1|>
#!/usr/bin/env python3
from nmigen import *
from nmigen.build import *
from nmigen_boards.icebreaker import ICEBreakerPlatform
class SSDigitDecoder(Elaboratable):
def __init__(self):
self.i_num = Signal(4)
self.o_disp = Signal(7)
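        # digit -> 7-segment pattern (standard gfedcba order: bit 0 = segment a)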
self.lut = {
0: 0b011_1111,
1: 0b000_0110,
2: 0b101_1011,
3: 0b100_1111,
4: 0b110_0110,
5: 0b110_1101,
6: 0b111_1101,
7: 0b000_0111,
8: 0b111_1111,
9: 0b110_0111,
}
def incr(self):
return self.i_num.eq(self.i_num+1)
def elaborate(self, platform):
m = Module()
with m.Switch(self.i_num):
for a, b in self.lut.items():
with m.Case(a):
m.d.comb += self.o_disp.eq(b)
return m
class Blinky(Elaboratable):
def __init__(self):
self.dd0 = SSDigitDecoder()
self.dd1 = SSDigitDecoder()
def elaborate(self, platform):
m = Module()
m.submodules.dd0 = self.dd0
m.submodules.dd1 = self.dd1
timer = Signal(20)
led = platform.request('led', 0)
btn = platform.request('button', 0)
btn1 = platform.request('button', 1)
dig_sel = platform.request('ss_dig_sel', 0)
disp = platform.request('ss_disp', 0)
# blinky led
m.d.sync += timer.eq(timer+1)
m.d.comb += led.o.eq(timer[-1] & ~btn)
# 7 seg
running = Signal(1)
"""
# naive btn
last_btn1 = Signal(1)
m.d.sync += last_btn1.eq(btn1.i)
with m.If(btn1.i & ~last_btn1):
m.d.sync += running.eq(~running)
"""
btn1_pipe1 = Signal(1)
btn1_pipe2 = Signal(1)
btn1_db = Signal(range(0, 0xffff))
m.d.sync += [
btn1_pipe1.eq(btn1.i),
btn1_pipe2.eq(btn1_pipe1),
]
with m.If(btn1_pipe2):
m.d.sync += btn1_db.eq(0xffff)
with m.Else():
with m.If(btn1_db > 0):
m.d.sync += btn1_db.eq(btn1_db-1)
with m.If(btn1_pipe2 & (btn1_db == 0)):
m.d.sync += running.eq(~running)
with m.If(running & (timer == 0)):
with m.If(self.dd0.i_num == 9):
m.d.sync += self.dd0.i_num.eq(0)
with m.If(self.dd1.i_num == 9):
m.d.sync += self.dd1.i_num.eq(0)
with m.Else():
m.d.sync += self.dd1.incr()
with m.Else():
m.d.sync += self.dd0.incr()
with m.If(timer[8]):
m.d.comb += [
dig_sel.o.eq(0),
disp.o.eq(self.dd1.o_disp),
]
with m.Else():
m.d.comb += [
dig_sel.o.eq(1),
disp.o.eq(self.dd0.o_disp),
]
return m
if __name__ == '__main__':
p = ICEBreakerPlatform()
p.add_resources(p.break_off_pmod)
p.add_resources([
Resource('ss_dig_sel', 0,
Pins('10', dir='o', conn=('pmod', 0)),
Attrs(IO_STANDARD='SB_LVCMOS')),
Resource('ss_disp', 0,
PinsN('1 2 3 4 7 8 9', dir='o', conn=('pmod', 0)),
Attrs(IO_STANDARD='SB_LVCMOS')),
])
for r in p.resources:
print('r:', r)
p.build(Blinky(), do_program=False)
|
flexible
|
{
"blob_id": "74bb511a9ec272020693db65a2e708f3db56931e",
"index": 9954,
"step-1": "<mask token>\n\n\nclass SSDigitDecoder(Elaboratable):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Blinky(Elaboratable):\n\n def __init__(self):\n self.dd0 = SSDigitDecoder()\n self.dd1 = SSDigitDecoder()\n\n def elaborate(self, platform):\n m = Module()\n m.submodules.dd0 = self.dd0\n m.submodules.dd1 = self.dd1\n timer = Signal(20)\n led = platform.request('led', 0)\n btn = platform.request('button', 0)\n btn1 = platform.request('button', 1)\n dig_sel = platform.request('ss_dig_sel', 0)\n disp = platform.request('ss_disp', 0)\n m.d.sync += timer.eq(timer + 1)\n m.d.comb += led.o.eq(timer[-1] & ~btn)\n running = Signal(1)\n \"\"\"\n # naive btn\n last_btn1 = Signal(1)\n m.d.sync += last_btn1.eq(btn1.i)\n with m.If(btn1.i & ~last_btn1):\n m.d.sync += running.eq(~running)\n \"\"\"\n btn1_pipe1 = Signal(1)\n btn1_pipe2 = Signal(1)\n btn1_db = Signal(range(0, 65535))\n m.d.sync += [btn1_pipe1.eq(btn1.i), btn1_pipe2.eq(btn1_pipe1)]\n with m.If(btn1_pipe2):\n m.d.sync += btn1_db.eq(65535)\n with m.Else():\n with m.If(btn1_db > 0):\n m.d.sync += btn1_db.eq(btn1_db - 1)\n with m.If(btn1_pipe2 & (btn1_db == 0)):\n m.d.sync += running.eq(~running)\n with m.If(running & (timer == 0)):\n with m.If(self.dd0.i_num == 9):\n m.d.sync += self.dd0.i_num.eq(0)\n with m.If(self.dd1.i_num == 9):\n m.d.sync += self.dd1.i_num.eq(0)\n with m.Else():\n m.d.sync += self.dd1.incr()\n with m.Else():\n m.d.sync += self.dd0.incr()\n with m.If(timer[8]):\n m.d.comb += [dig_sel.o.eq(0), disp.o.eq(self.dd1.o_disp)]\n with m.Else():\n m.d.comb += [dig_sel.o.eq(1), disp.o.eq(self.dd0.o_disp)]\n return m\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SSDigitDecoder(Elaboratable):\n\n def __init__(self):\n self.i_num = Signal(4)\n self.o_disp = Signal(7)\n self.lut = {(0): 63, (1): 6, (2): 91, (3): 79, (4): 102, (5): 109,\n (6): 125, (7): 7, (8): 127, (9): 103}\n\n def incr(self):\n return self.i_num.eq(self.i_num + 1)\n <mask token>\n\n\nclass Blinky(Elaboratable):\n\n def __init__(self):\n self.dd0 = SSDigitDecoder()\n self.dd1 = SSDigitDecoder()\n\n def elaborate(self, platform):\n m = Module()\n m.submodules.dd0 = self.dd0\n m.submodules.dd1 = self.dd1\n timer = Signal(20)\n led = platform.request('led', 0)\n btn = platform.request('button', 0)\n btn1 = platform.request('button', 1)\n dig_sel = platform.request('ss_dig_sel', 0)\n disp = platform.request('ss_disp', 0)\n m.d.sync += timer.eq(timer + 1)\n m.d.comb += led.o.eq(timer[-1] & ~btn)\n running = Signal(1)\n \"\"\"\n # naive btn\n last_btn1 = Signal(1)\n m.d.sync += last_btn1.eq(btn1.i)\n with m.If(btn1.i & ~last_btn1):\n m.d.sync += running.eq(~running)\n \"\"\"\n btn1_pipe1 = Signal(1)\n btn1_pipe2 = Signal(1)\n btn1_db = Signal(range(0, 65535))\n m.d.sync += [btn1_pipe1.eq(btn1.i), btn1_pipe2.eq(btn1_pipe1)]\n with m.If(btn1_pipe2):\n m.d.sync += btn1_db.eq(65535)\n with m.Else():\n with m.If(btn1_db > 0):\n m.d.sync += btn1_db.eq(btn1_db - 1)\n with m.If(btn1_pipe2 & (btn1_db == 0)):\n m.d.sync += running.eq(~running)\n with m.If(running & (timer == 0)):\n with m.If(self.dd0.i_num == 9):\n m.d.sync += self.dd0.i_num.eq(0)\n with m.If(self.dd1.i_num == 9):\n m.d.sync += self.dd1.i_num.eq(0)\n with m.Else():\n m.d.sync += self.dd1.incr()\n with m.Else():\n m.d.sync += self.dd0.incr()\n with m.If(timer[8]):\n m.d.comb += [dig_sel.o.eq(0), disp.o.eq(self.dd1.o_disp)]\n with m.Else():\n m.d.comb += [dig_sel.o.eq(1), disp.o.eq(self.dd0.o_disp)]\n return m\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SSDigitDecoder(Elaboratable):\n\n def __init__(self):\n self.i_num = Signal(4)\n self.o_disp = Signal(7)\n self.lut = {(0): 63, (1): 6, (2): 91, (3): 79, (4): 102, (5): 109,\n (6): 125, (7): 7, (8): 127, (9): 103}\n\n def incr(self):\n return self.i_num.eq(self.i_num + 1)\n\n def elaborate(self, platform):\n m = Module()\n with m.Switch(self.i_num):\n for a, b in self.lut.items():\n with m.Case(a):\n m.d.comb += self.o_disp.eq(b)\n return m\n\n\nclass Blinky(Elaboratable):\n\n def __init__(self):\n self.dd0 = SSDigitDecoder()\n self.dd1 = SSDigitDecoder()\n\n def elaborate(self, platform):\n m = Module()\n m.submodules.dd0 = self.dd0\n m.submodules.dd1 = self.dd1\n timer = Signal(20)\n led = platform.request('led', 0)\n btn = platform.request('button', 0)\n btn1 = platform.request('button', 1)\n dig_sel = platform.request('ss_dig_sel', 0)\n disp = platform.request('ss_disp', 0)\n m.d.sync += timer.eq(timer + 1)\n m.d.comb += led.o.eq(timer[-1] & ~btn)\n running = Signal(1)\n \"\"\"\n # naive btn\n last_btn1 = Signal(1)\n m.d.sync += last_btn1.eq(btn1.i)\n with m.If(btn1.i & ~last_btn1):\n m.d.sync += running.eq(~running)\n \"\"\"\n btn1_pipe1 = Signal(1)\n btn1_pipe2 = Signal(1)\n btn1_db = Signal(range(0, 65535))\n m.d.sync += [btn1_pipe1.eq(btn1.i), btn1_pipe2.eq(btn1_pipe1)]\n with m.If(btn1_pipe2):\n m.d.sync += btn1_db.eq(65535)\n with m.Else():\n with m.If(btn1_db > 0):\n m.d.sync += btn1_db.eq(btn1_db - 1)\n with m.If(btn1_pipe2 & (btn1_db == 0)):\n m.d.sync += running.eq(~running)\n with m.If(running & (timer == 0)):\n with m.If(self.dd0.i_num == 9):\n m.d.sync += self.dd0.i_num.eq(0)\n with m.If(self.dd1.i_num == 9):\n m.d.sync += self.dd1.i_num.eq(0)\n with m.Else():\n m.d.sync += self.dd1.incr()\n with m.Else():\n m.d.sync += self.dd0.incr()\n with m.If(timer[8]):\n m.d.comb += [dig_sel.o.eq(0), disp.o.eq(self.dd1.o_disp)]\n with m.Else():\n m.d.comb += [dig_sel.o.eq(1), disp.o.eq(self.dd0.o_disp)]\n return m\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass SSDigitDecoder(Elaboratable):\n\n def __init__(self):\n self.i_num = Signal(4)\n self.o_disp = Signal(7)\n self.lut = {(0): 63, (1): 6, (2): 91, (3): 79, (4): 102, (5): 109,\n (6): 125, (7): 7, (8): 127, (9): 103}\n\n def incr(self):\n return self.i_num.eq(self.i_num + 1)\n\n def elaborate(self, platform):\n m = Module()\n with m.Switch(self.i_num):\n for a, b in self.lut.items():\n with m.Case(a):\n m.d.comb += self.o_disp.eq(b)\n return m\n\n\nclass Blinky(Elaboratable):\n\n def __init__(self):\n self.dd0 = SSDigitDecoder()\n self.dd1 = SSDigitDecoder()\n\n def elaborate(self, platform):\n m = Module()\n m.submodules.dd0 = self.dd0\n m.submodules.dd1 = self.dd1\n timer = Signal(20)\n led = platform.request('led', 0)\n btn = platform.request('button', 0)\n btn1 = platform.request('button', 1)\n dig_sel = platform.request('ss_dig_sel', 0)\n disp = platform.request('ss_disp', 0)\n m.d.sync += timer.eq(timer + 1)\n m.d.comb += led.o.eq(timer[-1] & ~btn)\n running = Signal(1)\n \"\"\"\n # naive btn\n last_btn1 = Signal(1)\n m.d.sync += last_btn1.eq(btn1.i)\n with m.If(btn1.i & ~last_btn1):\n m.d.sync += running.eq(~running)\n \"\"\"\n btn1_pipe1 = Signal(1)\n btn1_pipe2 = Signal(1)\n btn1_db = Signal(range(0, 65535))\n m.d.sync += [btn1_pipe1.eq(btn1.i), btn1_pipe2.eq(btn1_pipe1)]\n with m.If(btn1_pipe2):\n m.d.sync += btn1_db.eq(65535)\n with m.Else():\n with m.If(btn1_db > 0):\n m.d.sync += btn1_db.eq(btn1_db - 1)\n with m.If(btn1_pipe2 & (btn1_db == 0)):\n m.d.sync += running.eq(~running)\n with m.If(running & (timer == 0)):\n with m.If(self.dd0.i_num == 9):\n m.d.sync += self.dd0.i_num.eq(0)\n with m.If(self.dd1.i_num == 9):\n m.d.sync += self.dd1.i_num.eq(0)\n with m.Else():\n m.d.sync += self.dd1.incr()\n with m.Else():\n m.d.sync += self.dd0.incr()\n with m.If(timer[8]):\n m.d.comb += [dig_sel.o.eq(0), disp.o.eq(self.dd1.o_disp)]\n with m.Else():\n m.d.comb += [dig_sel.o.eq(1), disp.o.eq(self.dd0.o_disp)]\n return m\n\n\nif __name__ == '__main__':\n p = ICEBreakerPlatform()\n p.add_resources(p.break_off_pmod)\n p.add_resources([Resource('ss_dig_sel', 0, Pins('10', dir='o', conn=(\n 'pmod', 0)), Attrs(IO_STANDARD='SB_LVCMOS')), Resource('ss_disp', 0,\n PinsN('1 2 3 4 7 8 9', dir='o', conn=('pmod', 0)), Attrs(\n IO_STANDARD='SB_LVCMOS'))])\n for r in p.resources:\n print('r:', r)\n p.build(Blinky(), do_program=False)\n",
"step-5": "#!/usr/bin/env python3\n\nfrom nmigen import *\nfrom nmigen.build import *\nfrom nmigen_boards.icebreaker import ICEBreakerPlatform\n\nclass SSDigitDecoder(Elaboratable):\n def __init__(self):\n self.i_num = Signal(4)\n self.o_disp = Signal(7)\n self.lut = {\n 0: 0b011_1111,\n 1: 0b000_0110,\n 2: 0b101_1011,\n 3: 0b100_1111,\n 4: 0b110_0110,\n 5: 0b110_1101,\n 6: 0b111_1101,\n 7: 0b000_0111,\n 8: 0b111_1111,\n 9: 0b110_0111,\n }\n def incr(self):\n return self.i_num.eq(self.i_num+1)\n def elaborate(self, platform):\n m = Module()\n with m.Switch(self.i_num):\n for a, b in self.lut.items():\n with m.Case(a):\n m.d.comb += self.o_disp.eq(b)\n return m\n\nclass Blinky(Elaboratable):\n def __init__(self):\n self.dd0 = SSDigitDecoder()\n self.dd1 = SSDigitDecoder()\n def elaborate(self, platform):\n m = Module()\n m.submodules.dd0 = self.dd0\n m.submodules.dd1 = self.dd1\n\n timer = Signal(20)\n led = platform.request('led', 0)\n btn = platform.request('button', 0)\n btn1 = platform.request('button', 1)\n dig_sel = platform.request('ss_dig_sel', 0)\n disp = platform.request('ss_disp', 0)\n\n # blinky led\n m.d.sync += timer.eq(timer+1)\n m.d.comb += led.o.eq(timer[-1] & ~btn)\n\n # 7 seg\n running = Signal(1)\n \"\"\"\n # naive btn\n last_btn1 = Signal(1)\n m.d.sync += last_btn1.eq(btn1.i)\n with m.If(btn1.i & ~last_btn1):\n m.d.sync += running.eq(~running)\n \"\"\"\n btn1_pipe1 = Signal(1)\n btn1_pipe2 = Signal(1)\n btn1_db = Signal(range(0, 0xffff))\n m.d.sync += [\n btn1_pipe1.eq(btn1.i),\n btn1_pipe2.eq(btn1_pipe1),\n ]\n with m.If(btn1_pipe2):\n m.d.sync += btn1_db.eq(0xffff)\n with m.Else():\n with m.If(btn1_db > 0):\n m.d.sync += btn1_db.eq(btn1_db-1)\n with m.If(btn1_pipe2 & (btn1_db == 0)):\n m.d.sync += running.eq(~running)\n\n with m.If(running & (timer == 0)):\n with m.If(self.dd0.i_num == 9):\n m.d.sync += self.dd0.i_num.eq(0)\n with m.If(self.dd1.i_num == 9):\n m.d.sync += self.dd1.i_num.eq(0)\n with m.Else():\n m.d.sync += self.dd1.incr()\n with m.Else():\n m.d.sync += self.dd0.incr()\n with m.If(timer[8]):\n m.d.comb += [\n dig_sel.o.eq(0),\n disp.o.eq(self.dd1.o_disp),\n ]\n with m.Else():\n m.d.comb += [\n dig_sel.o.eq(1),\n disp.o.eq(self.dd0.o_disp),\n ]\n\n return m\n\nif __name__ == '__main__':\n p = ICEBreakerPlatform()\n p.add_resources(p.break_off_pmod)\n p.add_resources([\n Resource('ss_dig_sel', 0, \n Pins('10', dir='o', conn=('pmod', 0)),\n Attrs(IO_STANDARD='SB_LVCMOS')),\n Resource('ss_disp', 0, \n PinsN('1 2 3 4 7 8 9', dir='o', conn=('pmod', 0)),\n Attrs(IO_STANDARD='SB_LVCMOS')),\n ])\n for r in p.resources:\n print('r:', r)\n p.build(Blinky(), do_program=False)\n",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
import logging
from datetime import datetime
from preprocessing import death_preprocessing
from preprocessing_three_month import death_preprocessing_three_month
from death_rule_first_55 import death_rule_first_55
from death_rule_second import death_rule_second_new
from death_escalation import death_escalation
if __name__ == '__main__':
logging.basicConfig(filename='logfile.log', filemode='a', format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.INFO)
logging.info('Start of the mortality analysis algorithm')
start_time_ALL = datetime.now()
print('Start of the mortality analysis algorithm')
try:
print('The month is over. Start forming tasks ...')
# death_preprocessing(save_to_sql=True, save_to_excel=False)
death_preprocessing_three_month(save_to_sql=True, save_to_excel=False)
death_rule_first_55(save_to_sql=True, save_to_excel=True)
death_rule_second_new(save_to_sql=True, save_to_excel=True)
death_escalation(save_to_sql=True, save_to_excel=False)
print(f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}')
logging.info(f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}')
except Exception as e:
print('The execution of the mortality analysis algorithm was not completed due to an error')
logging.exception('Exception occurred')
logging.info('The execution of the mortality analysis algorithm was not completed due to an error')
|
normal
|
{
"blob_id": "f44a8837056eb77fbf0ff37b9c57891cc3a3d6b2",
"index": 6783,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n logging.basicConfig(filename='logfile.log', filemode='a', format=\n '%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)\n logging.info('Start of the mortality analysis algorithm')\n start_time_ALL = datetime.now()\n print('Start of the mortality analysis algorithm')\n try:\n print('The month is over. Start forming tasks ...')\n death_preprocessing_three_month(save_to_sql=True, save_to_excel=False)\n death_rule_first_55(save_to_sql=True, save_to_excel=True)\n death_rule_second_new(save_to_sql=True, save_to_excel=True)\n death_escalation(save_to_sql=True, save_to_excel=False)\n print(\n f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}'\n )\n logging.info(\n f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}'\n )\n except Exception as e:\n print(\n 'The execution of the mortality analysis algorithm was not completed due to an error'\n )\n logging.exception('Exception occurred')\n logging.info(\n 'The execution of the mortality analysis algorithm was not completed due to an error'\n )\n",
"step-3": "import logging\nfrom datetime import datetime\nfrom preprocessing import death_preprocessing\nfrom preprocessing_three_month import death_preprocessing_three_month\nfrom death_rule_first_55 import death_rule_first_55\nfrom death_rule_second import death_rule_second_new\nfrom death_escalation import death_escalation\nif __name__ == '__main__':\n logging.basicConfig(filename='logfile.log', filemode='a', format=\n '%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)\n logging.info('Start of the mortality analysis algorithm')\n start_time_ALL = datetime.now()\n print('Start of the mortality analysis algorithm')\n try:\n print('The month is over. Start forming tasks ...')\n death_preprocessing_three_month(save_to_sql=True, save_to_excel=False)\n death_rule_first_55(save_to_sql=True, save_to_excel=True)\n death_rule_second_new(save_to_sql=True, save_to_excel=True)\n death_escalation(save_to_sql=True, save_to_excel=False)\n print(\n f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}'\n )\n logging.info(\n f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}'\n )\n except Exception as e:\n print(\n 'The execution of the mortality analysis algorithm was not completed due to an error'\n )\n logging.exception('Exception occurred')\n logging.info(\n 'The execution of the mortality analysis algorithm was not completed due to an error'\n )\n",
"step-4": "import logging\nfrom datetime import datetime\n\nfrom preprocessing import death_preprocessing\nfrom preprocessing_three_month import death_preprocessing_three_month\nfrom death_rule_first_55 import death_rule_first_55\nfrom death_rule_second import death_rule_second_new\nfrom death_escalation import death_escalation\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename='logfile.log', filemode='a', format='%(asctime)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n logging.info('Start of the mortality analysis algorithm')\n start_time_ALL = datetime.now()\n print('Start of the mortality analysis algorithm')\n\n try:\n print('The month is over. Start forming tasks ...')\n # death_preprocessing(save_to_sql=True, save_to_excel=False)\n death_preprocessing_three_month(save_to_sql=True, save_to_excel=False)\n death_rule_first_55(save_to_sql=True, save_to_excel=True)\n death_rule_second_new(save_to_sql=True, save_to_excel=True)\n death_escalation(save_to_sql=True, save_to_excel=False)\n print(f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}')\n logging.info(f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}')\n\n except Exception as e:\n print('The execution of the mortality analysis algorithm was not completed due to an error')\n logging.exception('Exception occurred')\n logging.info('The execution of the mortality analysis algorithm was not completed due to an error')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
a=int(input("Choose a number: "))
for x in range(1,100000):
b=a*x;
print(x, '*', a,'=',b)
if b>100:
break
|
normal
|
{
"blob_id": "043dd97d4d4ade29536a83c3557a34db3a4cb0f9",
"index": 2002,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor x in range(1, 100000):\n b = a * x\n print(x, '*', a, '=', b)\n if b > 100:\n break\n",
"step-3": "a = int(input('Choose a number: '))\nfor x in range(1, 100000):\n b = a * x\n print(x, '*', a, '=', b)\n if b > 100:\n break\n",
"step-4": "a=int(input(\"Choose a number: \"))\nfor x in range(1,100000):\n b=a*x;\n print(x, '*', a,'=',b)\n if b>100:\n break\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class SchemathesisCase(PyCollector):
<|reserved_special_token_0|>
def _get_test_name(self, endpoint: Endpoint) ->str:
return f'{self.name}[{endpoint.method}:{endpoint.path}]'
def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:
"""Generate all items for the given endpoint.
Could produce more than one test item if
parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.
"""
try:
hypothesis_item = create_test(endpoint, self.test_function)
except InvalidSchema:
hypothesis_item = lambda : pytest.fail(
'Invalid schema for endpoint')
items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,
name=self._get_test_name(endpoint), obj=hypothesis_item)
for item in items:
item.obj = hypothesis_item
yield item
def collect(self) ->List[Function]:
"""Generate different test items for all endpoints available in the given schema."""
try:
return [item for endpoint in self.schemathesis_case.
get_all_endpoints() for item in self._gen_items(endpoint)]
except Exception:
pytest.fail('Error during collection')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any
) ->Optional['SchemathesisCase']:
"""Switch to a different collector if the test is parametrized marked by schemathesis."""
outcome = yield
if is_schemathesis_test(obj):
outcome.force_result(SchemathesisCase(obj, name, collector))
else:
outcome.get_result()
class SchemathesisCase(PyCollector):
def __init__(self, test_function: Callable, *args: Any, **kwargs: Any
) ->None:
self.test_function = test_function
self.schemathesis_case = test_function._schemathesis_test
super().__init__(*args, **kwargs)
def _get_test_name(self, endpoint: Endpoint) ->str:
return f'{self.name}[{endpoint.method}:{endpoint.path}]'
def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:
"""Generate all items for the given endpoint.
Could produce more than one test item if
parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.
"""
try:
hypothesis_item = create_test(endpoint, self.test_function)
except InvalidSchema:
hypothesis_item = lambda : pytest.fail(
'Invalid schema for endpoint')
items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,
name=self._get_test_name(endpoint), obj=hypothesis_item)
for item in items:
item.obj = hypothesis_item
yield item
def collect(self) ->List[Function]:
"""Generate different test items for all endpoints available in the given schema."""
try:
return [item for endpoint in self.schemathesis_case.
get_all_endpoints() for item in self._gen_items(endpoint)]
except Exception:
pytest.fail('Error during collection')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any
) ->Optional['SchemathesisCase']:
"""Switch to a different collector if the test is parametrized marked by schemathesis."""
outcome = yield
if is_schemathesis_test(obj):
outcome.force_result(SchemathesisCase(obj, name, collector))
else:
outcome.get_result()
class SchemathesisCase(PyCollector):
def __init__(self, test_function: Callable, *args: Any, **kwargs: Any
) ->None:
self.test_function = test_function
self.schemathesis_case = test_function._schemathesis_test
super().__init__(*args, **kwargs)
def _get_test_name(self, endpoint: Endpoint) ->str:
return f'{self.name}[{endpoint.method}:{endpoint.path}]'
def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:
"""Generate all items for the given endpoint.
Could produce more than one test item if
parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.
"""
try:
hypothesis_item = create_test(endpoint, self.test_function)
except InvalidSchema:
hypothesis_item = lambda : pytest.fail(
'Invalid schema for endpoint')
items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,
name=self._get_test_name(endpoint), obj=hypothesis_item)
for item in items:
item.obj = hypothesis_item
yield item
def collect(self) ->List[Function]:
"""Generate different test items for all endpoints available in the given schema."""
try:
return [item for endpoint in self.schemathesis_case.
get_all_endpoints() for item in self._gen_items(endpoint)]
except Exception:
pytest.fail('Error during collection')
@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
"""It is possible to have a Hypothesis exception in runtime.
For example - kwargs validation is failed for some strategy.
"""
outcome = yield
try:
outcome.get_result()
except InvalidArgument as exc:
pytest.fail(exc.args[0])
<|reserved_special_token_1|>
from typing import Any, Callable, Generator, List, Optional
import pytest
from _pytest import nodes
from _pytest.config import hookimpl
from _pytest.python import Function, PyCollector
from hypothesis.errors import InvalidArgument
from .._hypothesis import create_test
from ..exceptions import InvalidSchema
from ..models import Endpoint
from ..utils import is_schemathesis_test
@hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any
) ->Optional['SchemathesisCase']:
"""Switch to a different collector if the test is parametrized marked by schemathesis."""
outcome = yield
if is_schemathesis_test(obj):
outcome.force_result(SchemathesisCase(obj, name, collector))
else:
outcome.get_result()
class SchemathesisCase(PyCollector):
def __init__(self, test_function: Callable, *args: Any, **kwargs: Any
) ->None:
self.test_function = test_function
self.schemathesis_case = test_function._schemathesis_test
super().__init__(*args, **kwargs)
def _get_test_name(self, endpoint: Endpoint) ->str:
return f'{self.name}[{endpoint.method}:{endpoint.path}]'
def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:
"""Generate all items for the given endpoint.
Could produce more than one test item if
parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.
"""
try:
hypothesis_item = create_test(endpoint, self.test_function)
except InvalidSchema:
hypothesis_item = lambda : pytest.fail(
'Invalid schema for endpoint')
items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,
name=self._get_test_name(endpoint), obj=hypothesis_item)
for item in items:
item.obj = hypothesis_item
yield item
def collect(self) ->List[Function]:
"""Generate different test items for all endpoints available in the given schema."""
try:
return [item for endpoint in self.schemathesis_case.
get_all_endpoints() for item in self._gen_items(endpoint)]
except Exception:
pytest.fail('Error during collection')
@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
"""It is possible to have a Hypothesis exception in runtime.
For example - kwargs validation is failed for some strategy.
"""
outcome = yield
try:
outcome.get_result()
except InvalidArgument as exc:
pytest.fail(exc.args[0])
<|reserved_special_token_1|>
from typing import Any, Callable, Generator, List, Optional
import pytest
from _pytest import nodes
from _pytest.config import hookimpl
from _pytest.python import Function, PyCollector # type: ignore
from hypothesis.errors import InvalidArgument # pylint: disable=ungrouped-imports
from .._hypothesis import create_test
from ..exceptions import InvalidSchema
from ..models import Endpoint
from ..utils import is_schemathesis_test
@hookimpl(hookwrapper=True) # type:ignore # pragma: no mutate
def pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any) -> Optional["SchemathesisCase"]:
"""Switch to a different collector if the test is parametrized marked by schemathesis."""
outcome = yield
if is_schemathesis_test(obj):
outcome.force_result(SchemathesisCase(obj, name, collector))
else:
outcome.get_result()
class SchemathesisCase(PyCollector):
def __init__(self, test_function: Callable, *args: Any, **kwargs: Any) -> None:
self.test_function = test_function
self.schemathesis_case = test_function._schemathesis_test # type: ignore
super().__init__(*args, **kwargs)
def _get_test_name(self, endpoint: Endpoint) -> str:
return f"{self.name}[{endpoint.method}:{endpoint.path}]"
def _gen_items(self, endpoint: Endpoint) -> Generator[Function, None, None]:
"""Generate all items for the given endpoint.
Could produce more than one test item if
parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.
"""
try:
hypothesis_item = create_test(endpoint, self.test_function)
except InvalidSchema:
hypothesis_item = lambda: pytest.fail("Invalid schema for endpoint")
items = self.ihook.pytest_pycollect_makeitem(
collector=self.parent, name=self._get_test_name(endpoint), obj=hypothesis_item
)
for item in items:
item.obj = hypothesis_item
yield item
def collect(self) -> List[Function]: # type: ignore
"""Generate different test items for all endpoints available in the given schema."""
try:
return [
item for endpoint in self.schemathesis_case.get_all_endpoints() for item in self._gen_items(endpoint)
]
except Exception:
pytest.fail("Error during collection")
@hookimpl(hookwrapper=True) # pragma: no mutate
def pytest_pyfunc_call(pyfuncitem): # type:ignore
"""It is possible to have a Hypothesis exception in runtime.
For example - kwargs validation is failed for some strategy.
"""
outcome = yield
try:
outcome.get_result()
except InvalidArgument as exc:
pytest.fail(exc.args[0])
|
flexible
|
{
"blob_id": "2060f0af351c1487f8aa45943dbaa050f4291c58",
"index": 7791,
"step-1": "<mask token>\n\n\nclass SchemathesisCase(PyCollector):\n <mask token>\n\n def _get_test_name(self, endpoint: Endpoint) ->str:\n return f'{self.name}[{endpoint.method}:{endpoint.path}]'\n\n def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda : pytest.fail(\n 'Invalid schema for endpoint')\n items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,\n name=self._get_test_name(endpoint), obj=hypothesis_item)\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) ->List[Function]:\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [item for endpoint in self.schemathesis_case.\n get_all_endpoints() for item in self._gen_items(endpoint)]\n except Exception:\n pytest.fail('Error during collection')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any\n ) ->Optional['SchemathesisCase']:\n \"\"\"Switch to a different collector if the test is parametrized marked by schemathesis.\"\"\"\n outcome = yield\n if is_schemathesis_test(obj):\n outcome.force_result(SchemathesisCase(obj, name, collector))\n else:\n outcome.get_result()\n\n\nclass SchemathesisCase(PyCollector):\n\n def __init__(self, test_function: Callable, *args: Any, **kwargs: Any\n ) ->None:\n self.test_function = test_function\n self.schemathesis_case = test_function._schemathesis_test\n super().__init__(*args, **kwargs)\n\n def _get_test_name(self, endpoint: Endpoint) ->str:\n return f'{self.name}[{endpoint.method}:{endpoint.path}]'\n\n def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda : pytest.fail(\n 'Invalid schema for endpoint')\n items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,\n name=self._get_test_name(endpoint), obj=hypothesis_item)\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) ->List[Function]:\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [item for endpoint in self.schemathesis_case.\n get_all_endpoints() for item in self._gen_items(endpoint)]\n except Exception:\n pytest.fail('Error during collection')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any\n ) ->Optional['SchemathesisCase']:\n \"\"\"Switch to a different collector if the test is parametrized marked by schemathesis.\"\"\"\n outcome = yield\n if is_schemathesis_test(obj):\n outcome.force_result(SchemathesisCase(obj, name, collector))\n else:\n outcome.get_result()\n\n\nclass SchemathesisCase(PyCollector):\n\n def __init__(self, test_function: Callable, *args: Any, **kwargs: Any\n ) ->None:\n self.test_function = test_function\n self.schemathesis_case = test_function._schemathesis_test\n super().__init__(*args, **kwargs)\n\n def _get_test_name(self, endpoint: Endpoint) ->str:\n return f'{self.name}[{endpoint.method}:{endpoint.path}]'\n\n def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda : pytest.fail(\n 'Invalid schema for endpoint')\n items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,\n name=self._get_test_name(endpoint), obj=hypothesis_item)\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) ->List[Function]:\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [item for endpoint in self.schemathesis_case.\n get_all_endpoints() for item in self._gen_items(endpoint)]\n except Exception:\n pytest.fail('Error during collection')\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pyfunc_call(pyfuncitem):\n \"\"\"It is possible to have a Hypothesis exception in runtime.\n\n For example - kwargs validation is failed for some strategy.\n \"\"\"\n outcome = yield\n try:\n outcome.get_result()\n except InvalidArgument as exc:\n pytest.fail(exc.args[0])\n",
"step-4": "from typing import Any, Callable, Generator, List, Optional\nimport pytest\nfrom _pytest import nodes\nfrom _pytest.config import hookimpl\nfrom _pytest.python import Function, PyCollector\nfrom hypothesis.errors import InvalidArgument\nfrom .._hypothesis import create_test\nfrom ..exceptions import InvalidSchema\nfrom ..models import Endpoint\nfrom ..utils import is_schemathesis_test\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any\n ) ->Optional['SchemathesisCase']:\n \"\"\"Switch to a different collector if the test is parametrized marked by schemathesis.\"\"\"\n outcome = yield\n if is_schemathesis_test(obj):\n outcome.force_result(SchemathesisCase(obj, name, collector))\n else:\n outcome.get_result()\n\n\nclass SchemathesisCase(PyCollector):\n\n def __init__(self, test_function: Callable, *args: Any, **kwargs: Any\n ) ->None:\n self.test_function = test_function\n self.schemathesis_case = test_function._schemathesis_test\n super().__init__(*args, **kwargs)\n\n def _get_test_name(self, endpoint: Endpoint) ->str:\n return f'{self.name}[{endpoint.method}:{endpoint.path}]'\n\n def _gen_items(self, endpoint: Endpoint) ->Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda : pytest.fail(\n 'Invalid schema for endpoint')\n items = self.ihook.pytest_pycollect_makeitem(collector=self.parent,\n name=self._get_test_name(endpoint), obj=hypothesis_item)\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) ->List[Function]:\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [item for endpoint in self.schemathesis_case.\n get_all_endpoints() for item in self._gen_items(endpoint)]\n except Exception:\n pytest.fail('Error during collection')\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_pyfunc_call(pyfuncitem):\n \"\"\"It is possible to have a Hypothesis exception in runtime.\n\n For example - kwargs validation is failed for some strategy.\n \"\"\"\n outcome = yield\n try:\n outcome.get_result()\n except InvalidArgument as exc:\n pytest.fail(exc.args[0])\n",
"step-5": "from typing import Any, Callable, Generator, List, Optional\n\nimport pytest\nfrom _pytest import nodes\nfrom _pytest.config import hookimpl\nfrom _pytest.python import Function, PyCollector # type: ignore\nfrom hypothesis.errors import InvalidArgument # pylint: disable=ungrouped-imports\n\nfrom .._hypothesis import create_test\nfrom ..exceptions import InvalidSchema\nfrom ..models import Endpoint\nfrom ..utils import is_schemathesis_test\n\n\n@hookimpl(hookwrapper=True) # type:ignore # pragma: no mutate\ndef pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any) -> Optional[\"SchemathesisCase\"]:\n \"\"\"Switch to a different collector if the test is parametrized marked by schemathesis.\"\"\"\n outcome = yield\n if is_schemathesis_test(obj):\n outcome.force_result(SchemathesisCase(obj, name, collector))\n else:\n outcome.get_result()\n\n\nclass SchemathesisCase(PyCollector):\n def __init__(self, test_function: Callable, *args: Any, **kwargs: Any) -> None:\n self.test_function = test_function\n self.schemathesis_case = test_function._schemathesis_test # type: ignore\n super().__init__(*args, **kwargs)\n\n def _get_test_name(self, endpoint: Endpoint) -> str:\n return f\"{self.name}[{endpoint.method}:{endpoint.path}]\"\n\n def _gen_items(self, endpoint: Endpoint) -> Generator[Function, None, None]:\n \"\"\"Generate all items for the given endpoint.\n\n Could produce more than one test item if\n parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.\n \"\"\"\n try:\n hypothesis_item = create_test(endpoint, self.test_function)\n except InvalidSchema:\n hypothesis_item = lambda: pytest.fail(\"Invalid schema for endpoint\")\n items = self.ihook.pytest_pycollect_makeitem(\n collector=self.parent, name=self._get_test_name(endpoint), obj=hypothesis_item\n )\n for item in items:\n item.obj = hypothesis_item\n yield item\n\n def collect(self) -> List[Function]: # type: ignore\n \"\"\"Generate different test items for all endpoints available in the given schema.\"\"\"\n try:\n return [\n item for endpoint in self.schemathesis_case.get_all_endpoints() for item in self._gen_items(endpoint)\n ]\n except Exception:\n pytest.fail(\"Error during collection\")\n\n\n@hookimpl(hookwrapper=True) # pragma: no mutate\ndef pytest_pyfunc_call(pyfuncitem): # type:ignore\n \"\"\"It is possible to have a Hypothesis exception in runtime.\n\n For example - kwargs validation is failed for some strategy.\n \"\"\"\n outcome = yield\n try:\n outcome.get_result()\n except InvalidArgument as exc:\n pytest.fail(exc.args[0])\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class BureauActifCalendarDataType(db.Model, BaseModel):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BureauActifCalendarDataType(db.Model, BaseModel):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def to_json(self, ignore_fields=None, minimal=False):
if ignore_fields is None:
ignore_fields = []
return super().to_json(ignore_fields=ignore_fields)
@staticmethod
def create_defaults():
data = BureauActifCalendarDataType()
data.name = 'seating'
db.session.add(data)
data2 = BureauActifCalendarDataType()
data2.name = 'standing'
db.session.add(data2)
data3 = BureauActifCalendarDataType()
data3.name = 'positionChanges'
db.session.add(data3)
data4 = BureauActifCalendarDataType()
data4.name = 'absent'
db.session.add(data4)
db.session.commit()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BureauActifCalendarDataType(db.Model, BaseModel):
__tablename__ = 'ba_calendar_data_type'
id_calendar_data_type = db.Column(db.Integer, db.Sequence(
'id_calendar_data_type_sequence'), primary_key=True, autoincrement=True
)
name = db.Column(db.String, nullable=False)
def to_json(self, ignore_fields=None, minimal=False):
if ignore_fields is None:
ignore_fields = []
return super().to_json(ignore_fields=ignore_fields)
@staticmethod
def create_defaults():
data = BureauActifCalendarDataType()
data.name = 'seating'
db.session.add(data)
data2 = BureauActifCalendarDataType()
data2.name = 'standing'
db.session.add(data2)
data3 = BureauActifCalendarDataType()
data3.name = 'positionChanges'
db.session.add(data3)
data4 = BureauActifCalendarDataType()
data4.name = 'absent'
db.session.add(data4)
db.session.commit()
<|reserved_special_token_1|>
from services.BureauActif.libbureauactif.db.Base import db, BaseModel
class BureauActifCalendarDataType(db.Model, BaseModel):
__tablename__ = 'ba_calendar_data_type'
id_calendar_data_type = db.Column(db.Integer, db.Sequence(
'id_calendar_data_type_sequence'), primary_key=True, autoincrement=True
)
name = db.Column(db.String, nullable=False)
def to_json(self, ignore_fields=None, minimal=False):
if ignore_fields is None:
ignore_fields = []
return super().to_json(ignore_fields=ignore_fields)
@staticmethod
def create_defaults():
data = BureauActifCalendarDataType()
data.name = 'seating'
db.session.add(data)
data2 = BureauActifCalendarDataType()
data2.name = 'standing'
db.session.add(data2)
data3 = BureauActifCalendarDataType()
data3.name = 'positionChanges'
db.session.add(data3)
data4 = BureauActifCalendarDataType()
data4.name = 'absent'
db.session.add(data4)
db.session.commit()
<|reserved_special_token_1|>
from services.BureauActif.libbureauactif.db.Base import db, BaseModel
class BureauActifCalendarDataType(db.Model, BaseModel):
__tablename__ = "ba_calendar_data_type"
id_calendar_data_type = db.Column(db.Integer, db.Sequence('id_calendar_data_type_sequence'), primary_key=True,
autoincrement=True)
name = db.Column(db.String, nullable=False)
def to_json(self, ignore_fields=None, minimal=False):
if ignore_fields is None:
ignore_fields = []
return super().to_json(ignore_fields=ignore_fields)
@staticmethod
def create_defaults():
data = BureauActifCalendarDataType()
data.name = 'seating'
db.session.add(data)
data2 = BureauActifCalendarDataType()
data2.name = 'standing'
db.session.add(data2)
data3 = BureauActifCalendarDataType()
data3.name = 'positionChanges'
db.session.add(data3)
data4 = BureauActifCalendarDataType()
data4.name = 'absent'
db.session.add(data4)
db.session.commit()
|
flexible
|
{
"blob_id": "83117000f5f34490cb14580a9867b1e871ccc2ae",
"index": 526,
"step-1": "<mask token>\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n <mask token>\n <mask token>\n <mask token>\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n db.session.commit()\n",
"step-3": "<mask token>\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n __tablename__ = 'ba_calendar_data_type'\n id_calendar_data_type = db.Column(db.Integer, db.Sequence(\n 'id_calendar_data_type_sequence'), primary_key=True, autoincrement=True\n )\n name = db.Column(db.String, nullable=False)\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n db.session.commit()\n",
"step-4": "from services.BureauActif.libbureauactif.db.Base import db, BaseModel\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n __tablename__ = 'ba_calendar_data_type'\n id_calendar_data_type = db.Column(db.Integer, db.Sequence(\n 'id_calendar_data_type_sequence'), primary_key=True, autoincrement=True\n )\n name = db.Column(db.String, nullable=False)\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n db.session.commit()\n",
"step-5": "from services.BureauActif.libbureauactif.db.Base import db, BaseModel\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n __tablename__ = \"ba_calendar_data_type\"\n id_calendar_data_type = db.Column(db.Integer, db.Sequence('id_calendar_data_type_sequence'), primary_key=True,\n autoincrement=True)\n name = db.Column(db.String, nullable=False)\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n\n db.session.commit()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AccountsnConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AccountsnConfig(AppConfig):
name = 'accounts'
<|reserved_special_token_1|>
from django.apps import AppConfig
class AccountsnConfig(AppConfig):
name = 'accounts'
|
flexible
|
{
"blob_id": "a3fc624d6d101667ab11842eac96ed1b34d4317e",
"index": 3369,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass AccountsnConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AccountsnConfig(AppConfig):\n name = 'accounts'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass AccountsnConfig(AppConfig):\n name = 'accounts'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
from MdApi import MdApi
class Adapter(MdApi):
def __init__(self):
super(Adapter, self).__init__()
def connect(self):
self.createFtdcMdApi(os.getcwd())
self.registerFront('tcp://180.168.146.187:10010')
def onFrontConnected(self):
        print('front success')
if __name__ == '__main__':
adapter = Adapter()
adapter.connect()
|
normal
|
{
"blob_id": "0e58834120c34b5152026bde6d089be19244e21a",
"index": 269,
"step-1": "import os\n\nfrom MdApi import MdApi\n\nclass Adapter(MdApi):\n\n def __init__(self):\n \n super(Adapter, self).__init__()\n\n\n def connect(self):\n\n\n self.createFtdcMdApi(os.getcwd())\n\n self.registerFront('tcp://180.168.146.187:10010')\n\n\n def onFrontConnected(self):\n\n print 'front success'\n\n\nif __name__ == '__main__':\n\n adapter = Adapter()\n adapter.connect()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from clients.models import Budget
from clients.models import Spend
from datetime import date as datetimedate
from datetime import datetime
from datetime import timedelta
from django.db import models
from rest_framework.exceptions import ParseError
import math
import pandas as pd
class CampaignPerformance:
""" Get aggregated info about one campaign """
def __init__(self, campaign, start):
# Initial arguments
self.campaign = campaign
self.start = start
self.BUDGETS_NAME = 'Budgets'
self.required_ran = False
def get(self, filt=None):
""" Return data
filt: only return certain data (list)
"""
# Required functions
self.check_required()
# Filter output
results = {}
if filt is None:
filt = [
'daily_data', 'daily_diff', 'cum_diff',
'totals', 'info'
]
# Optional functions
# Prerequisits to multiple funcions
if 'daily_diff' in filt or 'cum_diff' in filt:
daily_diff = self.get_daily_diff()
if 'daily_data' in filt or 'daily_diff' in filt:
results['daily_index'] = self.daily_df.index
# Single functions
if 'daily_data' in filt:
results['daily_data'] = self.daily_df.to_dict('list')
if 'daily_diff' in filt:
results['daily_diff'] = daily_diff
if 'totals' in filt:
results['totals'] = self.get_totals()
if 'info' in filt:
results['info'] = self.get_info()
if 'cum_diff' in filt:
results['cum_diff'] = self.get_cum_diff(daily_diff)
# results['recommend'] = {'spend_per_day', 'spend_diff(spend per day vs avg_past_spend_per_day)'}
print(results)
return results
def _get_start_date(self):
""" self.start = week, month, quarter, year, all, or %Y-%m-%d date
"""
today = datetimedate.today()
if self.start == 'week':
start_date = today - timedelta(days=today.weekday())
elif self.start == 'month':
start_date = today.replace(day=1)
elif self.start == 'quarter':
quarter = math.ceil(today.month / 3)
start_date = datetimedate(
today.year,
((quarter - 1) * 3) + 1,
1
)
elif self.start == 'year':
start_date = datetimedate(today.year, 1, 1)
elif self.start == 'all':
start_date = datetimedate(2010, 1, 1)
else:
try:
start_date = datetime.strptime(self.start, "%Y-%m-%d").date()
except Exception as e:
raise ParseError("start argument not valid")
self.start_date = start_date
def _get_querysets(self):
# GET SPEND
# Only for same client as campaign
spend = Spend.objects.filter(platform__client=self.campaign.client)
# Only for same platforms as campaign
spend = spend.filter(
platform__pk__in=(
self.campaign.platforms.values_list('pk', flat=True)
)
)
# Only where spend end_date >= start_date
spend = spend.filter(end_date__gte=self.start_date)
# Apply regex filter to spend if provided by campaign
if self.campaign.name_filter:
spend = spend.filter(name__iregex=self.campaign.name_filter)
# GET BUDGETS
budgets = self.campaign.budget_set
# Only where budget end_date >= start_date
budgets = budgets.filter(end_date__gte=self.start_date)
# SAVE
self.spend = spend
self.budgets = budgets
def _convert_to_daily_df(self):
daily = {}
for each in self.budgets:
# Calculate amount per day
days = (each.end_date - each.start_date).days + 1
daily_amount = each.amount / days
for i in range(days):
day = each.start_date + timedelta(days=i)
if day < self.start_date:
continue
daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount
for each in self.spend:
name = each.platform.name
if name == self.BUDGETS_NAME:
name = f'{self.BUDGETS_NAME} (spend)'
days = (each.end_date - each.start_date).days + 1
daily_amount = each.amount / days
for i in range(days):
day = each.start_date + timedelta(days=i)
if day < self.start_date:
continue
dayspend = daily.setdefault(name, {}).setdefault(day, 0)
daily[name][day] = dayspend + daily_amount
df = pd.DataFrame(daily)
# Change datetime dates to string and fillNA for later json
df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]
df.fillna(0, inplace=True)
self.daily_df = df
def _convert_spend_currency(self):
if self.spend.count() > 0:
spend_cur = list(set(
self.spend.values_list('currency', flat=True)
))
if spend_cur != [self.campaign.currency]:
raise NotImplementedError(
"Currency converting not implemented, make sure budgets "
"and spends are in the same currency"
)
# Convert spend to list so that we can alter change currency
self.spend = list(self.spend)
else:
self.spend = []
def _get_budget_spend_series(self):
try:
self.budget_series = self.daily_df[self.BUDGETS_NAME]
except KeyError:
self.budget_series = pd.Series()
self.spend_series = (
self.daily_df
.drop(self.BUDGETS_NAME, axis=1, errors='ignore')
.sum(axis=1)
)
def check_required(self):
""" Functions needed for any of the public methods to work """
if not self.required_ran:
self._get_start_date()
self._get_querysets()
self._convert_spend_currency()
self._convert_to_daily_df()
self._get_budget_spend_series()
self.required_ran = True
def get_daily_diff(self):
self.check_required()
res = self.budget_series - self.spend_series
res.fillna(0, inplace=True)
return res
def get_cum_diff(self, daily_diff):
self.check_required()
return daily_diff.cumsum()
def get_totals(self):
self.check_required()
spend_sum = self.spend_series.sum()
budget_sum = self.budget_series.sum()
spend_days = self.spend_series.count()
budget_days = self.budget_series.count()
diff = budget_sum - spend_sum
totals = {
'spend': spend_sum,
'budget': budget_sum,
'avg_spend_per_day': (
spend_sum / spend_days
),
'avg_budget_per_day': (
budget_sum / budget_days
),
'diff': diff,
'avg_diff_per_day': diff / spend_days
}
for each in totals:
if pd.isnull(totals[each]):
totals[each] = 0
return totals
def get_info(self):
info = {
'last_spend': self.spend_series.dropna().index[-1]
}
return info
|
normal
|
{
"blob_id": "a860e6670719a733e75c7580cf2e07765b0777eb",
"index": 2806,
"step-1": "<mask token>\n\n\nclass CampaignPerformance:\n <mask token>\n\n def __init__(self, campaign, start):\n self.campaign = campaign\n self.start = start\n self.BUDGETS_NAME = 'Budgets'\n self.required_ran = False\n <mask token>\n\n def _get_start_date(self):\n \"\"\" self.start = week, month, quarter, year, all, or %Y-%m-%d date\n \"\"\"\n today = datetimedate.today()\n if self.start == 'week':\n start_date = today - timedelta(days=today.weekday())\n elif self.start == 'month':\n start_date = today.replace(day=1)\n elif self.start == 'quarter':\n quarter = math.ceil(today.month / 3)\n start_date = datetimedate(today.year, (quarter - 1) * 3 + 1, 1)\n elif self.start == 'year':\n start_date = datetimedate(today.year, 1, 1)\n elif self.start == 'all':\n start_date = datetimedate(2010, 1, 1)\n else:\n try:\n start_date = datetime.strptime(self.start, '%Y-%m-%d').date()\n except Exception as e:\n raise ParseError('start argument not valid')\n self.start_date = start_date\n\n def _get_querysets(self):\n spend = Spend.objects.filter(platform__client=self.campaign.client)\n spend = spend.filter(platform__pk__in=self.campaign.platforms.\n values_list('pk', flat=True))\n spend = spend.filter(end_date__gte=self.start_date)\n if self.campaign.name_filter:\n spend = spend.filter(name__iregex=self.campaign.name_filter)\n budgets = self.campaign.budget_set\n budgets = budgets.filter(end_date__gte=self.start_date)\n self.spend = spend\n self.budgets = budgets\n\n def _convert_to_daily_df(self):\n daily = {}\n for each in self.budgets:\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount\n for each in self.spend:\n name = each.platform.name\n if name == self.BUDGETS_NAME:\n name = f'{self.BUDGETS_NAME} (spend)'\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n dayspend = daily.setdefault(name, {}).setdefault(day, 0)\n daily[name][day] = dayspend + daily_amount\n df = pd.DataFrame(daily)\n df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]\n df.fillna(0, inplace=True)\n self.daily_df = df\n <mask token>\n\n def _get_budget_spend_series(self):\n try:\n self.budget_series = self.daily_df[self.BUDGETS_NAME]\n except KeyError:\n self.budget_series = pd.Series()\n self.spend_series = self.daily_df.drop(self.BUDGETS_NAME, axis=1,\n errors='ignore').sum(axis=1)\n <mask token>\n\n def get_daily_diff(self):\n self.check_required()\n res = self.budget_series - self.spend_series\n res.fillna(0, inplace=True)\n return res\n <mask token>\n\n def get_totals(self):\n self.check_required()\n spend_sum = self.spend_series.sum()\n budget_sum = self.budget_series.sum()\n spend_days = self.spend_series.count()\n budget_days = self.budget_series.count()\n diff = budget_sum - spend_sum\n totals = {'spend': spend_sum, 'budget': budget_sum,\n 'avg_spend_per_day': spend_sum / spend_days,\n 'avg_budget_per_day': budget_sum / budget_days, 'diff': diff,\n 'avg_diff_per_day': diff / spend_days}\n for each in totals:\n if pd.isnull(totals[each]):\n totals[each] = 0\n return totals\n\n def get_info(self):\n info = {'last_spend': self.spend_series.dropna().index[-1]}\n return info\n",
"step-2": "<mask token>\n\n\nclass CampaignPerformance:\n <mask token>\n\n def __init__(self, campaign, start):\n self.campaign = campaign\n self.start = start\n self.BUDGETS_NAME = 'Budgets'\n self.required_ran = False\n\n def get(self, filt=None):\n \"\"\" Return data\n filt: only return certain data (list)\n \"\"\"\n self.check_required()\n results = {}\n if filt is None:\n filt = ['daily_data', 'daily_diff', 'cum_diff', 'totals', 'info']\n if 'daily_diff' in filt or 'cum_diff' in filt:\n daily_diff = self.get_daily_diff()\n if 'daily_data' in filt or 'daily_diff' in filt:\n results['daily_index'] = self.daily_df.index\n if 'daily_data' in filt:\n results['daily_data'] = self.daily_df.to_dict('list')\n if 'daily_diff' in filt:\n results['daily_diff'] = daily_diff\n if 'totals' in filt:\n results['totals'] = self.get_totals()\n if 'info' in filt:\n results['info'] = self.get_info()\n if 'cum_diff' in filt:\n results['cum_diff'] = self.get_cum_diff(daily_diff)\n print(results)\n return results\n\n def _get_start_date(self):\n \"\"\" self.start = week, month, quarter, year, all, or %Y-%m-%d date\n \"\"\"\n today = datetimedate.today()\n if self.start == 'week':\n start_date = today - timedelta(days=today.weekday())\n elif self.start == 'month':\n start_date = today.replace(day=1)\n elif self.start == 'quarter':\n quarter = math.ceil(today.month / 3)\n start_date = datetimedate(today.year, (quarter - 1) * 3 + 1, 1)\n elif self.start == 'year':\n start_date = datetimedate(today.year, 1, 1)\n elif self.start == 'all':\n start_date = datetimedate(2010, 1, 1)\n else:\n try:\n start_date = datetime.strptime(self.start, '%Y-%m-%d').date()\n except Exception as e:\n raise ParseError('start argument not valid')\n self.start_date = start_date\n\n def _get_querysets(self):\n spend = Spend.objects.filter(platform__client=self.campaign.client)\n spend = spend.filter(platform__pk__in=self.campaign.platforms.\n values_list('pk', flat=True))\n spend = spend.filter(end_date__gte=self.start_date)\n if self.campaign.name_filter:\n spend = spend.filter(name__iregex=self.campaign.name_filter)\n budgets = self.campaign.budget_set\n budgets = budgets.filter(end_date__gte=self.start_date)\n self.spend = spend\n self.budgets = budgets\n\n def _convert_to_daily_df(self):\n daily = {}\n for each in self.budgets:\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount\n for each in self.spend:\n name = each.platform.name\n if name == self.BUDGETS_NAME:\n name = f'{self.BUDGETS_NAME} (spend)'\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n dayspend = daily.setdefault(name, {}).setdefault(day, 0)\n daily[name][day] = dayspend + daily_amount\n df = pd.DataFrame(daily)\n df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]\n df.fillna(0, inplace=True)\n self.daily_df = df\n\n def _convert_spend_currency(self):\n if self.spend.count() > 0:\n spend_cur = list(set(self.spend.values_list('currency', flat=True))\n )\n if spend_cur != [self.campaign.currency]:\n raise NotImplementedError(\n 'Currency converting not implemented, make sure budgets and spends are in the same currency'\n )\n self.spend = list(self.spend)\n else:\n self.spend = []\n\n def 
_get_budget_spend_series(self):\n try:\n self.budget_series = self.daily_df[self.BUDGETS_NAME]\n except KeyError:\n self.budget_series = pd.Series()\n self.spend_series = self.daily_df.drop(self.BUDGETS_NAME, axis=1,\n errors='ignore').sum(axis=1)\n\n def check_required(self):\n \"\"\" Functions needed for any of the public methods to work \"\"\"\n if not self.required_ran:\n self._get_start_date()\n self._get_querysets()\n self._convert_spend_currency()\n self._convert_to_daily_df()\n self._get_budget_spend_series()\n self.required_ran = True\n\n def get_daily_diff(self):\n self.check_required()\n res = self.budget_series - self.spend_series\n res.fillna(0, inplace=True)\n return res\n <mask token>\n\n def get_totals(self):\n self.check_required()\n spend_sum = self.spend_series.sum()\n budget_sum = self.budget_series.sum()\n spend_days = self.spend_series.count()\n budget_days = self.budget_series.count()\n diff = budget_sum - spend_sum\n totals = {'spend': spend_sum, 'budget': budget_sum,\n 'avg_spend_per_day': spend_sum / spend_days,\n 'avg_budget_per_day': budget_sum / budget_days, 'diff': diff,\n 'avg_diff_per_day': diff / spend_days}\n for each in totals:\n if pd.isnull(totals[each]):\n totals[each] = 0\n return totals\n\n def get_info(self):\n info = {'last_spend': self.spend_series.dropna().index[-1]}\n return info\n",
"step-3": "<mask token>\n\n\nclass CampaignPerformance:\n <mask token>\n\n def __init__(self, campaign, start):\n self.campaign = campaign\n self.start = start\n self.BUDGETS_NAME = 'Budgets'\n self.required_ran = False\n\n def get(self, filt=None):\n \"\"\" Return data\n filt: only return certain data (list)\n \"\"\"\n self.check_required()\n results = {}\n if filt is None:\n filt = ['daily_data', 'daily_diff', 'cum_diff', 'totals', 'info']\n if 'daily_diff' in filt or 'cum_diff' in filt:\n daily_diff = self.get_daily_diff()\n if 'daily_data' in filt or 'daily_diff' in filt:\n results['daily_index'] = self.daily_df.index\n if 'daily_data' in filt:\n results['daily_data'] = self.daily_df.to_dict('list')\n if 'daily_diff' in filt:\n results['daily_diff'] = daily_diff\n if 'totals' in filt:\n results['totals'] = self.get_totals()\n if 'info' in filt:\n results['info'] = self.get_info()\n if 'cum_diff' in filt:\n results['cum_diff'] = self.get_cum_diff(daily_diff)\n print(results)\n return results\n\n def _get_start_date(self):\n \"\"\" self.start = week, month, quarter, year, all, or %Y-%m-%d date\n \"\"\"\n today = datetimedate.today()\n if self.start == 'week':\n start_date = today - timedelta(days=today.weekday())\n elif self.start == 'month':\n start_date = today.replace(day=1)\n elif self.start == 'quarter':\n quarter = math.ceil(today.month / 3)\n start_date = datetimedate(today.year, (quarter - 1) * 3 + 1, 1)\n elif self.start == 'year':\n start_date = datetimedate(today.year, 1, 1)\n elif self.start == 'all':\n start_date = datetimedate(2010, 1, 1)\n else:\n try:\n start_date = datetime.strptime(self.start, '%Y-%m-%d').date()\n except Exception as e:\n raise ParseError('start argument not valid')\n self.start_date = start_date\n\n def _get_querysets(self):\n spend = Spend.objects.filter(platform__client=self.campaign.client)\n spend = spend.filter(platform__pk__in=self.campaign.platforms.\n values_list('pk', flat=True))\n spend = spend.filter(end_date__gte=self.start_date)\n if self.campaign.name_filter:\n spend = spend.filter(name__iregex=self.campaign.name_filter)\n budgets = self.campaign.budget_set\n budgets = budgets.filter(end_date__gte=self.start_date)\n self.spend = spend\n self.budgets = budgets\n\n def _convert_to_daily_df(self):\n daily = {}\n for each in self.budgets:\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount\n for each in self.spend:\n name = each.platform.name\n if name == self.BUDGETS_NAME:\n name = f'{self.BUDGETS_NAME} (spend)'\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n dayspend = daily.setdefault(name, {}).setdefault(day, 0)\n daily[name][day] = dayspend + daily_amount\n df = pd.DataFrame(daily)\n df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]\n df.fillna(0, inplace=True)\n self.daily_df = df\n\n def _convert_spend_currency(self):\n if self.spend.count() > 0:\n spend_cur = list(set(self.spend.values_list('currency', flat=True))\n )\n if spend_cur != [self.campaign.currency]:\n raise NotImplementedError(\n 'Currency converting not implemented, make sure budgets and spends are in the same currency'\n )\n self.spend = list(self.spend)\n else:\n self.spend = []\n\n def 
_get_budget_spend_series(self):\n try:\n self.budget_series = self.daily_df[self.BUDGETS_NAME]\n except KeyError:\n self.budget_series = pd.Series()\n self.spend_series = self.daily_df.drop(self.BUDGETS_NAME, axis=1,\n errors='ignore').sum(axis=1)\n\n def check_required(self):\n \"\"\" Functions needed for any of the public methods to work \"\"\"\n if not self.required_ran:\n self._get_start_date()\n self._get_querysets()\n self._convert_spend_currency()\n self._convert_to_daily_df()\n self._get_budget_spend_series()\n self.required_ran = True\n\n def get_daily_diff(self):\n self.check_required()\n res = self.budget_series - self.spend_series\n res.fillna(0, inplace=True)\n return res\n\n def get_cum_diff(self, daily_diff):\n self.check_required()\n return daily_diff.cumsum()\n\n def get_totals(self):\n self.check_required()\n spend_sum = self.spend_series.sum()\n budget_sum = self.budget_series.sum()\n spend_days = self.spend_series.count()\n budget_days = self.budget_series.count()\n diff = budget_sum - spend_sum\n totals = {'spend': spend_sum, 'budget': budget_sum,\n 'avg_spend_per_day': spend_sum / spend_days,\n 'avg_budget_per_day': budget_sum / budget_days, 'diff': diff,\n 'avg_diff_per_day': diff / spend_days}\n for each in totals:\n if pd.isnull(totals[each]):\n totals[each] = 0\n return totals\n\n def get_info(self):\n info = {'last_spend': self.spend_series.dropna().index[-1]}\n return info\n",
"step-4": "from clients.models import Budget\nfrom clients.models import Spend\nfrom datetime import date as datetimedate\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom django.db import models\nfrom rest_framework.exceptions import ParseError\nimport math\nimport pandas as pd\n\n\nclass CampaignPerformance:\n \"\"\" Get aggregated info about one campaign \"\"\"\n\n def __init__(self, campaign, start):\n self.campaign = campaign\n self.start = start\n self.BUDGETS_NAME = 'Budgets'\n self.required_ran = False\n\n def get(self, filt=None):\n \"\"\" Return data\n filt: only return certain data (list)\n \"\"\"\n self.check_required()\n results = {}\n if filt is None:\n filt = ['daily_data', 'daily_diff', 'cum_diff', 'totals', 'info']\n if 'daily_diff' in filt or 'cum_diff' in filt:\n daily_diff = self.get_daily_diff()\n if 'daily_data' in filt or 'daily_diff' in filt:\n results['daily_index'] = self.daily_df.index\n if 'daily_data' in filt:\n results['daily_data'] = self.daily_df.to_dict('list')\n if 'daily_diff' in filt:\n results['daily_diff'] = daily_diff\n if 'totals' in filt:\n results['totals'] = self.get_totals()\n if 'info' in filt:\n results['info'] = self.get_info()\n if 'cum_diff' in filt:\n results['cum_diff'] = self.get_cum_diff(daily_diff)\n print(results)\n return results\n\n def _get_start_date(self):\n \"\"\" self.start = week, month, quarter, year, all, or %Y-%m-%d date\n \"\"\"\n today = datetimedate.today()\n if self.start == 'week':\n start_date = today - timedelta(days=today.weekday())\n elif self.start == 'month':\n start_date = today.replace(day=1)\n elif self.start == 'quarter':\n quarter = math.ceil(today.month / 3)\n start_date = datetimedate(today.year, (quarter - 1) * 3 + 1, 1)\n elif self.start == 'year':\n start_date = datetimedate(today.year, 1, 1)\n elif self.start == 'all':\n start_date = datetimedate(2010, 1, 1)\n else:\n try:\n start_date = datetime.strptime(self.start, '%Y-%m-%d').date()\n except Exception as e:\n raise ParseError('start argument not valid')\n self.start_date = start_date\n\n def _get_querysets(self):\n spend = Spend.objects.filter(platform__client=self.campaign.client)\n spend = spend.filter(platform__pk__in=self.campaign.platforms.\n values_list('pk', flat=True))\n spend = spend.filter(end_date__gte=self.start_date)\n if self.campaign.name_filter:\n spend = spend.filter(name__iregex=self.campaign.name_filter)\n budgets = self.campaign.budget_set\n budgets = budgets.filter(end_date__gte=self.start_date)\n self.spend = spend\n self.budgets = budgets\n\n def _convert_to_daily_df(self):\n daily = {}\n for each in self.budgets:\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount\n for each in self.spend:\n name = each.platform.name\n if name == self.BUDGETS_NAME:\n name = f'{self.BUDGETS_NAME} (spend)'\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n dayspend = daily.setdefault(name, {}).setdefault(day, 0)\n daily[name][day] = dayspend + daily_amount\n df = pd.DataFrame(daily)\n df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]\n df.fillna(0, inplace=True)\n self.daily_df = df\n\n def _convert_spend_currency(self):\n if self.spend.count() > 0:\n spend_cur = 
list(set(self.spend.values_list('currency', flat=True))\n )\n if spend_cur != [self.campaign.currency]:\n raise NotImplementedError(\n 'Currency converting not implemented, make sure budgets and spends are in the same currency'\n )\n self.spend = list(self.spend)\n else:\n self.spend = []\n\n def _get_budget_spend_series(self):\n try:\n self.budget_series = self.daily_df[self.BUDGETS_NAME]\n except KeyError:\n self.budget_series = pd.Series()\n self.spend_series = self.daily_df.drop(self.BUDGETS_NAME, axis=1,\n errors='ignore').sum(axis=1)\n\n def check_required(self):\n \"\"\" Functions needed for any of the public methods to work \"\"\"\n if not self.required_ran:\n self._get_start_date()\n self._get_querysets()\n self._convert_spend_currency()\n self._convert_to_daily_df()\n self._get_budget_spend_series()\n self.required_ran = True\n\n def get_daily_diff(self):\n self.check_required()\n res = self.budget_series - self.spend_series\n res.fillna(0, inplace=True)\n return res\n\n def get_cum_diff(self, daily_diff):\n self.check_required()\n return daily_diff.cumsum()\n\n def get_totals(self):\n self.check_required()\n spend_sum = self.spend_series.sum()\n budget_sum = self.budget_series.sum()\n spend_days = self.spend_series.count()\n budget_days = self.budget_series.count()\n diff = budget_sum - spend_sum\n totals = {'spend': spend_sum, 'budget': budget_sum,\n 'avg_spend_per_day': spend_sum / spend_days,\n 'avg_budget_per_day': budget_sum / budget_days, 'diff': diff,\n 'avg_diff_per_day': diff / spend_days}\n for each in totals:\n if pd.isnull(totals[each]):\n totals[each] = 0\n return totals\n\n def get_info(self):\n info = {'last_spend': self.spend_series.dropna().index[-1]}\n return info\n",
"step-5": "from clients.models import Budget\nfrom clients.models import Spend\nfrom datetime import date as datetimedate\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom django.db import models\nfrom rest_framework.exceptions import ParseError\nimport math\nimport pandas as pd\n\n\nclass CampaignPerformance:\n \"\"\" Get aggregated info about one campaign \"\"\"\n def __init__(self, campaign, start):\n # Initial arguments\n self.campaign = campaign\n self.start = start\n\n self.BUDGETS_NAME = 'Budgets'\n\n self.required_ran = False\n\n def get(self, filt=None):\n \"\"\" Return data\n filt: only return certain data (list)\n \"\"\"\n # Required functions\n self.check_required()\n\n # Filter output\n results = {}\n if filt is None:\n filt = [\n 'daily_data', 'daily_diff', 'cum_diff',\n 'totals', 'info'\n ]\n\n # Optional functions\n # Prerequisits to multiple funcions\n if 'daily_diff' in filt or 'cum_diff' in filt:\n daily_diff = self.get_daily_diff()\n if 'daily_data' in filt or 'daily_diff' in filt:\n results['daily_index'] = self.daily_df.index\n\n # Single functions\n if 'daily_data' in filt:\n results['daily_data'] = self.daily_df.to_dict('list')\n if 'daily_diff' in filt:\n results['daily_diff'] = daily_diff\n if 'totals' in filt:\n results['totals'] = self.get_totals()\n if 'info' in filt:\n results['info'] = self.get_info()\n if 'cum_diff' in filt:\n results['cum_diff'] = self.get_cum_diff(daily_diff)\n # results['recommend'] = {'spend_per_day', 'spend_diff(spend per day vs avg_past_spend_per_day)'}\n\n print(results)\n return results\n\n def _get_start_date(self):\n \"\"\" self.start = week, month, quarter, year, all, or %Y-%m-%d date\n \"\"\"\n today = datetimedate.today()\n if self.start == 'week':\n start_date = today - timedelta(days=today.weekday())\n elif self.start == 'month':\n start_date = today.replace(day=1)\n elif self.start == 'quarter':\n quarter = math.ceil(today.month / 3)\n start_date = datetimedate(\n today.year,\n ((quarter - 1) * 3) + 1,\n 1\n )\n elif self.start == 'year':\n start_date = datetimedate(today.year, 1, 1)\n elif self.start == 'all':\n start_date = datetimedate(2010, 1, 1)\n else:\n try:\n start_date = datetime.strptime(self.start, \"%Y-%m-%d\").date()\n except Exception as e:\n raise ParseError(\"start argument not valid\")\n\n self.start_date = start_date\n\n def _get_querysets(self):\n # GET SPEND\n # Only for same client as campaign\n spend = Spend.objects.filter(platform__client=self.campaign.client)\n # Only for same platforms as campaign\n spend = spend.filter(\n platform__pk__in=(\n self.campaign.platforms.values_list('pk', flat=True)\n )\n )\n # Only where spend end_date >= start_date\n spend = spend.filter(end_date__gte=self.start_date)\n # Apply regex filter to spend if provided by campaign\n if self.campaign.name_filter:\n spend = spend.filter(name__iregex=self.campaign.name_filter)\n\n # GET BUDGETS\n budgets = self.campaign.budget_set\n # Only where budget end_date >= start_date\n budgets = budgets.filter(end_date__gte=self.start_date)\n\n # SAVE\n self.spend = spend\n self.budgets = budgets\n\n def _convert_to_daily_df(self):\n daily = {}\n for each in self.budgets:\n # Calculate amount per day\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount\n\n for each in self.spend:\n name = each.platform.name\n if name == 
self.BUDGETS_NAME:\n name = f'{self.BUDGETS_NAME} (spend)'\n days = (each.end_date - each.start_date).days + 1\n daily_amount = each.amount / days\n for i in range(days):\n day = each.start_date + timedelta(days=i)\n if day < self.start_date:\n continue\n dayspend = daily.setdefault(name, {}).setdefault(day, 0)\n daily[name][day] = dayspend + daily_amount\n\n df = pd.DataFrame(daily)\n # Change datetime dates to string and fillNA for later json\n df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]\n df.fillna(0, inplace=True)\n\n self.daily_df = df\n\n def _convert_spend_currency(self):\n if self.spend.count() > 0:\n spend_cur = list(set(\n self.spend.values_list('currency', flat=True)\n ))\n if spend_cur != [self.campaign.currency]:\n raise NotImplementedError(\n \"Currency converting not implemented, make sure budgets \"\n \"and spends are in the same currency\"\n )\n # Convert spend to list so that we can alter change currency\n self.spend = list(self.spend)\n else:\n self.spend = []\n\n def _get_budget_spend_series(self):\n try:\n self.budget_series = self.daily_df[self.BUDGETS_NAME]\n except KeyError:\n self.budget_series = pd.Series()\n\n self.spend_series = (\n self.daily_df\n .drop(self.BUDGETS_NAME, axis=1, errors='ignore')\n .sum(axis=1)\n )\n\n def check_required(self):\n \"\"\" Functions needed for any of the public methods to work \"\"\"\n if not self.required_ran:\n self._get_start_date()\n self._get_querysets()\n self._convert_spend_currency()\n self._convert_to_daily_df()\n self._get_budget_spend_series()\n\n self.required_ran = True\n\n def get_daily_diff(self):\n self.check_required()\n res = self.budget_series - self.spend_series\n res.fillna(0, inplace=True)\n return res\n\n def get_cum_diff(self, daily_diff):\n self.check_required()\n return daily_diff.cumsum()\n\n def get_totals(self):\n self.check_required()\n spend_sum = self.spend_series.sum()\n budget_sum = self.budget_series.sum()\n spend_days = self.spend_series.count()\n budget_days = self.budget_series.count()\n diff = budget_sum - spend_sum\n totals = {\n 'spend': spend_sum,\n 'budget': budget_sum,\n 'avg_spend_per_day': (\n spend_sum / spend_days\n ),\n 'avg_budget_per_day': (\n budget_sum / budget_days\n ),\n 'diff': diff,\n 'avg_diff_per_day': diff / spend_days\n }\n\n for each in totals:\n if pd.isnull(totals[each]):\n totals[each] = 0\n\n return totals\n\n def get_info(self):\n info = {\n 'last_spend': self.spend_series.dropna().index[-1]\n }\n\n return info\n",
"step-ids": [
9,
12,
13,
15,
16
]
}
|
[
9,
12,
13,
15,
16
] |
<|reserved_special_token_0|>
def getRandomUserAgnet():
user_agents = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'
]
userAgent = random.choice(user_agents)
return userAgent
def getProxies():
proxies = []
for i in range(1, 10):
url = 'http://www.xicidaili.com/nn/{0}'.format(i)
userAgent = getRandomUserAgnet()
headers = {'User-Agent': userAgent}
opener = urllib.request.build_opener()
opener.addheaders = [headers]
try:
data = opener.open(url, timeout=5).read()
sleep(3)
except Exception as e:
logging.debug(e)
selector = etree.HTML(data)
ip_addr = selector.xpath("//tr[@class='odd']/td[2]/text()")
port = selector.xpath("//tr[@class='odd']/td[3]/text()")
sur_time = selector.xpath("//tr[@class='odd']/td[9]/text()")
ver_time = selector.xpath("//tr[@class='odd']/td[10]/text()")
for j in range(len(ip_addr)):
ip = ip_addr[j] + ':' + port[j]
proxies.append(ip)
return proxies
def verify_ip(currentIp):
tmp_proxies = []
testUrl = 'http://www.baidu.com'
userAgent = getRandomUserAgnet()
proxy_support = urllib.request.ProxyHandler({'http': currentIp})
opener = urllib.request.build_opener(proxy_support)
opener.addheaders = [('User-Agent', userAgent)]
urllib.request.install_opener(opener)
try:
res = urllib.request.urlopen(testUrl, timeout=5).read()
if len(res) != 0:
tmp_proxies.append(currentIp)
except urllib.error.URLError as er2:
if hasattr(er2, 'code'):
logging.debug('unvalid ipaddress' + currentIp + str(er2.code))
if hasattr(er2, 'reason'):
logging.debug('reason is the ' + currentIp + str(er2.reason))
except Exception as er:
logging.debug(er)
sleep(2)
return tmp_proxies
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getRandomUserAgnet():
user_agents = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'
]
userAgent = random.choice(user_agents)
return userAgent
def getProxies():
proxies = []
for i in range(1, 10):
url = 'http://www.xicidaili.com/nn/{0}'.format(i)
userAgent = getRandomUserAgnet()
headers = {'User-Agent': userAgent}
opener = urllib.request.build_opener()
opener.addheaders = [headers]
try:
data = opener.open(url, timeout=5).read()
sleep(3)
except Exception as e:
logging.debug(e)
selector = etree.HTML(data)
ip_addr = selector.xpath("//tr[@class='odd']/td[2]/text()")
port = selector.xpath("//tr[@class='odd']/td[3]/text()")
sur_time = selector.xpath("//tr[@class='odd']/td[9]/text()")
ver_time = selector.xpath("//tr[@class='odd']/td[10]/text()")
for j in range(len(ip_addr)):
ip = ip_addr[j] + ':' + port[j]
proxies.append(ip)
return proxies
def verify_ip(currentIp):
tmp_proxies = []
testUrl = 'http://www.baidu.com'
userAgent = getRandomUserAgnet()
proxy_support = urllib.request.ProxyHandler({'http': currentIp})
opener = urllib.request.build_opener(proxy_support)
opener.addheaders = [('User-Agent', userAgent)]
urllib.request.install_opener(opener)
try:
res = urllib.request.urlopen(testUrl, timeout=5).read()
if len(res) != 0:
tmp_proxies.append(currentIp)
except urllib.error.URLError as er2:
if hasattr(er2, 'code'):
logging.debug('unvalid ipaddress' + currentIp + str(er2.code))
if hasattr(er2, 'reason'):
logging.debug('reason is the ' + currentIp + str(er2.reason))
except Exception as er:
logging.debug(er)
sleep(2)
return tmp_proxies
if __name__ == '__main__':
getProxies()
<|reserved_special_token_1|>
__author__ = 'Administrator'
<|reserved_special_token_0|>
def getRandomUserAgnet():
user_agents = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'
]
userAgent = random.choice(user_agents)
return userAgent
def getProxies():
proxies = []
for i in range(1, 10):
url = 'http://www.xicidaili.com/nn/{0}'.format(i)
userAgent = getRandomUserAgnet()
headers = {'User-Agent': userAgent}
opener = urllib.request.build_opener()
opener.addheaders = [headers]
try:
data = opener.open(url, timeout=5).read()
sleep(3)
except Exception as e:
logging.debug(e)
selector = etree.HTML(data)
ip_addr = selector.xpath("//tr[@class='odd']/td[2]/text()")
port = selector.xpath("//tr[@class='odd']/td[3]/text()")
sur_time = selector.xpath("//tr[@class='odd']/td[9]/text()")
ver_time = selector.xpath("//tr[@class='odd']/td[10]/text()")
for j in range(len(ip_addr)):
ip = ip_addr[j] + ':' + port[j]
proxies.append(ip)
return proxies
def verify_ip(currentIp):
tmp_proxies = []
testUrl = 'http://www.baidu.com'
userAgent = getRandomUserAgnet()
proxy_support = urllib.request.ProxyHandler({'http': currentIp})
opener = urllib.request.build_opener(proxy_support)
opener.addheaders = [('User-Agent', userAgent)]
urllib.request.install_opener(opener)
try:
res = urllib.request.urlopen(testUrl, timeout=5).read()
if len(res) != 0:
tmp_proxies.append(currentIp)
except urllib.error.URLError as er2:
if hasattr(er2, 'code'):
logging.debug('unvalid ipaddress' + currentIp + str(er2.code))
if hasattr(er2, 'reason'):
logging.debug('reason is the ' + currentIp + str(er2.reason))
except Exception as er:
logging.debug(er)
sleep(2)
return tmp_proxies
if __name__ == '__main__':
getProxies()
<|reserved_special_token_1|>
__author__ = 'Administrator'
from urllib import request
import urllib.parse
import logging
from multiprocessing import pool
from time import sleep
import random
from lxml import etree
def getRandomUserAgnet():
user_agents = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'
]
userAgent = random.choice(user_agents)
return userAgent
def getProxies():
proxies = []
for i in range(1, 10):
url = 'http://www.xicidaili.com/nn/{0}'.format(i)
userAgent = getRandomUserAgnet()
headers = {'User-Agent': userAgent}
opener = urllib.request.build_opener()
opener.addheaders = [headers]
try:
data = opener.open(url, timeout=5).read()
sleep(3)
except Exception as e:
logging.debug(e)
selector = etree.HTML(data)
ip_addr = selector.xpath("//tr[@class='odd']/td[2]/text()")
port = selector.xpath("//tr[@class='odd']/td[3]/text()")
sur_time = selector.xpath("//tr[@class='odd']/td[9]/text()")
ver_time = selector.xpath("//tr[@class='odd']/td[10]/text()")
for j in range(len(ip_addr)):
ip = ip_addr[j] + ':' + port[j]
proxies.append(ip)
return proxies
def verify_ip(currentIp):
tmp_proxies = []
testUrl = 'http://www.baidu.com'
userAgent = getRandomUserAgnet()
proxy_support = urllib.request.ProxyHandler({'http': currentIp})
opener = urllib.request.build_opener(proxy_support)
opener.addheaders = [('User-Agent', userAgent)]
urllib.request.install_opener(opener)
try:
res = urllib.request.urlopen(testUrl, timeout=5).read()
if len(res) != 0:
tmp_proxies.append(currentIp)
except urllib.error.URLError as er2:
if hasattr(er2, 'code'):
logging.debug('unvalid ipaddress' + currentIp + str(er2.code))
if hasattr(er2, 'reason'):
logging.debug('reason is the ' + currentIp + str(er2.reason))
except Exception as er:
logging.debug(er)
sleep(2)
return tmp_proxies
if __name__ == '__main__':
getProxies()
<|reserved_special_token_1|>
__author__ = 'Administrator'
# Main logic for scraping proxy IPs
from urllib import request
import urllib.parse
import logging
from multiprocessing import pool
from time import sleep
import random
from lxml import etree
def getRandomUserAgnet():
user_agents=[
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S"
]
userAgent=random.choice(user_agents)
return userAgent
def getProxies():
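    # Scrape candidate ip:port pairs from the xicidaili.com proxy listing pages.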
proxies=[]
for i in range(1,10):
url="http://www.xicidaili.com/nn/{0}".format(i)
userAgent=getRandomUserAgnet()
headers={"User-Agent":userAgent}
opener=urllib.request.build_opener()
opener.addheaders=[headers]
try:
data=opener.open(url,timeout=5).read()
sleep(3)
except Exception as e:
logging.debug(e)
selector=etree.HTML(data)
ip_addr=selector.xpath("//tr[@class='odd']/td[2]/text()")
port=selector.xpath("//tr[@class='odd']/td[3]/text()")
sur_time=selector.xpath("//tr[@class='odd']/td[9]/text()")
ver_time=selector.xpath("//tr[@class='odd']/td[10]/text()")
for j in range(len(ip_addr)):
ip=ip_addr[j]+":"+port[j]
proxies.append(ip)
return proxies
def verify_ip(currentIp):
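    # Check one candidate proxy by fetching a test URL through it;
    # on success the ip is returned inside a list.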
tmp_proxies=[]
testUrl="http://www.baidu.com"
userAgent=getRandomUserAgnet()
proxy_support=urllib.request.ProxyHandler({"http":currentIp})
opener=urllib.request.build_opener(proxy_support)
opener.addheaders=[("User-Agent",userAgent)]
urllib.request.install_opener(opener)
try:
res=urllib.request.urlopen(testUrl,timeout=5).read()
if len(res)!=0:
tmp_proxies.append(currentIp)
except urllib.error.URLError as er2:
if hasattr(er2,'code'):
logging.debug("unvalid ipaddress"+currentIp+str(er2.code))
if hasattr(er2,"reason"):
logging.debug("reason is the "+currentIp+str(er2.reason))
except Exception as er:
logging.debug(er)
sleep(2)
return tmp_proxies
if __name__=="__main__":
getProxies()
|
flexible
|
{
"blob_id": "911631e96d21bdf22a219007f1bdc04a5e6965dc",
"index": 739,
"step-1": "<mask token>\n\n\ndef getRandomUserAgnet():\n user_agents = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'\n ]\n userAgent = random.choice(user_agents)\n return userAgent\n\n\ndef getProxies():\n proxies = []\n for i in range(1, 10):\n url = 'http://www.xicidaili.com/nn/{0}'.format(i)\n userAgent = getRandomUserAgnet()\n headers = {'User-Agent': userAgent}\n opener = urllib.request.build_opener()\n opener.addheaders = [headers]\n try:\n data = opener.open(url, timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector = etree.HTML(data)\n ip_addr = selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port = selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time = selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time = selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip = ip_addr[j] + ':' + port[j]\n proxies.append(ip)\n return proxies\n\n\ndef verify_ip(currentIp):\n tmp_proxies = []\n testUrl = 'http://www.baidu.com'\n userAgent = getRandomUserAgnet()\n proxy_support = urllib.request.ProxyHandler({'http': currentIp})\n opener = urllib.request.build_opener(proxy_support)\n opener.addheaders = [('User-Agent', userAgent)]\n urllib.request.install_opener(opener)\n try:\n res = urllib.request.urlopen(testUrl, timeout=5).read()\n if len(res) != 0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2, 'code'):\n logging.debug('unvalid ipaddress' + currentIp + str(er2.code))\n if hasattr(er2, 'reason'):\n logging.debug('reason is the ' + currentIp + str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getRandomUserAgnet():\n user_agents = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'\n ]\n userAgent = random.choice(user_agents)\n return userAgent\n\n\ndef getProxies():\n proxies = []\n for i in range(1, 10):\n url = 'http://www.xicidaili.com/nn/{0}'.format(i)\n userAgent = getRandomUserAgnet()\n headers = {'User-Agent': userAgent}\n opener = urllib.request.build_opener()\n opener.addheaders = [headers]\n try:\n data = opener.open(url, timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector = etree.HTML(data)\n ip_addr = selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port = selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time = selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time = selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip = ip_addr[j] + ':' + port[j]\n proxies.append(ip)\n return proxies\n\n\ndef verify_ip(currentIp):\n tmp_proxies = []\n testUrl = 'http://www.baidu.com'\n userAgent = getRandomUserAgnet()\n proxy_support = urllib.request.ProxyHandler({'http': currentIp})\n opener = urllib.request.build_opener(proxy_support)\n opener.addheaders = [('User-Agent', userAgent)]\n urllib.request.install_opener(opener)\n try:\n res = urllib.request.urlopen(testUrl, timeout=5).read()\n if len(res) != 0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2, 'code'):\n logging.debug('unvalid ipaddress' + currentIp + str(er2.code))\n if hasattr(er2, 'reason'):\n logging.debug('reason is the ' + currentIp + str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\n\n\nif __name__ == '__main__':\n getProxies()\n",
"step-3": "__author__ = 'Administrator'\n<mask token>\n\n\ndef getRandomUserAgnet():\n user_agents = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'\n ]\n userAgent = random.choice(user_agents)\n return userAgent\n\n\ndef getProxies():\n proxies = []\n for i in range(1, 10):\n url = 'http://www.xicidaili.com/nn/{0}'.format(i)\n userAgent = getRandomUserAgnet()\n headers = {'User-Agent': userAgent}\n opener = urllib.request.build_opener()\n opener.addheaders = [headers]\n try:\n data = opener.open(url, timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector = etree.HTML(data)\n ip_addr = selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port = selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time = selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time = selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip = ip_addr[j] + ':' + port[j]\n proxies.append(ip)\n return proxies\n\n\ndef verify_ip(currentIp):\n tmp_proxies = []\n testUrl = 'http://www.baidu.com'\n userAgent = getRandomUserAgnet()\n proxy_support = urllib.request.ProxyHandler({'http': currentIp})\n opener = urllib.request.build_opener(proxy_support)\n opener.addheaders = [('User-Agent', userAgent)]\n urllib.request.install_opener(opener)\n try:\n res = urllib.request.urlopen(testUrl, timeout=5).read()\n if len(res) != 0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2, 'code'):\n logging.debug('unvalid ipaddress' + currentIp + str(er2.code))\n if hasattr(er2, 'reason'):\n logging.debug('reason is the ' + currentIp + str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\n\n\nif __name__ == '__main__':\n getProxies()\n",
"step-4": "__author__ = 'Administrator'\nfrom urllib import request\nimport urllib.parse\nimport logging\nfrom multiprocessing import pool\nfrom time import sleep\nimport random\nfrom lxml import etree\n\n\ndef getRandomUserAgnet():\n user_agents = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S'\n ]\n userAgent = random.choice(user_agents)\n return userAgent\n\n\ndef getProxies():\n proxies = []\n for i in range(1, 10):\n url = 'http://www.xicidaili.com/nn/{0}'.format(i)\n userAgent = getRandomUserAgnet()\n headers = {'User-Agent': userAgent}\n opener = urllib.request.build_opener()\n opener.addheaders = [headers]\n try:\n data = opener.open(url, timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector = etree.HTML(data)\n ip_addr = selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port = selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time = selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time = selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip = ip_addr[j] + ':' + port[j]\n proxies.append(ip)\n return proxies\n\n\ndef verify_ip(currentIp):\n tmp_proxies = []\n testUrl = 'http://www.baidu.com'\n userAgent = getRandomUserAgnet()\n proxy_support = urllib.request.ProxyHandler({'http': currentIp})\n opener = urllib.request.build_opener(proxy_support)\n opener.addheaders = [('User-Agent', userAgent)]\n urllib.request.install_opener(opener)\n try:\n res = urllib.request.urlopen(testUrl, timeout=5).read()\n if len(res) != 0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2, 'code'):\n logging.debug('unvalid ipaddress' + currentIp + str(er2.code))\n if hasattr(er2, 'reason'):\n logging.debug('reason is the ' + currentIp + str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\n\n\nif __name__ == '__main__':\n getProxies()\n",
"step-5": "__author__ = 'Administrator'\n# 抓取IP的主要逻辑\nfrom urllib import request\nimport urllib.parse\nimport logging\nfrom multiprocessing import pool\nfrom time import sleep\nimport random\nfrom lxml import etree\ndef getRandomUserAgnet():\n user_agents=[\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360S\"\n ]\n userAgent=random.choice(user_agents)\n return userAgent\ndef getProxies():\n proxies=[]\n for i in range(1,10):\n url=\"http://www.xicidaili.com/nn/{0}\".format(i)\n userAgent=getRandomUserAgnet()\n headers={\"User-Agent\":userAgent}\n opener=urllib.request.build_opener()\n opener.addheaders=[headers]\n try:\n data=opener.open(url,timeout=5).read()\n sleep(3)\n except Exception as e:\n logging.debug(e)\n selector=etree.HTML(data)\n ip_addr=selector.xpath(\"//tr[@class='odd']/td[2]/text()\")\n port=selector.xpath(\"//tr[@class='odd']/td[3]/text()\")\n sur_time=selector.xpath(\"//tr[@class='odd']/td[9]/text()\")\n ver_time=selector.xpath(\"//tr[@class='odd']/td[10]/text()\")\n for j in range(len(ip_addr)):\n ip=ip_addr[j]+\":\"+port[j]\n proxies.append(ip)\n return proxies\ndef verify_ip(currentIp):\n tmp_proxies=[]\n testUrl=\"http://www.baidu.com\"\n userAgent=getRandomUserAgnet()\n proxy_support=urllib.request.ProxyHandler({\"http\":currentIp})\n opener=urllib.request.build_opener(proxy_support)\n opener.addheaders=[(\"User-Agent\",userAgent)]\n urllib.request.install_opener(opener)\n try:\n res=urllib.request.urlopen(testUrl,timeout=5).read()\n if len(res)!=0:\n tmp_proxies.append(currentIp)\n except urllib.error.URLError as er2:\n if hasattr(er2,'code'):\n logging.debug(\"unvalid ipaddress\"+currentIp+str(er2.code))\n if hasattr(er2,\"reason\"):\n logging.debug(\"reason is the \"+currentIp+str(er2.reason))\n except Exception as er:\n logging.debug(er)\n sleep(2)\n return tmp_proxies\nif __name__==\"__main__\":\n getProxies()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""
Looks up values in createresistorvaluesdbm.py.
Outputs string value ( cmd ).
"""
import dbm
# Open a DB. The c option opens in read/write mode and creates the file if needed.
db = dbm.open( 'resistorvalues', 'c' )
with open( "dummyoutput.txt", "r" ) as file_object:
#print (file_object.readline(6))
data = file_object.readlines()
# Go through serial string line by line
for line in data:
# parse on semi-colon
words = line.split( ";" )
#print (line.rsplit(";"))
# Ignore position information and pull out resistor values
# Note every fourth item to compensate for word pairs
for i in range( 1, len( words ), 4 ):
# print(words[i])
            # the get method takes 2 values: the lookup key, and what to return if there is no match, in this case `0`
if db.get( words[ i ], 0 ) != 0:
# Direction, i.e. "f"
cmd1 = db.get( words[ i ] )
# Value, i.e. "10"
cmd2 = db.get( words[ i + 2 ] )
# Formatting space
                space = b' '
cmd = cmd1 + space + cmd2
#print (cmd.decode('ascii'))
print ( cmd )
|
normal
|
{
"blob_id": "69eb62ba47a63cf007334c777709b0513d75f396",
"index": 1504,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('dummyoutput.txt', 'r') as file_object:\n data = file_object.readlines()\n for line in data:\n words = line.split(';')\n for i in range(1, len(words), 4):\n if db.get(words[i], 0) != 0:\n cmd1 = db.get(words[i])\n cmd2 = db.get(words[i + 2])\n space = b(' ')\n cmd = cmd1 + space + cmd2\n print(cmd)\n",
"step-3": "<mask token>\ndb = dbm.open('resistorvalues', 'c')\nwith open('dummyoutput.txt', 'r') as file_object:\n data = file_object.readlines()\n for line in data:\n words = line.split(';')\n for i in range(1, len(words), 4):\n if db.get(words[i], 0) != 0:\n cmd1 = db.get(words[i])\n cmd2 = db.get(words[i + 2])\n space = b(' ')\n cmd = cmd1 + space + cmd2\n print(cmd)\n",
"step-4": "<mask token>\nimport dbm\ndb = dbm.open('resistorvalues', 'c')\nwith open('dummyoutput.txt', 'r') as file_object:\n data = file_object.readlines()\n for line in data:\n words = line.split(';')\n for i in range(1, len(words), 4):\n if db.get(words[i], 0) != 0:\n cmd1 = db.get(words[i])\n cmd2 = db.get(words[i + 2])\n space = b(' ')\n cmd = cmd1 + space + cmd2\n print(cmd)\n",
"step-5": "\"\"\"\r\n Looks up values in createresistorvaluesdbm.py.\r\n Outputs string value ( cmd ).\r\n\"\"\"\r\n\r\nimport dbm\r\n\r\n# Open a DB. The c option opens in read/write mode and creates the file if needed.\r\ndb = dbm.open( 'resistorvalues', 'c' )\r\n\r\n\r\nwith open( \"dummyoutput.txt\", \"r\" ) as file_object:\r\n#print (file_object.readline(6))\r\n data = file_object.readlines()\r\n # Go through serial string line by line\r\n for line in data:\r\n # parse on semi-colon\r\n words = line.split( \";\" )\r\n #print (line.rsplit(\";\"))\r\n # Ignore position information and pull out resistor values\r\n # Note every fourth item to compensate for word pairs\r\n for i in range( 1, len( words ), 4 ):\r\n # print(words[i])\r\n # the get method has 2 vlues lookup, and what to return is no match in this case is `0`\r\n if db.get( words[ i ], 0 ) != 0:\r\n # Direction, i.e. \"f\"\r\n cmd1 = db.get( words[ i ] )\r\n # Value, i.e. \"10\"\r\n cmd2 = db.get( words[ i + 2 ] )\r\n # Formatting space\r\n space = b( ' ' )\r\n cmd = cmd1 + space + cmd2\r\n #print (cmd.decode('ascii'))\r\n print ( cmd )\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
TTTSIZE = 4
def who_win_line(line):
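    # Returns '.' if the line is still open, 'D' if both players appear (dead line),
    # otherwise the single symbol that fills it; 'T' squares count for either player.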
elements = set(line)
if '.' in elements:
return '.'
elements.discard('T')
if len(elements) >= 2:
return 'D'
else:
return elements.pop()
def who_win_tic_tac_toe(original_rows):
#print('%s' % repr(original_rows))
board_full = True
rows = [row[0:TTTSIZE] for row in original_rows]
#print('%s' % repr(rows))
columns = [ [rows[0][0], rows[1][0], rows[2][0], rows[3][0]],
[rows[0][1], rows[1][1], rows[2][1], rows[3][1]],
[rows[0][2], rows[1][2], rows[2][2], rows[3][2]],
[rows[0][3], rows[1][3], rows[2][3], rows[3][3]] ]
diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]
diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]
lines = rows
lines.extend(columns)
lines.append(diagonal1)
lines.append(diagonal2)
for line in lines:
winner = who_win_line(line)
if winner == 'X':
return 'X won'
elif winner == 'O':
return 'O won'
elif winner == '.':
board_full = False
if board_full:
return 'Draw'
else:
return 'Game has not completed'
import sys
#import pdb
if __name__ == '__main__':
filename_prefix = sys.argv[1]
filename_in = filename_prefix + ".in"
filename_out = filename_prefix + ".out"
file_in = open(filename_in, 'r')
lines = file_in.readlines()
testcnt = int(lines[0])
idx = 1
file_out = open(filename_out, 'w')
#pdb.set_trace()
for test in range(testcnt):
res = who_win_tic_tac_toe(lines[idx : idx + TTTSIZE])
file_out.write("Case #{0}: {1}\n".format(test + 1, res))
idx += TTTSIZE + 1
|
normal
|
{
"blob_id": "2e041e33b5c34c2bddc72b36ff641817f1e21db2",
"index": 3735,
"step-1": "<mask token>\n\n\ndef who_win_line(line):\n elements = set(line)\n if '.' in elements:\n return '.'\n elements.discard('T')\n if len(elements) >= 2:\n return 'D'\n else:\n return elements.pop()\n\n\ndef who_win_tic_tac_toe(original_rows):\n board_full = True\n rows = [row[0:TTTSIZE] for row in original_rows]\n columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1\n ], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],\n rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[\n 3][3]]]\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\n lines = rows\n lines.extend(columns)\n lines.append(diagonal1)\n lines.append(diagonal2)\n for line in lines:\n winner = who_win_line(line)\n if winner == 'X':\n return 'X won'\n elif winner == 'O':\n return 'O won'\n elif winner == '.':\n board_full = False\n if board_full:\n return 'Draw'\n else:\n return 'Game has not completed'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef who_win_line(line):\n elements = set(line)\n if '.' in elements:\n return '.'\n elements.discard('T')\n if len(elements) >= 2:\n return 'D'\n else:\n return elements.pop()\n\n\ndef who_win_tic_tac_toe(original_rows):\n board_full = True\n rows = [row[0:TTTSIZE] for row in original_rows]\n columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1\n ], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],\n rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[\n 3][3]]]\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\n lines = rows\n lines.extend(columns)\n lines.append(diagonal1)\n lines.append(diagonal2)\n for line in lines:\n winner = who_win_line(line)\n if winner == 'X':\n return 'X won'\n elif winner == 'O':\n return 'O won'\n elif winner == '.':\n board_full = False\n if board_full:\n return 'Draw'\n else:\n return 'Game has not completed'\n\n\n<mask token>\nif __name__ == '__main__':\n filename_prefix = sys.argv[1]\n filename_in = filename_prefix + '.in'\n filename_out = filename_prefix + '.out'\n file_in = open(filename_in, 'r')\n lines = file_in.readlines()\n testcnt = int(lines[0])\n idx = 1\n file_out = open(filename_out, 'w')\n for test in range(testcnt):\n res = who_win_tic_tac_toe(lines[idx:idx + TTTSIZE])\n file_out.write('Case #{0}: {1}\\n'.format(test + 1, res))\n idx += TTTSIZE + 1\n",
"step-3": "TTTSIZE = 4\n\n\ndef who_win_line(line):\n elements = set(line)\n if '.' in elements:\n return '.'\n elements.discard('T')\n if len(elements) >= 2:\n return 'D'\n else:\n return elements.pop()\n\n\ndef who_win_tic_tac_toe(original_rows):\n board_full = True\n rows = [row[0:TTTSIZE] for row in original_rows]\n columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1\n ], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],\n rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[\n 3][3]]]\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\n lines = rows\n lines.extend(columns)\n lines.append(diagonal1)\n lines.append(diagonal2)\n for line in lines:\n winner = who_win_line(line)\n if winner == 'X':\n return 'X won'\n elif winner == 'O':\n return 'O won'\n elif winner == '.':\n board_full = False\n if board_full:\n return 'Draw'\n else:\n return 'Game has not completed'\n\n\n<mask token>\nif __name__ == '__main__':\n filename_prefix = sys.argv[1]\n filename_in = filename_prefix + '.in'\n filename_out = filename_prefix + '.out'\n file_in = open(filename_in, 'r')\n lines = file_in.readlines()\n testcnt = int(lines[0])\n idx = 1\n file_out = open(filename_out, 'w')\n for test in range(testcnt):\n res = who_win_tic_tac_toe(lines[idx:idx + TTTSIZE])\n file_out.write('Case #{0}: {1}\\n'.format(test + 1, res))\n idx += TTTSIZE + 1\n",
"step-4": "TTTSIZE = 4\n\n\ndef who_win_line(line):\n elements = set(line)\n if '.' in elements:\n return '.'\n elements.discard('T')\n if len(elements) >= 2:\n return 'D'\n else:\n return elements.pop()\n\n\ndef who_win_tic_tac_toe(original_rows):\n board_full = True\n rows = [row[0:TTTSIZE] for row in original_rows]\n columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1\n ], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],\n rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[\n 3][3]]]\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\n lines = rows\n lines.extend(columns)\n lines.append(diagonal1)\n lines.append(diagonal2)\n for line in lines:\n winner = who_win_line(line)\n if winner == 'X':\n return 'X won'\n elif winner == 'O':\n return 'O won'\n elif winner == '.':\n board_full = False\n if board_full:\n return 'Draw'\n else:\n return 'Game has not completed'\n\n\nimport sys\nif __name__ == '__main__':\n filename_prefix = sys.argv[1]\n filename_in = filename_prefix + '.in'\n filename_out = filename_prefix + '.out'\n file_in = open(filename_in, 'r')\n lines = file_in.readlines()\n testcnt = int(lines[0])\n idx = 1\n file_out = open(filename_out, 'w')\n for test in range(testcnt):\n res = who_win_tic_tac_toe(lines[idx:idx + TTTSIZE])\n file_out.write('Case #{0}: {1}\\n'.format(test + 1, res))\n idx += TTTSIZE + 1\n",
"step-5": "TTTSIZE = 4\r\n\r\ndef who_win_line(line):\r\n elements = set(line)\r\n if '.' in elements:\r\n return '.'\r\n elements.discard('T')\r\n if len(elements) >= 2:\r\n return 'D'\r\n else:\r\n return elements.pop()\r\n\r\ndef who_win_tic_tac_toe(original_rows):\r\n #print('%s' % repr(original_rows))\r\n board_full = True\r\n rows = [row[0:TTTSIZE] for row in original_rows]\r\n #print('%s' % repr(rows))\r\n columns = [ [rows[0][0], rows[1][0], rows[2][0], rows[3][0]],\r\n [rows[0][1], rows[1][1], rows[2][1], rows[3][1]],\r\n [rows[0][2], rows[1][2], rows[2][2], rows[3][2]],\r\n [rows[0][3], rows[1][3], rows[2][3], rows[3][3]] ]\r\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\r\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\r\n\r\n lines = rows\r\n lines.extend(columns)\r\n lines.append(diagonal1)\r\n lines.append(diagonal2)\r\n\r\n for line in lines:\r\n winner = who_win_line(line)\r\n if winner == 'X':\r\n return 'X won'\r\n elif winner == 'O':\r\n return 'O won'\r\n elif winner == '.':\r\n board_full = False\r\n if board_full:\r\n return 'Draw'\r\n else:\r\n return 'Game has not completed'\r\n\r\n\r\nimport sys\r\n#import pdb\r\n\r\nif __name__ == '__main__':\r\n filename_prefix = sys.argv[1]\r\n filename_in = filename_prefix + \".in\"\r\n filename_out = filename_prefix + \".out\"\r\n\r\n file_in = open(filename_in, 'r')\r\n lines = file_in.readlines()\r\n\r\n testcnt = int(lines[0])\r\n idx = 1\r\n\r\n file_out = open(filename_out, 'w')\r\n\r\n #pdb.set_trace()\r\n for test in range(testcnt):\r\n res = who_win_tic_tac_toe(lines[idx : idx + TTTSIZE])\r\n file_out.write(\"Case #{0}: {1}\\n\".format(test + 1, res))\r\n idx += TTTSIZE + 1\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def heapify(lst, index, heap_size):
largest = index
left_index = 2 * index + 1
right_index = 2 * index + 2
if left_index < heap_size and lst[left_index] > lst[largest]:
largest = left_index
if right_index < heap_size and lst[right_index] > lst[largest]:
largest = right_index
if largest != index:
lst[largest], lst[index] = lst[index], lst[largest]
heapify(lst, largest, heap_size)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def heapify(lst, index, heap_size):
largest = index
left_index = 2 * index + 1
right_index = 2 * index + 2
if left_index < heap_size and lst[left_index] > lst[largest]:
largest = left_index
if right_index < heap_size and lst[right_index] > lst[largest]:
largest = right_index
if largest != index:
lst[largest], lst[index] = lst[index], lst[largest]
heapify(lst, largest, heap_size)
def heap_sort(collection):
"""Pure implement of heap sort algorithm in Python
:param collection: some mutable ordered collection with heterogeneous
comparable items inside
    :return: the same collection sorted in ascending order
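
    >>> heap_sort([3, 1, 4, 1, 5])
    [1, 1, 3, 4, 5]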
"""
n = len(collection)
for i in range(n // 2 - 1, -1, -1):
heapify(collection, i, n)
for i in range(n - 1, 0, -1):
collection[0], collection[i] = collection[i], collection[0]
heapify(collection, 0, i)
return collection
|
flexible
|
{
"blob_id": "d8ea396ff8514cc10e02072ea478f0276584153d",
"index": 3274,
"step-1": "<mask token>\n",
"step-2": "def heapify(lst, index, heap_size):\n largest = index\n left_index = 2 * index + 1\n right_index = 2 * index + 2\n if left_index < heap_size and lst[left_index] > lst[largest]:\n largest = left_index\n if right_index < heap_size and lst[right_index] > lst[largest]:\n largest = right_index\n if largest != index:\n lst[largest], lst[index] = lst[index], lst[largest]\n heapify(lst, largest, heap_size)\n\n\n<mask token>\n",
"step-3": "def heapify(lst, index, heap_size):\n largest = index\n left_index = 2 * index + 1\n right_index = 2 * index + 2\n if left_index < heap_size and lst[left_index] > lst[largest]:\n largest = left_index\n if right_index < heap_size and lst[right_index] > lst[largest]:\n largest = right_index\n if largest != index:\n lst[largest], lst[index] = lst[index], lst[largest]\n heapify(lst, largest, heap_size)\n\n\ndef heap_sort(collection):\n \"\"\"Pure implement of heap sort algorithm in Python\n\n :param collection: some mutable ordered collection with heterogeneous\n comparable items inside\n :return: the same collection ordered by ascending\n \"\"\"\n n = len(collection)\n for i in range(n // 2 - 1, -1, -1):\n heapify(collection, i, n)\n for i in range(n - 1, 0, -1):\n collection[0], collection[i] = collection[i], collection[0]\n heapify(collection, 0, i)\n return collection\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
import initialization as init
import evaluation as eval
import selection as sel
import recombination as rec
import mutation as mut
initialize = init.permutation
evaluate = eval.custom
select = sel.rank_based
mutate = mut.swap
reproduce = rec.pairwise
crossover = rec.order
replace = sel.rank_based
params = {'gens': 100, 'n_off': 50, 'n_pars': 100, 'n_objs': 1, 'pop_size':
150, 'len_gene': 100, 'mut_rate': 0.5}
population = initialize(params)
population = evaluate(params, population)
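# Evolutionary loop: select parents by rank, produce offspring via crossover and
# mutation, evaluate them, then keep pop_size individuals by rank-based replacement.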
for gen in range(params['gens']):
parents = select(population, params['n_pars'])
offspring = reproduce(params, parents, crossover)
offspring = mutate(params, offspring)
offspring = evaluate(params, offspring)
population = replace(np.concatenate((population, offspring), axis=0),
params['pop_size'])
print(gen)
|
normal
|
{
"blob_id": "5eab41a2ef536365bab6f6b5ad97efb8d26d7687",
"index": 4456,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor gen in range(params['gens']):\n parents = select(population, params['n_pars'])\n offspring = reproduce(params, parents, crossover)\n offspring = mutate(params, offspring)\n offspring = evaluate(params, offspring)\n population = replace(np.concatenate((population, offspring), axis=0),\n params['pop_size'])\n print(gen)\n",
"step-3": "<mask token>\ninitialize = init.permutation\nevaluate = eval.custom\nselect = sel.rank_based\nmutate = mut.swap\nreproduce = rec.pairwise\ncrossover = rec.order\nreplace = sel.rank_based\nparams = {'gens': 100, 'n_off': 50, 'n_pars': 100, 'n_objs': 1, 'pop_size':\n 150, 'len_gene': 100, 'mut_rate': 0.5}\npopulation = initialize(params)\npopulation = evaluate(params, population)\nfor gen in range(params['gens']):\n parents = select(population, params['n_pars'])\n offspring = reproduce(params, parents, crossover)\n offspring = mutate(params, offspring)\n offspring = evaluate(params, offspring)\n population = replace(np.concatenate((population, offspring), axis=0),\n params['pop_size'])\n print(gen)\n",
"step-4": "import numpy as np\nimport initialization as init\nimport evaluation as eval\nimport selection as sel\nimport recombination as rec\nimport mutation as mut\ninitialize = init.permutation\nevaluate = eval.custom\nselect = sel.rank_based\nmutate = mut.swap\nreproduce = rec.pairwise\ncrossover = rec.order\nreplace = sel.rank_based\nparams = {'gens': 100, 'n_off': 50, 'n_pars': 100, 'n_objs': 1, 'pop_size':\n 150, 'len_gene': 100, 'mut_rate': 0.5}\npopulation = initialize(params)\npopulation = evaluate(params, population)\nfor gen in range(params['gens']):\n parents = select(population, params['n_pars'])\n offspring = reproduce(params, parents, crossover)\n offspring = mutate(params, offspring)\n offspring = evaluate(params, offspring)\n population = replace(np.concatenate((population, offspring), axis=0),\n params['pop_size'])\n print(gen)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def visualize_grayscale_intensities(img, out_path):
img_x, img_y = np.mgrid[0:img.shape[0], 0:img.shape[1]]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.
jet, linewidth=0)
plt.savefig(out_path + 'surface.png')
plt.close()
<|reserved_special_token_0|>
def visualize_histogram(img):
if len(img.shape) == 2:
hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])
fig = plt.figure()
        plt.plot(bins[:-1], hist)
fig.show()
def visualization_tests(path='../input_data/tunnel_1.png'):
path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'
img = cv2.imread(path)
visualize_color_intensities(img, out_path=VIZ_PATH)
def experiments(path='../input_data/noisy_segments/honeycomb_1.png'):
img = cv2.imread(path)
img = color_to_gray_operations.luminosity_method(img)
visualize_grayscale_intensities(img, out_path=VIZ_PATH)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def visualize_grayscale_intensities(img, out_path):
img_x, img_y = np.mgrid[0:img.shape[0], 0:img.shape[1]]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.
jet, linewidth=0)
plt.savefig(out_path + 'surface.png')
plt.close()
def visualize_color_intensities(color_img, out_path):
b, g, r = cv2.split(color_img)
blue_x, blue_y = np.mgrid[0:b.shape[0], 0:b.shape[1]]
green_x, green_y = np.mgrid[0:g.shape[0], 0:g.shape[1]]
red_x, red_y = np.mgrid[0:r.shape[0], 0:r.shape[1]]
fig_blue = plt.figure()
ax_blue = fig_blue.gca(projection='3d')
ax_blue.plot_surface(blue_x, blue_y, b, rstride=1, cstride=1, cmap=plt.
cm.jet, linewidth=0)
plt.savefig(out_path + 'blue_surface.png')
plt.close()
fig_green = plt.figure()
ax_green = fig_green.gca(projection='3d')
ax_green.plot_surface(green_x, green_y, g, rstride=1, cstride=1, cmap=
plt.cm.jet, linewidth=0)
plt.savefig(out_path + 'green_surface.png')
plt.close()
fig_red = plt.figure()
ax_red = fig_red.gca(projection='3d')
ax_red.plot_surface(red_x, red_y, r, rstride=1, cstride=1, cmap=plt.cm.
jet, linewidth=0)
plt.savefig(out_path + 'red_surface.png')
plt.close()
def visualize_histogram(img):
if len(img.shape) == 2:
hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])
fig = plt.figure()
        plt.plot(bins[:-1], hist)
fig.show()
def visualization_tests(path='../input_data/tunnel_1.png'):
path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'
img = cv2.imread(path)
visualize_color_intensities(img, out_path=VIZ_PATH)
def experiments(path='../input_data/noisy_segments/honeycomb_1.png'):
img = cv2.imread(path)
img = color_to_gray_operations.luminosity_method(img)
visualize_grayscale_intensities(img, out_path=VIZ_PATH)
experiments()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
VIZ_PATH = '../output_data/visualizations/gray_intensities/'
def visualize_grayscale_intensities(img, out_path):
img_x, img_y = np.mgrid[0:img.shape[0], 0:img.shape[1]]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.
jet, linewidth=0)
plt.savefig(out_path + 'surface.png')
plt.close()
def visualize_color_intensities(color_img, out_path):
b, g, r = cv2.split(color_img)
blue_x, blue_y = np.mgrid[0:b.shape[0], 0:b.shape[1]]
green_x, green_y = np.mgrid[0:g.shape[0], 0:g.shape[1]]
red_x, red_y = np.mgrid[0:r.shape[0], 0:r.shape[1]]
fig_blue = plt.figure()
ax_blue = fig_blue.gca(projection='3d')
ax_blue.plot_surface(blue_x, blue_y, b, rstride=1, cstride=1, cmap=plt.
cm.jet, linewidth=0)
plt.savefig(out_path + 'blue_surface.png')
plt.close()
fig_green = plt.figure()
ax_green = fig_green.gca(projection='3d')
ax_green.plot_surface(green_x, green_y, g, rstride=1, cstride=1, cmap=
plt.cm.jet, linewidth=0)
plt.savefig(out_path + 'green_surface.png')
plt.close()
fig_red = plt.figure()
ax_red = fig_red.gca(projection='3d')
ax_red.plot_surface(red_x, red_y, r, rstride=1, cstride=1, cmap=plt.cm.
jet, linewidth=0)
plt.savefig(out_path + 'red_surface.png')
plt.close()
def visualize_histogram(img):
if len(img.shape) == 2:
hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])
fig = plt.figure()
        plt.plot(bins[:-1], hist)
fig.show()
def visualization_tests(path='../input_data/tunnel_1.png'):
path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'
img = cv2.imread(path)
visualize_color_intensities(img, out_path=VIZ_PATH)
def experiments(path='../input_data/noisy_segments/honeycomb_1.png'):
img = cv2.imread(path)
img = color_to_gray_operations.luminosity_method(img)
visualize_grayscale_intensities(img, out_path=VIZ_PATH)
experiments()
<|reserved_special_token_1|>
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import cv2
import color_to_gray_operations
VIZ_PATH = '../output_data/visualizations/gray_intensities/'
def visualize_grayscale_intensities(img, out_path):
img_x, img_y = np.mgrid[0:img.shape[0], 0:img.shape[1]]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.
jet, linewidth=0)
plt.savefig(out_path + 'surface.png')
plt.close()
def visualize_color_intensities(color_img, out_path):
b, g, r = cv2.split(color_img)
blue_x, blue_y = np.mgrid[0:b.shape[0], 0:b.shape[1]]
green_x, green_y = np.mgrid[0:g.shape[0], 0:g.shape[1]]
red_x, red_y = np.mgrid[0:r.shape[0], 0:r.shape[1]]
fig_blue = plt.figure()
ax_blue = fig_blue.gca(projection='3d')
ax_blue.plot_surface(blue_x, blue_y, b, rstride=1, cstride=1, cmap=plt.
cm.jet, linewidth=0)
plt.savefig(out_path + 'blue_surface.png')
plt.close()
fig_green = plt.figure()
ax_green = fig_green.gca(projection='3d')
ax_green.plot_surface(green_x, green_y, g, rstride=1, cstride=1, cmap=
plt.cm.jet, linewidth=0)
plt.savefig(out_path + 'green_surface.png')
plt.close()
fig_red = plt.figure()
ax_red = fig_red.gca(projection='3d')
ax_red.plot_surface(red_x, red_y, r, rstride=1, cstride=1, cmap=plt.cm.
jet, linewidth=0)
plt.savefig(out_path + 'red_surface.png')
plt.close()
def visualize_histogram(img):
if len(img.shape) == 2:
hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])
fig = plt.figure()
        plt.plot(bins[:-1], hist)
fig.show()
def visualization_tests(path='../input_data/tunnel_1.png'):
path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'
img = cv2.imread(path)
visualize_color_intensities(img, out_path=VIZ_PATH)
def experiments(path='../input_data/noisy_segments/honeycomb_1.png'):
img = cv2.imread(path)
img = color_to_gray_operations.luminosity_method(img)
visualize_grayscale_intensities(img, out_path=VIZ_PATH)
experiments()
<|reserved_special_token_1|>
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import cv2
import color_to_gray_operations
VIZ_PATH = '../output_data/visualizations/gray_intensities/'
def visualize_grayscale_intensities(img, out_path):
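    # Render the grayscale intensities as a 3D surface over pixel coordinates
    # and save the figure under out_path.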
img_x, img_y = np.mgrid[0: img.shape[0], 0: img.shape[1]]
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.jet,
linewidth=0)
plt.savefig(out_path + 'surface.png')
plt.close()
def visualize_color_intensities(color_img, out_path):
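    # Split the BGR image and save a separate 3D intensity surface per channel.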
b, g, r = cv2.split(color_img)
blue_x, blue_y = np.mgrid[0: b.shape[0], 0: b.shape[1]]
green_x, green_y = np.mgrid[0: g.shape[0], 0: g.shape[1]]
red_x, red_y = np.mgrid[0: r.shape[0], 0: r.shape[1]]
fig_blue = plt.figure()
ax_blue = fig_blue.gca(projection='3d')
ax_blue.plot_surface(blue_x, blue_y, b ,rstride=1, cstride=1, cmap=plt.cm.jet,
linewidth=0)
plt.savefig(out_path + 'blue_surface.png')
plt.close()
fig_green = plt.figure()
ax_green = fig_green.gca(projection='3d')
ax_green.plot_surface(green_x, green_y, g ,rstride=1, cstride=1, cmap=plt.cm.jet,
linewidth=0)
plt.savefig(out_path + 'green_surface.png')
plt.close()
fig_red = plt.figure()
ax_red = fig_red.gca(projection='3d')
ax_red.plot_surface(red_x, red_y, r ,rstride=1, cstride=1, cmap=plt.cm.jet,
linewidth=0)
plt.savefig(out_path + 'red_surface.png')
plt.close()
def visualize_histogram(img):
if len(img.shape) == 2:
hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])
fig = plt.figure()
        plt.plot(bins[:-1], hist)
fig.show()
def visualization_tests(path='../input_data/tunnel_1.png'):
path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'
img = cv2.imread(path)
visualize_color_intensities(img, out_path=VIZ_PATH)
def experiments(path='../input_data/noisy_segments/honeycomb_1.png'):
img = cv2.imread(path)
img = color_to_gray_operations.luminosity_method(img)
visualize_grayscale_intensities(img, out_path=VIZ_PATH)
experiments()
#visualization_tests()
|
flexible
|
{
"blob_id": "21fec6d307b928a295f2ffbf267456f9cd9ea722",
"index": 9105,
"step-1": "<mask token>\n\n\ndef visualize_grayscale_intensities(img, out_path):\n img_x, img_y = np.mgrid[0:img.shape[0], 0:img.shape[1]]\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.\n jet, linewidth=0)\n plt.savefig(out_path + 'surface.png')\n plt.close()\n\n\n<mask token>\n\n\ndef visualize_histogram(img):\n if len(img.shape) == 2:\n hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])\n fig = plt.figure()\n fig.plot(bins, hist)\n fig.show()\n\n\ndef visualization_tests(path='../input_data/tunnel_1.png'):\n path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'\n img = cv2.imread(path)\n visualize_color_intensities(img, out_path=VIZ_PATH)\n\n\ndef experiments(path='../input_data/noisy_segments/honeycomb_1.png'):\n img = cv2.imread(path)\n img = color_to_gray_operations.luminosity_method(img)\n visualize_grayscale_intensities(img, out_path=VIZ_PATH)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef visualize_grayscale_intensities(img, out_path):\n img_x, img_y = np.mgrid[0:img.shape[0], 0:img.shape[1]]\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.\n jet, linewidth=0)\n plt.savefig(out_path + 'surface.png')\n plt.close()\n\n\ndef visualize_color_intensities(color_img, out_path):\n b, g, r = cv2.split(color_img)\n blue_x, blue_y = np.mgrid[0:b.shape[0], 0:b.shape[1]]\n green_x, green_y = np.mgrid[0:g.shape[0], 0:g.shape[1]]\n red_x, red_y = np.mgrid[0:r.shape[0], 0:r.shape[1]]\n fig_blue = plt.figure()\n ax_blue = fig_blue.gca(projection='3d')\n ax_blue.plot_surface(blue_x, blue_y, b, rstride=1, cstride=1, cmap=plt.\n cm.jet, linewidth=0)\n plt.savefig(out_path + 'blue_surface.png')\n plt.close()\n fig_green = plt.figure()\n ax_green = fig_green.gca(projection='3d')\n ax_green.plot_surface(green_x, green_y, g, rstride=1, cstride=1, cmap=\n plt.cm.jet, linewidth=0)\n plt.savefig(out_path + 'green_surface.png')\n plt.close()\n fig_red = plt.figure()\n ax_red = fig_red.gca(projection='3d')\n ax_red.plot_surface(red_x, red_y, r, rstride=1, cstride=1, cmap=plt.cm.\n jet, linewidth=0)\n plt.savefig(out_path + 'red_surface.png')\n plt.close()\n\n\ndef visualize_histogram(img):\n if len(img.shape) == 2:\n hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])\n fig = plt.figure()\n fig.plot(bins, hist)\n fig.show()\n\n\ndef visualization_tests(path='../input_data/tunnel_1.png'):\n path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'\n img = cv2.imread(path)\n visualize_color_intensities(img, out_path=VIZ_PATH)\n\n\ndef experiments(path='../input_data/noisy_segments/honeycomb_1.png'):\n img = cv2.imread(path)\n img = color_to_gray_operations.luminosity_method(img)\n visualize_grayscale_intensities(img, out_path=VIZ_PATH)\n\n\nexperiments()\n",
"step-3": "<mask token>\nVIZ_PATH = '../output_data/visualizations/gray_intensities/'\n\n\ndef visualize_grayscale_intensities(img, out_path):\n img_x, img_y = np.mgrid[0:img.shape[0], 0:img.shape[1]]\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.\n jet, linewidth=0)\n plt.savefig(out_path + 'surface.png')\n plt.close()\n\n\ndef visualize_color_intensities(color_img, out_path):\n b, g, r = cv2.split(color_img)\n blue_x, blue_y = np.mgrid[0:b.shape[0], 0:b.shape[1]]\n green_x, green_y = np.mgrid[0:g.shape[0], 0:g.shape[1]]\n red_x, red_y = np.mgrid[0:r.shape[0], 0:r.shape[1]]\n fig_blue = plt.figure()\n ax_blue = fig_blue.gca(projection='3d')\n ax_blue.plot_surface(blue_x, blue_y, b, rstride=1, cstride=1, cmap=plt.\n cm.jet, linewidth=0)\n plt.savefig(out_path + 'blue_surface.png')\n plt.close()\n fig_green = plt.figure()\n ax_green = fig_green.gca(projection='3d')\n ax_green.plot_surface(green_x, green_y, g, rstride=1, cstride=1, cmap=\n plt.cm.jet, linewidth=0)\n plt.savefig(out_path + 'green_surface.png')\n plt.close()\n fig_red = plt.figure()\n ax_red = fig_red.gca(projection='3d')\n ax_red.plot_surface(red_x, red_y, r, rstride=1, cstride=1, cmap=plt.cm.\n jet, linewidth=0)\n plt.savefig(out_path + 'red_surface.png')\n plt.close()\n\n\ndef visualize_histogram(img):\n if len(img.shape) == 2:\n hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])\n fig = plt.figure()\n fig.plot(bins, hist)\n fig.show()\n\n\ndef visualization_tests(path='../input_data/tunnel_1.png'):\n path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'\n img = cv2.imread(path)\n visualize_color_intensities(img, out_path=VIZ_PATH)\n\n\ndef experiments(path='../input_data/noisy_segments/honeycomb_1.png'):\n img = cv2.imread(path)\n img = color_to_gray_operations.luminosity_method(img)\n visualize_grayscale_intensities(img, out_path=VIZ_PATH)\n\n\nexperiments()\n",
"step-4": "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport cv2\nimport color_to_gray_operations\nVIZ_PATH = '../output_data/visualizations/gray_intensities/'\n\n\ndef visualize_grayscale_intensities(img, out_path):\n img_x, img_y = np.mgrid[0:img.shape[0], 0:img.shape[1]]\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.\n jet, linewidth=0)\n plt.savefig(out_path + 'surface.png')\n plt.close()\n\n\ndef visualize_color_intensities(color_img, out_path):\n b, g, r = cv2.split(color_img)\n blue_x, blue_y = np.mgrid[0:b.shape[0], 0:b.shape[1]]\n green_x, green_y = np.mgrid[0:g.shape[0], 0:g.shape[1]]\n red_x, red_y = np.mgrid[0:r.shape[0], 0:r.shape[1]]\n fig_blue = plt.figure()\n ax_blue = fig_blue.gca(projection='3d')\n ax_blue.plot_surface(blue_x, blue_y, b, rstride=1, cstride=1, cmap=plt.\n cm.jet, linewidth=0)\n plt.savefig(out_path + 'blue_surface.png')\n plt.close()\n fig_green = plt.figure()\n ax_green = fig_green.gca(projection='3d')\n ax_green.plot_surface(green_x, green_y, g, rstride=1, cstride=1, cmap=\n plt.cm.jet, linewidth=0)\n plt.savefig(out_path + 'green_surface.png')\n plt.close()\n fig_red = plt.figure()\n ax_red = fig_red.gca(projection='3d')\n ax_red.plot_surface(red_x, red_y, r, rstride=1, cstride=1, cmap=plt.cm.\n jet, linewidth=0)\n plt.savefig(out_path + 'red_surface.png')\n plt.close()\n\n\ndef visualize_histogram(img):\n if len(img.shape) == 2:\n hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])\n fig = plt.figure()\n fig.plot(bins, hist)\n fig.show()\n\n\ndef visualization_tests(path='../input_data/tunnel_1.png'):\n path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'\n img = cv2.imread(path)\n visualize_color_intensities(img, out_path=VIZ_PATH)\n\n\ndef experiments(path='../input_data/noisy_segments/honeycomb_1.png'):\n img = cv2.imread(path)\n img = color_to_gray_operations.luminosity_method(img)\n visualize_grayscale_intensities(img, out_path=VIZ_PATH)\n\n\nexperiments()\n",
"step-5": "import os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport cv2\n\nimport color_to_gray_operations\n\n\nVIZ_PATH = '../output_data/visualizations/gray_intensities/'\n\n\ndef visualize_grayscale_intensities(img, out_path):\n img_x, img_y = np.mgrid[0: img.shape[0], 0: img.shape[1]]\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_surface(img_x, img_y, img, rstride=1, cstride=1, cmap=plt.cm.jet,\n linewidth=0)\n plt.savefig(out_path + 'surface.png')\n plt.close()\n\n\ndef visualize_color_intensities(color_img, out_path):\n b, g, r = cv2.split(color_img)\n\n blue_x, blue_y = np.mgrid[0: b.shape[0], 0: b.shape[1]]\n green_x, green_y = np.mgrid[0: g.shape[0], 0: g.shape[1]]\n red_x, red_y = np.mgrid[0: r.shape[0], 0: r.shape[1]]\n\n fig_blue = plt.figure()\n ax_blue = fig_blue.gca(projection='3d')\n ax_blue.plot_surface(blue_x, blue_y, b ,rstride=1, cstride=1, cmap=plt.cm.jet,\n linewidth=0)\n plt.savefig(out_path + 'blue_surface.png')\n plt.close()\n\n fig_green = plt.figure()\n ax_green = fig_green.gca(projection='3d')\n ax_green.plot_surface(green_x, green_y, g ,rstride=1, cstride=1, cmap=plt.cm.jet,\n linewidth=0)\n plt.savefig(out_path + 'green_surface.png')\n plt.close()\n\n fig_red = plt.figure()\n ax_red = fig_red.gca(projection='3d')\n ax_red.plot_surface(red_x, red_y, r ,rstride=1, cstride=1, cmap=plt.cm.jet,\n linewidth=0)\n plt.savefig(out_path + 'red_surface.png')\n plt.close()\n\n\ndef visualize_histogram(img):\n if len(img.shape) == 2:\n hist, bins = np.histogram(img, bins=[x for x in range(0, 257)])\n fig = plt.figure()\n fig.plot(bins, hist)\n fig.show()\n\n\ndef visualization_tests(path='../input_data/tunnel_1.png'):\n path = '/Users/adamcatto/src/L0-Smoothing/src/output_tunnels/tunnel_1.png'\n img = cv2.imread(path)\n visualize_color_intensities(img, out_path=VIZ_PATH)\n\n\ndef experiments(path='../input_data/noisy_segments/honeycomb_1.png'):\n img = cv2.imread(path)\n img = color_to_gray_operations.luminosity_method(img)\n visualize_grayscale_intensities(img, out_path=VIZ_PATH)\n\n\nexperiments()\n#visualization_tests()",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
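Note on the record above: as stored, visualize_histogram calls fig.plot(...), but a matplotlib Figure has no plot method, and np.histogram returns one more bin edge than it returns counts, so the call would fail twice over. A minimal corrected sketch of the same helper:

import numpy as np
import matplotlib.pyplot as plt

def visualize_histogram(img):
    # Only grayscale (2-D) arrays are handled, as in the original helper.
    if len(img.shape) == 2:
        hist, bin_edges = np.histogram(img, bins=list(range(0, 257)))
        # bin_edges has len(hist) + 1 entries; plot counts against left edges.
        plt.plot(bin_edges[:-1], hist)
        plt.show()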
#!/usr/bin/env python
#coding:utf-8
import sys
import time
reload(sys)
sys.setdefaultencoding('utf8')
from bs4 import BeautifulSoup
import requests
import csv
import codecs
import xlwt
#from word_power_dict import get_url_dict
#from Vocabulary_Toefl_MP3s_5000_Words_Memory_Course_dict import get_url_dict
#from new_parade_1_dict import get_url_dict
#from new_parade_1_dict import name as xlsname
#from new_parade_2.new_parade_2_dict import get_url_dict
#from new_parade_2.new_parade_2_dict import name as xlsname
#from new_parade_3.new_parade_3_dict import get_url_dict
#from new_parade_3.new_parade_3_dict import name as xlsname
from new_parade_4.new_parade_4_dict import get_url_dict
from new_parade_4.new_parade_4_dict import name as xlsname
def check_link(url):
try:
r = requests.get(url)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except Exception as e:
print '----------'
print e
print '----------'
def is_alphabet(uchar):
if (u'\u0041' <= uchar <= u'\u005a') or (u'\u0061' <= uchar <= u'\u007a'):
return True
else:
return False
def save_contents(result):
'''result: all the useful result from urls'''
with codecs.open('merriam.csv', 'w', 'utf_8_sig') as f:
writer = csv.writer(f)
for i in range(len(result)):
try:
if is_alphabet(result[i][1][0]):
writer.writerow([result[i][1], result[i][3]])
print("write in line:", i)
except:
print("error in line:{}, contents is:{}".format(i, result[i]))
workbook = xlwt.Workbook(encoding='utf-8')
ENGLISH_WORD, CHINESE_TRANSLATE = (0, 1)
def write_sheet(unit_info, result):
sheet = workbook.add_sheet(unit_info, cell_overwrite_ok=True)
begin_row = 0
for i in range(len(result)):
try:
if is_alphabet(result[i][1][0]):
sheet.write(begin_row, ENGLISH_WORD, label=result[i][1])
sheet.write(begin_row, CHINESE_TRANSLATE, label=result[i][3])
print("write in line:", i)
begin_row += 1
except:
print("error in line:{}, contents is:{}".format(i, result[i]))
def save_xls(name):
workbook.save(name)
def get_contents(urls):
result = []
for one_url in urls:
content = check_link(one_url)
soup = BeautifulSoup(content, 'lxml')
trs = soup.find_all('tr')
for tr in trs:
ui = []
for td in tr:
ui.append(td.string)
result.append(ui)
time.sleep(1)
return result
'''
def get_urls(url_content, root_url="https://www.shanbay.com"):
ulist = []
soup = BeautifulSoup(url_content, 'lxml')
urls = soup.find_all('a')
for url in urls:
try:
if url.string.startswith('【无老师7天TOEFL】List'):
ulist.append(root_url + url.get('href'))
for j in range(2, 11):
extend_url = root_url + url.get('href') + '?page=' + str(j)
ulist.append(extend_url)
except:
pass
return ulist
'''
def main():
test_url = 'https://www.shanbay.com/wordlist/107125/213385/?page=1'
url_dict = get_url_dict()
for unit_info, url_list in url_dict.items():
result = get_contents(url_list)
write_sheet(unit_info, result)
save_xls(xlsname+'.xls')
main()
|
normal
|
{
"blob_id": "fab1d2270ae906ca92cf3be2c2d9767737ea6083",
"index": 6364,
"step-1": "#!/usr/bin/env python\n#coding:utf-8\n\nimport sys\nimport time\nreload(sys)\nsys.setdefaultencoding('utf8')\nfrom bs4 import BeautifulSoup\nimport requests\nimport csv\nimport codecs\nimport xlwt\n#from word_power_dict import get_url_dict\n#from Vocabulary_Toefl_MP3s_5000_Words_Memory_Course_dict import get_url_dict\n#from new_parade_1_dict import get_url_dict\n#from new_parade_1_dict import name as xlsname\n\n#from new_parade_2.new_parade_2_dict import get_url_dict\n#from new_parade_2.new_parade_2_dict import name as xlsname\n\n\n#from new_parade_3.new_parade_3_dict import get_url_dict\n#from new_parade_3.new_parade_3_dict import name as xlsname\n\n\nfrom new_parade_4.new_parade_4_dict import get_url_dict\nfrom new_parade_4.new_parade_4_dict import name as xlsname\n\ndef check_link(url):\n try:\n r = requests.get(url)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except Exception as e:\n print '----------'\n print e\n print '----------'\n\n\ndef is_alphabet(uchar):\n if (u'\\u0041' <= uchar <= u'\\u005a') or (u'\\u0061' <= uchar <= u'\\u007a'):\n return True\n else:\n return False\n\n\ndef save_contents(result):\n '''result: all the useful result from urls'''\n with codecs.open('merriam.csv', 'w', 'utf_8_sig') as f:\n writer = csv.writer(f)\n for i in range(len(result)):\n try:\n if is_alphabet(result[i][1][0]):\n writer.writerow([result[i][1], result[i][3]])\n print(\"write in line:\", i)\n except:\n print(\"error in line:{}, contents is:{}\".format(i, result[i]))\n\n\nworkbook = xlwt.Workbook(encoding='utf-8')\n\nENGLISH_WORD, CHINESE_TRANSLATE = (0, 1)\n\n\ndef write_sheet(unit_info, result):\n sheet = workbook.add_sheet(unit_info, cell_overwrite_ok=True)\n begin_row = 0\n for i in range(len(result)):\n try:\n if is_alphabet(result[i][1][0]):\n sheet.write(begin_row, ENGLISH_WORD, label=result[i][1])\n sheet.write(begin_row, CHINESE_TRANSLATE, label=result[i][3])\n print(\"write in line:\", i)\n begin_row += 1\n except:\n print(\"error in line:{}, contents is:{}\".format(i, result[i]))\n\n\n\n\ndef save_xls(name):\n workbook.save(name)\n\n\ndef get_contents(urls):\n result = []\n for one_url in urls:\n content = check_link(one_url)\n soup = BeautifulSoup(content, 'lxml')\n trs = soup.find_all('tr')\n for tr in trs:\n ui = []\n for td in tr:\n ui.append(td.string)\n result.append(ui)\n time.sleep(1)\n return result\n\n\n'''\ndef get_urls(url_content, root_url=\"https://www.shanbay.com\"): \n ulist = []\n soup = BeautifulSoup(url_content, 'lxml')\n urls = soup.find_all('a')\n for url in urls:\n try:\n if url.string.startswith('【无老师7天TOEFL】List'):\n ulist.append(root_url + url.get('href'))\n for j in range(2, 11):\n extend_url = root_url + url.get('href') + '?page=' + str(j)\n ulist.append(extend_url)\n except:\n pass\n return ulist\n'''\n\n\ndef main():\n test_url = 'https://www.shanbay.com/wordlist/107125/213385/?page=1'\n url_dict = get_url_dict()\n for unit_info, url_list in url_dict.items():\n result = get_contents(url_list)\n write_sheet(unit_info, result)\n\n save_xls(xlsname+'.xls')\n\n\n\n\nmain()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
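The per-book *_dict modules imported above are not part of this record; judging from how main() iterates get_url_dict().items(), each module presumably maps a worksheet/unit name to its list of paginated Shanbay wordlist URLs. A hypothetical sketch (module name, unit names, and URLs are illustrative only):

# Hypothetical shape of new_parade_4/new_parade_4_dict.py -- assumed, not the actual module.
name = 'new_parade_4'

def get_url_dict():
    base = 'https://www.shanbay.com/wordlist/107125/213385/'
    return {
        'Unit 1': [base + '?page=%d' % page for page in range(1, 4)],
        # ... one entry per unit, each with its paginated URLs ...
    }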
'''
Inspection of the network with unlabelled data
'''
import numpy as np
import matplotlib.pyplot as plt
from main import IMG_SIZE, MODEL_NAME, model
model.load(MODEL_NAME)
''' COMMENT OUT FOLLOWING AS APPROPRIATE '''
# if you need to create the data:
# test_data = process_test_data()
# if you already have some saved:
test_data = np.load('test_data.npy')
fig = plt.figure()
# plot the first 12 test-data entries and the predicted class for each
for num, data in enumerate(test_data[:12]):
# cat: [1,0]
# dog: [0,1]
img_num = data[1]
img_data = data[0]
y = fig.add_subplot(3, 4, num+1)
orig = img_data
data = img_data.reshape(IMG_SIZE, IMG_SIZE, 1)
model_out = model.predict([data])[0]
if np.argmax(model_out) == 1:
str_label = 'Dog'
else:
str_label = 'Cat'
y.imshow(orig, cmap='gray')
plt.title(str_label)
y.axes.get_xaxis().set_visible(False)
y.axes.get_yaxis().set_visible(False)
plt.show()
|
normal
|
{
"blob_id": "02d7022c7d864354379009577d64109601190998",
"index": 7034,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmodel.load(MODEL_NAME)\n<mask token>\nfor num, data in enumerate(test_data[:12]):\n img_num = data[1]\n img_data = data[0]\n y = fig.add_subplot(3, 4, num + 1)\n orig = img_data\n data = img_data.reshape(IMG_SIZE, IMG_SIZE, 1)\n model_out = model.predict([data])[0]\n if np.argmax(model_out) == 1:\n str_label = 'Dog'\n else:\n str_label = 'Cat'\n y.imshow(orig, cmap='gray')\n plt.title(str_label)\n y.axes.get_xaxis().set_visible(False)\n y.axes.get_yaxis().set_visible(False)\nplt.show()\n",
"step-3": "<mask token>\nmodel.load(MODEL_NAME)\n<mask token>\ntest_data = np.load('test_data.npy')\nfig = plt.figure()\nfor num, data in enumerate(test_data[:12]):\n img_num = data[1]\n img_data = data[0]\n y = fig.add_subplot(3, 4, num + 1)\n orig = img_data\n data = img_data.reshape(IMG_SIZE, IMG_SIZE, 1)\n model_out = model.predict([data])[0]\n if np.argmax(model_out) == 1:\n str_label = 'Dog'\n else:\n str_label = 'Cat'\n y.imshow(orig, cmap='gray')\n plt.title(str_label)\n y.axes.get_xaxis().set_visible(False)\n y.axes.get_yaxis().set_visible(False)\nplt.show()\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom main import IMG_SIZE, MODEL_NAME, model\nmodel.load(MODEL_NAME)\n<mask token>\ntest_data = np.load('test_data.npy')\nfig = plt.figure()\nfor num, data in enumerate(test_data[:12]):\n img_num = data[1]\n img_data = data[0]\n y = fig.add_subplot(3, 4, num + 1)\n orig = img_data\n data = img_data.reshape(IMG_SIZE, IMG_SIZE, 1)\n model_out = model.predict([data])[0]\n if np.argmax(model_out) == 1:\n str_label = 'Dog'\n else:\n str_label = 'Cat'\n y.imshow(orig, cmap='gray')\n plt.title(str_label)\n y.axes.get_xaxis().set_visible(False)\n y.axes.get_yaxis().set_visible(False)\nplt.show()\n",
"step-5": "'''\r\nInspection of the network with unlabelled data\r\n'''\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom main import IMG_SIZE, MODEL_NAME, model\r\n\r\nmodel.load(MODEL_NAME)\r\n\r\n''' COMMENT OUT FOLLOWING AS APPROPRIATE '''\r\n# if you need to create the data:\r\n# test_data = process_test_data()\r\n# if you already have some saved:\r\ntest_data = np.load('test_data.npy')\r\n\r\nfig = plt.figure()\r\n\r\n# plot last 12 of test data and predicted class\r\nfor num, data in enumerate(test_data[:12]):\r\n # cat: [1,0]\r\n # dog: [0,1]\r\n\r\n img_num = data[1]\r\n img_data = data[0]\r\n\r\n y = fig.add_subplot(3, 4, num+1)\r\n orig = img_data\r\n data = img_data.reshape(IMG_SIZE, IMG_SIZE, 1)\r\n model_out = model.predict([data])[0]\r\n\r\n if np.argmax(model_out) == 1:\r\n str_label = 'Dog'\r\n else:\r\n str_label = 'Cat'\r\n\r\n y.imshow(orig, cmap='gray')\r\n plt.title(str_label)\r\n y.axes.get_xaxis().set_visible(False)\r\n y.axes.get_yaxis().set_visible(False)\r\nplt.show()\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
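The commented-out process_test_data() above is defined elsewhere (presumably alongside main); given that each entry is later unpacked as data[0] (image array) and data[1] (id), a plausible reconstruction looks like this (directory layout and filenames are assumptions):

import os
import cv2
import numpy as np
from main import IMG_SIZE  # assumed import, matching the record above

def process_test_data(test_dir='test'):
    # Hypothetical sketch: grayscale and resize each test image, keeping its
    # numeric id so predictions can be matched back to files.
    data = []
    for fname in os.listdir(test_dir):
        img_num = fname.split('.')[0]
        img = cv2.imread(os.path.join(test_dir, fname), cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        data.append([np.array(img), img_num])
    np.save('test_data.npy', data)
    return data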
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def start():
try:
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
dest_path = winshell.desktop() + '\\Spyware\\Output'
dest_path = dest_path.replace('\\', '/') + '/outputaudio.mp3'
WAVE_OUTPUT_FILENAME = dest_path
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=
True, frames_per_buffer=CHUNK)
frames = []
while True:
data = stream.read(CHUNK)
frames.append(data)
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
stream.stop_stream()
stream.close()
p.terminate()
except Exception:
print('Failed')
<|reserved_special_token_1|>
import pyaudio
import wave
import winshell
<|reserved_special_token_0|>
def start():
try:
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
dest_path = winshell.desktop() + '\\Spyware\\Output'
dest_path = dest_path.replace('\\', '/') + '/outputaudio.mp3'
WAVE_OUTPUT_FILENAME = dest_path
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=
True, frames_per_buffer=CHUNK)
frames = []
while True:
data = stream.read(CHUNK)
frames.append(data)
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
stream.stop_stream()
stream.close()
p.terminate()
except Exception:
print('Failed')
<|reserved_special_token_1|>
#!/usr/bin/env python
import pyaudio
import wave
import winshell
"""
This script accesses the laptop's microphone using the pyaudio library, opens a stream to record audio,
and writes the captured data to an output file (WAV data, despite the .mp3 extension)
"""
def start():
try:
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
dest_path = winshell.desktop() + r"\Spyware\Output"
dest_path = dest_path.replace('\\','/') + "/outputaudio.mp3"
WAVE_OUTPUT_FILENAME = dest_path
p = pyaudio.PyAudio()
# open stream
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
frames = []
# start streaming and writing to mp3 file
while True:
data = stream.read(CHUNK)
frames.append(data)
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
stream.stop_stream()
stream.close()
p.terminate()
except Exception:
print("Failed")
|
flexible
|
{
"blob_id": "bbbbf0e1bbd7ead034d8cd88ee6a09a61cde7803",
"index": 3463,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef start():\n try:\n CHUNK = 1024\n FORMAT = pyaudio.paInt16\n CHANNELS = 2\n RATE = 44100\n dest_path = winshell.desktop() + '\\\\Spyware\\\\Output'\n dest_path = dest_path.replace('\\\\', '/') + '/outputaudio.mp3'\n WAVE_OUTPUT_FILENAME = dest_path\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=\n True, frames_per_buffer=CHUNK)\n frames = []\n while True:\n data = stream.read(CHUNK)\n frames.append(data)\n wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n stream.stop_stream()\n stream.close()\n p.terminate()\n except Exception:\n print('Failed')\n",
"step-3": "import pyaudio\nimport wave\nimport winshell\n<mask token>\n\n\ndef start():\n try:\n CHUNK = 1024\n FORMAT = pyaudio.paInt16\n CHANNELS = 2\n RATE = 44100\n dest_path = winshell.desktop() + '\\\\Spyware\\\\Output'\n dest_path = dest_path.replace('\\\\', '/') + '/outputaudio.mp3'\n WAVE_OUTPUT_FILENAME = dest_path\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=\n True, frames_per_buffer=CHUNK)\n frames = []\n while True:\n data = stream.read(CHUNK)\n frames.append(data)\n wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n stream.stop_stream()\n stream.close()\n p.terminate()\n except Exception:\n print('Failed')\n",
"step-4": "#!/usr/bin/env python\nimport pyaudio\nimport wave\nimport winshell\n\n\"\"\"\nThis script accesses the Laptop's microphone using the library pyaudio and opens a stream to record the voice\nand writes it to an mp3 file\n\"\"\"\ndef start():\n try:\n CHUNK = 1024\n FORMAT = pyaudio.paInt16\n CHANNELS = 2\n RATE = 44100\n \n dest_path = winshell.desktop() + r\"\\Spyware\\Output\"\n dest_path = dest_path.replace('\\\\','/') + \"/outputaudio.mp3\"\n WAVE_OUTPUT_FILENAME = dest_path\n\n p = pyaudio.PyAudio()\n\n # open stream\n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\n frames = []\n\n # start streaming and writing to mp3 file\n while True:\n data = stream.read(CHUNK)\n frames.append(data)\n wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n except Exception:\n print(\"Failed\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
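The recorder above never leaves its while True loop (and rewrites the output file on every chunk), so the stop_stream/close calls after it are unreachable. For a bounded capture, the number of reads follows from RATE / CHUNK * seconds; a self-contained sketch with the same stream settings:

import pyaudio
import wave

CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 10  # bounded duration instead of an infinite loop

p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE,
                input=True, frames_per_buffer=CHUNK)

frames = []
# 44100 frames/s divided by 1024 frames per read gives ~43 reads per second.
for _ in range(int(RATE / CHUNK * RECORD_SECONDS)):
    frames.append(stream.read(CHUNK))

stream.stop_stream()
stream.close()
p.terminate()

# The wave module writes WAV data, so a .wav name matches the contents.
wf = wave.open('output.wav', 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()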
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def raiting_validator(value):
if value < 1 or value > 10:
raise ValidationError('%s is not a correct rating!' % value)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def year_validator(value):
if value < 1 or value > timezone.now().year:
raise ValidationError('%s is not a correct year!' % value)
def raiting_validator(value):
if value < 1 or value > 10:
raise ValidationError('%s is not a correct rating!' % value)
<|reserved_special_token_1|>
from django.core.exceptions import ValidationError
from django.utils import timezone
def year_validator(value):
if value < 1 or value > timezone.now().year:
raise ValidationError('%s is not a correct year!' % value)
def raiting_validator(value):
if value < 1 or value > 10:
raise ValidationError('%s is not a correct rating!' % value)
<|reserved_special_token_1|>
from django.core.exceptions import ValidationError
from django.utils import timezone
def year_validator(value):
if value < 1 or value > timezone.now().year:
raise ValidationError(
('%s is not a correct year!' % value)
)
def raiting_validator(value):
if value < 1 or value > 10:
raise ValidationError(
('%s is not a correct rating!' % value)
)
|
flexible
|
{
"blob_id": "7a6d5309580b673413f57047e631a08e61e837cf",
"index": 4447,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef raiting_validator(value):\n if value < 1 or value > 10:\n raise ValidationError('%s is not a caorrect raiting!' % value)\n",
"step-3": "<mask token>\n\n\ndef year_validator(value):\n if value < 1 or value > timezone.now().year:\n raise ValidationError('%s is not a correct year!' % value)\n\n\ndef raiting_validator(value):\n if value < 1 or value > 10:\n raise ValidationError('%s is not a caorrect raiting!' % value)\n",
"step-4": "from django.core.exceptions import ValidationError\nfrom django.utils import timezone\n\n\ndef year_validator(value):\n if value < 1 or value > timezone.now().year:\n raise ValidationError('%s is not a correct year!' % value)\n\n\ndef raiting_validator(value):\n if value < 1 or value > 10:\n raise ValidationError('%s is not a caorrect raiting!' % value)\n",
"step-5": "from django.core.exceptions import ValidationError\nfrom django.utils import timezone\n\n\ndef year_validator(value):\n if value < 1 or value > timezone.now().year:\n raise ValidationError(\n ('%s is not a correct year!' % value)\n )\n\n\ndef raiting_validator(value):\n if value < 1 or value > 10:\n raise ValidationError(\n ('%s is not a caorrect raiting!' % value)\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
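A minimal sketch of attaching the validators from the record above to a Django model; the model, field names, and import path are illustrative:

from django.db import models
from .validators import year_validator, raiting_validator  # assumed module path

class Movie(models.Model):
    title = models.CharField(max_length=200)
    year = models.PositiveIntegerField(validators=[year_validator])
    rating = models.PositiveIntegerField(validators=[raiting_validator])

Field validators run during full_clean() and form validation, not on a bare save().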
<|reserved_special_token_0|>
class DateFormat(TextFormat):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
try:
text = GenDate(storedText).dateStr(self.format)
except GenDateError:
text = _errorStr
return TextFormat.formatOutput(self, text, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
format = globalref.options.strData('EditDateFormat', True)
try:
return GenDate(storedText).dateStr(format), True
except GenDateError:
return storedText, not storedText
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
format = globalref.options.strData('EditDateFormat', True)
try:
return repr(GenDate().setFromStr(editText, format)), True
except GenDateError:
return editText, not editText and not self.isRequired
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and any annotation text"""
format = globalref.options.strData('EditDateFormat', True)
today = GenDate().dateStr(format)
yesterday = (GenDate() - 1).dateStr(format)
tomorrow = (GenDate() + 1).dateStr(format)
return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _(
'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]
def getInitDefault(self):
"""Return initial stored value for new nodes"""
if self.initDefault in DateFormat.dateStampStrings:
return GenDate().dateStr()
return TextFormat.getInitDefault(self)
def setInitDefault(self, editText):
"""Set initial value from editor version using edit format option"""
if editText in DateFormat.dateStampStrings:
self.initDefault = DateFormat.dateStampStrings[0]
else:
TextFormat.setInitDefault(self, editText)
def getEditInitDefault(self):
"""Return initial value in edit format, found in edit format option"""
if self.initDefault in DateFormat.dateStampStrings:
return DateFormat.dateStampStrings[1]
return TextFormat.getEditInitDefault(self)
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
choices = [entry[0] for entry in self.getEditChoices()]
choices.insert(0, DateFormat.dateStampStrings[1])
return choices
def adjustedCompareValue(self, value):
"""Return conditional comparison value with real-time adjustments,
used for date and time types' 'now' value"""
if value.startswith('now'):
return repr(GenDate())
return value
class TimeFormat(TextFormat):
"""Holds format info for a time field"""
typeName = 'Time'
sortSequence = 6
defaultFormat = u'h:MM:SS aa'
timeStampStrings = 'Now', _('Now', 'time stamp setting')
formatMenuList = [(u'%s\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),
'H'), (u'%s\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (
u'%s\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (
u'%s\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (
u'%s\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\t%s' %
(_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\t%s' % (_(
'Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\t%s' % (_(
'Second (2 digits)'), 'SS'), 'SS'), (u'%s\t%s' % (_(
'Fractional Seconds'), 's'), 's'), None, (u'%s\t%s' % (_('AM/PM'),
'AA'), 'AA'), (u'%s\t%s' % (_('am/pm'), 'aa'), 'aa')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
try:
text = GenTime(storedText).timeStr(self.format)
except GenTimeError:
text = _errorStr
return TextFormat.formatOutput(self, text, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
format = globalref.options.strData('EditTimeFormat', True)
try:
return GenTime(storedText).timeStr(format), True
except GenTimeError:
return storedText, not storedText
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
try:
return repr(GenTime(editText)), True
except GenTimeError:
return editText, not editText and not self.isRequired
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and annotated text"""
format = globalref.options.strData('EditTimeFormat', True)
now = GenTime().timeStr(format)
choices = [(now, '(%s)' % _('now'))]
for hr in (6, 9, 12, 15, 18, 21, 0):
time = GenTime((hr, 0)).timeStr(format)
choices.append((time, ''))
return choices
def getInitDefault(self):
"""Return initial stored value for new nodes"""
if self.initDefault in TimeFormat.timeStampStrings:
return GenTime().timeStr()
return TextFormat.getInitDefault(self)
def setInitDefault(self, editText):
"""Set initial value from editor version using edit format option"""
if editText in TimeFormat.timeStampStrings:
self.initDefault = TimeFormat.timeStampStrings[0]
else:
TextFormat.setInitDefault(self, editText)
def getEditInitDefault(self):
"""Return initial value in edit format, found in edit format option"""
if self.initDefault in TimeFormat.timeStampStrings:
return TimeFormat.timeStampStrings[1]
return TextFormat.getEditInitDefault(self)
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
choices = [entry[0] for entry in self.getEditChoices()]
choices.insert(0, TimeFormat.timeStampStrings[1])
return choices
def adjustedCompareValue(self, value):
"""Return conditional comparison value with real-time adjustments,
used for date and time types' 'now' value"""
if value.startswith('now'):
return repr(GenTime())
return value
class BooleanFormat(ChoiceFormat):
"""Holds format info for a bool field"""
typeName = 'Boolean'
sortSequence = 1
defaultFormat = _('yes/no')
formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(
'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),
None, ('1/0', '1/0')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
ChoiceFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
if storedText not in self.formatList:
try:
storedText = GenBoolean(storedText).boolStr(self.format)
except GenBooleanError:
storedText = _errorStr
return TextFormat.formatOutput(self, storedText, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
if storedText in self.formatList:
return storedText, True
try:
return GenBoolean(storedText).boolStr(self.format), True
except GenBooleanError:
return storedText, not storedText
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
try:
return repr(GenBoolean(editText)), True
except GenBooleanError:
if editText in self.formatList:
return editText, True
return editText, not editText and not self.isRequired
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
try:
return repr(GenBoolean(storedText))
except GenBooleanError:
return ''
class UniqueIDFormat(TextFormat):
"""An unique ID automatically generated for new nodes"""
typeName = 'UniqueID'
sortSequence = 10
formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')
defaultFormat = u'0001'
formatMenuList = [(u'%s\t%s' % (_('Required Digit'), '0'), '0'), None,
(u'%s\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\t%s' %
(_('Prefix Example'), 'id0100'), 'id0100')]
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def nextValue(self, increment=True):
"""Return the next value for a new node,
increment format if increment is True"""
try:
prefix, numText, suffix = UniqueIDFormat.formatRe.match(self.format
).groups()
except AttributeError:
self.format = UniqueIDFormat.defaultFormat
return self.nextValue(increment)
value = self.format
if increment:
pattern = u'%%s%%0.%dd%%s' % len(numText)
num = int(numText) + 1
self.format = pattern % (prefix, num, suffix)
return value
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
try:
return int(UniqueIDFormat.formatRe.match(storedText).group(2))
except AttributeError:
return 0
class URLFormat(TextFormat):
"""Holds format info for a field with a URL path"""
typeName = 'URL'
sortSequence = 8
htmlOption = False
allowAltLinkText = True
hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')
URLMethod = u'http://'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.html = True
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
if self.useFileInfo:
item = globalref.docRef.fileInfoItem
altText = ''
if self.linkAltField:
field = item.nodeFormat().findField(self.linkAltField)
if field:
altText = field.outputText(item, titleMode, internal)
storedText = item.data.get(self.name, '')
if storedText:
return self.formatOutput(storedText, titleMode, altText, internal)
return ''
def formatOutput(self, storedText, titleMode, altText='', internal=False):
"""Return formatted text, properly escaped and with
a link reference if not in titleMode"""
if titleMode:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = []
for url in paths:
path = url
if not URLFormat.hasMethodRe.match(path):
path = u'%s%s' % (self.URLMethod, path)
path = u'<a href="%s">%s</a>' % (escape(path, treedoc.escDict),
altText or url)
results.append(TextFormat.formatOutput(self, path, titleMode,
internal))
return u'<br />'.join(results)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return (
u'<xsl:for-each select = "./%s">%s<xsl:choose><xsl:when test="contains(., \':\')"><a href="{.}"><xsl:value-of select="."/></a></xsl:when><xsl:otherwise><a href="%s{.}"><xsl:value-of select="."/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'
% (self.name, xslEscape(self.prefix), self.URLMethod,
xslEscape(self.suffix)))
class PathFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'Path'
URLMethod = u'file:///'
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class EmailFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'Email'
URLMethod = u'mailto:'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class InternalLinkFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'InternalLink'
URLMethod = u'#'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class ExecuteLinkFormat(URLFormat):
"""Holds format info for an executable field"""
typeName = 'ExecuteLink'
URLMethod = u'exec:'
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, altText='', internal=False):
"""Return formatted text, properly escaped and with
a link reference if not in titleMode"""
if titleMode or not internal:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = []
for url in paths:
url = TextFormat.formatOutput(self, url, titleMode, internal)
path = url
if not URLFormat.hasMethodRe.match(path):
path = u'%s%s' % (self.URLMethod, path)
results.append(u'<a href="%s">%s</a>' % (escape(path, treedoc.
escDict), altText or url))
return u'<br />'.join(results)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return TextFormat.xslText(self)
class PictureFormat(TextFormat):
"""Holds format info for a field with a link to a picture"""
typeName = 'Picture'
sortSequence = 8
htmlOption = False
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.html = True
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped and with
a link to the picture if not in titleMode"""
if titleMode:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = [('<img src="%s">' % escape(url, treedoc.escDict)) for
url in paths]
return u'<br />'.join(results)
class ParentFormat(TextFormat):
"""Placeholder format for references to specific parents"""
typeName = 'Parent'
def __init__(self, name, parentLevel=1):
TextFormat.__init__(self, name, {})
self.parentLevel = parentLevel
def sepName(self, englishOnly=False):
"""Return name enclosed with {* *} separators"""
name = englishOnly and self.enName or self.name
return u'{*%s%s*}' % (self.parentLevel * '*', name)
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
for num in range(self.parentLevel):
item = item.parent
if not item:
return ''
field = item.nodeFormat().findField(self.name)
if not field:
return ''
return field.outputText(item, titleMode, internal)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="%s%s"/>' % (self.parentLevel * '../',
self.name)
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)
class AncestorFormat(TextFormat):
"""Placeholder format for references to any parent with data"""
typeName = 'Ancestor'
def __init__(self, name):
TextFormat.__init__(self, name, {})
self.parentLevel = 1000
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*?%s*}' % name
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
field = None
while not field:
item = item.parent
if item:
field = item.nodeFormat().findField(self.name)
else:
return ''
return field.outputText(item, titleMode, internal)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="ancestor::*/%s"/>' % self.name
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(ancestor::*/%s)' % self.name
class ChildFormat(TextFormat):
"""Placeholder format for references to a sequence of child data"""
typeName = 'Child'
def __init__(self, name):
TextFormat.__init__(self, name, {})
self.parentLevel = -1
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*&%s*}' % name
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
result = []
for child in item.childList:
field = child.nodeFormat().findField(self.name)
if field:
text = field.outputText(child, titleMode, internal)
if text:
result.append(text)
return globalref.docRef.childFieldSep.join(result)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="child::*/%s"/>' % self.name
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(child::*/%s)' % self.name
class CountFormat(TextFormat):
"""Placeholder format for a count of children at the given level"""
typeName = 'Count'
def __init__(self, name, level):
TextFormat.__init__(self, name, {})
self.parentLevel = -level
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*#%s*}' % name
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
return repr(len(item.descendLevelList(-self.parentLevel)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TextFormat(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, name, attrs={}):
"""Any prefix, suffix, html info in attrs dict"""
self.name = name
self.enName = ''
self.format = attrs.get(u'format', self.defaultFormat)
self.prefix = attrs.get(u'prefix', '')
self.suffix = attrs.get(u'suffix', '')
self.html = attrs.get(u'html', '').startswith('y') and True or False
self.isRequired = attrs.get(u'required', '').startswith('y'
) and True or False
self.hidden = attrs.get(u'hidden', '').startswith('y'
) and True or False
try:
self.numLines = int(attrs.get(u'lines', repr(self.defaultNumLines))
)
except ValueError:
self.numLines = 1
self.initDefault = attrs.get(u'init', '')
self.linkAltField = attrs.get(u'linkalt', '')
self.parentLevel = 0
self.useFileInfo = False
self.showInDialog = True
self.initFormat()
def initFormat(self):
"""Called by base init, after class change or format text change"""
pass
def duplicateSettings(self, otherField):
"""Assign other field's parameters to this field"""
self.name = otherField.name
self.enName = otherField.enName
self.format = otherField.format
self.prefix = otherField.prefix
self.suffix = otherField.suffix
self.html = otherField.html
self.isRequired = otherField.isRequired
self.hidden = otherField.hidden
self.numLines = otherField.numLines
self.initDefault = otherField.initDefault
self.linkAltField = otherField.linkAltField
self.parentLevel = otherField.parentLevel
self.useFileInfo = otherField.useFileInfo
self.showInDialog = otherField.showInDialog
def changeType(self, newType):
"""Change this field's type to newType with default format"""
self.__class__ = globals()[newType + 'Format']
self.format = self.defaultFormat
self.initFormat()
<|reserved_special_token_0|>
def sepName(self, englishOnly=False):
"""Return name enclosed with {* *} separators"""
name = englishOnly and self.enName or self.name
if not self.useFileInfo:
return u'{*%s*}' % name
return u'{*!%s*}' % name
def labelName(self):
"""Return name used for labels - add * for required fields"""
if self.isRequired:
return '%s*' % self.name
return self.name
def writeXml(self):
"""Return text for xml attributes"""
text = u' type="%s"' % self.typeName
if self.format:
text += u' format="%s"' % escape(self.format, treedoc.escDict)
if self.prefix:
text += u' prefix="%s"' % escape(self.prefix, treedoc.escDict)
if self.suffix:
text += u' suffix="%s"' % escape(self.suffix, treedoc.escDict)
if self.html:
text += u' html="y"'
if self.isRequired:
text += u' required="y"'
if self.hidden:
text += u' hidden="y"'
if self.numLines > 1:
text += u' lines="%d"' % self.numLines
if self.initDefault:
text += u' init="%s"' % escape(self.initDefault, treedoc.escDict)
if self.linkAltField:
text += u' linkalt="%s"' % escape(self.linkAltField, treedoc.
escDict)
return text
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
if self.useFileInfo:
item = globalref.docRef.fileInfoItem
storedText = item.data.get(self.name, '')
if storedText:
return self.formatOutput(storedText, titleMode, internal)
return ''
def removeMarkup(self, text):
"""Remove HTML Markup and unescape entities"""
text = TextFormat.stripTagRe.sub('', text)
return unescape(text)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
prefix = self.prefix
suffix = self.suffix
if titleMode:
if self.html:
storedText = self.removeMarkup(storedText)
if globalref.docRef.formHtml:
prefix = self.removeMarkup(prefix)
suffix = self.removeMarkup(suffix)
else:
if not self.html:
storedText = escape(storedText).replace('\n', '<br />')
if not globalref.docRef.formHtml:
prefix = escape(prefix)
suffix = escape(suffix)
return u'%s%s%s' % (prefix, storedText, suffix)
def editText(self, item):
"""Return tuple of this field's text in edit format and bool validity,
using edit format option"""
storedText = item.data.get(self.name, '')
result = self.formatEditText(storedText)
if self.isRequired and not result[0]:
return result[0], False
return result
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
return storedText, True
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
return editText, editText or not self.isRequired
def getInitDefault(self):
"""Return initial stored value for new nodes"""
return self.initDefault
def setInitDefault(self, editText):
"""Set initial value from editor version using edit format option"""
self.initDefault = self.storedText(editText)[0]
def getEditInitDefault(self):
"""Return initial value in edit format, found in edit format option"""
return self.formatEditText(self.initDefault)[0]
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
return []
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
return storedText.lower()
def adjustedCompareValue(self, value):
"""Return conditional comparison value with real-time adjustments,
used for date and time types' 'now' value"""
return value
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return (
u'<xsl:if test="normalize-space(./%s)">%s<xsl:value-of select="./%s"/>%s</xsl:if>'
% (self.name, xslEscape(self.prefix), self.name, xslEscape(
self.suffix)))
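    # Illustrative note (not in the original source): for a field named "cost"
    # with prefix "$" and empty suffix, this method yields the XSL fragment
    #   <xsl:if test="normalize-space(./cost)">$<xsl:value-of select="./cost"/></xsl:if>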
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(./%s)' % self.name
class LongTextFormat(TextFormat):
"""Holds format info for a long text field - Obsolete -
kept for compatibility with old files"""
defaultNumLines = 7
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
class NumberFormat(TextFormat):
"""Holds format info for a number field"""
typeName = 'Number'
sortSequence = 10
defaultFormat = u'#.##'
formatMenuList = [(u'%s\t%s' % (_('Optional Digit'), '#'), '#'), (
u'%s\t%s' % (_('Required Digit'), '0'), '0'), (u'%s\t%s' % (_(
'Digit or Space (external)'), _('<space>')), ' '), None, (u'%s\t%s' %
(_('Decimal Point'), '.'), '.'), (u'%s\t%s' % (_('Decimal Comma'),
','), ','), None, (u'%s\t%s' % (_('Comma Separator'), '\\,'), '\\,'
), (u'%s\t%s' % (_('Dot Separator'), '\\.'), '\\.'), (u'%s\t%s' % (
_('Space Separator (internal)'), _('<space>')), ' '), None, (
u'%s\t%s' % (_('Optional Sign'), '-'), '-'), (u'%s\t%s' % (_(
'Required Sign'), '+'), '+'), None, (u'%s\t%s' % (_(
'Exponent (capital)'), 'E'), 'E'), (u'%s\t%s' % (_(
'Exponent (small)'), 'e'), 'e')]
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
try:
text = GenNumber(storedText).numStr(self.format)
except GenNumberError:
text = _errorStr
return TextFormat.formatOutput(self, text, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using self.format"""
try:
return GenNumber(storedText).numStr(self.format), True
except GenNumberError:
return storedText, not storedText
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using self.format"""
try:
return repr(GenNumber().setFromStr(editText, self.format)), True
except GenNumberError:
return editText, not editText and not self.isRequired
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
try:
return GenNumber(storedText).num
except GenNumberError:
return ''
class ChoiceFormat(TextFormat):
"""Holds format info for a field with one of several text options"""
typeName = 'Choice'
sortSequence = 20
editSep = '/'
defaultFormat = '1/2/3/4'
formatMenuList = [(u'%s\t%s' % (_('Separator'), '/'), '/'), None, (
u'%s\t%s' % (_('"/" Character'), '//'), '//'), None, (u'%s\t%s' % (
_('Example'), '1/2/3/4'), '1/2/3/4')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.formatList = self.splitText(self.format)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
if storedText not in self.formatList:
storedText = _errorStr
return TextFormat.formatOutput(self, storedText, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
if storedText in self.formatList:
return storedText, True
return storedText, not storedText
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
if editText in self.formatList:
return editText, True
return editText, not editText and not self.isRequired
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and any annotation text"""
return [(text, '') for text in self.formatList]
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
return [text for text in self.formatList]
def splitText(self, textStr):
"""Split textStr using editSep, double sep's become char"""
return [text.strip().replace('\x00', self.editSep) for text in
textStr.replace(self.editSep * 2, '\x00').split(self.editSep)]
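# Illustrative note (not in the original source): the doubled-separator escape
# in splitText() lets a choice contain a literal "/", e.g.
#   splitText('a/b//c/d')  ->  ['a', 'b/c', 'd']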
class CombinationFormat(ChoiceFormat):
"""Holds format info for a field of combinations of text options"""
typeName = 'Combination'
outputSepList = ',', ';', ':', '|', '/', '\\', '~'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
ChoiceFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
ChoiceFormat.initFormat(self)
fullFormat = ''.join(self.formatList)
try:
self.sep = [sep for sep in CombinationFormat.outputSepList if
sep not in fullFormat][0] + ' '
except IndexError:
self.sep = CombinationFormat.outputSepList[0] + ' '
def sortedChoices(self, inText):
"""Return tuple of choices from inText sorted like format and
True if all splits are valid and included"""
choices = self.splitText(inText)
sortedChoices = [text for text in self.formatList if text in choices]
if len(choices) == len(sortedChoices):
return sortedChoices, True
else:
return sortedChoices, False
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
choices, valid = self.sortedChoices(storedText)
if valid:
result = self.sep.join(choices)
else:
result = _errorStr
return TextFormat.formatOutput(self, result, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
for choice in self.splitText(storedText):
if choice not in self.formatList:
return storedText, not storedText
return storedText, True
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
choices, valid = self.sortedChoices(editText)
if valid:
return self.editSep.join(choices), True
else:
return editText, not editText and not self.isRequired
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and any annotation text"""
currentChoices, valid = self.sortedChoices(currentText)
nonChoices = [text for text in self.formatList if text not in
currentChoices]
results = []
for choice in nonChoices:
allChoices = currentChoices + [choice]
allChoices = [text for text in self.formatList if text in
allChoices]
results.append((self.editSep.join(allChoices), '(%s %s)' % (_(
'add'), choice)))
if currentChoices:
results.append((None, None))
for choice in currentChoices:
allChoices = currentChoices[:]
allChoices.remove(choice)
allChoices = [text for text in self.formatList if text in
allChoices]
results.append((self.editSep.join(allChoices), '(%s %s)' % (_(
'remove'), choice)))
return results
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
return [entry[0] for entry in self.getEditChoices()]
class AutoChoiceFormat(ChoiceFormat):
"""Holds format info for a field with one of several text options"""
typeName = 'AutoChoice'
defaultFormat = ''
formatMenuList = ()
hasEditChoices = True
autoAddChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.formatList = []
def addChoice(self, choice, sort=False):
"""Add choice to edit menu list if not already there"""
if choice and choice not in self.formatList:
self.formatList.append(choice)
if sort:
self.sortChoices()
def sortChoices(self):
"""Sort menu list choices"""
self.formatList.sort()
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
return TextFormat.formatOutput(self, storedText, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
return storedText, True
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
if editText:
return editText, True
return editText, not self.isRequired
class DateFormat(TextFormat):
"""Holds format info for a date field"""
typeName = 'Date'
sortSequence = 5
defaultFormat = u'mmmm d, yyyy'
dateStampStrings = 'Now', _('Now', 'date stamp setting')
formatMenuList = [(u'%s\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'), (
u'%s\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'), None, (u'%s\t%s' %
(_('Month (1 or 2 digits)'), 'm'), 'm'), (u'%s\t%s' % (_(
'Month (2 digits)'), 'mm'), 'mm'), (u'%s\t%s' % (_(
'Month Abbreviation'), 'mmm'), 'mmm'), (u'%s\t%s' % (_('Month Name'
), 'mmmm'), 'mmmm'), None, (u'%s\t%s' % (_('Year (2 digits)'), 'yy'
), 'yy'), (u'%s\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),
None, (u'%s\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'), (u'%s\t%s' %
(_('Weekday Abbreviation'), 'www'), 'www'), (u'%s\t%s' % (_(
'Weekday Name'), 'wwww'), 'wwww')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
try:
text = GenDate(storedText).dateStr(self.format)
except GenDateError:
text = _errorStr
return TextFormat.formatOutput(self, text, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
format = globalref.options.strData('EditDateFormat', True)
try:
return GenDate(storedText).dateStr(format), True
except GenDateError:
return storedText, not storedText
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
format = globalref.options.strData('EditDateFormat', True)
try:
return repr(GenDate().setFromStr(editText, format)), True
except GenDateError:
return editText, not editText and not self.isRequired
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and any annotation text"""
format = globalref.options.strData('EditDateFormat', True)
today = GenDate().dateStr(format)
yesterday = (GenDate() - 1).dateStr(format)
tomorrow = (GenDate() + 1).dateStr(format)
return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _(
'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]
def getInitDefault(self):
"""Return initial stored value for new nodes"""
if self.initDefault in DateFormat.dateStampStrings:
return GenDate().dateStr()
return TextFormat.getInitDefault(self)
def setInitDefault(self, editText):
"""Set initial value from editor version using edit format option"""
if editText in DateFormat.dateStampStrings:
self.initDefault = DateFormat.dateStampStrings[0]
else:
TextFormat.setInitDefault(self, editText)
def getEditInitDefault(self):
"""Return initial value in edit format, found in edit format option"""
if self.initDefault in DateFormat.dateStampStrings:
return DateFormat.dateStampStrings[1]
return TextFormat.getEditInitDefault(self)
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
choices = [entry[0] for entry in self.getEditChoices()]
choices.insert(0, DateFormat.dateStampStrings[1])
return choices
def adjustedCompareValue(self, value):
"""Return conditional comparison value with real-time adjustments,
used for date and time types' 'now' value"""
if value.startswith('now'):
return repr(GenDate())
return value
class TimeFormat(TextFormat):
"""Holds format info for a time field"""
typeName = 'Time'
sortSequence = 6
defaultFormat = u'h:MM:SS aa'
timeStampStrings = 'Now', _('Now', 'time stamp setting')
formatMenuList = [(u'%s\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),
'H'), (u'%s\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (
u'%s\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (
u'%s\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (
u'%s\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\t%s' %
(_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\t%s' % (_(
'Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\t%s' % (_(
'Second (2 digits)'), 'SS'), 'SS'), (u'%s\t%s' % (_(
'Fractional Seconds'), 's'), 's'), None, (u'%s\t%s' % (_('AM/PM'),
'AA'), 'AA'), (u'%s\t%s' % (_('am/pm'), 'aa'), 'aa')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
try:
text = GenTime(storedText).timeStr(self.format)
except GenTimeError:
text = _errorStr
return TextFormat.formatOutput(self, text, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
format = globalref.options.strData('EditTimeFormat', True)
try:
return GenTime(storedText).timeStr(format), True
except GenTimeError:
return storedText, not storedText
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
try:
return repr(GenTime(editText)), True
except GenTimeError:
return editText, not editText and not self.isRequired
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
        each a tuple of edit text and any annotation text"""
format = globalref.options.strData('EditTimeFormat', True)
now = GenTime().timeStr(format)
choices = [(now, '(%s)' % _('now'))]
for hr in (6, 9, 12, 15, 18, 21, 0):
time = GenTime((hr, 0)).timeStr(format)
choices.append((time, ''))
return choices
def getInitDefault(self):
"""Return initial stored value for new nodes"""
if self.initDefault in TimeFormat.timeStampStrings:
return GenTime().timeStr()
return TextFormat.getInitDefault(self)
def setInitDefault(self, editText):
"""Set initial value from editor version using edit format option"""
if editText in TimeFormat.timeStampStrings:
self.initDefault = TimeFormat.timeStampStrings[0]
else:
TextFormat.setInitDefault(self, editText)
def getEditInitDefault(self):
"""Return initial value in edit format, found in edit format option"""
if self.initDefault in TimeFormat.timeStampStrings:
return TimeFormat.timeStampStrings[1]
return TextFormat.getEditInitDefault(self)
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
choices = [entry[0] for entry in self.getEditChoices()]
choices.insert(0, TimeFormat.timeStampStrings[1])
return choices
def adjustedCompareValue(self, value):
"""Return conditional comparison value with real-time adjustments,
used for date and time types' 'now' value"""
if value.startswith('now'):
return repr(GenTime())
return value
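# Illustrative sketch (stdlib only; the format string stands in for the
# user's EditTimeFormat option): the edit-choice pattern used above -- the
# current time first, then a fixed sweep of hours.
import datetime
def _time_choices_sketch(fmt='%H:%M'):
    choices = [(datetime.datetime.now().strftime(fmt), '(now)')]
    for hr in (6, 9, 12, 15, 18, 21, 0):
        choices.append((datetime.time(hr, 0).strftime(fmt), ''))
    return choices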
class BooleanFormat(ChoiceFormat):
"""Holds format info for a bool field"""
typeName = 'Boolean'
sortSequence = 1
defaultFormat = _('yes/no')
formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(
'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),
None, ('1/0', '1/0')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
ChoiceFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
if storedText not in self.formatList:
try:
storedText = GenBoolean(storedText).boolStr(self.format)
except GenBooleanError:
storedText = _errorStr
return TextFormat.formatOutput(self, storedText, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
if storedText in self.formatList:
return storedText, True
try:
return GenBoolean(storedText).boolStr(self.format), True
except GenBooleanError:
return storedText, not storedText
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
try:
return repr(GenBoolean(editText)), True
except GenBooleanError:
if editText in self.formatList:
return editText, True
return editText, not editText and not self.isRequired
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
try:
return repr(GenBoolean(storedText))
except GenBooleanError:
return ''
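# Illustrative sketch (GenBoolean's parsing is not assumed; this synonym
# table is hypothetical): the same lookup order as storedText() above --
# try a boolean parse first, then fall back to the literal format entries.
_bool_synonyms = {'true': True, 'yes': True, 'y': True, '1': True,
                  'false': False, 'no': False, 'n': False, '0': False}
def _stored_bool_sketch(editText, formatList):
    key = editText.strip().lower()
    if key in _bool_synonyms:
        return repr(_bool_synonyms[key]), True
    return editText, editText in formatList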
class UniqueIDFormat(TextFormat):
"""An unique ID automatically generated for new nodes"""
typeName = 'UniqueID'
sortSequence = 10
formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')
defaultFormat = u'0001'
formatMenuList = [(u'%s\t%s' % (_('Required Digit'), '0'), '0'), None,
(u'%s\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\t%s' %
(_('Prefix Example'), 'id0100'), 'id0100')]
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def nextValue(self, increment=True):
"""Return the next value for a new node,
increment format if increment is True"""
try:
prefix, numText, suffix = UniqueIDFormat.formatRe.match(self.format
).groups()
except AttributeError:
self.format = UniqueIDFormat.defaultFormat
return self.nextValue(increment)
value = self.format
if increment:
pattern = u'%%s%%0.%dd%%s' % len(numText)
num = int(numText) + 1
self.format = pattern % (prefix, num, suffix)
return value
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
try:
return int(UniqueIDFormat.formatRe.match(storedText).group(2))
except AttributeError:
return 0
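# Illustrative sketch of the nextValue() increment above (hypothetical
# helper, not part of TreeLine): split the format into prefix/number/suffix
# and bump the number while keeping its zero padding.
import re
_id_re = re.compile('([^0-9]*)([0-9]+)(.*)')
def _next_id_sketch(formatStr):
    prefix, numText, suffix = _id_re.match(formatStr).groups()
    return (u'%%s%%0.%dd%%s' % len(numText)) % (prefix, int(numText) + 1,
                                                suffix)
assert _next_id_sketch(u'0001') == u'0002'
assert _next_id_sketch(u'id0099') == u'id0100'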
class URLFormat(TextFormat):
"""Holds format info for a field with a URL path"""
typeName = 'URL'
sortSequence = 8
htmlOption = False
allowAltLinkText = True
hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')
URLMethod = u'http://'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.html = True
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
if self.useFileInfo:
item = globalref.docRef.fileInfoItem
altText = ''
if self.linkAltField:
field = item.nodeFormat().findField(self.linkAltField)
if field:
altText = field.outputText(item, titleMode, internal)
storedText = item.data.get(self.name, '')
if storedText:
return self.formatOutput(storedText, titleMode, altText, internal)
return ''
def formatOutput(self, storedText, titleMode, altText='', internal=False):
"""Return formatted text, properly escaped and with
a link reference if not in titleMode"""
if titleMode:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = []
for url in paths:
path = url
if not URLFormat.hasMethodRe.match(path):
path = u'%s%s' % (self.URLMethod, path)
path = u'<a href="%s">%s</a>' % (escape(path, treedoc.escDict),
altText or url)
results.append(TextFormat.formatOutput(self, path, titleMode,
internal))
return u'<br />'.join(results)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return (
u'<xsl:for-each select = "./%s">%s<xsl:choose><xsl:when test="contains(., \':\')"><a href="{.}"><xsl:value-of select="."/></a></xsl:when><xsl:otherwise><a href="%s{.}"><xsl:value-of select="."/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'
% (self.name, xslEscape(self.prefix), self.URLMethod,
xslEscape(self.suffix)))
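# Illustrative check of the hasMethodRe logic above: a stored path only gets
# the default method prefixed when it does not already carry one.
import re
_has_method = re.compile('[a-zA-Z][a-zA-Z]+:|#')
def _with_method_sketch(path, method=u'http://'):
    return path if _has_method.match(path) else u'%s%s' % (method, path)
assert _with_method_sketch(u'www.example.com') == u'http://www.example.com'
assert _with_method_sketch(u'ftp://host/file') == u'ftp://host/file'
assert _with_method_sketch(u'#target') == u'#target'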
class PathFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'Path'
URLMethod = u'file:///'
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class EmailFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'Email'
URLMethod = u'mailto:'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class InternalLinkFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'InternalLink'
URLMethod = u'#'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class ExecuteLinkFormat(URLFormat):
"""Holds format info for an executable field"""
typeName = 'ExecuteLink'
URLMethod = u'exec:'
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, altText='', internal=False):
"""Return formatted text, properly escaped and with
a link reference if not in titleMode"""
if titleMode or not internal:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = []
for url in paths:
url = TextFormat.formatOutput(self, url, titleMode, internal)
path = url
if not URLFormat.hasMethodRe.match(path):
path = u'%s%s' % (self.URLMethod, path)
results.append(u'<a href="%s">%s</a>' % (escape(path, treedoc.
escDict), altText or url))
return u'<br />'.join(results)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return TextFormat.xslText(self)
class PictureFormat(TextFormat):
"""Holds format info for a field with a link to a picture"""
typeName = 'Picture'
sortSequence = 8
htmlOption = False
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.html = True
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped and with
a link to the picture if not in titleMode"""
if titleMode:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = [('<img src="%s">' % escape(url, treedoc.escDict)) for
url in paths]
return u'<br />'.join(results)
class ParentFormat(TextFormat):
"""Placeholder format for references to specific parents"""
typeName = 'Parent'
def __init__(self, name, parentLevel=1):
TextFormat.__init__(self, name, {})
self.parentLevel = parentLevel
def sepName(self, englishOnly=False):
"""Return name enclosed with {* *} separators"""
name = englishOnly and self.enName or self.name
return u'{*%s%s*}' % (self.parentLevel * '*', name)
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
for num in range(self.parentLevel):
item = item.parent
if not item:
return ''
field = item.nodeFormat().findField(self.name)
if not field:
return ''
return field.outputText(item, titleMode, internal)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="%s%s"/>' % (self.parentLevel * '../',
self.name)
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)
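# Illustrative check (hypothetical field name 'Title'): the XPath selector a
# level-2 parent reference generates via xslText() above.
assert (u'<xsl:value-of select="%s%s"/>' % (2 * '../', 'Title')
        == u'<xsl:value-of select="../../Title"/>')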
class AncestorFormat(TextFormat):
"""Placeholder format for references to any parent with data"""
typeName = 'Ancestor'
def __init__(self, name):
TextFormat.__init__(self, name, {})
self.parentLevel = 1000
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*?%s*}' % name
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
field = None
while not field:
item = item.parent
if item:
field = item.nodeFormat().findField(self.name)
else:
return ''
return field.outputText(item, titleMode, internal)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="ancestor::*/%s"/>' % self.name
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(ancestor::*/%s)' % self.name
class ChildFormat(TextFormat):
"""Placeholder format for references to a sequence of child data"""
typeName = 'Child'
def __init__(self, name):
TextFormat.__init__(self, name, {})
self.parentLevel = -1
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*&%s*}' % name
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
result = []
for child in item.childList:
field = child.nodeFormat().findField(self.name)
if field:
text = field.outputText(child, titleMode, internal)
if text:
result.append(text)
return globalref.docRef.childFieldSep.join(result)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="child::*/%s"/>' % self.name
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(child::*/%s)' % self.name
class CountFormat(TextFormat):
"""Placeholder format for a count of children at the given level"""
typeName = 'Count'
def __init__(self, name, level):
TextFormat.__init__(self, name, {})
self.parentLevel = -level
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*#%s*}' % name
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
return repr(len(item.descendLevelList(-self.parentLevel)))
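# Illustrative summary (hypothetical field name 'Title'): the {* *} markers
# the placeholder classes above produce from sepName().
assert u'{*%s%s*}' % (2 * '*', 'Title') == u'{***Title*}'  # ParentFormat, level 2
assert u'{*?%s*}' % 'Title' == u'{*?Title*}'  # AncestorFormat
assert u'{*&%s*}' % 'Title' == u'{*&Title*}'  # ChildFormat
assert u'{*#%s*}' % 'Title' == u'{*#Title*}'  # CountFormat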
_errorStr = '#####'
def xslEscape(text):
"""Encapsulate all literal text in <xsl:text> elements
and transform/escape some non-XML entities.
    For the moment, only &nbsp; is supported"""
nonTagRe = re.compile('(.*?)(<.*?>)|(.*)')
escDict = {'&nbsp;': ' '}
def esc(matchObj):
"""Return escaped replacement text"""
        if matchObj.group(1) is None:
return u'<xsl:text>%s</xsl:text>' % escape(matchObj.group(3),
escDict)
if matchObj.group(1):
return u'<xsl:text>%s</xsl:text>%s' % (escape(matchObj.group(1),
escDict), matchObj.group(2))
return matchObj.group(2)
return nonTagRe.sub(esc, text)
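# Minimal standalone sketch of the same idea (no TreeLine imports; the
# entity substitution is omitted): wrap each run of literal text in
# <xsl:text> so XSLT keeps it verbatim, while tags pass through untouched.
import re
def _xsl_escape_sketch(text):
    def esc(m):
        if m.group(1) is None:
            return u'<xsl:text>%s</xsl:text>' % m.group(3) if m.group(3) else ''
        if m.group(1):
            return u'<xsl:text>%s</xsl:text>%s' % (m.group(1), m.group(2))
        return m.group(2)
    return re.sub('(.*?)(<.*?>)|(.*)', esc, text)
assert (_xsl_escape_sketch(u'Name: <xsl:value-of select="."/>')
        == u'<xsl:text>Name: </xsl:text><xsl:value-of select="."/>')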
class TextFormat(object):
"""Holds format info for a normal text field"""
typeName = 'Text'
sortSequence = 20
stripTagRe = re.compile('<.*?>')
defaultNumLines = 1
defaultFormat = ''
formatMenuList = []
htmlOption = True
hasEditChoices = False
autoAddChoices = False
hasFileBrowse = False
allowAltLinkText = False
def __init__(self, name, attrs={}):
"""Any prefix, suffix, html info in attrs dict"""
self.name = name
self.enName = ''
self.format = attrs.get(u'format', self.defaultFormat)
self.prefix = attrs.get(u'prefix', '')
self.suffix = attrs.get(u'suffix', '')
self.html = attrs.get(u'html', '').startswith('y') and True or False
self.isRequired = attrs.get(u'required', '').startswith('y'
) and True or False
self.hidden = attrs.get(u'hidden', '').startswith('y'
) and True or False
try:
self.numLines = int(attrs.get(u'lines', repr(self.defaultNumLines))
)
except ValueError:
self.numLines = 1
self.initDefault = attrs.get(u'init', '')
self.linkAltField = attrs.get(u'linkalt', '')
self.parentLevel = 0
self.useFileInfo = False
self.showInDialog = True
self.initFormat()
def initFormat(self):
"""Called by base init, after class change or format text change"""
pass
def duplicateSettings(self, otherField):
"""Assign other field's parameters to this field"""
self.name = otherField.name
self.enName = otherField.enName
self.format = otherField.format
self.prefix = otherField.prefix
self.suffix = otherField.suffix
self.html = otherField.html
self.isRequired = otherField.isRequired
self.hidden = otherField.hidden
self.numLines = otherField.numLines
self.initDefault = otherField.initDefault
self.linkAltField = otherField.linkAltField
self.parentLevel = otherField.parentLevel
self.useFileInfo = otherField.useFileInfo
self.showInDialog = otherField.showInDialog
def changeType(self, newType):
"""Change this field's type to newType with default format"""
self.__class__ = globals()[newType + 'Format']
self.format = self.defaultFormat
self.initFormat()
def englishName(self):
"""Returns English name if assigned, o/w name"""
if self.enName:
return self.enName
return self.name
def sepName(self, englishOnly=False):
"""Return name enclosed with {* *} separators"""
name = englishOnly and self.enName or self.name
if not self.useFileInfo:
return u'{*%s*}' % name
return u'{*!%s*}' % name
def labelName(self):
"""Return name used for labels - add * for required fields"""
if self.isRequired:
return '%s*' % self.name
return self.name
def writeXml(self):
"""Return text for xml attributes"""
text = u' type="%s"' % self.typeName
if self.format:
text += u' format="%s"' % escape(self.format, treedoc.escDict)
if self.prefix:
text += u' prefix="%s"' % escape(self.prefix, treedoc.escDict)
if self.suffix:
text += u' suffix="%s"' % escape(self.suffix, treedoc.escDict)
if self.html:
text += u' html="y"'
if self.isRequired:
text += u' required="y"'
if self.hidden:
text += u' hidden="y"'
if self.numLines > 1:
text += u' lines="%d"' % self.numLines
if self.initDefault:
text += u' init="%s"' % escape(self.initDefault, treedoc.escDict)
if self.linkAltField:
text += u' linkalt="%s"' % escape(self.linkAltField, treedoc.
escDict)
return text
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
if self.useFileInfo:
item = globalref.docRef.fileInfoItem
storedText = item.data.get(self.name, '')
if storedText:
return self.formatOutput(storedText, titleMode, internal)
return ''
def removeMarkup(self, text):
"""Remove HTML Markup and unescape entities"""
text = TextFormat.stripTagRe.sub('', text)
return unescape(text)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
prefix = self.prefix
suffix = self.suffix
if titleMode:
if self.html:
storedText = self.removeMarkup(storedText)
if globalref.docRef.formHtml:
prefix = self.removeMarkup(prefix)
suffix = self.removeMarkup(suffix)
else:
if not self.html:
storedText = escape(storedText).replace('\n', '<br />')
if not globalref.docRef.formHtml:
prefix = escape(prefix)
suffix = escape(suffix)
return u'%s%s%s' % (prefix, storedText, suffix)
def editText(self, item):
"""Return tuple of this field's text in edit format and bool validity,
using edit format option"""
storedText = item.data.get(self.name, '')
result = self.formatEditText(storedText)
if self.isRequired and not result[0]:
return result[0], False
return result
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
return storedText, True
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
return editText, editText or not self.isRequired
def getInitDefault(self):
"""Return initial stored value for new nodes"""
return self.initDefault
def setInitDefault(self, editText):
"""Set initial value from editor version using edit format option"""
self.initDefault = self.storedText(editText)[0]
def getEditInitDefault(self):
"""Return initial value in edit format, found in edit format option"""
return self.formatEditText(self.initDefault)[0]
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
return []
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
return storedText.lower()
def adjustedCompareValue(self, value):
"""Return conditional comparison value with real-time adjustments,
used for date and time types' 'now' value"""
return value
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return (
u'<xsl:if test="normalize-space(./%s)">%s<xsl:value-of select="./%s"/>%s</xsl:if>'
% (self.name, xslEscape(self.prefix), self.name, xslEscape(
self.suffix)))
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(./%s)' % self.name
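# Illustrative check (xml.sax.saxutils.escape as a stand-in for the escape()
# helper assumed above): what the non-HTML output branch of formatOutput()
# does to stored text outside title mode.
from xml.sax.saxutils import escape as _esc
assert _esc(u'a < b\nc').replace('\n', '<br />') == u'a &lt; b<br />c'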
class LongTextFormat(TextFormat):
"""Holds format info for a long text field - Obsolete -
    kept for compatibility with old files"""
defaultNumLines = 7
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
class NumberFormat(TextFormat):
"""Holds format info for a number field"""
typeName = 'Number'
sortSequence = 10
defaultFormat = u'#.##'
formatMenuList = [(u'%s\t%s' % (_('Optional Digit'), '#'), '#'), (
u'%s\t%s' % (_('Required Digit'), '0'), '0'), (u'%s\t%s' % (_(
'Digit or Space (external)'), _('<space>')), ' '), None, (u'%s\t%s' %
(_('Decimal Point'), '.'), '.'), (u'%s\t%s' % (_('Decimal Comma'),
','), ','), None, (u'%s\t%s' % (_('Comma Separator'), '\\,'), '\\,'
), (u'%s\t%s' % (_('Dot Separator'), '\\.'), '\\.'), (u'%s\t%s' % (
_('Space Separator (internal)'), _('<space>')), ' '), None, (
u'%s\t%s' % (_('Optional Sign'), '-'), '-'), (u'%s\t%s' % (_(
'Required Sign'), '+'), '+'), None, (u'%s\t%s' % (_(
'Exponent (capital)'), 'E'), 'E'), (u'%s\t%s' % (_(
'Exponent (small)'), 'e'), 'e')]
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
try:
text = GenNumber(storedText).numStr(self.format)
except GenNumberError:
text = _errorStr
return TextFormat.formatOutput(self, text, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using self.format"""
try:
return GenNumber(storedText).numStr(self.format), True
except GenNumberError:
return storedText, not storedText
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using self.format"""
try:
return repr(GenNumber().setFromStr(editText, self.format)), True
except GenNumberError:
return editText, not editText and not self.isRequired
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
try:
return GenNumber(storedText).num
except GenNumberError:
return ''
class ChoiceFormat(TextFormat):
"""Holds format info for a field with one of several text options"""
typeName = 'Choice'
sortSequence = 20
editSep = '/'
defaultFormat = '1/2/3/4'
formatMenuList = [(u'%s\t%s' % (_('Separator'), '/'), '/'), None, (
u'%s\t%s' % (_('"/" Character'), '//'), '//'), None, (u'%s\t%s' % (
_('Example'), '1/2/3/4'), '1/2/3/4')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.formatList = self.splitText(self.format)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
if storedText not in self.formatList:
storedText = _errorStr
return TextFormat.formatOutput(self, storedText, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
if storedText in self.formatList:
return storedText, True
return storedText, not storedText
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
if editText in self.formatList:
return editText, True
return editText, not editText and not self.isRequired
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and any annotation text"""
return [(text, '') for text in self.formatList]
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
return [text for text in self.formatList]
def splitText(self, textStr):
"""Split textStr using editSep, double sep's become char"""
return [text.strip().replace('\x00', self.editSep) for text in
textStr.replace(self.editSep * 2, '\x00').split(self.editSep)]
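# Illustrative check of the splitText() escape trick above: a doubled
# separator yields a literal separator inside one choice, via a NUL
# round-trip.
_sep = '/'
_parts = [t.strip().replace('\x00', _sep) for t in
          'red/green//blue'.replace(_sep * 2, '\x00').split(_sep)]
assert _parts == ['red', 'green/blue']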
class CombinationFormat(ChoiceFormat):
"""Holds format info for a field of combinations of text options"""
typeName = 'Combination'
outputSepList = ',', ';', ':', '|', '/', '\\', '~'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
ChoiceFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
ChoiceFormat.initFormat(self)
fullFormat = ''.join(self.formatList)
try:
self.sep = [sep for sep in CombinationFormat.outputSepList if
sep not in fullFormat][0] + ' '
except IndexError:
self.sep = CombinationFormat.outputSepList[0] + ' '
def sortedChoices(self, inText):
"""Return tuple of choices from inText sorted like format and
True if all splits are valid and included"""
choices = self.splitText(inText)
sortedChoices = [text for text in self.formatList if text in choices]
if len(choices) == len(sortedChoices):
return sortedChoices, True
else:
return sortedChoices, False
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
choices, valid = self.sortedChoices(storedText)
if valid:
result = self.sep.join(choices)
else:
result = _errorStr
return TextFormat.formatOutput(self, result, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
for choice in self.splitText(storedText):
if choice not in self.formatList:
return storedText, not storedText
return storedText, True
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
choices, valid = self.sortedChoices(editText)
if valid:
return self.editSep.join(choices), True
else:
return editText, not editText and not self.isRequired
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and any annotation text"""
currentChoices, valid = self.sortedChoices(currentText)
nonChoices = [text for text in self.formatList if text not in
currentChoices]
results = []
for choice in nonChoices:
allChoices = currentChoices + [choice]
allChoices = [text for text in self.formatList if text in
allChoices]
results.append((self.editSep.join(allChoices), '(%s %s)' % (_(
'add'), choice)))
if currentChoices:
results.append((None, None))
for choice in currentChoices:
allChoices = currentChoices[:]
allChoices.remove(choice)
allChoices = [text for text in self.formatList if text in
allChoices]
results.append((self.editSep.join(allChoices), '(%s %s)' % (_(
'remove'), choice)))
return results
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
return [entry[0] for entry in self.getEditChoices()]
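# Illustrative sketch: sortedChoices() both validates and canonicalizes a
# stored combination -- valid choices come back in format-string order,
# whatever order the user picked them in.
#
# >>> combo = CombinationFormat(u'Tags', {u'format': u'red/green/blue'})
# >>> combo.sortedChoices(u'blue/red')
# ([u'red', u'blue'], True)
# >>> combo.sortedChoices(u'red/purple')      # 'purple' is not a choice
# ([u'red'], False)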
class AutoChoiceFormat(ChoiceFormat):
"""Holds format info for a field with one of several text options"""
typeName = 'AutoChoice'
defaultFormat = ''
formatMenuList = ()
hasEditChoices = True
autoAddChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.formatList = []
def addChoice(self, choice, sort=False):
"""Add choice to edit menu list if not already there"""
if choice and choice not in self.formatList:
self.formatList.append(choice)
if sort:
self.sortChoices()
def sortChoices(self):
"""Sort menu list choices"""
self.formatList.sort()
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
return TextFormat.formatOutput(self, storedText, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
return storedText, True
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
if editText:
return editText, True
return editText, not self.isRequired
class DateFormat(TextFormat):
"""Holds format info for a date field"""
typeName = 'Date'
sortSequence = 5
defaultFormat = u'mmmm d, yyyy'
dateStampStrings = 'Now', _('Now', 'date stamp setting')
formatMenuList = [(u'%s\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'), (
u'%s\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'), None, (u'%s\t%s' %
(_('Month (1 or 2 digits)'), 'm'), 'm'), (u'%s\t%s' % (_(
'Month (2 digits)'), 'mm'), 'mm'), (u'%s\t%s' % (_(
'Month Abbreviation'), 'mmm'), 'mmm'), (u'%s\t%s' % (_('Month Name'
), 'mmmm'), 'mmmm'), None, (u'%s\t%s' % (_('Year (2 digits)'), 'yy'
), 'yy'), (u'%s\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),
None, (u'%s\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'), (u'%s\t%s' %
(_('Weekday Abbreviation'), 'www'), 'www'), (u'%s\t%s' % (_(
'Weekday Name'), 'wwww'), 'wwww')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
try:
text = GenDate(storedText).dateStr(self.format)
except GenDateError:
text = _errorStr
return TextFormat.formatOutput(self, text, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
format = globalref.options.strData('EditDateFormat', True)
try:
return GenDate(storedText).dateStr(format), True
except GenDateError:
return storedText, not storedText
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
format = globalref.options.strData('EditDateFormat', True)
try:
return repr(GenDate().setFromStr(editText, format)), True
except GenDateError:
return editText, not editText and not self.isRequired
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and any annotation text"""
format = globalref.options.strData('EditDateFormat', True)
today = GenDate().dateStr(format)
yesterday = (GenDate() - 1).dateStr(format)
tomorrow = (GenDate() + 1).dateStr(format)
return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _(
'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]
def getInitDefault(self):
"""Return initial stored value for new nodes"""
if self.initDefault in DateFormat.dateStampStrings:
return GenDate().dateStr()
return TextFormat.getInitDefault(self)
def setInitDefault(self, editText):
"""Set initial value from editor version using edit format option"""
if editText in DateFormat.dateStampStrings:
self.initDefault = DateFormat.dateStampStrings[0]
else:
TextFormat.setInitDefault(self, editText)
def getEditInitDefault(self):
"""Return initial value in edit format, found in edit format option"""
if self.initDefault in DateFormat.dateStampStrings:
return DateFormat.dateStampStrings[1]
return TextFormat.getEditInitDefault(self)
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
choices = [entry[0] for entry in self.getEditChoices()]
choices.insert(0, DateFormat.dateStampStrings[1])
return choices
def adjustedCompareValue(self, value):
"""Return conditional comparison value with real-time adjustments,
used for date and time types' 'now' value"""
if value.startswith('now'):
return repr(GenDate())
return value
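# Illustrative sketch: a saved conditional of 'now' is re-evaluated at
# comparison time rather than frozen when the condition was written.
# Relies on GenDate from TreeLine's gendate module, imported earlier.
#
# >>> date = DateFormat(u'Deadline')
# >>> date.adjustedCompareValue(u'now') == repr(GenDate())
# True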
class TimeFormat(TextFormat):
"""Holds format info for a time field"""
typeName = 'Time'
sortSequence = 6
defaultFormat = u'h:MM:SS aa'
timeStampStrings = 'Now', _('Now', 'time stamp setting')
formatMenuList = [(u'%s\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),
'H'), (u'%s\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (
u'%s\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (
u'%s\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (
u'%s\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\t%s' %
(_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\t%s' % (_(
'Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\t%s' % (_(
'Second (2 digits)'), 'SS'), 'SS'), (u'%s\t%s' % (_(
'Fractional Seconds'), 's'), 's'), None, (u'%s\t%s' % (_('AM/PM'),
'AA'), 'AA'), (u'%s\t%s' % (_('am/pm'), 'aa'), 'aa')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
try:
text = GenTime(storedText).timeStr(self.format)
except GenTimeError:
text = _errorStr
return TextFormat.formatOutput(self, text, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
format = globalref.options.strData('EditTimeFormat', True)
try:
return GenTime(storedText).timeStr(format), True
except GenTimeError:
return storedText, not storedText
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
try:
return repr(GenTime(editText)), True
except GenTimeError:
return editText, not editText and not self.isRequired
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and annotated text"""
format = globalref.options.strData('EditTimeFormat', True)
now = GenTime().timeStr(format)
choices = [(now, '(%s)' % _('now'))]
for hr in (6, 9, 12, 15, 18, 21, 0):
time = GenTime((hr, 0)).timeStr(format)
choices.append((time, ''))
return choices
def getInitDefault(self):
"""Return initial stored value for new nodes"""
if self.initDefault in TimeFormat.timeStampStrings:
return GenTime().timeStr()
return TextFormat.getInitDefault(self)
def setInitDefault(self, editText):
"""Set initial value from editor version using edit format option"""
if editText in TimeFormat.timeStampStrings:
self.initDefault = TimeFormat.timeStampStrings[0]
else:
TextFormat.setInitDefault(self, editText)
def getEditInitDefault(self):
"""Return initial value in edit format, found in edit format option"""
if self.initDefault in TimeFormat.timeStampStrings:
return TimeFormat.timeStampStrings[1]
return TextFormat.getEditInitDefault(self)
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
choices = [entry[0] for entry in self.getEditChoices()]
choices.insert(0, TimeFormat.timeStampStrings[1])
return choices
def adjustedCompareValue(self, value):
"""Return conditional comparison value with real-time adjustments,
used for date and time types' 'now' value"""
if value.startswith('now'):
return repr(GenTime())
return value
class BooleanFormat(ChoiceFormat):
"""Holds format info for a bool field"""
typeName = 'Boolean'
sortSequence = 1
defaultFormat = _('yes/no')
formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(
'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),
None, ('1/0', '1/0')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
ChoiceFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
if storedText not in self.formatList:
try:
storedText = GenBoolean(storedText).boolStr(self.format)
except GenBooleanError:
storedText = _errorStr
return TextFormat.formatOutput(self, storedText, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
if storedText in self.formatList:
return storedText, True
try:
return GenBoolean(storedText).boolStr(self.format), True
except GenBooleanError:
return storedText, not storedText
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
try:
return repr(GenBoolean(editText)), True
except GenBooleanError:
if editText in self.formatList:
return editText, True
return editText, not editText and not self.isRequired
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
try:
return repr(GenBoolean(storedText))
except GenBooleanError:
return ''
class UniqueIDFormat(TextFormat):
"""An unique ID automatically generated for new nodes"""
typeName = 'UniqueID'
sortSequence = 10
formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')
defaultFormat = u'0001'
formatMenuList = [(u'%s\t%s' % (_('Required Digit'), '0'), '0'), None,
(u'%s\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\t%s' %
(_('Prefix Example'), 'id0100'), 'id0100')]
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def nextValue(self, increment=True):
"""Return the next value for a new node,
increment format if increment is True"""
try:
prefix, numText, suffix = UniqueIDFormat.formatRe.match(self.format
).groups()
except AttributeError:
self.format = UniqueIDFormat.defaultFormat
return self.nextValue(increment)
value = self.format
if increment:
pattern = u'%%s%%0.%dd%%s' % len(numText)
num = int(numText) + 1
self.format = pattern % (prefix, num, suffix)
return value
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
try:
return int(UniqueIDFormat.formatRe.match(storedText).group(2))
except AttributeError:
return 0
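# Illustrative sketch: nextValue() hands out the current ID and advances
# the stored format, keeping any prefix and the zero padding intact.
#
# >>> uid = UniqueIDFormat(u'ID', {u'format': u'id0099'})
# >>> uid.nextValue(), uid.nextValue()
# (u'id0099', u'id0100')
# >>> uid.format
# u'id0101'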
class URLFormat(TextFormat):
"""Holds format info for a field with a URL path"""
typeName = 'URL'
sortSequence = 8
htmlOption = False
allowAltLinkText = True
hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')
URLMethod = u'http://'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.html = True
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
if self.useFileInfo:
item = globalref.docRef.fileInfoItem
altText = ''
if self.linkAltField:
field = item.nodeFormat().findField(self.linkAltField)
if field:
altText = field.outputText(item, titleMode, internal)
storedText = item.data.get(self.name, '')
if storedText:
return self.formatOutput(storedText, titleMode, altText, internal)
return ''
def formatOutput(self, storedText, titleMode, altText='', internal=False):
"""Return formatted text, properly escaped and with
a link reference if not in titleMode"""
if titleMode:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = []
for url in paths:
path = url
if not URLFormat.hasMethodRe.match(path):
path = u'%s%s' % (self.URLMethod, path)
path = u'<a href="%s">%s</a>' % (escape(path, treedoc.escDict),
altText or url)
results.append(TextFormat.formatOutput(self, path, titleMode,
internal))
return u'<br />'.join(results)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return (
u'<xsl:for-each select = "./%s">%s<xsl:choose><xsl:when test="contains(., \':\')"><a href="{.}"><xsl:value-of select="."/></a></xsl:when><xsl:otherwise><a href="%s{.}"><xsl:value-of select="."/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'
% (self.name, xslEscape(self.prefix), self.URLMethod,
xslEscape(self.suffix)))
class PathFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'Path'
URLMethod = u'file:///'
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class EmailFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'Email'
URLMethod = u'mailto:'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class InternalLinkFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'InternalLink'
URLMethod = u'#'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class ExecuteLinkFormat(URLFormat):
"""Holds format info for an executable field"""
typeName = 'ExecuteLink'
URLMethod = u'exec:'
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, altText='', internal=False):
"""Return formatted text, properly escaped and with
a link reference if not in titleMode"""
if titleMode or not internal:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = []
for url in paths:
url = TextFormat.formatOutput(self, url, titleMode, internal)
path = url
if not URLFormat.hasMethodRe.match(path):
path = u'%s%s' % (self.URLMethod, path)
results.append(u'<a href="%s">%s</a>' % (escape(path, treedoc.
escDict), altText or url))
return u'<br />'.join(results)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return TextFormat.xslText(self)
class PictureFormat(TextFormat):
"""Holds format info for a field with a link to a picture"""
typeName = 'Picture'
sortSequence = 8
htmlOption = False
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.html = True
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped and with
a link to the picture if not in titleMode"""
if titleMode:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = [('<img src="%s">' % escape(url, treedoc.escDict)) for
url in paths]
return u'<br />'.join(results)
class ParentFormat(TextFormat):
"""Placeholder format for references to specific parents"""
typeName = 'Parent'
def __init__(self, name, parentLevel=1):
TextFormat.__init__(self, name, {})
self.parentLevel = parentLevel
def sepName(self, englishOnly=False):
"""Return name enclosed with {* *} separators"""
name = englishOnly and self.enName or self.name
return u'{*%s%s*}' % (self.parentLevel * '*', name)
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
for num in range(self.parentLevel):
item = item.parent
if not item:
return ''
field = item.nodeFormat().findField(self.name)
if not field:
return ''
return field.outputText(item, titleMode, internal)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="%s%s"/>' % (self.parentLevel * '../',
self.name)
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)
class AncestorFormat(TextFormat):
"""Placeholder format for references to any parent with data"""
typeName = 'Ancestor'
def __init__(self, name):
TextFormat.__init__(self, name, {})
self.parentLevel = 1000
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*?%s*}' % name
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
field = None
while not field:
item = item.parent
if item:
field = item.nodeFormat().findField(self.name)
else:
return ''
return field.outputText(item, titleMode, internal)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="ancestor::*/%s"/>' % self.name
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(ancestor::*/%s)' % self.name
class ChildFormat(TextFormat):
"""Placeholder format for references to a sequence of child data"""
typeName = 'Child'
def __init__(self, name):
TextFormat.__init__(self, name, {})
self.parentLevel = -1
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*&%s*}' % name
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
result = []
for child in item.childList:
field = child.nodeFormat().findField(self.name)
if field:
text = field.outputText(child, titleMode, internal)
if text:
result.append(text)
return globalref.docRef.childFieldSep.join(result)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="child::*/%s"/>' % self.name
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(child::*/%s)' % self.name
class CountFormat(TextFormat):
"""Placeholder format for a count of children at the given level"""
typeName = 'Count'
def __init__(self, name, level):
TextFormat.__init__(self, name, {})
self.parentLevel = -level
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*#%s*}' % name
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
return repr(len(item.descendLevelList(-self.parentLevel)))
<|reserved_special_token_1|>
#!/usr/bin/env python
#****************************************************************************
# fieldformat.py, provides non-GUI base classes for field formating
#
# TreeLine, an information storage program
# Copyright (C) 2006, Douglas W. Bell
#
# This is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License, either Version 2 or any later
# version. This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY. See the included LICENSE file for details.
#****************************************************************************
import re
from xml.sax.saxutils import escape, unescape
from gennumber import GenNumber, GenNumberError
from gendate import GenDate, GenDateError
from gentime import GenTime, GenTimeError
from genboolean import GenBoolean, GenBooleanError
import treedoc
import globalref
_errorStr = '#####'
def xslEscape(text):
"""Encapsulate all literal text in <xsl:text> elements
and transform/escape some non-XML entities.
       For the moment, only &nbsp; is supported"""
nonTagRe = re.compile(r'(.*?)(<.*?>)|(.*)')
escDict = {'&nbsp;': ' '} # escape function does '&' first
def esc(matchObj):
"""Return escaped replacement text"""
if matchObj.group(1) == None: # no tags found
return u'<xsl:text>%s</xsl:text>' % \
escape(matchObj.group(3), escDict)
if matchObj.group(1): # leading text and tag
return u'<xsl:text>%s</xsl:text>%s' % \
(escape(matchObj.group(1), escDict), matchObj.group(2))
return matchObj.group(2) # tag only
return nonTagRe.sub(esc, text)
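# Illustrative sketch of xslEscape(): literal runs get wrapped in
# <xsl:text> while markup passes through untouched. The '(.*)' branch
# also matches the empty string at end of input, so a final empty
# <xsl:text></xsl:text> pair appears after trailing markup.
#
# >>> xslEscape(u'Total: <xsl:value-of select="./n"/>')
# u'<xsl:text>Total: </xsl:text><xsl:value-of select="./n"/><xsl:text></xsl:text>'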
class TextFormat(object):
"""Holds format info for a normal text field"""
typeName = 'Text'
sortSequence = 20
stripTagRe = re.compile('<.*?>')
defaultNumLines = 1
#field format edit options:
defaultFormat = ''
formatMenuList = []
htmlOption = True
hasEditChoices = False
autoAddChoices = False
hasFileBrowse = False
allowAltLinkText = False
def __init__(self, name, attrs={}):
"""Any prefix, suffix, html info in attrs dict"""
self.name = name
self.enName = '' # used only by fileFormat field for i18n
self.format = attrs.get(u'format', self.defaultFormat)
self.prefix = attrs.get(u'prefix', '')
self.suffix = attrs.get(u'suffix', '')
# defaults to no html (line breaks preserved)
self.html = attrs.get(u'html', '').startswith('y') and True or False
self.isRequired = attrs.get(u'required', '').startswith('y') and \
True or False
self.hidden = attrs.get(u'hidden', '').startswith('y') and \
True or False
try:
self.numLines = int(attrs.get(u'lines',
repr(self.defaultNumLines)))
except ValueError:
self.numLines = 1
self.initDefault = attrs.get(u'init', '')
self.linkAltField = attrs.get(u'linkalt', '')
self.parentLevel = 0
self.useFileInfo = False
self.showInDialog = True
self.initFormat()
def initFormat(self):
"""Called by base init, after class change or format text change"""
pass
def duplicateSettings(self, otherField):
"""Assign other field's parameters to this field"""
self.name = otherField.name
self.enName = otherField.enName
self.format = otherField.format
self.prefix = otherField.prefix
self.suffix = otherField.suffix
self.html = otherField.html
self.isRequired = otherField.isRequired
self.hidden = otherField.hidden
self.numLines = otherField.numLines
self.initDefault = otherField.initDefault
self.linkAltField = otherField.linkAltField
self.parentLevel = otherField.parentLevel
self.useFileInfo = otherField.useFileInfo
self.showInDialog = otherField.showInDialog
def changeType(self, newType):
"""Change this field's type to newType with default format"""
self.__class__ = globals()[newType + 'Format']
self.format = self.defaultFormat
self.initFormat()
def englishName(self):
"""Returns English name if assigned, o/w name"""
if self.enName:
return self.enName
return self.name
def sepName(self, englishOnly=False):
"""Return name enclosed with {* *} separators"""
name = englishOnly and self.enName or self.name
if not self.useFileInfo:
return u'{*%s*}' % name
return u'{*!%s*}' % name
def labelName(self):
"""Return name used for labels - add * for required fields"""
if self.isRequired:
return '%s*' % self.name
return self.name
def writeXml(self):
"""Return text for xml attributes"""
text = u' type="%s"' % self.typeName
if self.format:
text += u' format="%s"' % escape(self.format, treedoc.escDict)
if self.prefix:
text += u' prefix="%s"' % escape(self.prefix, treedoc.escDict)
if self.suffix:
text += u' suffix="%s"' % escape(self.suffix, treedoc.escDict)
if self.html:
text += u' html="y"'
if self.isRequired:
text += u' required="y"'
if self.hidden:
text += u' hidden="y"'
if self.numLines > 1:
text += u' lines="%d"' % self.numLines
if self.initDefault:
text += u' init="%s"' % escape(self.initDefault, treedoc.escDict)
if self.linkAltField:
text += u' linkalt="%s"' % escape(self.linkAltField,
treedoc.escDict)
return text
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
if self.useFileInfo:
item = globalref.docRef.fileInfoItem
storedText = item.data.get(self.name, '')
if storedText:
return self.formatOutput(storedText, titleMode, internal)
return ''
def removeMarkup(self, text):
"""Remove HTML Markup and unescape entities"""
text = TextFormat.stripTagRe.sub('', text)
return unescape(text)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
prefix = self.prefix
suffix = self.suffix
if titleMode:
if self.html:
storedText = self.removeMarkup(storedText)
if globalref.docRef.formHtml:
prefix = self.removeMarkup(prefix)
suffix = self.removeMarkup(suffix)
else:
if not self.html:
storedText = escape(storedText).replace('\n', '<br />')
if not globalref.docRef.formHtml:
prefix = escape(prefix)
suffix = escape(suffix)
return u'%s%s%s' % (prefix, storedText, suffix)
def editText(self, item):
"""Return tuple of this field's text in edit format and bool validity,
using edit format option"""
storedText = item.data.get(self.name, '')
result = self.formatEditText(storedText)
if self.isRequired and not result[0]:
return (result[0], False)
return result
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
return (storedText, True)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
return (editText, editText or not self.isRequired)
def getInitDefault(self):
"""Return initial stored value for new nodes"""
return self.initDefault
def setInitDefault(self, editText):
"""Set initial value from editor version using edit format option"""
self.initDefault = self.storedText(editText)[0]
def getEditInitDefault(self):
"""Return initial value in edit format, found in edit format option"""
return self.formatEditText(self.initDefault)[0]
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
return []
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
return storedText.lower()
def adjustedCompareValue(self, value):
"""Return conditional comparison value with real-time adjustments,
used for date and time types' 'now' value"""
return value
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:if test="normalize-space(./%s)">%s'\
'<xsl:value-of select="./%s"/>%s</xsl:if>' % \
(self.name, xslEscape(self.prefix), self.name,
xslEscape(self.suffix))
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(./%s)' % self.name
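# Illustrative sketch (assumes the TreeLine runtime, where the gettext _()
# builtin lets this module import cleanly): sortValue() lower-cases the
# stored text, so plain-text sorting and conditionals ignore case.
#
# >>> f = TextFormat(u'Name')
# >>> f.sortValue({u'Name': u'Zebra'})
# u'zebra'
# >>> f.sepName()
# u'{*Name*}'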
class LongTextFormat(TextFormat):
"""Holds format info for a long text field - Obsolete -
       kept for compatibility with old files"""
# typeName = 'LongText'
defaultNumLines = 7
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
class NumberFormat(TextFormat):
"""Holds format info for a number field"""
typeName = 'Number'
sortSequence = 10
#field format edit options:
defaultFormat = u'#.##'
formatMenuList = [(u'%s\t%s' % (_('Optional Digit'), '#'), '#'),
(u'%s\t%s' % (_('Required Digit'), '0'), '0'),
(u'%s\t%s' % (_('Digit or Space (external)'),
_('<space>')), ' '),
None,
(u'%s\t%s' % (_('Decimal Point'), '.'), '.'),
(u'%s\t%s' % (_('Decimal Comma'), ','), ','),
None,
(u'%s\t%s' % (_('Comma Separator'), '\,'), '\,'),
(u'%s\t%s' % (_('Dot Separator'), '\.'), '\.'),
(u'%s\t%s' % (_('Space Separator (internal)'),
_('<space>')), ' '),
None,
(u'%s\t%s' % (_('Optional Sign'), '-'), '-'),
(u'%s\t%s' % (_('Required Sign'), '+'), '+'),
None,
(u'%s\t%s' % (_('Exponent (capital)'), 'E'), 'E'),
(u'%s\t%s' % (_('Exponent (small)'), 'e'), 'e')]
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
try:
text = GenNumber(storedText).numStr(self.format)
except GenNumberError:
text = _errorStr
return TextFormat.formatOutput(self, text, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using self.format"""
try:
return (GenNumber(storedText).numStr(self.format), True)
except GenNumberError:
return (storedText, not storedText)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using self.format"""
try:
return (repr(GenNumber().setFromStr(editText, self.format)), True)
except GenNumberError:
return (editText, not editText and not self.isRequired)
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
try:
return GenNumber(storedText).num
except GenNumberError:
return ''
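# Illustrative round-trip sketch: storedText() parses the user's entry
# against self.format and stores repr() of the number; formatEditText()
# renders it back for editing. Exact output strings depend on GenNumber,
# so the values below are indicative only.
#
# >>> num = NumberFormat(u'Price', {u'format': u'0.00'})
# >>> num.storedText(u'3.5')       # doctest: +SKIP
# ('3.5', True)
# >>> num.formatEditText(u'3.5')   # doctest: +SKIP
# (u'3.50', True)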
class ChoiceFormat(TextFormat):
"""Holds format info for a field with one of several text options"""
typeName = 'Choice'
sortSequence = 20
editSep = '/'
#field format edit options:
defaultFormat = '1/2/3/4'
formatMenuList = [(u'%s\t%s' % (_('Separator'), '/'), '/'), None,
(u'%s\t%s' % (_('"/" Character'), '//'), '//'), None,
(u'%s\t%s' % (_('Example'), '1/2/3/4'), '1/2/3/4')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.formatList = self.splitText(self.format)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
if storedText not in self.formatList:
storedText = _errorStr
return TextFormat.formatOutput(self, storedText, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
if storedText in self.formatList:
return (storedText, True)
return (storedText, not storedText)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
if editText in self.formatList:
return (editText, True)
return (editText, not editText and not self.isRequired)
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and any annotation text"""
return [(text, '') for text in self.formatList]
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
return [text for text in self.formatList]
def splitText(self, textStr):
"""Split textStr using editSep, double sep's become char"""
return [text.strip().replace('\0', self.editSep) for text in
textStr.replace(self.editSep * 2, '\0').
split(self.editSep)]
class CombinationFormat(ChoiceFormat):
"""Holds format info for a field of combinations of text options"""
typeName = 'Combination'
outputSepList = (',', ';', ':', '|', '/', '\\', '~')
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
ChoiceFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
ChoiceFormat.initFormat(self)
fullFormat = ''.join(self.formatList)
try:
self.sep = [sep for sep in CombinationFormat.outputSepList
if sep not in fullFormat][0] + ' '
except IndexError:
self.sep = CombinationFormat.outputSepList[0] + ' '
def sortedChoices(self, inText):
"""Return tuple of choices from inText sorted like format and
True if all splits are valid and included"""
choices = self.splitText(inText)
sortedChoices = [text for text in self.formatList if text in choices]
if len(choices) == len(sortedChoices):
return (sortedChoices, True)
else:
return (sortedChoices, False)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
choices, valid = self.sortedChoices(storedText)
if valid:
result = self.sep.join(choices)
else:
result = _errorStr
return TextFormat.formatOutput(self, result, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
for choice in self.splitText(storedText):
if choice not in self.formatList:
return (storedText, not storedText)
return (storedText, True)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
choices, valid = self.sortedChoices(editText)
if valid:
return (self.editSep.join(choices), True)
else:
return (editText, not editText and not self.isRequired)
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and any annotation text"""
currentChoices, valid = self.sortedChoices(currentText)
nonChoices = [text for text in self.formatList
if text not in currentChoices]
results = []
for choice in nonChoices: # menu entries to add a choice
allChoices = currentChoices + [choice]
allChoices = [text for text in self.formatList
if text in allChoices]
results.append((self.editSep.join(allChoices),
'(%s %s)' % (_('add'), choice)))
if currentChoices:
results.append((None, None)) # separator
for choice in currentChoices: # menu entries to remove a choice
allChoices = currentChoices[:]
allChoices.remove(choice)
allChoices = [text for text in self.formatList
if text in allChoices]
results.append((self.editSep.join(allChoices),
'(%s %s)' % (_('remove'), choice)))
return results
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
return [entry[0] for entry in self.getEditChoices()]
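# Illustrative sketch: getEditChoices() builds the combo-box menu from the
# current selection -- one entry per addable choice, a (None, None)
# separator, then one entry per removable choice, annotated through the
# translated 'add'/'remove' labels (English shown here).
#
# >>> combo = CombinationFormat(u'Tags', {u'format': u'a/b/c'})
# >>> combo.getEditChoices(u'b')   # doctest: +SKIP
# [(u'a/b', '(add a)'), (u'b/c', '(add c)'), (None, None), (u'', '(remove b)')]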
class AutoChoiceFormat(ChoiceFormat):
"""Holds format info for a field with one of several text options"""
typeName = 'AutoChoice'
#field format edit options:
defaultFormat = ''
formatMenuList = ()
hasEditChoices = True
autoAddChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.formatList = []
def addChoice(self, choice, sort=False):
"""Add choice to edit menu list if not already there"""
if choice and choice not in self.formatList:
self.formatList.append(choice)
if sort:
self.sortChoices()
def sortChoices(self):
"""Sort menu list choices"""
self.formatList.sort()
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
return TextFormat.formatOutput(self, storedText, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
return (storedText, True)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
if editText:
return (editText, True)
return (editText, not self.isRequired)
class DateFormat(TextFormat):
"""Holds format info for a date field"""
typeName = 'Date'
sortSequence = 5
#field format edit options:
defaultFormat = u'mmmm d, yyyy'
dateStampStrings = ('Now', _('Now', 'date stamp setting'))
formatMenuList = [(u'%s\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'),
(u'%s\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'),
None,
(u'%s\t%s' % (_('Month (1 or 2 digits)'), 'm'), 'm'),
(u'%s\t%s' % (_('Month (2 digits)'), 'mm'), 'mm'),
(u'%s\t%s' % (_('Month Abbreviation'), 'mmm'), 'mmm'),
(u'%s\t%s' % (_('Month Name'), 'mmmm'), 'mmmm'),
None,
(u'%s\t%s' % (_('Year (2 digits)'), 'yy'), 'yy'),
(u'%s\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),
None,
(u'%s\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'),
(u'%s\t%s' % (_('Weekday Abbreviation'), 'www'), 'www'),
(u'%s\t%s' % (_('Weekday Name'), 'wwww'), 'wwww')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
try:
text = GenDate(storedText).dateStr(self.format)
except GenDateError:
text = _errorStr
return TextFormat.formatOutput(self, text, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
format = globalref.options.strData('EditDateFormat', True)
try:
return (GenDate(storedText).dateStr(format), True)
except GenDateError:
return (storedText, not storedText)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
format = globalref.options.strData('EditDateFormat', True)
try:
return (repr(GenDate().setFromStr(editText, format)), True)
except GenDateError:
return (editText, not editText and not self.isRequired)
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and any annotation text"""
format = globalref.options.strData('EditDateFormat', True)
today = GenDate().dateStr(format)
yesterday = (GenDate() - 1).dateStr(format)
tomorrow = (GenDate() + 1).dateStr(format)
return [(today, '(%s)' % _('today')),
(yesterday, '(%s)' % _('yesterday')),
(tomorrow, '(%s)' % _('tomorrow'))]
def getInitDefault(self):
"""Return initial stored value for new nodes"""
if self.initDefault in DateFormat.dateStampStrings:
return GenDate().dateStr()
return TextFormat.getInitDefault(self)
def setInitDefault(self, editText):
"""Set initial value from editor version using edit format option"""
if editText in DateFormat.dateStampStrings:
self.initDefault = DateFormat.dateStampStrings[0]
else:
TextFormat.setInitDefault(self, editText)
def getEditInitDefault(self):
"""Return initial value in edit format, found in edit format option"""
if self.initDefault in DateFormat.dateStampStrings:
return DateFormat.dateStampStrings[1]
return TextFormat.getEditInitDefault(self)
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
choices = [entry[0] for entry in self.getEditChoices()]
choices.insert(0, DateFormat.dateStampStrings[1])
return choices
def adjustedCompareValue(self, value):
"""Return conditional comparison value with real-time adjustments,
used for date and time types' 'now' value"""
if value.startswith('now'):
return repr(GenDate())
return value
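# Illustrative sketch: an init default of 'Now' is stored as a date stamp,
# so each new node receives its creation date instead of a fixed value.
#
# >>> d = DateFormat(u'Created')
# >>> d.setInitDefault(DateFormat.dateStampStrings[1])
# >>> d.initDefault
# 'Now'
# >>> d.getInitDefault() == GenDate().dateStr()    # today's date each call
# True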
class TimeFormat(TextFormat):
"""Holds format info for a time field"""
typeName = 'Time'
sortSequence = 6
#field format edit options:
defaultFormat = u'h:MM:SS aa'
timeStampStrings = ('Now', _('Now', 'time stamp setting'))
formatMenuList = [(u'%s\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),
'H'),
(u'%s\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'),
(u'%s\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'),
'h'),
(u'%s\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'),
None,
(u'%s\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'),
(u'%s\t%s' % (_('Minute (2 digits)'), 'MM'), 'MM'),
None,
(u'%s\t%s' % (_('Second (1 or 2 digits)'), 'S'), 'S'),
(u'%s\t%s' % (_('Second (2 digits)'), 'SS'), 'SS'),
(u'%s\t%s' % (_('Fractional Seconds'), 's'), 's'),
None,
(u'%s\t%s' % (_('AM/PM'), 'AA'), 'AA'),
(u'%s\t%s' % (_('am/pm'), 'aa'),'aa')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
try:
text = GenTime(storedText).timeStr(self.format)
except GenTimeError:
text = _errorStr
return TextFormat.formatOutput(self, text, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
format = globalref.options.strData('EditTimeFormat', True)
try:
return (GenTime(storedText).timeStr(format), True)
except GenTimeError:
return (storedText, not storedText)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
try:
return (repr(GenTime(editText)), True)
except GenTimeError:
return (editText, not editText and not self.isRequired)
def getEditChoices(self, currentText=''):
"""Return list of choices for combo box,
each a tuple of edit text and annotated text"""
format = globalref.options.strData('EditTimeFormat', True)
now = GenTime().timeStr(format)
choices = [(now, '(%s)' % _('now'))]
for hr in (6, 9, 12, 15, 18, 21, 0):
time = GenTime((hr, 0)).timeStr(format)
choices.append((time, ''))
return choices
def getInitDefault(self):
"""Return initial stored value for new nodes"""
if self.initDefault in TimeFormat.timeStampStrings:
return GenTime().timeStr()
return TextFormat.getInitDefault(self)
def setInitDefault(self, editText):
"""Set initial value from editor version using edit format option"""
if editText in TimeFormat.timeStampStrings:
self.initDefault = TimeFormat.timeStampStrings[0]
else:
TextFormat.setInitDefault(self, editText)
def getEditInitDefault(self):
"""Return initial value in edit format, found in edit format option"""
if self.initDefault in TimeFormat.timeStampStrings:
return TimeFormat.timeStampStrings[1]
return TextFormat.getEditInitDefault(self)
def initDefaultChoices(self):
"""Return a list of choices for setting the init default"""
choices = [entry[0] for entry in self.getEditChoices()]
choices.insert(0, TimeFormat.timeStampStrings[1])
return choices
def adjustedCompareValue(self, value):
"""Return conditional comparison value with real-time adjustments,
used for date and time types' 'now' value"""
if value.startswith('now'):
return repr(GenTime())
return value
class BooleanFormat(ChoiceFormat):
"""Holds format info for a bool field"""
typeName = 'Boolean'
sortSequence = 1
#field format edit options:
defaultFormat = _('yes/no')
formatMenuList = [(_('true/false'), _('true/false')),
(_('T/F'), _('T/F')), None,
(_('yes/no'), _('yes/no')),
(_('Y/N'), _('Y/N')), None,
('1/0', '1/0')]
hasEditChoices = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
ChoiceFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped if not in titleMode"""
if storedText not in self.formatList:
try:
storedText = GenBoolean(storedText).boolStr(self.format)
except GenBooleanError:
storedText = _errorStr
return TextFormat.formatOutput(self, storedText, titleMode, internal)
def formatEditText(self, storedText):
"""Return tuple of text in edit format and bool validity,
using edit format option"""
if storedText in self.formatList:
return (storedText, True)
try:
return (GenBoolean(storedText).boolStr(self.format), True)
except GenBooleanError:
return (storedText, not storedText)
def storedText(self, editText):
"""Return tuple of stored text from edited text and bool validity,
using edit format option"""
try:
return (repr(GenBoolean(editText)), True)
except GenBooleanError:
if editText in self.formatList:
return (editText, True)
return (editText, not editText and not self.isRequired)
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
try:
return repr(GenBoolean(storedText))
except GenBooleanError:
return ''
class UniqueIDFormat(TextFormat):
"""An unique ID automatically generated for new nodes"""
typeName = 'UniqueID'
sortSequence = 10
formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')
#field format edit options:
defaultFormat = u'0001'
formatMenuList = [(u'%s\t%s' % (_('Required Digit'), '0'), '0'), None,
(u'%s\t%s' % (_('Start Num Example'), '0100'), '0100'),
(u'%s\t%s' % (_('Prefix Example'), 'id0100'), 'id0100')]
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def nextValue(self, increment=True):
"""Return the next value for a new node,
increment format if increment is True"""
try:
prefix, numText, suffix = UniqueIDFormat.formatRe.\
match(self.format).groups()
except AttributeError:
self.format = UniqueIDFormat.defaultFormat
return self.nextValue(increment)
value = self.format
if increment:
pattern = u'%%s%%0.%dd%%s' % len(numText)
num = int(numText) + 1
self.format = pattern % (prefix, num, suffix)
return value
def sortValue(self, data):
"""Return value to be compared for sorting and conditionals"""
storedText = data.get(self.name, '')
try:
return int(UniqueIDFormat.formatRe.match(storedText).group(2))
except AttributeError:
return 0
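# Illustrative sketch: sortValue() extracts the numeric run from a stored
# ID, so IDs sort numerically ('id0009' before 'id0100') rather than as
# plain strings; non-matching text sorts as 0.
#
# >>> uid = UniqueIDFormat(u'ID')
# >>> uid.sortValue({u'ID': u'id0100'})
# 100
# >>> uid.sortValue({u'ID': u'no-digits'})
# 0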
class URLFormat(TextFormat):
"""Holds format info for a field with a URL path"""
typeName = 'URL'
sortSequence = 8
htmlOption = False
allowAltLinkText = True
hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')
URLMethod = u'http://'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.html = True
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
if self.useFileInfo:
item = globalref.docRef.fileInfoItem
altText = ''
if self.linkAltField:
field = item.nodeFormat().findField(self.linkAltField)
if field:
altText = field.outputText(item, titleMode, internal)
storedText = item.data.get(self.name, '')
if storedText:
return self.formatOutput(storedText, titleMode, altText, internal)
return ''
def formatOutput(self, storedText, titleMode, altText='', internal=False):
"""Return formatted text, properly escaped and with
a link reference if not in titleMode"""
if titleMode:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = []
for url in paths:
path = url
if not URLFormat.hasMethodRe.match(path):
path = u'%s%s' % (self.URLMethod, path)
path = u'<a href="%s">%s</a>' % (escape(path, treedoc.escDict),
altText or url)
results.append(TextFormat.formatOutput(self, path, titleMode,
internal))
return u'<br />'.join(results)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:for-each select = "./%s">%s<xsl:choose>'\
'<xsl:when test="contains(., \':\')"><a href="{.}">'\
'<xsl:value-of select="."/></a></xsl:when><xsl:otherwise>'\
'<a href="%s{.}"><xsl:value-of select="."/></a>'\
'</xsl:otherwise></xsl:choose>%s</xsl:for-each>' % \
(self.name, xslEscape(self.prefix), self.URLMethod,
xslEscape(self.suffix))
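# Illustrative sketch: formatOutput() turns each stored line into an
# anchor, prepending the class's URLMethod when no scheme is present.
# Runs only inside a TreeLine session, since TextFormat.formatOutput
# consults globalref.docRef for HTML form settings.
#
# >>> url = URLFormat(u'Site')
# >>> url.formatOutput(u'example.com', titleMode=False)   # doctest: +SKIP
# u'<a href="http://example.com">example.com</a>'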
class PathFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'Path'
URLMethod = u'file:///'
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class EmailFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'Email'
URLMethod = u'mailto:'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class InternalLinkFormat(URLFormat):
"""Holds format info for a field with a local path"""
typeName = 'InternalLink'
URLMethod = u'#'
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
class ExecuteLinkFormat(URLFormat):
"""Holds format info for an executable field"""
typeName = 'ExecuteLink'
URLMethod = u'exec:'
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
URLFormat.__init__(self, name, attrs)
def formatOutput(self, storedText, titleMode, altText='', internal=False):
"""Return formatted text, properly escaped and with
a link reference if not in titleMode"""
if titleMode or not internal:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = []
for url in paths:
# add prefix/suffix within the executable path:
url = TextFormat.formatOutput(self, url, titleMode, internal)
path = url
if not URLFormat.hasMethodRe.match(path):
path = u'%s%s' % (self.URLMethod, path)
results.append(u'<a href="%s">%s</a>' %
(escape(path, treedoc.escDict), altText or url))
return u'<br />'.join(results)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return TextFormat.xslText(self)
class PictureFormat(TextFormat):
"""Holds format info for a field with a link to a picture"""
typeName = 'Picture'
sortSequence = 8
htmlOption = False
hasFileBrowse = True
def __init__(self, name, attrs={}):
"""Any format, prefix, suffix, html info in attrs dict"""
TextFormat.__init__(self, name, attrs)
def initFormat(self):
"""Called by base init, after class change or format text change"""
self.html = True
def formatOutput(self, storedText, titleMode, internal=False):
"""Return formatted text, properly escaped and with
a link to the picture if not in titleMode"""
if titleMode:
return TextFormat.formatOutput(self, storedText, titleMode,
internal)
paths = storedText.split('\n')
results = ['<img src="%s">' % escape(url, treedoc.escDict) for url
in paths]
return u'<br />'.join(results)
class ParentFormat(TextFormat):
"""Placeholder format for references to specific parents"""
typeName = 'Parent'
def __init__(self, name, parentLevel=1):
TextFormat.__init__(self, name, {})
self.parentLevel = parentLevel
def sepName(self, englishOnly=False):
"""Return name enclosed with {* *} separators"""
name = englishOnly and self.enName or self.name
return u'{*%s%s*}' % (self.parentLevel * '*', name)
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
for num in range(self.parentLevel):
item = item.parent
if not item:
return ''
field = item.nodeFormat().findField(self.name)
if not field:
return ''
return field.outputText(item, titleMode, internal)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="%s%s"/>' % (self.parentLevel * '../',
self.name)
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)
class AncestorFormat(TextFormat):
"""Placeholder format for references to any parent with data"""
typeName = 'Ancestor'
def __init__(self, name):
TextFormat.__init__(self, name, {})
self.parentLevel = 1000
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*?%s*}' % (name)
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
field = None
while not field:
item = item.parent
if item:
field = item.nodeFormat().findField(self.name)
else:
return ''
return field.outputText(item, titleMode, internal)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="ancestor::*/%s"/>' % self.name
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(ancestor::*/%s)' % self.name
class ChildFormat(TextFormat):
"""Placeholder format for references to a sequence of child data"""
typeName = 'Child'
def __init__(self, name):
TextFormat.__init__(self, name, {})
self.parentLevel = -1
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*&%s*}' % (name)
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
result = []
for child in item.childList:
field = child.nodeFormat().findField(self.name)
if field:
text = field.outputText(child, titleMode, internal)
if text:
result.append(text)
return globalref.docRef.childFieldSep.join(result)
def xslText(self):
"""Return what we need to write into an XSL file for this type"""
return u'<xsl:value-of select="child::*/%s"/>' % self.name
def xslTestText(self):
"""Return XSL file test for data existance"""
return u'normalize-space(child::*/%s)' % self.name
class CountFormat(TextFormat):
"""Placeholder format for a count of children at the given level"""
typeName = 'Count'
def __init__(self, name, level):
TextFormat.__init__(self, name, {})
self.parentLevel = -level
def sepName(self, englishOnly=False):
"""Return name enclosed with {*? *} separators"""
name = englishOnly and self.enName or self.name
return u'{*#%s*}' % (name)
def outputText(self, item, titleMode, internal=False):
"""Return formatted text for this field"""
return repr(len(item.descendLevelList(-self.parentLevel)))
|
flexible
|
{
"blob_id": "5e1398ed628917a42cc465e7cc2979601f0f4fbc",
"index": 7865,
"step-1": "<mask token>\n\n\nclass DateFormat(TextFormat):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return GenDate(storedText).dateStr(format), True\n except GenDateError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return repr(GenDate().setFromStr(editText, format)), True\n except GenDateError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _(\n 'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = 'Now', _('Now', 'time stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'), (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (\n u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (\n u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (\n u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\\t%s' %\n (_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\\t%s' % (_(\n 
'Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\\t%s' % (_(\n 'Second (2 digits)'), 'SS'), 'SS'), (u'%s\\t%s' % (_(\n 'Fractional Seconds'), 's'), 's'), None, (u'%s\\t%s' % (_('AM/PM'),\n 'AA'), 'AA'), (u'%s\\t%s' % (_('am/pm'), 'aa'), 'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return GenTime(storedText).timeStr(format), True\n except GenTimeError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenTime(editText)), True\n except GenTimeError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(\n 'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),\n None, ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not 
in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n try:\n return GenBoolean(storedText).boolStr(self.format), True\n except GenBooleanError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenBoolean(editText)), True\n except GenBooleanError:\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\\t%s' %\n (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = UniqueIDFormat.formatRe.match(self.format\n ).groups()\n except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link 
reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:for-each select = \"./%s\">%s<xsl:choose><xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\"><xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise><a href=\"%s{.}\"><xsl:value-of select=\".\"/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'\n % (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix)))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n \"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.\n escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link to the picture if not in 
titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = [('<img src=\"%s\">' % escape(url, treedoc.escDict)) for\n url in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n self.name)\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n \"\"\"Placeholder format for references to any parent with data\"\"\"\n typeName = 'Ancestor'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = 1000\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*?%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"ancestor::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n \"\"\"Placeholder format for references to a sequence of child data\"\"\"\n typeName = 'Child'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -1\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? 
*} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*&%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n result = []\n for child in item.childList:\n field = child.nodeFormat().findField(self.name)\n if field:\n text = field.outputText(child, titleMode, internal)\n if text:\n result.append(text)\n return globalref.docRef.childFieldSep.join(result)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"child::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n \"\"\"Placeholder format for a count of children at the given level\"\"\"\n typeName = 'Count'\n\n def __init__(self, name, level):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -level\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*#%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n return repr(len(item.descendLevelList(-self.parentLevel)))\n",
"step-2": "<mask token>\n\n\nclass TextFormat(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, name, attrs={}):\n \"\"\"Any prefix, suffix, html info in attrs dict\"\"\"\n self.name = name\n self.enName = ''\n self.format = attrs.get(u'format', self.defaultFormat)\n self.prefix = attrs.get(u'prefix', '')\n self.suffix = attrs.get(u'suffix', '')\n self.html = attrs.get(u'html', '').startswith('y') and True or False\n self.isRequired = attrs.get(u'required', '').startswith('y'\n ) and True or False\n self.hidden = attrs.get(u'hidden', '').startswith('y'\n ) and True or False\n try:\n self.numLines = int(attrs.get(u'lines', repr(self.defaultNumLines))\n )\n except ValueError:\n self.numLines = 1\n self.initDefault = attrs.get(u'init', '')\n self.linkAltField = attrs.get(u'linkalt', '')\n self.parentLevel = 0\n self.useFileInfo = False\n self.showInDialog = True\n self.initFormat()\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n pass\n\n def duplicateSettings(self, otherField):\n \"\"\"Assign other field's parameters to this field\"\"\"\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n self.initDefault = otherField.initDefault\n self.linkAltField = otherField.linkAltField\n self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog\n\n def changeType(self, newType):\n \"\"\"Change this field's type to newType with default format\"\"\"\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()\n <mask token>\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n if not self.useFileInfo:\n return u'{*%s*}' % name\n return u'{*!%s*}' % name\n\n def labelName(self):\n \"\"\"Return name used for labels - add * for required fields\"\"\"\n if self.isRequired:\n return '%s*' % self.name\n return self.name\n\n def writeXml(self):\n \"\"\"Return text for xml attributes\"\"\"\n text = u' type=\"%s\"' % self.typeName\n if self.format:\n text += u' format=\"%s\"' % escape(self.format, treedoc.escDict)\n if self.prefix:\n text += u' prefix=\"%s\"' % escape(self.prefix, treedoc.escDict)\n if self.suffix:\n text += u' suffix=\"%s\"' % escape(self.suffix, treedoc.escDict)\n if self.html:\n text += u' html=\"y\"'\n if self.isRequired:\n text += u' required=\"y\"'\n if self.hidden:\n text += u' hidden=\"y\"'\n if self.numLines > 1:\n text += u' lines=\"%d\"' % self.numLines\n if self.initDefault:\n text += u' init=\"%s\"' % escape(self.initDefault, treedoc.escDict)\n if self.linkAltField:\n text += u' linkalt=\"%s\"' % escape(self.linkAltField, treedoc.\n escDict)\n return text\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''\n\n def removeMarkup(self, text):\n \"\"\"Remove HTML 
Markup and unescape entities\"\"\"\n text = TextFormat.stripTagRe.sub('', text)\n return unescape(text)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)\n\n def editText(self, item):\n \"\"\"Return tuple of this field's text in edit format and bool validity,\n using edit format option\"\"\"\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return result[0], False\n return result\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n return editText, editText or not self.isRequired\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n return self.initDefault\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n self.initDefault = self.storedText(editText)[0]\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n return self.formatEditText(self.initDefault)[0]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return []\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n return storedText.lower()\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n return value\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:if test=\"normalize-space(./%s)\">%s<xsl:value-of select=\"./%s\"/>%s</xsl:if>'\n % (self.name, xslEscape(self.prefix), self.name, xslEscape(\n self.suffix)))\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(./%s)' % self.name\n\n\nclass LongTextFormat(TextFormat):\n \"\"\"Holds format info for a long text field - Obsolete -\n kept for compatability with old files\"\"\"\n defaultNumLines = 7\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n\nclass NumberFormat(TextFormat):\n \"\"\"Holds format info for a number field\"\"\"\n typeName = 'Number'\n sortSequence = 10\n defaultFormat = u'#.##'\n formatMenuList = [(u'%s\\t%s' % (_('Optional Digit'), '#'), '#'), (\n u'%s\\t%s' % (_('Required Digit'), '0'), '0'), (u'%s\\t%s' % (_(\n 'Digit or Space (external)'), _('<space>')), ' '), None, (u'%s\\t%s' %\n (_('Decimal Point'), '.'), '.'), (u'%s\\t%s' % (_('Decimal Comma'),\n ','), ','), None, (u'%s\\t%s' % (_('Comma Separator'), '\\\\,'), '\\\\,'\n ), (u'%s\\t%s' % (_('Dot Separator'), '\\\\.'), 
'\\\\.'), (u'%s\\t%s' % (\n _('Space Separator (internal)'), _('<space>')), ' '), None, (\n u'%s\\t%s' % (_('Optional Sign'), '-'), '-'), (u'%s\\t%s' % (_(\n 'Required Sign'), '+'), '+'), None, (u'%s\\t%s' % (_(\n 'Exponent (capital)'), 'E'), 'E'), (u'%s\\t%s' % (_(\n 'Exponent (small)'), 'e'), 'e')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using self.format\"\"\"\n try:\n return GenNumber(storedText).numStr(self.format), True\n except GenNumberError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using self.format\"\"\"\n try:\n return repr(GenNumber().setFromStr(editText, self.format)), True\n except GenNumberError:\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return GenNumber(storedText).num\n except GenNumberError:\n return ''\n\n\nclass ChoiceFormat(TextFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'Choice'\n sortSequence = 20\n editSep = '/'\n defaultFormat = '1/2/3/4'\n formatMenuList = [(u'%s\\t%s' % (_('Separator'), '/'), '/'), None, (\n u'%s\\t%s' % (_('\"/\" Character'), '//'), '//'), None, (u'%s\\t%s' % (\n _('Example'), '1/2/3/4'), '1/2/3/4')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = self.splitText(self.format)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n return [(text, '') for text in self.formatList]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [text for text in self.formatList]\n\n def splitText(self, textStr):\n \"\"\"Split textStr using editSep, double sep's become char\"\"\"\n return [text.strip().replace('\\x00', self.editSep) for text in\n textStr.replace(self.editSep * 2, 
'\\x00').split(self.editSep)]\n\n\nclass CombinationFormat(ChoiceFormat):\n \"\"\"Holds format info for a field of combinations of text options\"\"\"\n typeName = 'Combination'\n outputSepList = ',', ';', ':', '|', '/', '\\\\', '~'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList if \n sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '\n\n def sortedChoices(self, inText):\n \"\"\"Return tuple of choices from inText sorted like format and\n True if all splits are valid and included\"\"\"\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return sortedChoices, True\n else:\n return sortedChoices, False\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return storedText, not storedText\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n choices, valid = self.sortedChoices(editText)\n if valid:\n return self.editSep.join(choices), True\n else:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box,\n each a tuple of edit text and any annotation text\"\"\"\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList if text not in\n currentChoices]\n results = []\n for choice in nonChoices:\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'add'), choice)))\n if currentChoices:\n results.append((None, None))\n for choice in currentChoices:\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'remove'), choice)))\n return results\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [entry[0] for entry in self.getEditChoices()]\n\n\nclass AutoChoiceFormat(ChoiceFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'AutoChoice'\n defaultFormat = ''\n formatMenuList = ()\n hasEditChoices = True\n autoAddChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = []\n\n 
def addChoice(self, choice, sort=False):\n \"\"\"Add choice to edit menu list if not already there\"\"\"\n if choice and choice not in self.formatList:\n self.formatList.append(choice)\n if sort:\n self.sortChoices()\n\n def sortChoices(self):\n \"\"\"Sort menu list choices\"\"\"\n self.formatList.sort()\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText:\n return editText, True\n return editText, not self.isRequired\n\n\nclass DateFormat(TextFormat):\n \"\"\"Holds format info for a date field\"\"\"\n typeName = 'Date'\n sortSequence = 5\n defaultFormat = u'mmmm d, yyyy'\n dateStampStrings = 'Now', _('Now', 'date stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'), (\n u'%s\\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'), None, (u'%s\\t%s' %\n (_('Month (1 or 2 digits)'), 'm'), 'm'), (u'%s\\t%s' % (_(\n 'Month (2 digits)'), 'mm'), 'mm'), (u'%s\\t%s' % (_(\n 'Month Abbreviation'), 'mmm'), 'mmm'), (u'%s\\t%s' % (_('Month Name'\n ), 'mmmm'), 'mmmm'), None, (u'%s\\t%s' % (_('Year (2 digits)'), 'yy'\n ), 'yy'), (u'%s\\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),\n None, (u'%s\\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'), (u'%s\\t%s' %\n (_('Weekday Abbreviation'), 'www'), 'www'), (u'%s\\t%s' % (_(\n 'Weekday Name'), 'wwww'), 'wwww')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return GenDate(storedText).dateStr(format), True\n except GenDateError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return repr(GenDate().setFromStr(editText, format)), True\n except GenDateError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _(\n 'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return 
TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = 'Now', _('Now', 'time stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'), (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (\n u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (\n u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (\n u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\\t%s' %\n (_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\\t%s' % (_(\n 'Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\\t%s' % (_(\n 'Second (2 digits)'), 'SS'), 'SS'), (u'%s\\t%s' % (_(\n 'Fractional Seconds'), 's'), 's'), None, (u'%s\\t%s' % (_('AM/PM'),\n 'AA'), 'AA'), (u'%s\\t%s' % (_('am/pm'), 'aa'), 'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return GenTime(storedText).timeStr(format), True\n except GenTimeError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenTime(editText)), True\n except GenTimeError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return 
TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(\n 'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),\n None, ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n try:\n return GenBoolean(storedText).boolStr(self.format), True\n except GenBooleanError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenBoolean(editText)), True\n except GenBooleanError:\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\\t%s' %\n (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = UniqueIDFormat.formatRe.match(self.format\n ).groups()\n 
except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:for-each select = \"./%s\">%s<xsl:choose><xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\"><xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise><a href=\"%s{.}\"><xsl:value-of select=\".\"/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'\n % (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix)))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n 
\"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.\n escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link to the picture if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = [('<img src=\"%s\">' % escape(url, treedoc.escDict)) for\n url in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n self.name)\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n \"\"\"Placeholder format for references to any parent with data\"\"\"\n typeName = 'Ancestor'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = 1000\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? 
*} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*?%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"ancestor::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n \"\"\"Placeholder format for references to a sequence of child data\"\"\"\n typeName = 'Child'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -1\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*&%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n result = []\n for child in item.childList:\n field = child.nodeFormat().findField(self.name)\n if field:\n text = field.outputText(child, titleMode, internal)\n if text:\n result.append(text)\n return globalref.docRef.childFieldSep.join(result)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"child::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n \"\"\"Placeholder format for a count of children at the given level\"\"\"\n typeName = 'Count'\n\n def __init__(self, name, level):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -level\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*#%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n return repr(len(item.descendLevelList(-self.parentLevel)))\n",
"step-3": "<mask token>\n\n\nclass TextFormat(object):\n \"\"\"Holds format info for a normal text field\"\"\"\n typeName = 'Text'\n sortSequence = 20\n stripTagRe = re.compile('<.*?>')\n defaultNumLines = 1\n defaultFormat = ''\n formatMenuList = []\n htmlOption = True\n hasEditChoices = False\n autoAddChoices = False\n hasFileBrowse = False\n allowAltLinkText = False\n\n def __init__(self, name, attrs={}):\n \"\"\"Any prefix, suffix, html info in attrs dict\"\"\"\n self.name = name\n self.enName = ''\n self.format = attrs.get(u'format', self.defaultFormat)\n self.prefix = attrs.get(u'prefix', '')\n self.suffix = attrs.get(u'suffix', '')\n self.html = attrs.get(u'html', '').startswith('y') and True or False\n self.isRequired = attrs.get(u'required', '').startswith('y'\n ) and True or False\n self.hidden = attrs.get(u'hidden', '').startswith('y'\n ) and True or False\n try:\n self.numLines = int(attrs.get(u'lines', repr(self.defaultNumLines))\n )\n except ValueError:\n self.numLines = 1\n self.initDefault = attrs.get(u'init', '')\n self.linkAltField = attrs.get(u'linkalt', '')\n self.parentLevel = 0\n self.useFileInfo = False\n self.showInDialog = True\n self.initFormat()\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n pass\n\n def duplicateSettings(self, otherField):\n \"\"\"Assign other field's parameters to this field\"\"\"\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n self.initDefault = otherField.initDefault\n self.linkAltField = otherField.linkAltField\n self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog\n\n def changeType(self, newType):\n \"\"\"Change this field's type to newType with default format\"\"\"\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()\n\n def englishName(self):\n \"\"\"Returns English name if assigned, o/w name\"\"\"\n if self.enName:\n return self.enName\n return self.name\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n if not self.useFileInfo:\n return u'{*%s*}' % name\n return u'{*!%s*}' % name\n\n def labelName(self):\n \"\"\"Return name used for labels - add * for required fields\"\"\"\n if self.isRequired:\n return '%s*' % self.name\n return self.name\n\n def writeXml(self):\n \"\"\"Return text for xml attributes\"\"\"\n text = u' type=\"%s\"' % self.typeName\n if self.format:\n text += u' format=\"%s\"' % escape(self.format, treedoc.escDict)\n if self.prefix:\n text += u' prefix=\"%s\"' % escape(self.prefix, treedoc.escDict)\n if self.suffix:\n text += u' suffix=\"%s\"' % escape(self.suffix, treedoc.escDict)\n if self.html:\n text += u' html=\"y\"'\n if self.isRequired:\n text += u' required=\"y\"'\n if self.hidden:\n text += u' hidden=\"y\"'\n if self.numLines > 1:\n text += u' lines=\"%d\"' % self.numLines\n if self.initDefault:\n text += u' init=\"%s\"' % escape(self.initDefault, treedoc.escDict)\n if self.linkAltField:\n text += u' linkalt=\"%s\"' % escape(self.linkAltField, treedoc.\n escDict)\n return text\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for 
this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''\n\n def removeMarkup(self, text):\n \"\"\"Remove HTML Markup and unescape entities\"\"\"\n text = TextFormat.stripTagRe.sub('', text)\n return unescape(text)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)\n\n def editText(self, item):\n \"\"\"Return tuple of this field's text in edit format and bool validity,\n using edit format option\"\"\"\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return result[0], False\n return result\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n return editText, editText or not self.isRequired\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n return self.initDefault\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n self.initDefault = self.storedText(editText)[0]\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n return self.formatEditText(self.initDefault)[0]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return []\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n return storedText.lower()\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n return value\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:if test=\"normalize-space(./%s)\">%s<xsl:value-of select=\"./%s\"/>%s</xsl:if>'\n % (self.name, xslEscape(self.prefix), self.name, xslEscape(\n self.suffix)))\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(./%s)' % self.name\n\n\nclass LongTextFormat(TextFormat):\n \"\"\"Holds format info for a long text field - Obsolete -\n kept for compatability with old files\"\"\"\n defaultNumLines = 7\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n\nclass NumberFormat(TextFormat):\n \"\"\"Holds format info for a number field\"\"\"\n typeName = 'Number'\n sortSequence = 10\n defaultFormat = u'#.##'\n formatMenuList = [(u'%s\\t%s' % (_('Optional Digit'), '#'), '#'), (\n u'%s\\t%s' % (_('Required Digit'), '0'), '0'), (u'%s\\t%s' % (_(\n 'Digit 
or Space (external)'), _('<space>')), ' '), None, (u'%s\\t%s' %\n (_('Decimal Point'), '.'), '.'), (u'%s\\t%s' % (_('Decimal Comma'),\n ','), ','), None, (u'%s\\t%s' % (_('Comma Separator'), '\\\\,'), '\\\\,'\n ), (u'%s\\t%s' % (_('Dot Separator'), '\\\\.'), '\\\\.'), (u'%s\\t%s' % (\n _('Space Separator (internal)'), _('<space>')), ' '), None, (\n u'%s\\t%s' % (_('Optional Sign'), '-'), '-'), (u'%s\\t%s' % (_(\n 'Required Sign'), '+'), '+'), None, (u'%s\\t%s' % (_(\n 'Exponent (capital)'), 'E'), 'E'), (u'%s\\t%s' % (_(\n 'Exponent (small)'), 'e'), 'e')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using self.format\"\"\"\n try:\n return GenNumber(storedText).numStr(self.format), True\n except GenNumberError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using self.format\"\"\"\n try:\n return repr(GenNumber().setFromStr(editText, self.format)), True\n except GenNumberError:\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return GenNumber(storedText).num\n except GenNumberError:\n return ''\n\n\nclass ChoiceFormat(TextFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'Choice'\n sortSequence = 20\n editSep = '/'\n defaultFormat = '1/2/3/4'\n formatMenuList = [(u'%s\\t%s' % (_('Separator'), '/'), '/'), None, (\n u'%s\\t%s' % (_('\"/\" Character'), '//'), '//'), None, (u'%s\\t%s' % (\n _('Example'), '1/2/3/4'), '1/2/3/4')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = self.splitText(self.format)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n return [(text, '') for text in self.formatList]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init 
default\"\"\"\n return [text for text in self.formatList]\n\n def splitText(self, textStr):\n \"\"\"Split textStr using editSep, double sep's become char\"\"\"\n return [text.strip().replace('\\x00', self.editSep) for text in\n textStr.replace(self.editSep * 2, '\\x00').split(self.editSep)]\n\n\nclass CombinationFormat(ChoiceFormat):\n \"\"\"Holds format info for a field of combinations of text options\"\"\"\n typeName = 'Combination'\n outputSepList = ',', ';', ':', '|', '/', '\\\\', '~'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList if \n sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '\n\n def sortedChoices(self, inText):\n \"\"\"Return tuple of choices from inText sorted like format and\n True if all splits are valid and included\"\"\"\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return sortedChoices, True\n else:\n return sortedChoices, False\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return storedText, not storedText\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n choices, valid = self.sortedChoices(editText)\n if valid:\n return self.editSep.join(choices), True\n else:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box,\n each a tuple of edit text and any annotation text\"\"\"\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList if text not in\n currentChoices]\n results = []\n for choice in nonChoices:\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'add'), choice)))\n if currentChoices:\n results.append((None, None))\n for choice in currentChoices:\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'remove'), choice)))\n return results\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [entry[0] for entry in self.getEditChoices()]\n\n\nclass AutoChoiceFormat(ChoiceFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'AutoChoice'\n defaultFormat = ''\n formatMenuList = ()\n hasEditChoices = True\n autoAddChoices = True\n\n def 
__init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = []\n\n def addChoice(self, choice, sort=False):\n \"\"\"Add choice to edit menu list if not already there\"\"\"\n if choice and choice not in self.formatList:\n self.formatList.append(choice)\n if sort:\n self.sortChoices()\n\n def sortChoices(self):\n \"\"\"Sort menu list choices\"\"\"\n self.formatList.sort()\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText:\n return editText, True\n return editText, not self.isRequired\n\n\nclass DateFormat(TextFormat):\n \"\"\"Holds format info for a date field\"\"\"\n typeName = 'Date'\n sortSequence = 5\n defaultFormat = u'mmmm d, yyyy'\n dateStampStrings = 'Now', _('Now', 'date stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'), (\n u'%s\\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'), None, (u'%s\\t%s' %\n (_('Month (1 or 2 digits)'), 'm'), 'm'), (u'%s\\t%s' % (_(\n 'Month (2 digits)'), 'mm'), 'mm'), (u'%s\\t%s' % (_(\n 'Month Abbreviation'), 'mmm'), 'mmm'), (u'%s\\t%s' % (_('Month Name'\n ), 'mmmm'), 'mmmm'), None, (u'%s\\t%s' % (_('Year (2 digits)'), 'yy'\n ), 'yy'), (u'%s\\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),\n None, (u'%s\\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'), (u'%s\\t%s' %\n (_('Weekday Abbreviation'), 'www'), 'www'), (u'%s\\t%s' % (_(\n 'Weekday Name'), 'wwww'), 'wwww')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return GenDate(storedText).dateStr(format), True\n except GenDateError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return repr(GenDate().setFromStr(editText, format)), True\n except GenDateError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')), 
(yesterday, '(%s)' % _(\n 'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = 'Now', _('Now', 'time stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'), (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (\n u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (\n u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (\n u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\\t%s' %\n (_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\\t%s' % (_(\n 'Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\\t%s' % (_(\n 'Second (2 digits)'), 'SS'), 'SS'), (u'%s\\t%s' % (_(\n 'Fractional Seconds'), 's'), 's'), None, (u'%s\\t%s' % (_('AM/PM'),\n 'AA'), 'AA'), (u'%s\\t%s' % (_('am/pm'), 'aa'), 'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return GenTime(storedText).timeStr(format), True\n except GenTimeError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenTime(editText)), True\n except GenTimeError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 
0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(\n 'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),\n None, ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n try:\n return GenBoolean(storedText).boolStr(self.format), True\n except GenBooleanError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenBoolean(editText)), True\n except GenBooleanError:\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\\t%s' %\n (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n 
TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = UniqueIDFormat.formatRe.match(self.format\n ).groups()\n except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:for-each select = \"./%s\">%s<xsl:choose><xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\"><xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise><a href=\"%s{.}\"><xsl:value-of select=\".\"/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'\n % (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix)))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field 
with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n \"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.\n escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link to the picture if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = [('<img src=\"%s\">' % escape(url, treedoc.escDict)) for\n url in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n self.name)\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n \"\"\"Placeholder format for references to any parent with data\"\"\"\n typeName = 'Ancestor'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = 1000\n\n def sepName(self, 
englishOnly=False):\n        """Return name enclosed with {*? *} separators"""\n        name = englishOnly and self.enName or self.name\n        return u'{*?%s*}' % name\n\n    def outputText(self, item, titleMode, internal=False):\n        """Return formatted text for this field"""\n        field = None\n        while not field:\n            item = item.parent\n            if item:\n                field = item.nodeFormat().findField(self.name)\n            else:\n                return ''\n        return field.outputText(item, titleMode, internal)\n\n    def xslText(self):\n        """Return what we need to write into an XSL file for this type"""\n        return u'<xsl:value-of select="ancestor::*/%s"/>' % self.name\n\n    def xslTestText(self):\n        """Return XSL file test for data existence"""\n        return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n    """Placeholder format for references to a sequence of child data"""\n    typeName = 'Child'\n\n    def __init__(self, name):\n        TextFormat.__init__(self, name, {})\n        self.parentLevel = -1\n\n    def sepName(self, englishOnly=False):\n        """Return name enclosed with {*& *} separators"""\n        name = englishOnly and self.enName or self.name\n        return u'{*&%s*}' % name\n\n    def outputText(self, item, titleMode, internal=False):\n        """Return formatted text for this field"""\n        result = []\n        for child in item.childList:\n            field = child.nodeFormat().findField(self.name)\n            if field:\n                text = field.outputText(child, titleMode, internal)\n                if text:\n                    result.append(text)\n        return globalref.docRef.childFieldSep.join(result)\n\n    def xslText(self):\n        """Return what we need to write into an XSL file for this type"""\n        return u'<xsl:value-of select="child::*/%s"/>' % self.name\n\n    def xslTestText(self):\n        """Return XSL file test for data existence"""\n        return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n    """Placeholder format for a count of children at the given level"""\n    typeName = 'Count'\n\n    def __init__(self, name, level):\n        TextFormat.__init__(self, name, {})\n        self.parentLevel = -level\n\n    def sepName(self, englishOnly=False):\n        """Return name enclosed with {*# *} separators"""\n        name = englishOnly and self.enName or self.name\n        return u'{*#%s*}' % name\n\n    def outputText(self, item, titleMode, internal=False):\n        """Return formatted text for this field"""\n        return repr(len(item.descendLevelList(-self.parentLevel)))\n",
"step-4": "<mask token>\n_errorStr = '#####'\n\n\ndef xslEscape(text):\n \"\"\"Encapsulate all literal text in <xsl:text> elements\n and transform/escape some non-XML entities.\n For the moment, only is supported\"\"\"\n nonTagRe = re.compile('(.*?)(<.*?>)|(.*)')\n escDict = {'&nbsp;': ' '}\n\n def esc(matchObj):\n \"\"\"Return escaped replacement text\"\"\"\n if matchObj.group(1) == None:\n return u'<xsl:text>%s</xsl:text>' % escape(matchObj.group(3),\n escDict)\n if matchObj.group(1):\n return u'<xsl:text>%s</xsl:text>%s' % (escape(matchObj.group(1),\n escDict), matchObj.group(2))\n return matchObj.group(2)\n return nonTagRe.sub(esc, text)\n\n\nclass TextFormat(object):\n \"\"\"Holds format info for a normal text field\"\"\"\n typeName = 'Text'\n sortSequence = 20\n stripTagRe = re.compile('<.*?>')\n defaultNumLines = 1\n defaultFormat = ''\n formatMenuList = []\n htmlOption = True\n hasEditChoices = False\n autoAddChoices = False\n hasFileBrowse = False\n allowAltLinkText = False\n\n def __init__(self, name, attrs={}):\n \"\"\"Any prefix, suffix, html info in attrs dict\"\"\"\n self.name = name\n self.enName = ''\n self.format = attrs.get(u'format', self.defaultFormat)\n self.prefix = attrs.get(u'prefix', '')\n self.suffix = attrs.get(u'suffix', '')\n self.html = attrs.get(u'html', '').startswith('y') and True or False\n self.isRequired = attrs.get(u'required', '').startswith('y'\n ) and True or False\n self.hidden = attrs.get(u'hidden', '').startswith('y'\n ) and True or False\n try:\n self.numLines = int(attrs.get(u'lines', repr(self.defaultNumLines))\n )\n except ValueError:\n self.numLines = 1\n self.initDefault = attrs.get(u'init', '')\n self.linkAltField = attrs.get(u'linkalt', '')\n self.parentLevel = 0\n self.useFileInfo = False\n self.showInDialog = True\n self.initFormat()\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n pass\n\n def duplicateSettings(self, otherField):\n \"\"\"Assign other field's parameters to this field\"\"\"\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n self.initDefault = otherField.initDefault\n self.linkAltField = otherField.linkAltField\n self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog\n\n def changeType(self, newType):\n \"\"\"Change this field's type to newType with default format\"\"\"\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()\n\n def englishName(self):\n \"\"\"Returns English name if assigned, o/w name\"\"\"\n if self.enName:\n return self.enName\n return self.name\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n if not self.useFileInfo:\n return u'{*%s*}' % name\n return u'{*!%s*}' % name\n\n def labelName(self):\n \"\"\"Return name used for labels - add * for required fields\"\"\"\n if self.isRequired:\n return '%s*' % self.name\n return self.name\n\n def writeXml(self):\n \"\"\"Return text for xml attributes\"\"\"\n text = u' type=\"%s\"' % self.typeName\n if self.format:\n text += u' format=\"%s\"' % escape(self.format, treedoc.escDict)\n if self.prefix:\n text += u' prefix=\"%s\"' % 
escape(self.prefix, treedoc.escDict)\n if self.suffix:\n text += u' suffix=\"%s\"' % escape(self.suffix, treedoc.escDict)\n if self.html:\n text += u' html=\"y\"'\n if self.isRequired:\n text += u' required=\"y\"'\n if self.hidden:\n text += u' hidden=\"y\"'\n if self.numLines > 1:\n text += u' lines=\"%d\"' % self.numLines\n if self.initDefault:\n text += u' init=\"%s\"' % escape(self.initDefault, treedoc.escDict)\n if self.linkAltField:\n text += u' linkalt=\"%s\"' % escape(self.linkAltField, treedoc.\n escDict)\n return text\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''\n\n def removeMarkup(self, text):\n \"\"\"Remove HTML Markup and unescape entities\"\"\"\n text = TextFormat.stripTagRe.sub('', text)\n return unescape(text)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)\n\n def editText(self, item):\n \"\"\"Return tuple of this field's text in edit format and bool validity,\n using edit format option\"\"\"\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return result[0], False\n return result\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n return editText, editText or not self.isRequired\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n return self.initDefault\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n self.initDefault = self.storedText(editText)[0]\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n return self.formatEditText(self.initDefault)[0]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return []\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n return storedText.lower()\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n return value\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:if test=\"normalize-space(./%s)\">%s<xsl:value-of select=\"./%s\"/>%s</xsl:if>'\n % (self.name, xslEscape(self.prefix), self.name, xslEscape(\n self.suffix)))\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(./%s)' % 
self.name\n\n\nclass LongTextFormat(TextFormat):\n \"\"\"Holds format info for a long text field - Obsolete -\n kept for compatability with old files\"\"\"\n defaultNumLines = 7\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n\nclass NumberFormat(TextFormat):\n \"\"\"Holds format info for a number field\"\"\"\n typeName = 'Number'\n sortSequence = 10\n defaultFormat = u'#.##'\n formatMenuList = [(u'%s\\t%s' % (_('Optional Digit'), '#'), '#'), (\n u'%s\\t%s' % (_('Required Digit'), '0'), '0'), (u'%s\\t%s' % (_(\n 'Digit or Space (external)'), _('<space>')), ' '), None, (u'%s\\t%s' %\n (_('Decimal Point'), '.'), '.'), (u'%s\\t%s' % (_('Decimal Comma'),\n ','), ','), None, (u'%s\\t%s' % (_('Comma Separator'), '\\\\,'), '\\\\,'\n ), (u'%s\\t%s' % (_('Dot Separator'), '\\\\.'), '\\\\.'), (u'%s\\t%s' % (\n _('Space Separator (internal)'), _('<space>')), ' '), None, (\n u'%s\\t%s' % (_('Optional Sign'), '-'), '-'), (u'%s\\t%s' % (_(\n 'Required Sign'), '+'), '+'), None, (u'%s\\t%s' % (_(\n 'Exponent (capital)'), 'E'), 'E'), (u'%s\\t%s' % (_(\n 'Exponent (small)'), 'e'), 'e')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using self.format\"\"\"\n try:\n return GenNumber(storedText).numStr(self.format), True\n except GenNumberError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using self.format\"\"\"\n try:\n return repr(GenNumber().setFromStr(editText, self.format)), True\n except GenNumberError:\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return GenNumber(storedText).num\n except GenNumberError:\n return ''\n\n\nclass ChoiceFormat(TextFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'Choice'\n sortSequence = 20\n editSep = '/'\n defaultFormat = '1/2/3/4'\n formatMenuList = [(u'%s\\t%s' % (_('Separator'), '/'), '/'), None, (\n u'%s\\t%s' % (_('\"/\" Character'), '//'), '//'), None, (u'%s\\t%s' % (\n _('Example'), '1/2/3/4'), '1/2/3/4')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = self.splitText(self.format)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in 
self.formatList:\n return storedText, True\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n return [(text, '') for text in self.formatList]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [text for text in self.formatList]\n\n def splitText(self, textStr):\n \"\"\"Split textStr using editSep, double sep's become char\"\"\"\n return [text.strip().replace('\\x00', self.editSep) for text in\n textStr.replace(self.editSep * 2, '\\x00').split(self.editSep)]\n\n\nclass CombinationFormat(ChoiceFormat):\n \"\"\"Holds format info for a field of combinations of text options\"\"\"\n typeName = 'Combination'\n outputSepList = ',', ';', ':', '|', '/', '\\\\', '~'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList if \n sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '\n\n def sortedChoices(self, inText):\n \"\"\"Return tuple of choices from inText sorted like format and\n True if all splits are valid and included\"\"\"\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return sortedChoices, True\n else:\n return sortedChoices, False\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return storedText, not storedText\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n choices, valid = self.sortedChoices(editText)\n if valid:\n return self.editSep.join(choices), True\n else:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box,\n each a tuple of edit text and any annotation text\"\"\"\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList if text not in\n currentChoices]\n results = []\n for choice in nonChoices:\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'add'), choice)))\n if currentChoices:\n results.append((None, None))\n for choice in currentChoices:\n allChoices = 
currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'remove'), choice)))\n return results\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [entry[0] for entry in self.getEditChoices()]\n\n\nclass AutoChoiceFormat(ChoiceFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'AutoChoice'\n defaultFormat = ''\n formatMenuList = ()\n hasEditChoices = True\n autoAddChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = []\n\n def addChoice(self, choice, sort=False):\n \"\"\"Add choice to edit menu list if not already there\"\"\"\n if choice and choice not in self.formatList:\n self.formatList.append(choice)\n if sort:\n self.sortChoices()\n\n def sortChoices(self):\n \"\"\"Sort menu list choices\"\"\"\n self.formatList.sort()\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText:\n return editText, True\n return editText, not self.isRequired\n\n\nclass DateFormat(TextFormat):\n \"\"\"Holds format info for a date field\"\"\"\n typeName = 'Date'\n sortSequence = 5\n defaultFormat = u'mmmm d, yyyy'\n dateStampStrings = 'Now', _('Now', 'date stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'), (\n u'%s\\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'), None, (u'%s\\t%s' %\n (_('Month (1 or 2 digits)'), 'm'), 'm'), (u'%s\\t%s' % (_(\n 'Month (2 digits)'), 'mm'), 'mm'), (u'%s\\t%s' % (_(\n 'Month Abbreviation'), 'mmm'), 'mmm'), (u'%s\\t%s' % (_('Month Name'\n ), 'mmmm'), 'mmmm'), None, (u'%s\\t%s' % (_('Year (2 digits)'), 'yy'\n ), 'yy'), (u'%s\\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),\n None, (u'%s\\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'), (u'%s\\t%s' %\n (_('Weekday Abbreviation'), 'www'), 'www'), (u'%s\\t%s' % (_(\n 'Weekday Name'), 'wwww'), 'wwww')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return GenDate(storedText).dateStr(format), True\n except GenDateError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool 
validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return repr(GenDate().setFromStr(editText, format)), True\n except GenDateError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _(\n 'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = 'Now', _('Now', 'time stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'), (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (\n u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (\n u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (\n u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\\t%s' %\n (_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\\t%s' % (_(\n 'Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\\t%s' % (_(\n 'Second (2 digits)'), 'SS'), 'SS'), (u'%s\\t%s' % (_(\n 'Fractional Seconds'), 's'), 's'), None, (u'%s\\t%s' % (_('AM/PM'),\n 'AA'), 'AA'), (u'%s\\t%s' % (_('am/pm'), 'aa'), 'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return GenTime(storedText).timeStr(format), True\n except GenTimeError:\n return storedText, not 
storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenTime(editText)), True\n except GenTimeError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(\n 'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),\n None, ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n try:\n return GenBoolean(storedText).boolStr(self.format), True\n except GenBooleanError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenBoolean(editText)), True\n except GenBooleanError:\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n 
return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\\t%s' %\n (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = UniqueIDFormat.formatRe.match(self.format\n ).groups()\n except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:for-each select = \"./%s\">%s<xsl:choose><xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\"><xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise><a href=\"%s{.}\"><xsl:value-of select=\".\"/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'\n % (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix)))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a 
local path\"\"\"\n typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n \"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.\n escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link to the picture if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = [('<img src=\"%s\">' % escape(url, treedoc.escDict)) for\n url in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, 
internal)\n\n    def xslText(self):\n        """Return what we need to write into an XSL file for this type"""\n        return u'<xsl:value-of select="%s%s"/>' % (self.parentLevel * '../',\n            self.name)\n\n    def xslTestText(self):\n        """Return XSL file test for data existence"""\n        return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n    """Placeholder format for references to any parent with data"""\n    typeName = 'Ancestor'\n\n    def __init__(self, name):\n        TextFormat.__init__(self, name, {})\n        self.parentLevel = 1000\n\n    def sepName(self, englishOnly=False):\n        """Return name enclosed with {*? *} separators"""\n        name = englishOnly and self.enName or self.name\n        return u'{*?%s*}' % name\n\n    def outputText(self, item, titleMode, internal=False):\n        """Return formatted text for this field"""\n        field = None\n        while not field:\n            item = item.parent\n            if item:\n                field = item.nodeFormat().findField(self.name)\n            else:\n                return ''\n        return field.outputText(item, titleMode, internal)\n\n    def xslText(self):\n        """Return what we need to write into an XSL file for this type"""\n        return u'<xsl:value-of select="ancestor::*/%s"/>' % self.name\n\n    def xslTestText(self):\n        """Return XSL file test for data existence"""\n        return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n    """Placeholder format for references to a sequence of child data"""\n    typeName = 'Child'\n\n    def __init__(self, name):\n        TextFormat.__init__(self, name, {})\n        self.parentLevel = -1\n\n    def sepName(self, englishOnly=False):\n        """Return name enclosed with {*& *} separators"""\n        name = englishOnly and self.enName or self.name\n        return u'{*&%s*}' % name\n\n    def outputText(self, item, titleMode, internal=False):\n        """Return formatted text for this field"""\n        result = []\n        for child in item.childList:\n            field = child.nodeFormat().findField(self.name)\n            if field:\n                text = field.outputText(child, titleMode, internal)\n                if text:\n                    result.append(text)\n        return globalref.docRef.childFieldSep.join(result)\n\n    def xslText(self):\n        """Return what we need to write into an XSL file for this type"""\n        return u'<xsl:value-of select="child::*/%s"/>' % self.name\n\n    def xslTestText(self):\n        """Return XSL file test for data existence"""\n        return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n    """Placeholder format for a count of children at the given level"""\n    typeName = 'Count'\n\n    def __init__(self, name, level):\n        TextFormat.__init__(self, name, {})\n        self.parentLevel = -level\n\n    def sepName(self, englishOnly=False):\n        """Return name enclosed with {*# *} separators"""\n        name = englishOnly and self.enName or self.name\n        return u'{*#%s*}' % name\n\n    def outputText(self, item, titleMode, internal=False):\n        """Return formatted text for this field"""\n        return repr(len(item.descendLevelList(-self.parentLevel)))\n",
"step-5": "#!/usr/bin/env python\n\n#****************************************************************************\n# fieldformat.py, provides non-GUI base classes for field formating\n#\n# TreeLine, an information storage program\n# Copyright (C) 2006, Douglas W. Bell\n#\n# This is free software; you can redistribute it and/or modify it under the\n# terms of the GNU General Public License, either Version 2 or any later\n# version. This program is distributed in the hope that it will be useful,\n# but WITTHOUT ANY WARRANTY. See the included LICENSE file for details.\n#****************************************************************************\n\nimport re\nfrom xml.sax.saxutils import escape, unescape\nfrom gennumber import GenNumber, GenNumberError\nfrom gendate import GenDate, GenDateError\nfrom gentime import GenTime, GenTimeError\nfrom genboolean import GenBoolean, GenBooleanError\nimport treedoc\nimport globalref\n\n_errorStr = '#####'\n\n\ndef xslEscape(text):\n \"\"\"Encapsulate all literal text in <xsl:text> elements\n and transform/escape some non-XML entities.\n For the moment, only is supported\"\"\"\n nonTagRe = re.compile(r'(.*?)(<.*?>)|(.*)')\n escDict = {'&nbsp;': ' '} # escape function does '&' first\n def esc(matchObj):\n \"\"\"Return escaped replacement text\"\"\"\n if matchObj.group(1) == None: # no tags found\n return u'<xsl:text>%s</xsl:text>' % \\\n escape(matchObj.group(3), escDict)\n if matchObj.group(1): # leading text and tag\n return u'<xsl:text>%s</xsl:text>%s' % \\\n (escape(matchObj.group(1), escDict), matchObj.group(2))\n return matchObj.group(2) # tag only\n return nonTagRe.sub(esc, text)\n\n\nclass TextFormat(object):\n \"\"\"Holds format info for a normal text field\"\"\"\n typeName = 'Text'\n sortSequence = 20\n stripTagRe = re.compile('<.*?>')\n defaultNumLines = 1\n #field format edit options:\n defaultFormat = ''\n formatMenuList = []\n htmlOption = True\n hasEditChoices = False\n autoAddChoices = False\n hasFileBrowse = False\n allowAltLinkText = False\n\n def __init__(self, name, attrs={}):\n \"\"\"Any prefix, suffix, html info in attrs dict\"\"\"\n self.name = name\n self.enName = '' # used only by fileFormat field for i18n\n self.format = attrs.get(u'format', self.defaultFormat)\n self.prefix = attrs.get(u'prefix', '')\n self.suffix = attrs.get(u'suffix', '')\n # defaults to no html (line breaks preserved)\n self.html = attrs.get(u'html', '').startswith('y') and True or False\n self.isRequired = attrs.get(u'required', '').startswith('y') and \\\n True or False\n self.hidden = attrs.get(u'hidden', '').startswith('y') and \\\n True or False\n try:\n self.numLines = int(attrs.get(u'lines',\n repr(self.defaultNumLines)))\n except ValueError:\n self.numLines = 1\n self.initDefault = attrs.get(u'init', '')\n self.linkAltField = attrs.get(u'linkalt', '')\n self.parentLevel = 0\n self.useFileInfo = False\n self.showInDialog = True\n self.initFormat()\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n pass\n\n def duplicateSettings(self, otherField):\n \"\"\"Assign other field's parameters to this field\"\"\"\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n self.initDefault = otherField.initDefault\n self.linkAltField = otherField.linkAltField\n 
self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog\n\n def changeType(self, newType):\n \"\"\"Change this field's type to newType with default format\"\"\"\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()\n\n def englishName(self):\n \"\"\"Returns English name if assigned, o/w name\"\"\"\n if self.enName:\n return self.enName\n return self.name\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n if not self.useFileInfo:\n return u'{*%s*}' % name\n return u'{*!%s*}' % name\n\n def labelName(self):\n \"\"\"Return name used for labels - add * for required fields\"\"\"\n if self.isRequired:\n return '%s*' % self.name\n return self.name\n\n def writeXml(self):\n \"\"\"Return text for xml attributes\"\"\"\n text = u' type=\"%s\"' % self.typeName\n if self.format:\n text += u' format=\"%s\"' % escape(self.format, treedoc.escDict)\n if self.prefix:\n text += u' prefix=\"%s\"' % escape(self.prefix, treedoc.escDict)\n if self.suffix:\n text += u' suffix=\"%s\"' % escape(self.suffix, treedoc.escDict)\n if self.html:\n text += u' html=\"y\"'\n if self.isRequired:\n text += u' required=\"y\"'\n if self.hidden:\n text += u' hidden=\"y\"'\n if self.numLines > 1:\n text += u' lines=\"%d\"' % self.numLines\n if self.initDefault:\n text += u' init=\"%s\"' % escape(self.initDefault, treedoc.escDict)\n if self.linkAltField:\n text += u' linkalt=\"%s\"' % escape(self.linkAltField,\n treedoc.escDict)\n return text\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''\n\n def removeMarkup(self, text):\n \"\"\"Remove HTML Markup and unescape entities\"\"\"\n text = TextFormat.stripTagRe.sub('', text)\n return unescape(text)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)\n\n def editText(self, item):\n \"\"\"Return tuple of this field's text in edit format and bool validity,\n using edit format option\"\"\"\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return (storedText, True)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n return (editText, editText or not self.isRequired)\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n return self.initDefault\n\n def setInitDefault(self, editText):\n \"\"\"Set initial 
value from editor version using edit format option\"\"\"\n self.initDefault = self.storedText(editText)[0]\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n return self.formatEditText(self.initDefault)[0]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return []\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n return storedText.lower()\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n return value\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:if test=\"normalize-space(./%s)\">%s'\\\n '<xsl:value-of select=\"./%s\"/>%s</xsl:if>' % \\\n (self.name, xslEscape(self.prefix), self.name,\n xslEscape(self.suffix))\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(./%s)' % self.name\n\n\nclass LongTextFormat(TextFormat):\n \"\"\"Holds format info for a long text field - Obsolete -\n kept for compatability with old files\"\"\"\n # typeName = 'LongText'\n defaultNumLines = 7\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n\nclass NumberFormat(TextFormat):\n \"\"\"Holds format info for a number field\"\"\"\n typeName = 'Number'\n sortSequence = 10\n #field format edit options:\n defaultFormat = u'#.##'\n formatMenuList = [(u'%s\\t%s' % (_('Optional Digit'), '#'), '#'),\n (u'%s\\t%s' % (_('Required Digit'), '0'), '0'),\n (u'%s\\t%s' % (_('Digit or Space (external)'),\n _('<space>')), ' '),\n None,\n (u'%s\\t%s' % (_('Decimal Point'), '.'), '.'),\n (u'%s\\t%s' % (_('Decimal Comma'), ','), ','),\n None,\n (u'%s\\t%s' % (_('Comma Separator'), '\\,'), '\\,'),\n (u'%s\\t%s' % (_('Dot Separator'), '\\.'), '\\.'),\n (u'%s\\t%s' % (_('Space Separator (internal)'),\n _('<space>')), ' '),\n None,\n (u'%s\\t%s' % (_('Optional Sign'), '-'), '-'),\n (u'%s\\t%s' % (_('Required Sign'), '+'), '+'),\n None,\n (u'%s\\t%s' % (_('Exponent (capital)'), 'E'), 'E'),\n (u'%s\\t%s' % (_('Exponent (small)'), 'e'), 'e')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using self.format\"\"\"\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using self.format\"\"\"\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return 
GenNumber(storedText).num\n except GenNumberError:\n return ''\n\n\nclass ChoiceFormat(TextFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'Choice'\n sortSequence = 20\n editSep = '/'\n #field format edit options:\n defaultFormat = '1/2/3/4'\n formatMenuList = [(u'%s\\t%s' % (_('Separator'), '/'), '/'), None,\n (u'%s\\t%s' % (_('\"/\" Character'), '//'), '//'), None,\n (u'%s\\t%s' % (_('Example'), '1/2/3/4'), '1/2/3/4')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = self.splitText(self.format)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n return [(text, '') for text in self.formatList]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [text for text in self.formatList]\n\n def splitText(self, textStr):\n \"\"\"Split textStr using editSep, double sep's become char\"\"\"\n return [text.strip().replace('\\0', self.editSep) for text in\n textStr.replace(self.editSep * 2, '\\0').\n split(self.editSep)]\n\n\nclass CombinationFormat(ChoiceFormat):\n \"\"\"Holds format info for a field of combinations of text options\"\"\"\n typeName = 'Combination'\n outputSepList = (',', ';', ':', '|', '/', '\\\\', '~')\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList\n if sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '\n\n def sortedChoices(self, inText):\n \"\"\"Return tuple of choices from inText sorted like format and\n True if all splits are valid and included\"\"\"\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return (sortedChoices, True)\n else:\n return (sortedChoices, False)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, 
titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box,\n each a tuple of edit text and any annotation text\"\"\"\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [entry[0] for entry in self.getEditChoices()]\n\n\nclass AutoChoiceFormat(ChoiceFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'AutoChoice'\n #field format edit options:\n defaultFormat = ''\n formatMenuList = ()\n hasEditChoices = True\n autoAddChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = []\n\n def addChoice(self, choice, sort=False):\n \"\"\"Add choice to edit menu list if not already there\"\"\"\n if choice and choice not in self.formatList:\n self.formatList.append(choice)\n if sort:\n self.sortChoices()\n\n def sortChoices(self):\n \"\"\"Sort menu list choices\"\"\"\n self.formatList.sort()\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return (storedText, True)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)\n\n\nclass DateFormat(TextFormat):\n \"\"\"Holds format info for a date field\"\"\"\n typeName = 'Date'\n sortSequence = 5\n #field format edit options:\n defaultFormat = u'mmmm d, yyyy'\n dateStampStrings = ('Now', _('Now', 'date stamp setting'))\n formatMenuList = [(u'%s\\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'),\n (u'%s\\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'),\n None,\n (u'%s\\t%s' % 
(_('Month (1 or 2 digits)'), 'm'), 'm'),\n (u'%s\\t%s' % (_('Month (2 digits)'), 'mm'), 'mm'),\n (u'%s\\t%s' % (_('Month Abbreviation'), 'mmm'), 'mmm'),\n (u'%s\\t%s' % (_('Month Name'), 'mmmm'), 'mmmm'),\n None,\n (u'%s\\t%s' % (_('Year (2 digits)'), 'yy'), 'yy'),\n (u'%s\\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),\n None,\n (u'%s\\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'),\n (u'%s\\t%s' % (_('Weekday Abbreviation'), 'www'), 'www'),\n (u'%s\\t%s' % (_('Weekday Name'), 'wwww'), 'wwww')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n #field format edit options:\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = ('Now', _('Now', 'time stamp setting'))\n 
formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'),\n (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'),\n (u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'),\n 'h'),\n (u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'),\n None,\n (u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'),\n (u'%s\\t%s' % (_('Minute (2 digits)'), 'MM'), 'MM'),\n None,\n (u'%s\\t%s' % (_('Second (1 or 2 digits)'), 'S'), 'S'),\n (u'%s\\t%s' % (_('Second (2 digits)'), 'SS'), 'SS'),\n (u'%s\\t%s' % (_('Fractional Seconds'), 's'), 's'),\n None,\n (u'%s\\t%s' % (_('AM/PM'), 'AA'), 'AA'),\n (u'%s\\t%s' % (_('am/pm'), 'aa'),'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n #field format edit options:\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), 
_('true/false')),\n (_('T/F'), _('T/F')), None,\n (_('yes/no'), _('yes/no')),\n (_('Y/N'), _('Y/N')), None,\n ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n #field format edit options:\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'),\n (u'%s\\t%s' % (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = UniqueIDFormat.formatRe.\\\n match(self.format).groups()\n except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if 
self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:for-each select = \"./%s\">%s<xsl:choose>'\\\n '<xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\">'\\\n '<xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise>'\\\n '<a href=\"%s{.}\"><xsl:value-of select=\".\"/></a>'\\\n '</xsl:otherwise></xsl:choose>%s</xsl:for-each>' % \\\n (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n \"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n # add prefix/suffix within the executable path:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' %\n (escape(path, treedoc.escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass 
PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link to the picture if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = ['<img src=\"%s\">' % escape(url, treedoc.escDict) for url\n in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n self.name)\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n \"\"\"Placeholder format for references to any parent with data\"\"\"\n typeName = 'Ancestor'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = 1000\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*?%s*}' % (name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"ancestor::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n \"\"\"Placeholder format for references to a sequence of child data\"\"\"\n typeName = 'Child'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -1\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? 
*} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*&%s*}' % (name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n result = []\n for child in item.childList:\n field = child.nodeFormat().findField(self.name)\n if field:\n text = field.outputText(child, titleMode, internal)\n if text:\n result.append(text)\n return globalref.docRef.childFieldSep.join(result)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"child::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n \"\"\"Placeholder format for a count of children at the given level\"\"\"\n typeName = 'Count'\n\n def __init__(self, name, level):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -level\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*#%s*}' % (name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n return repr(len(item.descendLevelList(-self.parentLevel)))\n",
"step-ids": [
100,
168,
171,
173,
175
]
}
|
[
100,
168,
171,
173,
175
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test():
raw_text = (
"通化辉南县经济适用房_通化辉南县经适房_通化辉南县经济适用房转让_通化去114网通化切换城市var googlequerykey ='二手经适房 二手房买卖 二手房地产公司' ; var AdKeyWords = 'jingshifang';var cityname ='通化' ; var ChildURL = 'ershoufang';不限出售求购不限东昌区二道江区梅河口市集安市通化县辉南县柳河县其他不限一室两室三室四室四室以上不限毛坯简单中档精装豪华不限个人经纪人免费发布二手房信息»"
)
pattern = collection.pattern_test('js_var')
print(f'匹配模式为:{pattern}')
print('----------------------------------------------')
pattern = compile(pattern)
return_text = sub(pattern, '替换成功', raw_text)
print(return_text)
""" if(return_text):
for i, each in enumerate(return_text):
print(f"第{i+1}个匹配结果:{each}")
else:
print("Not Found pattern-like string!") """
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
warnings.filterwarnings('ignore')
def test():
raw_text = (
"通化辉南县经济适用房_通化辉南县经适房_通化辉南县经济适用房转让_通化去114网通化切换城市var googlequerykey ='二手经适房 二手房买卖 二手房地产公司' ; var AdKeyWords = 'jingshifang';var cityname ='通化' ; var ChildURL = 'ershoufang';不限出售求购不限东昌区二道江区梅河口市集安市通化县辉南县柳河县其他不限一室两室三室四室四室以上不限毛坯简单中档精装豪华不限个人经纪人免费发布二手房信息»"
)
pattern = collection.pattern_test('js_var')
print(f'匹配模式为:{pattern}')
print('----------------------------------------------')
pattern = compile(pattern)
return_text = sub(pattern, '替换成功', raw_text)
print(return_text)
""" if(return_text):
for i, each in enumerate(return_text):
print(f"第{i+1}个匹配结果:{each}")
else:
print("Not Found pattern-like string!") """
if __name__ == '__main__':
test()
<|reserved_special_token_1|>
import warnings
from re import *
from pattern import collection
warnings.filterwarnings('ignore')
def test():
raw_text = (
"通化辉南县经济适用房_通化辉南县经适房_通化辉南县经济适用房转让_通化去114网通化切换城市var googlequerykey ='二手经适房 二手房买卖 二手房地产公司' ; var AdKeyWords = 'jingshifang';var cityname ='通化' ; var ChildURL = 'ershoufang';不限出售求购不限东昌区二道江区梅河口市集安市通化县辉南县柳河县其他不限一室两室三室四室四室以上不限毛坯简单中档精装豪华不限个人经纪人免费发布二手房信息»"
)
pattern = collection.pattern_test('js_var')
print(f'匹配模式为:{pattern}')
print('----------------------------------------------')
pattern = compile(pattern)
return_text = sub(pattern, '替换成功', raw_text)
print(return_text)
""" if(return_text):
for i, each in enumerate(return_text):
print(f"第{i+1}个匹配结果:{each}")
else:
print("Not Found pattern-like string!") """
if __name__ == '__main__':
test()
<|reserved_special_token_1|>
import warnings
from re import *
from pattern import collection
warnings.filterwarnings("ignore")
def test():
raw_text = "通化辉南县经济适用房_通化辉南县经适房_通化辉南县经济适用房转让_通化去114网通化切换城市var googlequerykey ='二手经适房 二手房买卖 二手房地产公司' ; var AdKeyWords = 'jingshifang';var cityname ='通化' ; var ChildURL = 'ershoufang';不限出售求购不限东昌区二道江区梅河口市集安市通化县辉南县柳河县其他不限一室两室三室四室四室以上不限毛坯简单中档精装豪华不限个人经纪人免费发布二手房信息»"
pattern = collection.pattern_test("js_var")
print(f"匹配模式为:{pattern}")
print("----------------------------------------------")
#return_text = findall(pattern, raw_text)
pattern = compile(pattern)
return_text = sub(pattern, "替换成功", raw_text)
print(return_text)
''' if(return_text):
for i, each in enumerate(return_text):
print(f"第{i+1}个匹配结果:{each}")
else:
print("Not Found pattern-like string!") '''
if __name__ == "__main__":
test()
|
flexible
|
{
"blob_id": "488d20a86c5bddbca2db09b26fb8df4b6f87a1dc",
"index": 2354,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test():\n raw_text = (\n \"通化辉南县经济适用房_通化辉南县经适房_通化辉南县经济适用房转让_通化去114网通化切换城市var googlequerykey ='二手经适房 二手房买卖 二手房地产公司' ; var AdKeyWords = 'jingshifang';var cityname ='通化' ; var ChildURL = 'ershoufang';不限出售求购不限东昌区二道江区梅河口市集安市通化县辉南县柳河县其他不限一室两室三室四室四室以上不限毛坯简单中档精装豪华不限个人经纪人免费发布二手房信息»\"\n )\n pattern = collection.pattern_test('js_var')\n print(f'匹配模式为:{pattern}')\n print('----------------------------------------------')\n pattern = compile(pattern)\n return_text = sub(pattern, '替换成功', raw_text)\n print(return_text)\n \"\"\" if(return_text):\n for i, each in enumerate(return_text):\n print(f\"第{i+1}个匹配结果:{each}\")\n else:\n print(\"Not Found pattern-like string!\") \"\"\"\n\n\n<mask token>\n",
"step-3": "<mask token>\nwarnings.filterwarnings('ignore')\n\n\ndef test():\n raw_text = (\n \"通化辉南县经济适用房_通化辉南县经适房_通化辉南县经济适用房转让_通化去114网通化切换城市var googlequerykey ='二手经适房 二手房买卖 二手房地产公司' ; var AdKeyWords = 'jingshifang';var cityname ='通化' ; var ChildURL = 'ershoufang';不限出售求购不限东昌区二道江区梅河口市集安市通化县辉南县柳河县其他不限一室两室三室四室四室以上不限毛坯简单中档精装豪华不限个人经纪人免费发布二手房信息»\"\n )\n pattern = collection.pattern_test('js_var')\n print(f'匹配模式为:{pattern}')\n print('----------------------------------------------')\n pattern = compile(pattern)\n return_text = sub(pattern, '替换成功', raw_text)\n print(return_text)\n \"\"\" if(return_text):\n for i, each in enumerate(return_text):\n print(f\"第{i+1}个匹配结果:{each}\")\n else:\n print(\"Not Found pattern-like string!\") \"\"\"\n\n\nif __name__ == '__main__':\n test()\n",
"step-4": "import warnings\nfrom re import *\nfrom pattern import collection\nwarnings.filterwarnings('ignore')\n\n\ndef test():\n raw_text = (\n \"通化辉南县经济适用房_通化辉南县经适房_通化辉南县经济适用房转让_通化去114网通化切换城市var googlequerykey ='二手经适房 二手房买卖 二手房地产公司' ; var AdKeyWords = 'jingshifang';var cityname ='通化' ; var ChildURL = 'ershoufang';不限出售求购不限东昌区二道江区梅河口市集安市通化县辉南县柳河县其他不限一室两室三室四室四室以上不限毛坯简单中档精装豪华不限个人经纪人免费发布二手房信息»\"\n )\n pattern = collection.pattern_test('js_var')\n print(f'匹配模式为:{pattern}')\n print('----------------------------------------------')\n pattern = compile(pattern)\n return_text = sub(pattern, '替换成功', raw_text)\n print(return_text)\n \"\"\" if(return_text):\n for i, each in enumerate(return_text):\n print(f\"第{i+1}个匹配结果:{each}\")\n else:\n print(\"Not Found pattern-like string!\") \"\"\"\n\n\nif __name__ == '__main__':\n test()\n",
"step-5": "import warnings\nfrom re import *\n\nfrom pattern import collection\n\nwarnings.filterwarnings(\"ignore\")\n\ndef test():\n raw_text = \"通化辉南县经济适用房_通化辉南县经适房_通化辉南县经济适用房转让_通化去114网通化切换城市var googlequerykey ='二手经适房 二手房买卖 二手房地产公司' ; var AdKeyWords = 'jingshifang';var cityname ='通化' ; var ChildURL = 'ershoufang';不限出售求购不限东昌区二道江区梅河口市集安市通化县辉南县柳河县其他不限一室两室三室四室四室以上不限毛坯简单中档精装豪华不限个人经纪人免费发布二手房信息»\"\n pattern = collection.pattern_test(\"js_var\")\n print(f\"匹配模式为:{pattern}\")\n print(\"----------------------------------------------\")\n #return_text = findall(pattern, raw_text)\n pattern = compile(pattern)\n return_text = sub(pattern, \"替换成功\", raw_text)\n print(return_text)\n\n ''' if(return_text):\n for i, each in enumerate(return_text):\n print(f\"第{i+1}个匹配结果:{each}\")\n else:\n print(\"Not Found pattern-like string!\") '''\n\nif __name__ == \"__main__\":\n test()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Lad(a1, a2, b1, b2):
if (a1 == b1) | (a2 == b2):
return 'YES'
else:
return 'NO'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Lad(a1, a2, b1, b2):
if (a1 == b1) | (a2 == b2):
return 'YES'
else:
return 'NO'
<|reserved_special_token_0|>
print(Lad(a1, a2, b1, b2))
<|reserved_special_token_1|>
__author__ = 'NikolaiEgorov'
def Lad(a1, a2, b1, b2):
if (a1 == b1) | (a2 == b2):
return 'YES'
else:
return 'NO'
a1 = int(input())
a2 = int(input())
b1 = int(input())
b2 = int(input())
print(Lad(a1, a2, b1, b2))
|
flexible
|
{
"blob_id": "0f55b598058b65c9dbf9cd4761d1ff6fc7091b19",
"index": 8791,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Lad(a1, a2, b1, b2):\n if (a1 == b1) | (a2 == b2):\n return 'YES'\n else:\n return 'NO'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef Lad(a1, a2, b1, b2):\n if (a1 == b1) | (a2 == b2):\n return 'YES'\n else:\n return 'NO'\n\n\n<mask token>\nprint(Lad(a1, a2, b1, b2))\n",
"step-4": "__author__ = 'NikolaiEgorov'\n\n\ndef Lad(a1, a2, b1, b2):\n if (a1 == b1) | (a2 == b2):\n return 'YES'\n else:\n return 'NO'\n\n\na1 = int(input())\na2 = int(input())\nb1 = int(input())\nb2 = int(input())\nprint(Lad(a1, a2, b1, b2))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pickle as pkl
from datetime import datetime
import grpc
import numpy as np
from sklearn.utils import shuffle
import neural_nets_pb2 as nn_pb
import neural_nets_pb2_grpc as nn_pb_grpc
from mnist_loader import load_data
from activations import *
# pylint: disable=too-many-arguments
class Layer(nn_pb_grpc.LayerDataExchangeServicer):
"""
  Abstract base layer that collects the methods shared by all layers
"""
  def __init__(self, layer_name, upper_layer, lower_layer,
               lower_layer_nodes, current_layer_nodes,
               nonlin, nonlin_prime):
    """
    layer_name: readable name used in log messages
    upper_layer / lower_layer: gRPC addresses of the adjacent layers
    lower_layer_nodes / current_layer_nodes: layer sizes; they fix the weights shape
    nonlin: activation function
    nonlin_prime: the derivative of the activation function
"""
self.layer_name = layer_name
self.upper_layer_addr = upper_layer
self.lower_layer_addr = lower_layer
self.nonlin = nonlin
self.nonlin_prime = nonlin_prime
# lazy initialization
self.upper_layer_stub = None
self.lower_layer_stub = None
# weights dimension
self.weights_shape = (current_layer_nodes, lower_layer_nodes)
self.weights = None
self.biases = None
    # Outputs received from the lower layer, keyed by batch id. Kept for:
    # 1) computing the weighted sum of the current layer
    # 2) computing the gradients when this layer's weights are updated
    self.lower_layer_outputs = {}
    # Weighted sums cached per batch id; later used to compute this layer's
    # delta: delta = received_partial_delta * nonlin_prime(weighted_sum)
    self.weighted_sum_inputs = {}
def forward_to_upper(self, batch_id, forward_matrix, forward_labels, istrain):
"""
forward output to upper layer
"""
if not self.upper_layer_stub:
self.create_upper_stub()
# convert numpy array to byte string
bytes_matrix = pkl.dumps(forward_matrix, 2)
bytes_labels = pkl.dumps(forward_labels, 2)
# send message to next layer
res = self.upper_layer_stub.UpdateInput(
nn_pb.ForwardMsg(batch_id=batch_id,
output_matrix=bytes_matrix,
labels=bytes_labels,
is_train=istrain))
# print("get response form upper layer", res.message)
def backward_to_lower(self, batch_id, partial_delta, labels):
"""
    back-propagate the error term partial_delta to the lower layer;
    here partial_delta = dot(delta, self.weights), where
    delta = delta_received_from_upper * nonlin_prime(z)
"""
# create stub for lower layer
if not self.lower_layer_stub:
self.create_lower_stub()
# convert partial_delta matrix to bytes string
bytes_delta = pkl.dumps(partial_delta)
bytes_labels = pkl.dumps(labels)
res = self.lower_layer_stub.UpdateDelta(
nn_pb.BackwardMsg(batch_id=batch_id,
partial_delta=bytes_delta,
labels=bytes_labels))
# print("get response from lower layer", res.message)
def create_upper_stub(self):
""" create upper_layer_stub for exchanging data between grpc"""
if self.upper_layer_addr:
channel = grpc.insecure_channel(self.upper_layer_addr)
self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)
else:
print("no upper layer has been specified")
def create_lower_stub(self):
""" stub for lower layer communication"""
if self.lower_layer_addr:
channel = grpc.insecure_channel(self.lower_layer_addr)
self.lower_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)
else:
print("no lower layer has been specified")
def init_weights(self, load_weights=None):
"""
if load_weights is specified load the trained weights
"""
if load_weights:
# TODO
pass
else:
      # x: number of nodes in the lower layer (fan-in)
      # y: number of nodes in this layer
x = self.weights_shape[1]
y = self.weights_shape[0]
self.weights = np.random.randn(y, x) / np.sqrt(x) # pylint: disable=no-member
self.biases = np.random.randn(y, 1) # pylint: disable=no-member
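      # Dividing by sqrt(fan-in) keeps the variance of the weighted sums
      # roughly constant across layer widths (Xavier-style initialization).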
def check_weights(self):
if self.weights is None or self.biases is None:
print("Weights of {} have not initialized".format(self.layer_name))
import sys
sys.exit(-1)
def update_weights(self, lr, delta, outputs_of_lower):
"""
    lr: learning rate
    delta: error term of this layer, shape (batch, current_layer_nodes)
    outputs_of_lower: the lower layer's outputs, i.e. this layer's inputs
"""
delta_shape = delta.shape
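    # Illustrative shapes (the sizes are assumptions: batch 32, lower layer
    # 784 nodes, this layer 30 nodes):
    #   delta:            (32, 30)
    #   avg_delta:        (30, 1)     -> matches self.biases
    #   delta reshaped:   (32, 30, 1)
    #   inputs reshaped:  (32, 1, 784)
    #   gradients:        (32, 30, 784); its mean over axis 0 matches self.weights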
inputs_shape = outputs_of_lower.shape
# update biases
avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)
self.biases = self.biases - lr * avg_delta
# compute gradients for weights
delta = delta.reshape(delta_shape[0], delta_shape[1], 1)
inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])
gradients = delta * inputs
gradients_avg = np.mean(gradients, axis=0)
self.weights = self.weights - lr * gradients_avg
def parse_forward_msg(self, req):
""" extract and transform data in forward message"""
batch_id = req.batch_id
bytes_outputs_of_lower = req.output_matrix
bytes_labels = req.labels
is_train = req.is_train
outputs_of_lower = pkl.loads(bytes_outputs_of_lower)
labels = pkl.loads(bytes_labels)
return batch_id, outputs_of_lower, labels, is_train
# implementing rpc services
def UpdateInput(self, request, context):
# implemented in Hidden Layer and Output Layer
pass
def UpdateDelta(self, request, context):
""" Invoked by upper layer
will be implemented by hidden layer
"""
pass
class InputLayer(Layer):
  """ input layer: feeds the dataset forward and terminates backpropagation"""
def __init__(self, upper_layer, data_path, input_dim, layer_name="input"):
super().__init__(layer_name, upper_layer,
None, None, input_dim,
None, None)
self.train, self.val, self.test = load_data(data_path)
  def start_feed_data(self, batch_size, epochs):
    """Shuffle and feed mini-batches to the upper layer each epoch, then
    forward the validation set with a negative batch id for evaluation."""
train_X = self.train[0]
train_y = self.train[1]
val_X = self.val[0]
val_y = self.val[1]
train_size = train_X.shape[0]
batch_id = 0
    test_batch_id = -1  # negative ids mark evaluation batches, keeping them distinct from training ids
for i in range(epochs):
      print("Start feeding epoch {0} data".format(i))
train_X, train_y = shuffle(train_X, train_y)
for j in range(0, train_size, batch_size):
minibatch_X = train_X[j:j+batch_size]
minibatch_y = train_y[j:j+batch_size]
self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)
batch_id += 1
      # send validation data for evaluation
self.forward_to_upper(test_batch_id, val_X, val_y, False)
test_batch_id -= 1
  def UpdateInput(self, req, ctx):
    """The input layer has no lower layer, so this should never be invoked."""
print("Should not have lower layer")
return nn_pb.PlainResponse(message="Wrong invoke!")
  def UpdateDelta(self, req, ctx):
    """Terminates backpropagation for a batch; logs progress periodically."""
batch_id = req.batch_id
if batch_id % 100 == 0:
print("Complete backpropagation for batch {} at {}".format(
batch_id,
datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
return nn_pb.PlainResponse(message="Received at layer {}".format(
self.layer_name))
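# Hedged driver sketch (an addition, not part of the original module): once the
# hidden and output layers are being served, training could be started from an
# InputLayer roughly like
#
#   input_layer = InputLayer("localhost:50052", "mnist.pkl.gz", 784)
#   input_layer.start_feed_data(batch_size=32, epochs=10)
#
# The upper-layer address, dataset path, and hyperparameters are assumptions;
# the InputLayer must itself be served as well so it can receive UpdateDelta.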
class HiddenLayer(Layer):
""" hidden layer"""
def __init__(self, layer_name,
upper_layer,
lower_layer,
lower_layer_size,
layer_size,
nonlin,
nonlin_prime,
learning_rate,
enable_synthetic_gradients,
sg_learning_rate
):
"""
    enable_synthetic_gradients: whether to use synthetic gradients
    to approximate the error signal
"""
super().__init__(layer_name, upper_layer,
lower_layer, lower_layer_size,
layer_size, nonlin,
nonlin_prime)
self.lr = learning_rate
self.enable_sg = enable_synthetic_gradients
self.sg_lr = sg_learning_rate
self.sg_weights = None
self.sg_deltas = {}
  def init_sg_weights(self):
    """ using a linear synthetic-gradients model:
    SG(h, y) = hA + yB + C
    refer to the paper "Understanding Synthetic Gradients and Decoupled
    Neural Interfaces"
"""
n = self.weights_shape[0] # size of current layer
# pylint: disable=no-member
A = np.random.randn(n, n) / np.sqrt(n)
B = np.random.randn(10, n) / np.sqrt(n)
C = np.random.randn(1, n) / np.sqrt(n)
# pylint: enable=no-member
self.sg_weights = [A, B, C]
def check_sg_weights(self):
if self.sg_weights is None:
self.init_sg_weights()
  def SG(self, h, y):
    """ predict this layer's delta from its activations and the labels

    h: activations (outputs) of this layer
    y: labels for this batch
"""
self.check_sg_weights()
A = self.sg_weights[0] #(n, n)
B = self.sg_weights[1] #(10, n)
C = self.sg_weights[2] #(1, n)
delta = np.matmul(h, A) + np.matmul(y, B) + C
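    # Illustrative shapes, assuming a batch of 32 and one-hot labels:
    #   (32, n) @ (n, n) + (32, 10) @ (10, n) + (1, n)  ->  (32, n)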
return delta
  def update_sg_weights(self, true_delta, batch_id):
    """ naming conventions follow the paper
    "Understanding Synthetic Gradients and Decoupled Neural Interfaces"

    TODO: the synthetic gradient should estimate the partial delta rather
    than the true gradients
"""
sg_delta = self.sg_deltas[batch_id]
weighted_sum = self.weighted_sum_inputs[batch_id]
labels = self.lower_layer_outputs[batch_id]['labels']
y = labels
h = self.nonlin(weighted_sum)
Err = sg_delta - true_delta
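    # The updates below are the gradients of the batch-averaged squared error
    # ||hA + yB + C - true_delta||**2 with respect to A, B, and C.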
A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err) / h.shape[0]
B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err) / y.shape[0]
C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)
self.sg_weights = [A, B, C]
# del stored delta
del self.sg_deltas[batch_id]
  def UpdateInput(self, request, context):
    """ Invoked by the lower layer.
    Once the inputs arrive, compute the weighted sum,
    then the activation outputs,
    then forward the outputs to the next layer.
request: ForwardMsg
"""
self.check_weights()
# get values from message
batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(request)
print("Get inputs id: {0}, matrix shape: {1}, labels shape: {2}".format(
batch_id, outputs_of_lower.shape, labels.shape))
weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \
+ self.biases.transpose()
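    # shapes: (batch, lower) @ (lower, current) + (1, current) -> (batch, current)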
    # cache the inputs during training; they are needed later for the weight update
if is_train:
inputs = {'matrix': outputs_of_lower,
'labels': labels}
self.lower_layer_outputs[batch_id] = inputs
self.weighted_sum_inputs[batch_id] = weighted_sum
activations = self.nonlin(weighted_sum) # apply element wise
    # if synthetic gradients are enabled, update the weights immediately with the SG estimate
if self.enable_sg and is_train:
print("update weights based on SG delta")
sg_delta = self.SG(activations, labels)
      # TODO use sg_delta to compute the gradients via sg_delta * self.nonlin_prime(z)
self.update_weights(self.lr, sg_delta, outputs_of_lower)
self.sg_deltas[batch_id] = sg_delta
# forward layer outputs
self.forward_to_upper(batch_id, activations, labels, is_train)
print("batch id: {0}, activations shape {1}".format(
batch_id, activations.shape))
# return received
return nn_pb.PlainResponse(message="Inputs received by layer {}".format(
self.layer_name))
def UpdateDelta(self, req, ctx):
"""
delta shape: (batch_size, size_of_current_layer)
req: BackwardMsg
"""
batch_id = req.batch_id
bytes_partial_delta = req.partial_delta
partial_delta = pkl.loads(bytes_partial_delta)
    bytes_labels = req.labels  # labels are simply forwarded on to the lower layer
labels = pkl.loads(bytes_labels)
# compute delta for current layer
z = self.weighted_sum_inputs[batch_id]
z_nonlin_prime = self.nonlin_prime(z)
# shape of delta: (batch_size, size_of_layer)
delta = partial_delta * z_nonlin_prime
# compute partial delta for lower layer
partial_delta_for_lower = np.dot(delta, self.weights)
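    # (batch, current) @ (current, lower) -> (batch, lower): each sample's
    # delta projected back through this layer's weights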
# send partial delta to lower layer
self.backward_to_lower(batch_id,
partial_delta_for_lower,
labels)
if self.enable_sg:
      # train the SG model against the true delta
      # TODO pass the partial delta instead
self.update_sg_weights(delta, batch_id)
else:
# update weights regularly
inputs = self.lower_layer_outputs[batch_id]['matrix']
self.update_weights(self.lr, delta, inputs)
    # drop the cached weighted sum for this batch
    del self.weighted_sum_inputs[batch_id]
    # drop the cached lower-layer outputs for this batch
    del self.lower_layer_outputs[batch_id]
return nn_pb.PlainResponse(
message="Partial delta received at {}".format(self.layer_name))
class OutputLayer(Layer):
""" output layer
  computes the error from the labels and the predictions,
  using softmax output activations with a cross-entropy loss
"""
def __init__(self, layer_name, lower_layer, lower_layer_size,
               num_classes, learning_rate):
super().__init__(layer_name, None,
lower_layer,
lower_layer_size,
num_classes,
None,
None)
self.lr = learning_rate
  def UpdateInput(self, req, ctx):
    """ on receiving input from the lower layer:
    compute weighted sum -> softmax output -> loss -> back-propagate
"""
self.check_weights()
batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(req)
weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \
+ self.biases.transpose()
softmax_output = softmax(weighted_sum, axis=1)
# print("weighted sum", weighted_sum)
# print("outputs of lower", outputs_of_lower)
if is_train:
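      # With softmax outputs and a cross-entropy loss, the gradient w.r.t. the
      # pre-activations reduces to softmax(z) - y, used directly as the delta.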
delta = softmax_output - labels
      # compute the lower layer's partial delta before updating the weights,
      # since the error must be propagated through the pre-update weights
partial_delta_for_lower = np.dot(delta, self.weights)
# send to lower layer
self.backward_to_lower(batch_id, partial_delta_for_lower, labels)
# cross entropy loss
if batch_id % 100 == 0:
total_loss = np.log(softmax_output) * labels # pylint: disable=no-member
# print("total loss: ", np.sum(total_loss))
loss = -1 * np.sum(total_loss) / labels.shape[0]
print("For batch id {}, avg loss: {}".format(batch_id, loss))
# update weights
self.update_weights(self.lr, delta, outputs_of_lower)
else:
# test evaluation
pred_results = np.argmax(softmax_output, axis=1)
matched = sum(int(y == t) for (y, t) in zip(pred_results, labels))
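      # note: this comparison appears to assume integer class ids for the
      # validation labels, while the training labels above are one-hot;
      # consistent with the classic MNIST loader split.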
print("Epoch {}, Performance test {} / {}".format(
-1*batch_id, matched, labels.shape[0]))
return nn_pb.PlainResponse(message="Inputs received at {}".format(
self.layer_name))
def UpdateDelta(self, req, ctx):
""" No upper layer"""
print("Error: No upper layer for output layer")
return nn_pb.PlainResponse(message="Invalid Operation!!")
|
normal
|
{
"blob_id": "fa6f251f27b645fc6827285b5578fd9634c8bb30",
"index": 6361,
"step-1": "<mask token>\n\n\nclass Layer(nn_pb_grpc.LayerDataExchangeServicer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def init_weights(self, load_weights=None):\n \"\"\"\n if load_weights is specified load the trained weights\n \"\"\"\n if load_weights:\n pass\n else:\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x)\n self.biases = np.random.randn(y, 1)\n\n def check_weights(self):\n if self.weights is None or self.biases is None:\n print('Weights of {} have not initialized'.format(self.layer_name))\n import sys\n sys.exit(-1)\n <mask token>\n <mask token>\n <mask token>\n\n def UpdateDelta(self, request, context):\n \"\"\" Invoked by upper layer\n will be implemented by hidden layer\n \"\"\"\n pass\n\n\nclass InputLayer(Layer):\n \"\"\" for input data\"\"\"\n\n def __init__(self, upper_layer, data_path, input_dim, layer_name='input'):\n super().__init__(layer_name, upper_layer, None, None, input_dim,\n None, None)\n self.train, self.val, self.test = load_data(data_path)\n\n def start_feed_data(self, batch_size, epochs):\n \"\"\"\"\"\"\n train_X = self.train[0]\n train_y = self.train[1]\n val_X = self.val[0]\n val_y = self.val[1]\n train_size = train_X.shape[0]\n batch_id = 0\n test_batch_id = -1\n for i in range(epochs):\n print('Start feed {0} epoch data'.format(i))\n train_X, train_y = shuffle(train_X, train_y)\n for j in range(0, train_size, batch_size):\n minibatch_X = train_X[j:j + batch_size]\n minibatch_y = train_y[j:j + batch_size]\n self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)\n batch_id += 1\n self.forward_to_upper(test_batch_id, val_X, val_y, False)\n test_batch_id -= 1\n\n def UpdateInput(self, req, ctx):\n \"\"\"\"\"\"\n print('Should not have lower layer')\n return nn_pb.PlainResponse(message='Wrong invoke!')\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\"\"\"\n batch_id = req.batch_id\n if batch_id % 100 == 0:\n print('Complete backpropagation for batch {} at {}'.format(\n batch_id, datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n return nn_pb.PlainResponse(message='Received at layer {}'.format(\n self.layer_name))\n\n\nclass HiddenLayer(Layer):\n \"\"\" hidden layer\"\"\"\n\n def __init__(self, layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime, learning_rate,\n enable_synthetic_gradients, sg_learning_rate):\n \"\"\"\n enable_synthetic_gradients: whether use synthetic gradients\n to do error approximating\n \"\"\"\n super().__init__(layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime)\n self.lr = learning_rate\n self.enable_sg = enable_synthetic_gradients\n self.sg_lr = sg_learning_rate\n self.sg_weights = None\n self.sg_deltas = {}\n\n def init_sg_weights(self):\n \"\"\" using linear synthetic gradients model\n SG(h, y) = hA + yB + C\n refer to paper, Understanding synthetic gradients and decoupled neural networks\n \"\"\"\n n = self.weights_shape[0]\n A = np.random.randn(n, n) / np.sqrt(n)\n B = np.random.randn(10, n) / np.sqrt(n)\n C = np.random.randn(1, n) / np.sqrt(n)\n self.sg_weights = [A, B, C]\n\n def check_sg_weights(self):\n if self.sg_weights is None:\n self.init_sg_weights()\n\n def SG(self, h, y):\n \"\"\" generate delta by weighted sum and label\n\n h: outputs of this layer\n y: labels for this batch\n \"\"\"\n self.check_sg_weights()\n A = self.sg_weights[0]\n B = self.sg_weights[1]\n C = self.sg_weights[2]\n delta = np.matmul(h, A) + np.matmul(y, 
B) + C\n return delta\n\n def update_sg_weights(self, true_delta, batch_id):\n \"\"\" name conventions refer paper :\n Understanding synthetic gradients and decoupled neural interface\n TODO: synthetic gradient estimates the partial delta instead true gradients\n \"\"\"\n sg_delta = self.sg_deltas[batch_id]\n weighted_sum = self.weighted_sum_inputs[batch_id]\n labels = self.lower_layer_outputs[batch_id]['labels']\n y = labels\n h = self.nonlin(weighted_sum)\n Err = sg_delta - true_delta\n A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err\n ) / h.shape[0]\n B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err\n ) / y.shape[0]\n C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)\n self.sg_weights = [A, B, C]\n del self.sg_deltas[batch_id]\n\n def UpdateInput(self, request, context):\n \"\"\" Invoked by lower layer\n Once inputs updated, start computing the weighted sum\n then activation outputs,\n then forward outputs to next layer\n request: ForwardMsg\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(\n request)\n print('Get inputs id: {0}, matrix shape: {1}, labels shape: {2}'.\n format(batch_id, outputs_of_lower.shape, labels.shape))\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n if is_train:\n inputs = {'matrix': outputs_of_lower, 'labels': labels}\n self.lower_layer_outputs[batch_id] = inputs\n self.weighted_sum_inputs[batch_id] = weighted_sum\n activations = self.nonlin(weighted_sum)\n if self.enable_sg and is_train:\n print('update weights based on SG delta')\n sg_delta = self.SG(activations, labels)\n self.update_weights(self.lr, sg_delta, outputs_of_lower)\n self.sg_deltas[batch_id] = sg_delta\n self.forward_to_upper(batch_id, activations, labels, is_train)\n print('batch id: {0}, activations shape {1}'.format(batch_id,\n activations.shape))\n return nn_pb.PlainResponse(message='Inputs received by layer {}'.\n format(self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\n delta shape: (batch_size, size_of_current_layer)\n req: BackwardMsg\n \"\"\"\n batch_id = req.batch_id\n bytes_partial_delta = req.partial_delta\n partial_delta = pkl.loads(bytes_partial_delta)\n bytes_labels = req.labels\n labels = pkl.loads(bytes_labels)\n z = self.weighted_sum_inputs[batch_id]\n z_nonlin_prime = self.nonlin_prime(z)\n delta = partial_delta * z_nonlin_prime\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if self.enable_sg:\n self.update_sg_weights(delta, batch_id)\n else:\n inputs = self.lower_layer_outputs[batch_id]['matrix']\n self.update_weights(self.lr, delta, inputs)\n del self.weighted_sum_inputs[batch_id]\n del self.lower_layer_outputs[batch_id]\n return nn_pb.PlainResponse(message='Partial delta received at {}'.\n format(self.layer_name))\n\n\nclass OutputLayer(Layer):\n \"\"\" output layer\n computing the error based on labels and prediction\n using softmax as output activations and cross entropy loss\n\n \"\"\"\n\n def __init__(self, layer_name, lower_layer, lower_layer_size,\n num_classes, learning_rate):\n super().__init__(layer_name, None, lower_layer, lower_layer_size,\n num_classes, None, None)\n self.lr = learning_rate\n\n def UpdateInput(self, req, ctx):\n \"\"\" once received input from lower layer:\n compute weighted sum -> softmax output -> loss -> back propagate\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train 
= self.parse_forward_msg(\n req)\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n softmax_output = softmax(weighted_sum, axis=1)\n if is_train:\n delta = softmax_output - labels\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if batch_id % 100 == 0:\n total_loss = np.log(softmax_output) * labels\n loss = -1 * np.sum(total_loss) / labels.shape[0]\n print('For batch id {}, avg loss: {}'.format(batch_id, loss))\n self.update_weights(self.lr, delta, outputs_of_lower)\n else:\n pred_results = np.argmax(softmax_output, axis=1)\n matched = sum(int(y == t) for y, t in zip(pred_results, labels))\n print('Epoch {}, Performance test {} / {}'.format(-1 * batch_id,\n matched, labels.shape[0]))\n return nn_pb.PlainResponse(message='Inputs received at {}'.format(\n self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\" No upper layer\"\"\"\n print('Error: No upper layer for output layer')\n return nn_pb.PlainResponse(message='Invalid Operation!!')\n",
"step-2": "<mask token>\n\n\nclass Layer(nn_pb_grpc.LayerDataExchangeServicer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def create_upper_stub(self):\n \"\"\" create upper_layer_stub for exchanging data between grpc\"\"\"\n if self.upper_layer_addr:\n channel = grpc.insecure_channel(self.upper_layer_addr)\n self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)\n else:\n print('no upper layer has been specified')\n <mask token>\n\n def init_weights(self, load_weights=None):\n \"\"\"\n if load_weights is specified load the trained weights\n \"\"\"\n if load_weights:\n pass\n else:\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x)\n self.biases = np.random.randn(y, 1)\n\n def check_weights(self):\n if self.weights is None or self.biases is None:\n print('Weights of {} have not initialized'.format(self.layer_name))\n import sys\n sys.exit(-1)\n\n def update_weights(self, lr, delta, outputs_of_lower):\n \"\"\"\n outputs of lower: equals to inputs of this layer\n \"\"\"\n delta_shape = delta.shape\n inputs_shape = outputs_of_lower.shape\n avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)\n self.biases = self.biases - lr * avg_delta\n delta = delta.reshape(delta_shape[0], delta_shape[1], 1)\n inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])\n gradients = delta * inputs\n gradients_avg = np.mean(gradients, axis=0)\n self.weights = self.weights - lr * gradients_avg\n <mask token>\n\n def UpdateInput(self, request, context):\n pass\n\n def UpdateDelta(self, request, context):\n \"\"\" Invoked by upper layer\n will be implemented by hidden layer\n \"\"\"\n pass\n\n\nclass InputLayer(Layer):\n \"\"\" for input data\"\"\"\n\n def __init__(self, upper_layer, data_path, input_dim, layer_name='input'):\n super().__init__(layer_name, upper_layer, None, None, input_dim,\n None, None)\n self.train, self.val, self.test = load_data(data_path)\n\n def start_feed_data(self, batch_size, epochs):\n \"\"\"\"\"\"\n train_X = self.train[0]\n train_y = self.train[1]\n val_X = self.val[0]\n val_y = self.val[1]\n train_size = train_X.shape[0]\n batch_id = 0\n test_batch_id = -1\n for i in range(epochs):\n print('Start feed {0} epoch data'.format(i))\n train_X, train_y = shuffle(train_X, train_y)\n for j in range(0, train_size, batch_size):\n minibatch_X = train_X[j:j + batch_size]\n minibatch_y = train_y[j:j + batch_size]\n self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)\n batch_id += 1\n self.forward_to_upper(test_batch_id, val_X, val_y, False)\n test_batch_id -= 1\n\n def UpdateInput(self, req, ctx):\n \"\"\"\"\"\"\n print('Should not have lower layer')\n return nn_pb.PlainResponse(message='Wrong invoke!')\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\"\"\"\n batch_id = req.batch_id\n if batch_id % 100 == 0:\n print('Complete backpropagation for batch {} at {}'.format(\n batch_id, datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n return nn_pb.PlainResponse(message='Received at layer {}'.format(\n self.layer_name))\n\n\nclass HiddenLayer(Layer):\n \"\"\" hidden layer\"\"\"\n\n def __init__(self, layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime, learning_rate,\n enable_synthetic_gradients, sg_learning_rate):\n \"\"\"\n enable_synthetic_gradients: whether use synthetic gradients\n to do error approximating\n \"\"\"\n super().__init__(layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime)\n self.lr = 
learning_rate\n self.enable_sg = enable_synthetic_gradients\n self.sg_lr = sg_learning_rate\n self.sg_weights = None\n self.sg_deltas = {}\n\n def init_sg_weights(self):\n \"\"\" using linear synthetic gradients model\n SG(h, y) = hA + yB + C\n refer to paper, Understanding synthetic gradients and decoupled neural networks\n \"\"\"\n n = self.weights_shape[0]\n A = np.random.randn(n, n) / np.sqrt(n)\n B = np.random.randn(10, n) / np.sqrt(n)\n C = np.random.randn(1, n) / np.sqrt(n)\n self.sg_weights = [A, B, C]\n\n def check_sg_weights(self):\n if self.sg_weights is None:\n self.init_sg_weights()\n\n def SG(self, h, y):\n \"\"\" generate delta by weighted sum and label\n\n h: outputs of this layer\n y: labels for this batch\n \"\"\"\n self.check_sg_weights()\n A = self.sg_weights[0]\n B = self.sg_weights[1]\n C = self.sg_weights[2]\n delta = np.matmul(h, A) + np.matmul(y, B) + C\n return delta\n\n def update_sg_weights(self, true_delta, batch_id):\n \"\"\" name conventions refer paper :\n Understanding synthetic gradients and decoupled neural interface\n TODO: synthetic gradient estimates the partial delta instead true gradients\n \"\"\"\n sg_delta = self.sg_deltas[batch_id]\n weighted_sum = self.weighted_sum_inputs[batch_id]\n labels = self.lower_layer_outputs[batch_id]['labels']\n y = labels\n h = self.nonlin(weighted_sum)\n Err = sg_delta - true_delta\n A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err\n ) / h.shape[0]\n B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err\n ) / y.shape[0]\n C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)\n self.sg_weights = [A, B, C]\n del self.sg_deltas[batch_id]\n\n def UpdateInput(self, request, context):\n \"\"\" Invoked by lower layer\n Once inputs updated, start computing the weighted sum\n then activation outputs,\n then forward outputs to next layer\n request: ForwardMsg\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(\n request)\n print('Get inputs id: {0}, matrix shape: {1}, labels shape: {2}'.\n format(batch_id, outputs_of_lower.shape, labels.shape))\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n if is_train:\n inputs = {'matrix': outputs_of_lower, 'labels': labels}\n self.lower_layer_outputs[batch_id] = inputs\n self.weighted_sum_inputs[batch_id] = weighted_sum\n activations = self.nonlin(weighted_sum)\n if self.enable_sg and is_train:\n print('update weights based on SG delta')\n sg_delta = self.SG(activations, labels)\n self.update_weights(self.lr, sg_delta, outputs_of_lower)\n self.sg_deltas[batch_id] = sg_delta\n self.forward_to_upper(batch_id, activations, labels, is_train)\n print('batch id: {0}, activations shape {1}'.format(batch_id,\n activations.shape))\n return nn_pb.PlainResponse(message='Inputs received by layer {}'.\n format(self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\n delta shape: (batch_size, size_of_current_layer)\n req: BackwardMsg\n \"\"\"\n batch_id = req.batch_id\n bytes_partial_delta = req.partial_delta\n partial_delta = pkl.loads(bytes_partial_delta)\n bytes_labels = req.labels\n labels = pkl.loads(bytes_labels)\n z = self.weighted_sum_inputs[batch_id]\n z_nonlin_prime = self.nonlin_prime(z)\n delta = partial_delta * z_nonlin_prime\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if self.enable_sg:\n self.update_sg_weights(delta, batch_id)\n else:\n inputs = 
self.lower_layer_outputs[batch_id]['matrix']\n self.update_weights(self.lr, delta, inputs)\n del self.weighted_sum_inputs[batch_id]\n del self.lower_layer_outputs[batch_id]\n return nn_pb.PlainResponse(message='Partial delta received at {}'.\n format(self.layer_name))\n\n\nclass OutputLayer(Layer):\n \"\"\" output layer\n computing the error based on labels and prediction\n using softmax as output activations and cross entropy loss\n\n \"\"\"\n\n def __init__(self, layer_name, lower_layer, lower_layer_size,\n num_classes, learning_rate):\n super().__init__(layer_name, None, lower_layer, lower_layer_size,\n num_classes, None, None)\n self.lr = learning_rate\n\n def UpdateInput(self, req, ctx):\n \"\"\" once received input from lower layer:\n compute weighted sum -> softmax output -> loss -> back propagate\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(\n req)\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n softmax_output = softmax(weighted_sum, axis=1)\n if is_train:\n delta = softmax_output - labels\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if batch_id % 100 == 0:\n total_loss = np.log(softmax_output) * labels\n loss = -1 * np.sum(total_loss) / labels.shape[0]\n print('For batch id {}, avg loss: {}'.format(batch_id, loss))\n self.update_weights(self.lr, delta, outputs_of_lower)\n else:\n pred_results = np.argmax(softmax_output, axis=1)\n matched = sum(int(y == t) for y, t in zip(pred_results, labels))\n print('Epoch {}, Performance test {} / {}'.format(-1 * batch_id,\n matched, labels.shape[0]))\n return nn_pb.PlainResponse(message='Inputs received at {}'.format(\n self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\" No upper layer\"\"\"\n print('Error: No upper layer for output layer')\n return nn_pb.PlainResponse(message='Invalid Operation!!')\n",
"step-3": "<mask token>\n\n\nclass Layer(nn_pb_grpc.LayerDataExchangeServicer):\n <mask token>\n <mask token>\n\n def forward_to_upper(self, batch_id, forward_matrix, forward_labels,\n istrain):\n \"\"\"\n forward output to upper layer\n \"\"\"\n if not self.upper_layer_stub:\n self.create_upper_stub()\n bytes_matrix = pkl.dumps(forward_matrix, 2)\n bytes_labels = pkl.dumps(forward_labels, 2)\n res = self.upper_layer_stub.UpdateInput(nn_pb.ForwardMsg(batch_id=\n batch_id, output_matrix=bytes_matrix, labels=bytes_labels,\n is_train=istrain))\n <mask token>\n\n def create_upper_stub(self):\n \"\"\" create upper_layer_stub for exchanging data between grpc\"\"\"\n if self.upper_layer_addr:\n channel = grpc.insecure_channel(self.upper_layer_addr)\n self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)\n else:\n print('no upper layer has been specified')\n <mask token>\n\n def init_weights(self, load_weights=None):\n \"\"\"\n if load_weights is specified load the trained weights\n \"\"\"\n if load_weights:\n pass\n else:\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x)\n self.biases = np.random.randn(y, 1)\n\n def check_weights(self):\n if self.weights is None or self.biases is None:\n print('Weights of {} have not initialized'.format(self.layer_name))\n import sys\n sys.exit(-1)\n\n def update_weights(self, lr, delta, outputs_of_lower):\n \"\"\"\n outputs of lower: equals to inputs of this layer\n \"\"\"\n delta_shape = delta.shape\n inputs_shape = outputs_of_lower.shape\n avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)\n self.biases = self.biases - lr * avg_delta\n delta = delta.reshape(delta_shape[0], delta_shape[1], 1)\n inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])\n gradients = delta * inputs\n gradients_avg = np.mean(gradients, axis=0)\n self.weights = self.weights - lr * gradients_avg\n <mask token>\n\n def UpdateInput(self, request, context):\n pass\n\n def UpdateDelta(self, request, context):\n \"\"\" Invoked by upper layer\n will be implemented by hidden layer\n \"\"\"\n pass\n\n\nclass InputLayer(Layer):\n \"\"\" for input data\"\"\"\n\n def __init__(self, upper_layer, data_path, input_dim, layer_name='input'):\n super().__init__(layer_name, upper_layer, None, None, input_dim,\n None, None)\n self.train, self.val, self.test = load_data(data_path)\n\n def start_feed_data(self, batch_size, epochs):\n \"\"\"\"\"\"\n train_X = self.train[0]\n train_y = self.train[1]\n val_X = self.val[0]\n val_y = self.val[1]\n train_size = train_X.shape[0]\n batch_id = 0\n test_batch_id = -1\n for i in range(epochs):\n print('Start feed {0} epoch data'.format(i))\n train_X, train_y = shuffle(train_X, train_y)\n for j in range(0, train_size, batch_size):\n minibatch_X = train_X[j:j + batch_size]\n minibatch_y = train_y[j:j + batch_size]\n self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)\n batch_id += 1\n self.forward_to_upper(test_batch_id, val_X, val_y, False)\n test_batch_id -= 1\n\n def UpdateInput(self, req, ctx):\n \"\"\"\"\"\"\n print('Should not have lower layer')\n return nn_pb.PlainResponse(message='Wrong invoke!')\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\"\"\"\n batch_id = req.batch_id\n if batch_id % 100 == 0:\n print('Complete backpropagation for batch {} at {}'.format(\n batch_id, datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n return nn_pb.PlainResponse(message='Received at layer {}'.format(\n self.layer_name))\n\n\nclass HiddenLayer(Layer):\n \"\"\" hidden 
layer\"\"\"\n\n def __init__(self, layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime, learning_rate,\n enable_synthetic_gradients, sg_learning_rate):\n \"\"\"\n enable_synthetic_gradients: whether use synthetic gradients\n to do error approximating\n \"\"\"\n super().__init__(layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime)\n self.lr = learning_rate\n self.enable_sg = enable_synthetic_gradients\n self.sg_lr = sg_learning_rate\n self.sg_weights = None\n self.sg_deltas = {}\n\n def init_sg_weights(self):\n \"\"\" using linear synthetic gradients model\n SG(h, y) = hA + yB + C\n refer to paper, Understanding synthetic gradients and decoupled neural networks\n \"\"\"\n n = self.weights_shape[0]\n A = np.random.randn(n, n) / np.sqrt(n)\n B = np.random.randn(10, n) / np.sqrt(n)\n C = np.random.randn(1, n) / np.sqrt(n)\n self.sg_weights = [A, B, C]\n\n def check_sg_weights(self):\n if self.sg_weights is None:\n self.init_sg_weights()\n\n def SG(self, h, y):\n \"\"\" generate delta by weighted sum and label\n\n h: outputs of this layer\n y: labels for this batch\n \"\"\"\n self.check_sg_weights()\n A = self.sg_weights[0]\n B = self.sg_weights[1]\n C = self.sg_weights[2]\n delta = np.matmul(h, A) + np.matmul(y, B) + C\n return delta\n\n def update_sg_weights(self, true_delta, batch_id):\n \"\"\" name conventions refer paper :\n Understanding synthetic gradients and decoupled neural interface\n TODO: synthetic gradient estimates the partial delta instead true gradients\n \"\"\"\n sg_delta = self.sg_deltas[batch_id]\n weighted_sum = self.weighted_sum_inputs[batch_id]\n labels = self.lower_layer_outputs[batch_id]['labels']\n y = labels\n h = self.nonlin(weighted_sum)\n Err = sg_delta - true_delta\n A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err\n ) / h.shape[0]\n B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err\n ) / y.shape[0]\n C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)\n self.sg_weights = [A, B, C]\n del self.sg_deltas[batch_id]\n\n def UpdateInput(self, request, context):\n \"\"\" Invoked by lower layer\n Once inputs updated, start computing the weighted sum\n then activation outputs,\n then forward outputs to next layer\n request: ForwardMsg\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(\n request)\n print('Get inputs id: {0}, matrix shape: {1}, labels shape: {2}'.\n format(batch_id, outputs_of_lower.shape, labels.shape))\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n if is_train:\n inputs = {'matrix': outputs_of_lower, 'labels': labels}\n self.lower_layer_outputs[batch_id] = inputs\n self.weighted_sum_inputs[batch_id] = weighted_sum\n activations = self.nonlin(weighted_sum)\n if self.enable_sg and is_train:\n print('update weights based on SG delta')\n sg_delta = self.SG(activations, labels)\n self.update_weights(self.lr, sg_delta, outputs_of_lower)\n self.sg_deltas[batch_id] = sg_delta\n self.forward_to_upper(batch_id, activations, labels, is_train)\n print('batch id: {0}, activations shape {1}'.format(batch_id,\n activations.shape))\n return nn_pb.PlainResponse(message='Inputs received by layer {}'.\n format(self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\n delta shape: (batch_size, size_of_current_layer)\n req: BackwardMsg\n \"\"\"\n batch_id = req.batch_id\n bytes_partial_delta = req.partial_delta\n partial_delta = 
pkl.loads(bytes_partial_delta)\n bytes_labels = req.labels\n labels = pkl.loads(bytes_labels)\n z = self.weighted_sum_inputs[batch_id]\n z_nonlin_prime = self.nonlin_prime(z)\n delta = partial_delta * z_nonlin_prime\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if self.enable_sg:\n self.update_sg_weights(delta, batch_id)\n else:\n inputs = self.lower_layer_outputs[batch_id]['matrix']\n self.update_weights(self.lr, delta, inputs)\n del self.weighted_sum_inputs[batch_id]\n del self.lower_layer_outputs[batch_id]\n return nn_pb.PlainResponse(message='Partial delta received at {}'.\n format(self.layer_name))\n\n\nclass OutputLayer(Layer):\n \"\"\" output layer\n computing the error based on labels and prediction\n using softmax as output activations and cross entropy loss\n\n \"\"\"\n\n def __init__(self, layer_name, lower_layer, lower_layer_size,\n num_classes, learning_rate):\n super().__init__(layer_name, None, lower_layer, lower_layer_size,\n num_classes, None, None)\n self.lr = learning_rate\n\n def UpdateInput(self, req, ctx):\n \"\"\" once received input from lower layer:\n compute weighted sum -> softmax output -> loss -> back propagate\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(\n req)\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n softmax_output = softmax(weighted_sum, axis=1)\n if is_train:\n delta = softmax_output - labels\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if batch_id % 100 == 0:\n total_loss = np.log(softmax_output) * labels\n loss = -1 * np.sum(total_loss) / labels.shape[0]\n print('For batch id {}, avg loss: {}'.format(batch_id, loss))\n self.update_weights(self.lr, delta, outputs_of_lower)\n else:\n pred_results = np.argmax(softmax_output, axis=1)\n matched = sum(int(y == t) for y, t in zip(pred_results, labels))\n print('Epoch {}, Performance test {} / {}'.format(-1 * batch_id,\n matched, labels.shape[0]))\n return nn_pb.PlainResponse(message='Inputs received at {}'.format(\n self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\" No upper layer\"\"\"\n print('Error: No upper layer for output layer')\n return nn_pb.PlainResponse(message='Invalid Operation!!')\n",
"step-4": "<mask token>\n\n\nclass Layer(nn_pb_grpc.LayerDataExchangeServicer):\n <mask token>\n\n def __init__(self, layer_name, upper_layer, lower_layer,\n lower_layer_nodes, current_layer_nodes, nonlin, nonlin_prime):\n \"\"\"\n datasets : the path of mnist dataset\n nonlin: activation function\n nonlin_prime: the derivative of activation function\n \"\"\"\n self.layer_name = layer_name\n self.upper_layer_addr = upper_layer\n self.lower_layer_addr = lower_layer\n self.nonlin = nonlin\n self.nonlin_prime = nonlin_prime\n self.upper_layer_stub = None\n self.lower_layer_stub = None\n self.weights_shape = current_layer_nodes, lower_layer_nodes\n self.weights = None\n self.biases = None\n self.lower_layer_outputs = {}\n self.weighted_sum_inputs = {}\n\n def forward_to_upper(self, batch_id, forward_matrix, forward_labels,\n istrain):\n \"\"\"\n forward output to upper layer\n \"\"\"\n if not self.upper_layer_stub:\n self.create_upper_stub()\n bytes_matrix = pkl.dumps(forward_matrix, 2)\n bytes_labels = pkl.dumps(forward_labels, 2)\n res = self.upper_layer_stub.UpdateInput(nn_pb.ForwardMsg(batch_id=\n batch_id, output_matrix=bytes_matrix, labels=bytes_labels,\n is_train=istrain))\n\n def backward_to_lower(self, batch_id, partial_delta, labels):\n \"\"\"\n back propagate error partial_delta to lower layer\n partial_delta = dot(self.weights.T, self.delta)\n self.delta = delta_received_from_upper * nonlin_prime(z)\n \"\"\"\n if not self.lower_layer_stub:\n self.create_lower_stub()\n bytes_delta = pkl.dumps(partial_delta)\n bytes_labels = pkl.dumps(labels)\n res = self.lower_layer_stub.UpdateDelta(nn_pb.BackwardMsg(batch_id=\n batch_id, partial_delta=bytes_delta, labels=bytes_labels))\n\n def create_upper_stub(self):\n \"\"\" create upper_layer_stub for exchanging data between grpc\"\"\"\n if self.upper_layer_addr:\n channel = grpc.insecure_channel(self.upper_layer_addr)\n self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)\n else:\n print('no upper layer has been specified')\n\n def create_lower_stub(self):\n \"\"\" stub for lower layer communication\"\"\"\n if self.lower_layer_addr:\n channel = grpc.insecure_channel(self.lower_layer_addr)\n self.lower_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)\n else:\n print('no lower layer has been specified')\n\n def init_weights(self, load_weights=None):\n \"\"\"\n if load_weights is specified load the trained weights\n \"\"\"\n if load_weights:\n pass\n else:\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x)\n self.biases = np.random.randn(y, 1)\n\n def check_weights(self):\n if self.weights is None or self.biases is None:\n print('Weights of {} have not initialized'.format(self.layer_name))\n import sys\n sys.exit(-1)\n\n def update_weights(self, lr, delta, outputs_of_lower):\n \"\"\"\n outputs of lower: equals to inputs of this layer\n \"\"\"\n delta_shape = delta.shape\n inputs_shape = outputs_of_lower.shape\n avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)\n self.biases = self.biases - lr * avg_delta\n delta = delta.reshape(delta_shape[0], delta_shape[1], 1)\n inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])\n gradients = delta * inputs\n gradients_avg = np.mean(gradients, axis=0)\n self.weights = self.weights - lr * gradients_avg\n\n def parse_forward_msg(self, req):\n \"\"\" extract and transform data in forward message\"\"\"\n batch_id = req.batch_id\n bytes_outputs_of_lower = req.output_matrix\n bytes_labels = req.labels\n is_train 
= req.is_train\n outputs_of_lower = pkl.loads(bytes_outputs_of_lower)\n labels = pkl.loads(bytes_labels)\n return batch_id, outputs_of_lower, labels, is_train\n\n def UpdateInput(self, request, context):\n pass\n\n def UpdateDelta(self, request, context):\n \"\"\" Invoked by upper layer\n will be implemented by hidden layer\n \"\"\"\n pass\n\n\nclass InputLayer(Layer):\n \"\"\" for input data\"\"\"\n\n def __init__(self, upper_layer, data_path, input_dim, layer_name='input'):\n super().__init__(layer_name, upper_layer, None, None, input_dim,\n None, None)\n self.train, self.val, self.test = load_data(data_path)\n\n def start_feed_data(self, batch_size, epochs):\n \"\"\"\"\"\"\n train_X = self.train[0]\n train_y = self.train[1]\n val_X = self.val[0]\n val_y = self.val[1]\n train_size = train_X.shape[0]\n batch_id = 0\n test_batch_id = -1\n for i in range(epochs):\n print('Start feed {0} epoch data'.format(i))\n train_X, train_y = shuffle(train_X, train_y)\n for j in range(0, train_size, batch_size):\n minibatch_X = train_X[j:j + batch_size]\n minibatch_y = train_y[j:j + batch_size]\n self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)\n batch_id += 1\n self.forward_to_upper(test_batch_id, val_X, val_y, False)\n test_batch_id -= 1\n\n def UpdateInput(self, req, ctx):\n \"\"\"\"\"\"\n print('Should not have lower layer')\n return nn_pb.PlainResponse(message='Wrong invoke!')\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\"\"\"\n batch_id = req.batch_id\n if batch_id % 100 == 0:\n print('Complete backpropagation for batch {} at {}'.format(\n batch_id, datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n return nn_pb.PlainResponse(message='Received at layer {}'.format(\n self.layer_name))\n\n\nclass HiddenLayer(Layer):\n \"\"\" hidden layer\"\"\"\n\n def __init__(self, layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime, learning_rate,\n enable_synthetic_gradients, sg_learning_rate):\n \"\"\"\n enable_synthetic_gradients: whether use synthetic gradients\n to do error approximating\n \"\"\"\n super().__init__(layer_name, upper_layer, lower_layer,\n lower_layer_size, layer_size, nonlin, nonlin_prime)\n self.lr = learning_rate\n self.enable_sg = enable_synthetic_gradients\n self.sg_lr = sg_learning_rate\n self.sg_weights = None\n self.sg_deltas = {}\n\n def init_sg_weights(self):\n \"\"\" using linear synthetic gradients model\n SG(h, y) = hA + yB + C\n refer to paper, Understanding synthetic gradients and decoupled neural networks\n \"\"\"\n n = self.weights_shape[0]\n A = np.random.randn(n, n) / np.sqrt(n)\n B = np.random.randn(10, n) / np.sqrt(n)\n C = np.random.randn(1, n) / np.sqrt(n)\n self.sg_weights = [A, B, C]\n\n def check_sg_weights(self):\n if self.sg_weights is None:\n self.init_sg_weights()\n\n def SG(self, h, y):\n \"\"\" generate delta by weighted sum and label\n\n h: outputs of this layer\n y: labels for this batch\n \"\"\"\n self.check_sg_weights()\n A = self.sg_weights[0]\n B = self.sg_weights[1]\n C = self.sg_weights[2]\n delta = np.matmul(h, A) + np.matmul(y, B) + C\n return delta\n\n def update_sg_weights(self, true_delta, batch_id):\n \"\"\" name conventions refer paper :\n Understanding synthetic gradients and decoupled neural interface\n TODO: synthetic gradient estimates the partial delta instead true gradients\n \"\"\"\n sg_delta = self.sg_deltas[batch_id]\n weighted_sum = self.weighted_sum_inputs[batch_id]\n labels = self.lower_layer_outputs[batch_id]['labels']\n y = labels\n h = self.nonlin(weighted_sum)\n Err = sg_delta - 
true_delta\n A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err\n ) / h.shape[0]\n B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err\n ) / y.shape[0]\n C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)\n self.sg_weights = [A, B, C]\n del self.sg_deltas[batch_id]\n\n def UpdateInput(self, request, context):\n \"\"\" Invoked by lower layer\n Once inputs updated, start computing the weighted sum\n then activation outputs,\n then forward outputs to next layer\n request: ForwardMsg\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(\n request)\n print('Get inputs id: {0}, matrix shape: {1}, labels shape: {2}'.\n format(batch_id, outputs_of_lower.shape, labels.shape))\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n if is_train:\n inputs = {'matrix': outputs_of_lower, 'labels': labels}\n self.lower_layer_outputs[batch_id] = inputs\n self.weighted_sum_inputs[batch_id] = weighted_sum\n activations = self.nonlin(weighted_sum)\n if self.enable_sg and is_train:\n print('update weights based on SG delta')\n sg_delta = self.SG(activations, labels)\n self.update_weights(self.lr, sg_delta, outputs_of_lower)\n self.sg_deltas[batch_id] = sg_delta\n self.forward_to_upper(batch_id, activations, labels, is_train)\n print('batch id: {0}, activations shape {1}'.format(batch_id,\n activations.shape))\n return nn_pb.PlainResponse(message='Inputs received by layer {}'.\n format(self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\n delta shape: (batch_size, size_of_current_layer)\n req: BackwardMsg\n \"\"\"\n batch_id = req.batch_id\n bytes_partial_delta = req.partial_delta\n partial_delta = pkl.loads(bytes_partial_delta)\n bytes_labels = req.labels\n labels = pkl.loads(bytes_labels)\n z = self.weighted_sum_inputs[batch_id]\n z_nonlin_prime = self.nonlin_prime(z)\n delta = partial_delta * z_nonlin_prime\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if self.enable_sg:\n self.update_sg_weights(delta, batch_id)\n else:\n inputs = self.lower_layer_outputs[batch_id]['matrix']\n self.update_weights(self.lr, delta, inputs)\n del self.weighted_sum_inputs[batch_id]\n del self.lower_layer_outputs[batch_id]\n return nn_pb.PlainResponse(message='Partial delta received at {}'.\n format(self.layer_name))\n\n\nclass OutputLayer(Layer):\n \"\"\" output layer\n computing the error based on labels and prediction\n using softmax as output activations and cross entropy loss\n\n \"\"\"\n\n def __init__(self, layer_name, lower_layer, lower_layer_size,\n num_classes, learning_rate):\n super().__init__(layer_name, None, lower_layer, lower_layer_size,\n num_classes, None, None)\n self.lr = learning_rate\n\n def UpdateInput(self, req, ctx):\n \"\"\" once received input from lower layer:\n compute weighted sum -> softmax output -> loss -> back propagate\n \"\"\"\n self.check_weights()\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(\n req)\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()\n ) + self.biases.transpose()\n softmax_output = softmax(weighted_sum, axis=1)\n if is_train:\n delta = softmax_output - labels\n partial_delta_for_lower = np.dot(delta, self.weights)\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n if batch_id % 100 == 0:\n total_loss = np.log(softmax_output) * labels\n loss = -1 * np.sum(total_loss) / labels.shape[0]\n 
print('For batch id {}, avg loss: {}'.format(batch_id, loss))\n self.update_weights(self.lr, delta, outputs_of_lower)\n else:\n pred_results = np.argmax(softmax_output, axis=1)\n matched = sum(int(y == t) for y, t in zip(pred_results, labels))\n print('Epoch {}, Performance test {} / {}'.format(-1 * batch_id,\n matched, labels.shape[0]))\n return nn_pb.PlainResponse(message='Inputs received at {}'.format(\n self.layer_name))\n\n def UpdateDelta(self, req, ctx):\n \"\"\" No upper layer\"\"\"\n print('Error: No upper layer for output layer')\n return nn_pb.PlainResponse(message='Invalid Operation!!')\n",
"step-5": "import gzip\nimport pickle as pkl\nimport time\nfrom datetime import datetime\n\nimport grpc\nimport numpy as np\nfrom sklearn.utils import shuffle\n\nimport neural_nets_pb2 as nn_pb\nimport neural_nets_pb2_grpc as nn_pb_grpc\nfrom mnist_loader import load_data\nfrom activations import *\n\n\n# pylint: disable=too-many-arguments\n\n\nclass Layer(nn_pb_grpc.LayerDataExchangeServicer):\n \"\"\"\n abstract layer extract common methods\n \"\"\"\n # pylint: disable=too-many-arguments\n def __init__(self, layer_name, upper_layer, lower_layer,\n lower_layer_nodes, current_layer_nodes,\n nonlin, nonlin_prime):\n \"\"\"\n datasets : the path of mnist dataset\n nonlin: activation function\n nonlin_prime: the derivative of activation function\n \"\"\"\n self.layer_name = layer_name\n self.upper_layer_addr = upper_layer\n self.lower_layer_addr = lower_layer\n self.nonlin = nonlin\n self.nonlin_prime = nonlin_prime\n\n # lazy initialization\n self.upper_layer_stub = None\n self.lower_layer_stub = None\n\n # weights dimension\n self.weights_shape = (current_layer_nodes, lower_layer_nodes)\n self.weights = None\n self.biases = None\n\n # record outputs from lower layer\n # use batch id as key\n # Purposes:\n # 1) used for computing the weighted sum of current layer\n # 2) used for computing the gradients for updating weights of current layer\n self.lower_layer_outputs = {}\n\n # computed from lower layer outputs for cache purpose\n # cache for computing delta for current layer\n # delta = partial_delta_rec * nonlin_prime(weighted_sum)\n # with different batch we have different weighted sum\n self.weighted_sum_inputs = {}\n\n\n def forward_to_upper(self, batch_id, forward_matrix, forward_labels, istrain):\n \"\"\"\n forward output to upper layer\n \"\"\"\n if not self.upper_layer_stub:\n self.create_upper_stub()\n\n # convert numpy array to byte string\n bytes_matrix = pkl.dumps(forward_matrix, 2)\n bytes_labels = pkl.dumps(forward_labels, 2)\n\n # send message to next layer\n res = self.upper_layer_stub.UpdateInput(\n nn_pb.ForwardMsg(batch_id=batch_id,\n output_matrix=bytes_matrix,\n labels=bytes_labels,\n is_train=istrain))\n # print(\"get response form upper layer\", res.message)\n\n\n def backward_to_lower(self, batch_id, partial_delta, labels):\n \"\"\"\n back propagate error partial_delta to lower layer\n partial_delta = dot(self.weights.T, self.delta)\n self.delta = delta_received_from_upper * nonlin_prime(z)\n \"\"\"\n # create stub for lower layer\n if not self.lower_layer_stub:\n self.create_lower_stub()\n\n # convert partial_delta matrix to bytes string\n bytes_delta = pkl.dumps(partial_delta)\n bytes_labels = pkl.dumps(labels)\n\n res = self.lower_layer_stub.UpdateDelta(\n nn_pb.BackwardMsg(batch_id=batch_id,\n partial_delta=bytes_delta,\n labels=bytes_labels))\n # print(\"get response from lower layer\", res.message)\n\n\n def create_upper_stub(self):\n \"\"\" create upper_layer_stub for exchanging data between grpc\"\"\"\n if self.upper_layer_addr:\n channel = grpc.insecure_channel(self.upper_layer_addr)\n self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)\n else:\n print(\"no upper layer has been specified\")\n\n\n def create_lower_stub(self):\n \"\"\" stub for lower layer communication\"\"\"\n if self.lower_layer_addr:\n channel = grpc.insecure_channel(self.lower_layer_addr)\n self.lower_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)\n else:\n print(\"no lower layer has been specified\")\n\n\n def init_weights(self, load_weights=None):\n \"\"\"\n if 
load_weights is specified load the trained weights\n \"\"\"\n if load_weights:\n # TODO\n pass\n else:\n # x: lower layer nodes n\n # y: current layer nodes n\n x = self.weights_shape[1]\n y = self.weights_shape[0]\n self.weights = np.random.randn(y, x) / np.sqrt(x) # pylint: disable=no-member\n self.biases = np.random.randn(y, 1) # pylint: disable=no-member\n\n\n def check_weights(self):\n if self.weights is None or self.biases is None:\n print(\"Weights of {} have not initialized\".format(self.layer_name))\n import sys\n sys.exit(-1)\n\n\n def update_weights(self, lr, delta, outputs_of_lower):\n \"\"\"\n outputs of lower: equals to inputs of this layer\n \"\"\"\n delta_shape = delta.shape\n inputs_shape = outputs_of_lower.shape\n\n # update biases\n avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)\n self.biases = self.biases - lr * avg_delta\n\n # compute gradients for weights\n delta = delta.reshape(delta_shape[0], delta_shape[1], 1)\n inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])\n gradients = delta * inputs\n gradients_avg = np.mean(gradients, axis=0)\n\n self.weights = self.weights - lr * gradients_avg\n\n\n def parse_forward_msg(self, req):\n \"\"\" extract and transform data in forward message\"\"\"\n batch_id = req.batch_id\n bytes_outputs_of_lower = req.output_matrix\n bytes_labels = req.labels\n is_train = req.is_train\n\n outputs_of_lower = pkl.loads(bytes_outputs_of_lower)\n labels = pkl.loads(bytes_labels)\n return batch_id, outputs_of_lower, labels, is_train\n\n\n # implementing rpc services\n def UpdateInput(self, request, context):\n # implemented in Hidden Layer and Output Layer\n pass\n\n\n def UpdateDelta(self, request, context):\n \"\"\" Invoked by upper layer\n will be implemented by hidden layer\n \"\"\"\n pass\n\n\n\nclass InputLayer(Layer):\n \"\"\" for input data\"\"\"\n\n def __init__(self, upper_layer, data_path, input_dim, layer_name=\"input\"):\n super().__init__(layer_name, upper_layer,\n None, None, input_dim,\n None, None)\n\n self.train, self.val, self.test = load_data(data_path)\n\n\n def start_feed_data(self, batch_size, epochs):\n \"\"\"\"\"\"\n train_X = self.train[0]\n train_y = self.train[1]\n val_X = self.val[0]\n val_y = self.val[1]\n train_size = train_X.shape[0]\n batch_id = 0\n test_batch_id = -1 # use negative number, diff with batch_id\n for i in range(epochs):\n print(\"Start feed {0} epoch data\".format(i))\n\n train_X, train_y = shuffle(train_X, train_y)\n for j in range(0, train_size, batch_size):\n minibatch_X = train_X[j:j+batch_size]\n minibatch_y = train_y[j:j+batch_size]\n self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)\n batch_id += 1\n\n # send test data for evaluation\n self.forward_to_upper(test_batch_id, val_X, val_y, False)\n test_batch_id -= 1\n\n\n def UpdateInput(self, req, ctx):\n \"\"\"\"\"\"\n print(\"Should not have lower layer\")\n return nn_pb.PlainResponse(message=\"Wrong invoke!\")\n\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\"\"\"\n batch_id = req.batch_id\n if batch_id % 100 == 0:\n print(\"Complete backpropagation for batch {} at {}\".format(\n batch_id,\n datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n return nn_pb.PlainResponse(message=\"Received at layer {}\".format(\n self.layer_name))\n\n\n\nclass HiddenLayer(Layer):\n \"\"\" hidden layer\"\"\"\n\n def __init__(self, layer_name,\n upper_layer,\n lower_layer,\n lower_layer_size,\n layer_size,\n nonlin,\n nonlin_prime,\n learning_rate,\n enable_synthetic_gradients,\n sg_learning_rate\n ):\n \"\"\"\n 
enable_synthetic_gradients: whether use synthetic gradients\n to do error approximating\n \"\"\"\n super().__init__(layer_name, upper_layer,\n lower_layer, lower_layer_size,\n layer_size, nonlin,\n nonlin_prime)\n self.lr = learning_rate\n self.enable_sg = enable_synthetic_gradients\n self.sg_lr = sg_learning_rate\n self.sg_weights = None\n self.sg_deltas = {}\n\n\n def init_sg_weights(self):\n \"\"\" using linear synthetic gradients model\n SG(h, y) = hA + yB + C\n refer to paper, Understanding synthetic gradients and decoupled neural networks\n \"\"\"\n n = self.weights_shape[0] # size of current layer\n # pylint: disable=no-member\n A = np.random.randn(n, n) / np.sqrt(n)\n B = np.random.randn(10, n) / np.sqrt(n)\n C = np.random.randn(1, n) / np.sqrt(n)\n # pylint: enable=no-member\n self.sg_weights = [A, B, C]\n\n def check_sg_weights(self):\n if self.sg_weights is None:\n self.init_sg_weights()\n\n\n def SG(self, h, y):\n \"\"\" generate delta by weighted sum and label\n\n h: outputs of this layer\n y: labels for this batch\n \"\"\"\n self.check_sg_weights()\n\n A = self.sg_weights[0] #(n, n)\n B = self.sg_weights[1] #(10, n)\n C = self.sg_weights[2] #(1, n)\n\n delta = np.matmul(h, A) + np.matmul(y, B) + C\n return delta\n\n def update_sg_weights(self, true_delta, batch_id):\n \"\"\" name conventions refer paper :\n Understanding synthetic gradients and decoupled neural interface\n TODO: synthetic gradient estimates the partial delta instead true gradients\n \"\"\"\n sg_delta = self.sg_deltas[batch_id]\n weighted_sum = self.weighted_sum_inputs[batch_id]\n labels = self.lower_layer_outputs[batch_id]['labels']\n y = labels\n\n h = self.nonlin(weighted_sum)\n\n Err = sg_delta - true_delta\n A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err) / h.shape[0]\n B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err) / y.shape[0]\n C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)\n\n self.sg_weights = [A, B, C]\n\n # del stored delta\n del self.sg_deltas[batch_id]\n\n\n def UpdateInput(self, request, context):\n \"\"\" Invoked by lower layer\n Once inputs updated, start computing the weighted sum\n then activation outputs,\n then forward outputs to next layer\n request: ForwardMsg\n \"\"\"\n self.check_weights()\n\n # get values from message\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(request)\n print(\"Get inputs id: {0}, matrix shape: {1}, labels shape: {2}\".format(\n batch_id, outputs_of_lower.shape, labels.shape))\n\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \\\n + self.biases.transpose()\n # saving inputs during training, because for weights updating\n if is_train:\n inputs = {'matrix': outputs_of_lower,\n 'labels': labels}\n self.lower_layer_outputs[batch_id] = inputs\n self.weighted_sum_inputs[batch_id] = weighted_sum\n\n activations = self.nonlin(weighted_sum) # apply element wise\n\n # update weights immediately with SG, if enabled SG\n if self.enable_sg and is_train:\n print(\"update weights based on SG delta\")\n sg_delta = self.SG(activations, labels)\n # TODO use sg_delta to compute the gradients by sg_delta * self.nonline_prime(z)\n self.update_weights(self.lr, sg_delta, outputs_of_lower)\n self.sg_deltas[batch_id] = sg_delta\n\n # forward layer outputs\n self.forward_to_upper(batch_id, activations, labels, is_train)\n print(\"batch id: {0}, activations shape {1}\".format(\n batch_id, activations.shape))\n\n # return received\n return nn_pb.PlainResponse(message=\"Inputs received by 
layer {}\".format(\n self.layer_name))\n\n\n def UpdateDelta(self, req, ctx):\n \"\"\"\n delta shape: (batch_size, size_of_current_layer)\n req: BackwardMsg\n \"\"\"\n batch_id = req.batch_id\n bytes_partial_delta = req.partial_delta\n partial_delta = pkl.loads(bytes_partial_delta)\n bytes_labels = req.labels # variable currently not useful\n labels = pkl.loads(bytes_labels)\n\n # compute delta for current layer\n z = self.weighted_sum_inputs[batch_id]\n z_nonlin_prime = self.nonlin_prime(z)\n\n # shape of delta: (batch_size, size_of_layer)\n delta = partial_delta * z_nonlin_prime\n\n # compute partial delta for lower layer\n partial_delta_for_lower = np.dot(delta, self.weights)\n # send partial delta to lower layer\n self.backward_to_lower(batch_id,\n partial_delta_for_lower,\n labels)\n\n if self.enable_sg:\n # train the SG\n # TODO pass partial delta instead\n self.update_sg_weights(delta, batch_id)\n else:\n # update weights regularly\n inputs = self.lower_layer_outputs[batch_id]['matrix']\n self.update_weights(self.lr, delta, inputs)\n\n\n # delete stored for weighted sum\n del self.weighted_sum_inputs[batch_id]\n # delete stored for lower layer outputs\n del self.lower_layer_outputs[batch_id]\n\n return nn_pb.PlainResponse(\n message=\"Partial delta received at {}\".format(self.layer_name))\n\n\n\nclass OutputLayer(Layer):\n \"\"\" output layer\n computing the error based on labels and prediction\n using softmax as output activations and cross entropy loss\n\n \"\"\"\n def __init__(self, layer_name, lower_layer, lower_layer_size,\n num_classes, learning_rate ):\n super().__init__(layer_name, None,\n lower_layer,\n lower_layer_size,\n num_classes,\n None,\n None)\n self.lr = learning_rate\n\n def UpdateInput(self, req, ctx):\n \"\"\" once received input from lower layer:\n compute weighted sum -> softmax output -> loss -> back propagate\n \"\"\"\n self.check_weights()\n\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(req)\n\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \\\n + self.biases.transpose()\n softmax_output = softmax(weighted_sum, axis=1)\n # print(\"weighted sum\", weighted_sum)\n # print(\"outputs of lower\", outputs_of_lower)\n\n if is_train:\n delta = softmax_output - labels\n # compute delta for lower layer first\n # because current error is based on current weights\n partial_delta_for_lower = np.dot(delta, self.weights)\n # send to lower layer\n self.backward_to_lower(batch_id, partial_delta_for_lower, labels)\n\n # cross entropy loss\n if batch_id % 100 == 0:\n total_loss = np.log(softmax_output) * labels # pylint: disable=no-member\n # print(\"total loss: \", np.sum(total_loss))\n loss = -1 * np.sum(total_loss) / labels.shape[0]\n print(\"For batch id {}, avg loss: {}\".format(batch_id, loss))\n\n # update weights\n self.update_weights(self.lr, delta, outputs_of_lower)\n\n else:\n # test evaluation\n pred_results = np.argmax(softmax_output, axis=1)\n matched = sum(int(y == t) for (y, t) in zip(pred_results, labels))\n print(\"Epoch {}, Performance test {} / {}\".format(\n -1*batch_id, matched, labels.shape[0]))\n\n\n return nn_pb.PlainResponse(message=\"Inputs received at {}\".format(\n self.layer_name))\n\n\n def UpdateDelta(self, req, ctx):\n \"\"\" No upper layer\"\"\"\n print(\"Error: No upper layer for output layer\")\n return nn_pb.PlainResponse(message=\"Invalid Operation!!\")\n",
"step-ids": [
24,
27,
28,
32,
35
]
}
|
[
24,
27,
28,
32,
35
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Max: {}'.format(max_value))
print('Max: {}'.format(max_value1))
print('Max: {}'.format(max_value2))
print('Max: {}'.format(max_value3))
<|reserved_special_token_1|>
max_integer = __import__('9-max_integer').max_integer
my_list = [1, 90, 2, 13, 34, 5, -13, 3]
my_list1 = []
my_list2 = [1, 90, 2, 13, 34, 100, -13, 3]
max_value = max_integer(my_list)
max_value1 = max_integer(my_list1)
max_value2 = max_integer(my_list2)
max_value3 = max_integer()
print('Max: {}'.format(max_value))
print('Max: {}'.format(max_value1))
print('Max: {}'.format(max_value2))
print('Max: {}'.format(max_value3))
<|reserved_special_token_1|>
#!/usr/bin/python3
max_integer = __import__('9-max_integer').max_integer
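# __import__ is used because the module name starts with a digit and contains a
# hyphen, which a regular "import" statement cannot express.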
my_list = [1, 90, 2, 13, 34, 5, -13, 3]
my_list1 = []
my_list2 = [1, 90, 2, 13, 34, 100, -13, 3]
max_value = max_integer(my_list)
max_value1 = max_integer(my_list1)
max_value2 = max_integer(my_list2)
max_value3 = max_integer()
print("Max: {}".format(max_value))
print("Max: {}".format(max_value1))
print("Max: {}".format(max_value2))
print("Max: {}".format(max_value3))
|
flexible
|
{
"blob_id": "f5b74ca95cb368d70139b5d36e3c8d553b8c5393",
"index": 1393,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Max: {}'.format(max_value))\nprint('Max: {}'.format(max_value1))\nprint('Max: {}'.format(max_value2))\nprint('Max: {}'.format(max_value3))\n",
"step-3": "max_integer = __import__('9-max_integer').max_integer\nmy_list = [1, 90, 2, 13, 34, 5, -13, 3]\nmy_list1 = []\nmy_list2 = [1, 90, 2, 13, 34, 100, -13, 3]\nmax_value = max_integer(my_list)\nmax_value1 = max_integer(my_list1)\nmax_value2 = max_integer(my_list2)\nmax_value3 = max_integer()\nprint('Max: {}'.format(max_value))\nprint('Max: {}'.format(max_value1))\nprint('Max: {}'.format(max_value2))\nprint('Max: {}'.format(max_value3))\n",
"step-4": "#!/usr/bin/python3\nmax_integer = __import__('9-max_integer').max_integer\n\nmy_list = [1, 90, 2, 13, 34, 5, -13, 3]\nmy_list1 = []\nmy_list2 = [1, 90, 2, 13, 34, 100, -13, 3]\nmax_value = max_integer(my_list)\nmax_value1 = max_integer(my_list1)\nmax_value2 = max_integer(my_list2)\nmax_value3 = max_integer()\nprint(\"Max: {}\".format(max_value))\nprint(\"Max: {}\".format(max_value1))\nprint(\"Max: {}\".format(max_value2))\nprint(\"Max: {}\".format(max_value3))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def sigmoid(x):
return 0.5 * (1 + np.tanh(0.5 * x))
def bernoulli_array(prob_array, dim):
sample = np.zeros(dim)
uni_sample = np.random.uniform(0, 1, dim)
diff = uni_sample - prob_array
coords = np.argwhere(diff < 0)
sample[[*coords.T]] = 1
return sample
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Loaded learned weights from RBM')
print('W', W.shape)
print('b_h', b_h.shape)
print('b_v', b_v.shape)
def sigmoid(x):
return 0.5 * (1 + np.tanh(0.5 * x))
def bernoulli_array(prob_array, dim):
sample = np.zeros(dim)
uni_sample = np.random.uniform(0, 1, dim)
diff = uni_sample - prob_array
coords = np.argwhere(diff < 0)
sample[[*coords.T]] = 1
return sample
for count in range(5):
hidden = []
for i in range(train_frames.shape[0]):
v = train_frames[i].T
p_h_v = sigmoid(W @ v + b_h)
h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))
hidden.append(h.T)
hidden = np.array(hidden)
print('Train Hidden h ', count, ': ', hidden.shape)
pickle.dump(hidden, open(f'{out_dir}\\train_h_{count}.pkl', 'wb'))
<|reserved_special_token_0|>
for i in range(train_frames.shape[0]):
v = train_frames[i].T
p_h_v = sigmoid(W @ v + b_h)
hidden.append(p_h_v.T)
<|reserved_special_token_0|>
print('Train Hidden p_h_v : ', hidden.shape)
pickle.dump(hidden, open(f'{out_dir}\\train_p_h_v.pkl', 'wb'))
for count in range(5):
hidden = []
for i in range(test_frames.shape[0]):
v = test_frames[i].T
p_h_v = sigmoid(W @ v + b_h)
h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))
hidden.append(h.T)
hidden = np.array(hidden)
print('Test Latent Dynamics h ', count, ': ', hidden.shape)
pickle.dump(hidden, open(f'{out_dir}\\test_h_{count}.pkl', 'wb'))
<|reserved_special_token_0|>
for i in range(test_frames.shape[0]):
v = test_frames[i].T
p_h_v = sigmoid(W @ v + b_h)
hidden.append(p_h_v.T)
<|reserved_special_token_0|>
print('Test Hidden p_h_v : ', hidden.shape)
pickle.dump(hidden, open(f'{out_dir}\\test_p_h_v.pkl', 'wb'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
in_dir = (
'C:\\Users\\ganga\\Github\\Generative-Models\\Project\\Data\\Dynamics\\')
out_dir = (
f'C:\\Users\\ganga\\Github\\Generative-Models\\Project\\Data\\Dynamics\\')
train_frames = pickle.load(open(in_dir + '\\train_frames.pkl', 'rb'))
test_frames = pickle.load(open(in_dir + '\\test_frames.pkl', 'rb'))
rbm_dir = (
'C:\\Users\\ganga\\Github\\Generative-Models\\Project\\Outputs\\RBM\\')
W, b_h, b_v = pickle.load(open(rbm_dir + '\\weights.pkl', 'rb'))
print('Loaded learned weights from RBM')
print('W', W.shape)
print('b_h', b_h.shape)
print('b_v', b_v.shape)
def sigmoid(x):
return 0.5 * (1 + np.tanh(0.5 * x))
def bernoulli_array(prob_array, dim):
sample = np.zeros(dim)
uni_sample = np.random.uniform(0, 1, dim)
diff = uni_sample - prob_array
coords = np.argwhere(diff < 0)
sample[[*coords.T]] = 1
return sample
for count in range(5):
hidden = []
for i in range(train_frames.shape[0]):
v = train_frames[i].T
p_h_v = sigmoid(W @ v + b_h)
h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))
hidden.append(h.T)
hidden = np.array(hidden)
print('Train Hidden h ', count, ': ', hidden.shape)
pickle.dump(hidden, open(f'{out_dir}\\train_h_{count}.pkl', 'wb'))
hidden = []
for i in range(train_frames.shape[0]):
v = train_frames[i].T
p_h_v = sigmoid(W @ v + b_h)
hidden.append(p_h_v.T)
hidden = np.array(hidden)
print('Train Hidden p_h_v : ', hidden.shape)
pickle.dump(hidden, open(f'{out_dir}\\train_p_h_v.pkl', 'wb'))
for count in range(5):
hidden = []
for i in range(test_frames.shape[0]):
v = test_frames[i].T
p_h_v = sigmoid(W @ v + b_h)
h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))
hidden.append(h.T)
hidden = np.array(hidden)
print('Test Latent Dynamics h ', count, ': ', hidden.shape)
pickle.dump(hidden, open(f'{out_dir}\\test_h_{count}.pkl', 'wb'))
hidden = []
for i in range(test_frames.shape[0]):
v = test_frames[i].T
p_h_v = sigmoid(W @ v + b_h)
hidden.append(p_h_v.T)
hidden = np.array(hidden)
print('Test Hidden p_h_v : ', hidden.shape)
pickle.dump(hidden, open(f'{out_dir}\\test_p_h_v.pkl', 'wb'))
<|reserved_special_token_1|>
import pickle
import numpy as np
in_dir = (
'C:\\Users\\ganga\\Github\\Generative-Models\\Project\\Data\\Dynamics\\')
out_dir = (
f'C:\\Users\\ganga\\Github\\Generative-Models\\Project\\Data\\Dynamics\\')
train_frames = pickle.load(open(in_dir + '\\train_frames.pkl', 'rb'))
test_frames = pickle.load(open(in_dir + '\\test_frames.pkl', 'rb'))
rbm_dir = (
'C:\\Users\\ganga\\Github\\Generative-Models\\Project\\Outputs\\RBM\\')
W, b_h, b_v = pickle.load(open(rbm_dir + '\\weights.pkl', 'rb'))
print('Loaded learned weights from RBM')
print('W', W.shape)
print('b_h', b_h.shape)
print('b_v', b_v.shape)
def sigmoid(x):
return 0.5 * (1 + np.tanh(0.5 * x))
def bernoulli_array(prob_array, dim):
sample = np.zeros(dim)
uni_sample = np.random.uniform(0, 1, dim)
diff = uni_sample - prob_array
coords = np.argwhere(diff < 0)
sample[[*coords.T]] = 1
return sample
for count in range(5):
hidden = []
for i in range(train_frames.shape[0]):
v = train_frames[i].T
p_h_v = sigmoid(W @ v + b_h)
h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))
hidden.append(h.T)
hidden = np.array(hidden)
print('Train Hidden h ', count, ': ', hidden.shape)
pickle.dump(hidden, open(f'{out_dir}\\train_h_{count}.pkl', 'wb'))
hidden = []
for i in range(train_frames.shape[0]):
v = train_frames[i].T
p_h_v = sigmoid(W @ v + b_h)
hidden.append(p_h_v.T)
hidden = np.array(hidden)
print('Train Hidden p_h_v : ', hidden.shape)
pickle.dump(hidden, open(f'{out_dir}\\train_p_h_v.pkl', 'wb'))
for count in range(5):
hidden = []
for i in range(test_frames.shape[0]):
v = test_frames[i].T
p_h_v = sigmoid(W @ v + b_h)
h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))
hidden.append(h.T)
hidden = np.array(hidden)
print('Test Latent Dynamics h ', count, ': ', hidden.shape)
pickle.dump(hidden, open(f'{out_dir}\\test_h_{count}.pkl', 'wb'))
hidden = []
for i in range(test_frames.shape[0]):
v = test_frames[i].T
p_h_v = sigmoid(W @ v + b_h)
hidden.append(p_h_v.T)
hidden = np.array(hidden)
print('Test Hidden p_h_v : ', hidden.shape)
pickle.dump(hidden, open(f'{out_dir}\\test_p_h_v.pkl', 'wb'))
<|reserved_special_token_1|>
import pickle
import numpy as np
in_dir = "C:\\Users\\ganga\\Github\\Generative-Models\\Project\\Data\\Dynamics\\"
out_dir = f"C:\\Users\\ganga\\Github\\Generative-Models\\Project\\Data\\Dynamics\\"
# Read frames
train_frames = pickle.load( open(in_dir +'\\train_frames.pkl' , 'rb' ))
test_frames = pickle.load( open(in_dir +'\\test_frames.pkl' , 'rb' ))
# Read the rbm learned weights
rbm_dir = "C:\\Users\\ganga\\Github\\Generative-Models\\Project\\Outputs\\RBM\\"
W, b_h, b_v = pickle.load( open(rbm_dir+'\\weights.pkl' , 'rb' ))
print("Loaded learned weights from RBM")
print("W", W.shape)
print("b_h", b_h.shape)
print("b_v", b_v.shape)
def sigmoid(x):
	# Sigmoid activation
	# Implemented in terms of tanh for increased numerical stability
return .5 * (1 + np.tanh(.5 * x))
def bernoulli_array(prob_array, dim):
# Simulating Bernoulli from uniform
sample = np.zeros(dim)
# Draw x~Uni[0,1]
uni_sample = np.random.uniform(0, 1, dim)
# return 1 if x < p else return 0
diff = uni_sample - prob_array
coords = np.argwhere(diff<0)
sample[[*coords.T]] = 1
return sample
# ------------------------ Train Data ----------------------------------------
for count in range(5):
hidden = []
for i in range(train_frames.shape[0]):
v = train_frames[i].T
# Getting hidden states of RBM using frames
# (h x v) @ (v x b) + (h x 1) = (h x b)
p_h_v = sigmoid(W @ v + b_h)
h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))
hidden.append(h.T)
hidden = np.array(hidden)
print("Train Hidden h ", count, ": ", hidden.shape)
pickle.dump(hidden, open(f"{out_dir}\\train_h_{count}.pkl" , 'wb' ) )
hidden = []
for i in range(train_frames.shape[0]):
v = train_frames[i].T
# Getting hidden states of RBM using frames
# (h x v) @ (v x b) + (h x 1) = (h x b)
p_h_v = sigmoid(W @ v + b_h)
hidden.append(p_h_v.T)
hidden = np.array(hidden)
print("Train Hidden p_h_v : ", hidden.shape)
pickle.dump(hidden, open(f"{out_dir}\\train_p_h_v.pkl" , 'wb' ) )
# ------------------------ Test Data ----------------------------------------
for count in range(5):
hidden = []
for i in range(test_frames.shape[0]):
v = test_frames[i].T
# Getting hidden states of RBM using frames
# (h x v) @ (v x b) + (h x 1) = (h x b)
p_h_v = sigmoid(W @ v + b_h)
h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))
hidden.append(h.T)
hidden = np.array(hidden)
print("Test Latent Dynamics h ", count, ": ", hidden.shape)
pickle.dump(hidden, open(f"{out_dir}\\test_h_{count}.pkl" , 'wb' ) )
hidden = []
for i in range(test_frames.shape[0]):
v = test_frames[i].T
# Getting hidden states of RBM using frames
# (h x v) @ (v x b) + (h x 1) = (h x b)
p_h_v = sigmoid(W @ v + b_h)
hidden.append(p_h_v.T)
hidden = np.array(hidden)
print("Test Hidden p_h_v : ", hidden.shape)
pickle.dump(hidden, open(f"{out_dir}\\test_p_h_v.pkl" , 'wb' ) )
|
flexible
|
{
"blob_id": "e048170775c589cf0a9fb3d54c72dab4df3f1bcb",
"index": 7558,
"step-1": "<mask token>\n\n\ndef sigmoid(x):\n return 0.5 * (1 + np.tanh(0.5 * x))\n\n\ndef bernoulli_array(prob_array, dim):\n sample = np.zeros(dim)\n uni_sample = np.random.uniform(0, 1, dim)\n diff = uni_sample - prob_array\n coords = np.argwhere(diff < 0)\n sample[[*coords.T]] = 1\n return sample\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('Loaded learned weights from RBM')\nprint('W', W.shape)\nprint('b_h', b_h.shape)\nprint('b_v', b_v.shape)\n\n\ndef sigmoid(x):\n return 0.5 * (1 + np.tanh(0.5 * x))\n\n\ndef bernoulli_array(prob_array, dim):\n sample = np.zeros(dim)\n uni_sample = np.random.uniform(0, 1, dim)\n diff = uni_sample - prob_array\n coords = np.argwhere(diff < 0)\n sample[[*coords.T]] = 1\n return sample\n\n\nfor count in range(5):\n hidden = []\n for i in range(train_frames.shape[0]):\n v = train_frames[i].T\n p_h_v = sigmoid(W @ v + b_h)\n h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))\n hidden.append(h.T)\n hidden = np.array(hidden)\n print('Train Hidden h ', count, ': ', hidden.shape)\n pickle.dump(hidden, open(f'{out_dir}\\\\train_h_{count}.pkl', 'wb'))\n<mask token>\nfor i in range(train_frames.shape[0]):\n v = train_frames[i].T\n p_h_v = sigmoid(W @ v + b_h)\n hidden.append(p_h_v.T)\n<mask token>\nprint('Train Hidden p_h_v : ', hidden.shape)\npickle.dump(hidden, open(f'{out_dir}\\\\train_p_h_v.pkl', 'wb'))\nfor count in range(5):\n hidden = []\n for i in range(test_frames.shape[0]):\n v = test_frames[i].T\n p_h_v = sigmoid(W @ v + b_h)\n h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))\n hidden.append(h.T)\n hidden = np.array(hidden)\n print('Test Latent Dynamics h ', count, ': ', hidden.shape)\n pickle.dump(hidden, open(f'{out_dir}\\\\test_h_{count}.pkl', 'wb'))\n<mask token>\nfor i in range(test_frames.shape[0]):\n v = test_frames[i].T\n p_h_v = sigmoid(W @ v + b_h)\n hidden.append(p_h_v.T)\n<mask token>\nprint('Test Hidden p_h_v : ', hidden.shape)\npickle.dump(hidden, open(f'{out_dir}\\\\test_p_h_v.pkl', 'wb'))\n",
"step-3": "<mask token>\nin_dir = (\n 'C:\\\\Users\\\\ganga\\\\Github\\\\Generative-Models\\\\Project\\\\Data\\\\Dynamics\\\\')\nout_dir = (\n f'C:\\\\Users\\\\ganga\\\\Github\\\\Generative-Models\\\\Project\\\\Data\\\\Dynamics\\\\')\ntrain_frames = pickle.load(open(in_dir + '\\\\train_frames.pkl', 'rb'))\ntest_frames = pickle.load(open(in_dir + '\\\\test_frames.pkl', 'rb'))\nrbm_dir = (\n 'C:\\\\Users\\\\ganga\\\\Github\\\\Generative-Models\\\\Project\\\\Outputs\\\\RBM\\\\')\nW, b_h, b_v = pickle.load(open(rbm_dir + '\\\\weights.pkl', 'rb'))\nprint('Loaded learned weights from RBM')\nprint('W', W.shape)\nprint('b_h', b_h.shape)\nprint('b_v', b_v.shape)\n\n\ndef sigmoid(x):\n return 0.5 * (1 + np.tanh(0.5 * x))\n\n\ndef bernoulli_array(prob_array, dim):\n sample = np.zeros(dim)\n uni_sample = np.random.uniform(0, 1, dim)\n diff = uni_sample - prob_array\n coords = np.argwhere(diff < 0)\n sample[[*coords.T]] = 1\n return sample\n\n\nfor count in range(5):\n hidden = []\n for i in range(train_frames.shape[0]):\n v = train_frames[i].T\n p_h_v = sigmoid(W @ v + b_h)\n h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))\n hidden.append(h.T)\n hidden = np.array(hidden)\n print('Train Hidden h ', count, ': ', hidden.shape)\n pickle.dump(hidden, open(f'{out_dir}\\\\train_h_{count}.pkl', 'wb'))\nhidden = []\nfor i in range(train_frames.shape[0]):\n v = train_frames[i].T\n p_h_v = sigmoid(W @ v + b_h)\n hidden.append(p_h_v.T)\nhidden = np.array(hidden)\nprint('Train Hidden p_h_v : ', hidden.shape)\npickle.dump(hidden, open(f'{out_dir}\\\\train_p_h_v.pkl', 'wb'))\nfor count in range(5):\n hidden = []\n for i in range(test_frames.shape[0]):\n v = test_frames[i].T\n p_h_v = sigmoid(W @ v + b_h)\n h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))\n hidden.append(h.T)\n hidden = np.array(hidden)\n print('Test Latent Dynamics h ', count, ': ', hidden.shape)\n pickle.dump(hidden, open(f'{out_dir}\\\\test_h_{count}.pkl', 'wb'))\nhidden = []\nfor i in range(test_frames.shape[0]):\n v = test_frames[i].T\n p_h_v = sigmoid(W @ v + b_h)\n hidden.append(p_h_v.T)\nhidden = np.array(hidden)\nprint('Test Hidden p_h_v : ', hidden.shape)\npickle.dump(hidden, open(f'{out_dir}\\\\test_p_h_v.pkl', 'wb'))\n",
"step-4": "import pickle\nimport numpy as np\nin_dir = (\n 'C:\\\\Users\\\\ganga\\\\Github\\\\Generative-Models\\\\Project\\\\Data\\\\Dynamics\\\\')\nout_dir = (\n f'C:\\\\Users\\\\ganga\\\\Github\\\\Generative-Models\\\\Project\\\\Data\\\\Dynamics\\\\')\ntrain_frames = pickle.load(open(in_dir + '\\\\train_frames.pkl', 'rb'))\ntest_frames = pickle.load(open(in_dir + '\\\\test_frames.pkl', 'rb'))\nrbm_dir = (\n 'C:\\\\Users\\\\ganga\\\\Github\\\\Generative-Models\\\\Project\\\\Outputs\\\\RBM\\\\')\nW, b_h, b_v = pickle.load(open(rbm_dir + '\\\\weights.pkl', 'rb'))\nprint('Loaded learned weights from RBM')\nprint('W', W.shape)\nprint('b_h', b_h.shape)\nprint('b_v', b_v.shape)\n\n\ndef sigmoid(x):\n return 0.5 * (1 + np.tanh(0.5 * x))\n\n\ndef bernoulli_array(prob_array, dim):\n sample = np.zeros(dim)\n uni_sample = np.random.uniform(0, 1, dim)\n diff = uni_sample - prob_array\n coords = np.argwhere(diff < 0)\n sample[[*coords.T]] = 1\n return sample\n\n\nfor count in range(5):\n hidden = []\n for i in range(train_frames.shape[0]):\n v = train_frames[i].T\n p_h_v = sigmoid(W @ v + b_h)\n h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))\n hidden.append(h.T)\n hidden = np.array(hidden)\n print('Train Hidden h ', count, ': ', hidden.shape)\n pickle.dump(hidden, open(f'{out_dir}\\\\train_h_{count}.pkl', 'wb'))\nhidden = []\nfor i in range(train_frames.shape[0]):\n v = train_frames[i].T\n p_h_v = sigmoid(W @ v + b_h)\n hidden.append(p_h_v.T)\nhidden = np.array(hidden)\nprint('Train Hidden p_h_v : ', hidden.shape)\npickle.dump(hidden, open(f'{out_dir}\\\\train_p_h_v.pkl', 'wb'))\nfor count in range(5):\n hidden = []\n for i in range(test_frames.shape[0]):\n v = test_frames[i].T\n p_h_v = sigmoid(W @ v + b_h)\n h = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))\n hidden.append(h.T)\n hidden = np.array(hidden)\n print('Test Latent Dynamics h ', count, ': ', hidden.shape)\n pickle.dump(hidden, open(f'{out_dir}\\\\test_h_{count}.pkl', 'wb'))\nhidden = []\nfor i in range(test_frames.shape[0]):\n v = test_frames[i].T\n p_h_v = sigmoid(W @ v + b_h)\n hidden.append(p_h_v.T)\nhidden = np.array(hidden)\nprint('Test Hidden p_h_v : ', hidden.shape)\npickle.dump(hidden, open(f'{out_dir}\\\\test_p_h_v.pkl', 'wb'))\n",
"step-5": "import pickle\nimport numpy as np\n\nin_dir = \"C:\\\\Users\\\\ganga\\\\Github\\\\Generative-Models\\\\Project\\\\Data\\\\Dynamics\\\\\"\nout_dir = f\"C:\\\\Users\\\\ganga\\\\Github\\\\Generative-Models\\\\Project\\\\Data\\\\Dynamics\\\\\"\n\n\n# Read frames\ntrain_frames = pickle.load( open(in_dir +'\\\\train_frames.pkl' , 'rb' ))\ntest_frames = pickle.load( open(in_dir +'\\\\test_frames.pkl' , 'rb' ))\n\n# Read the rbm learned weights\nrbm_dir = \"C:\\\\Users\\\\ganga\\\\Github\\\\Generative-Models\\\\Project\\\\Outputs\\\\RBM\\\\\"\nW, b_h, b_v = pickle.load( open(rbm_dir+'\\\\weights.pkl' , 'rb' ))\n\nprint(\"Loaded learned weights from RBM\")\nprint(\"W\", W.shape)\nprint(\"b_h\", b_h.shape)\nprint(\"b_v\", b_v.shape)\n\ndef sigmoid(x): \n\t#Sigmoid activation \n\t#Implemented interms of tanh for increased stability\n\treturn .5 * (1 + np.tanh(.5 * x))\n\ndef bernoulli_array(prob_array, dim):\n\t# Simulating Bernoulli from uniform\n\tsample = np.zeros(dim)\n\n\t# Draw x~Uni[0,1]\n\tuni_sample = np.random.uniform(0, 1, dim)\n\n\t# return 1 if x < p else return 0\n\tdiff = uni_sample - prob_array\n\tcoords = np.argwhere(diff<0)\n\tsample[[*coords.T]] = 1 \n\n\treturn sample\n\n# ------------------------ Train Data ----------------------------------------\n\nfor count in range(5):\n\thidden = []\n\tfor i in range(train_frames.shape[0]):\n\n\t\tv = train_frames[i].T\n\n\t\t# Getting hidden states of RBM using frames\n\t\t# (h x v) @ (v x b) + (h x 1) = (h x b)\n\t\tp_h_v = sigmoid(W @ v + b_h)\n\t\th = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))\n\n\t\thidden.append(h.T)\n\n\thidden = np.array(hidden)\n\tprint(\"Train Hidden h \", count, \": \", hidden.shape)\n\n\tpickle.dump(hidden, open(f\"{out_dir}\\\\train_h_{count}.pkl\" , 'wb' ) )\n\n\nhidden = []\nfor i in range(train_frames.shape[0]):\n\n\tv = train_frames[i].T\n\n\t# Getting hidden states of RBM using frames\n\t# (h x v) @ (v x b) + (h x 1) = (h x b)\n\tp_h_v = sigmoid(W @ v + b_h)\n\n\thidden.append(p_h_v.T)\n\nhidden = np.array(hidden)\nprint(\"Train Hidden p_h_v : \", hidden.shape)\n\npickle.dump(hidden, open(f\"{out_dir}\\\\train_p_h_v.pkl\" , 'wb' ) )\n\n\n# ------------------------ Test Data ----------------------------------------\n\nfor count in range(5):\n\thidden = []\n\tfor i in range(test_frames.shape[0]):\n\n\t\tv = test_frames[i].T\n\n\t\t# Getting hidden states of RBM using frames\n\t\t# (h x v) @ (v x b) + (h x 1) = (h x b)\n\t\tp_h_v = sigmoid(W @ v + b_h)\n\t\th = bernoulli_array(p_h_v, (p_h_v.shape[0], p_h_v.shape[1]))\n\n\t\thidden.append(h.T)\n\n\thidden = np.array(hidden)\n\tprint(\"Test Latent Dynamics h \", count, \": \", hidden.shape)\n\n\tpickle.dump(hidden, open(f\"{out_dir}\\\\test_h_{count}.pkl\" , 'wb' ) )\n\n\n\nhidden = []\nfor i in range(test_frames.shape[0]):\n\n\tv = test_frames[i].T\n\n\t# Getting hidden states of RBM using frames\n\t# (h x v) @ (v x b) + (h x 1) = (h x b)\n\tp_h_v = sigmoid(W @ v + b_h)\n\n\thidden.append(p_h_v.T)\n\nhidden = np.array(hidden)\nprint(\"Test Hidden p_h_v : \", hidden.shape)\n\npickle.dump(hidden, open(f\"{out_dir}\\\\test_p_h_v.pkl\" , 'wb' ) )",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
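
The `bernoulli_array` helper in the record above samples Bernoulli variables by thresholding uniform draws (emit 1 where u < p). As a sketch — not part of the original record — the same distribution can be drawn with a one-line vectorized comparison, or with NumPy's built-in binomial sampler:

import numpy as np

def bernoulli_array_v2(prob_array):
    # True where u < p, cast to 0.0/1.0; elementwise Bernoulli(p)
    return (np.random.uniform(0.0, 1.0, prob_array.shape) < prob_array).astype(float)

p = np.full((4, 3), 0.25)
sample_a = bernoulli_array_v2(p)
sample_b = np.random.binomial(1, p)   # same distribution, drawn by NumPy
assert sample_a.shape == sample_b.shape == (4, 3)
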
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(sys.argv) != 3:
print('Wrong number of arguments! Exiting.')
<|reserved_special_token_0|>
for line in infile.readlines():
fields = line.split()
node_id = int(fields[0])
lat = float(fields[1])
lon = float(fields[2])
elev = get_elevation(lat, lon)
if elev < 0:
print('Warning: bad elevation result')
fail_count += 1
else:
fail_count = 0
node_coords[node_id] = [lat, lon, elev]
nodes_processed += 1
if nodes_processed % 1000 == 0:
print(f'Processed {nodes_processed} nodes so far...')
time.sleep(10)
if fail_count > 100:
print('Aborting due to 100 consecutive failures')
break
infile.close()
with open(outfile_name, 'w') as outfile:
json.dump(node_coords, outfile)
print(f'Wrote {nodes_processed} nodes to file {outfile_name}.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(sys.argv) != 3:
print('Wrong number of arguments! Exiting.')
infile_name = sys.argv[1]
outfile_name = sys.argv[2]
node_coords = {}
fail_count = 0
nodes_processed = 0
infile = open(infile_name, 'r')
for line in infile.readlines():
fields = line.split()
node_id = int(fields[0])
lat = float(fields[1])
lon = float(fields[2])
elev = get_elevation(lat, lon)
if elev < 0:
print('Warning: bad elevation result')
fail_count += 1
else:
fail_count = 0
node_coords[node_id] = [lat, lon, elev]
nodes_processed += 1
if nodes_processed % 1000 == 0:
print(f'Processed {nodes_processed} nodes so far...')
time.sleep(10)
if fail_count > 100:
print('Aborting due to 100 consecutive failures')
break
infile.close()
with open(outfile_name, 'w') as outfile:
json.dump(node_coords, outfile)
print(f'Wrote {nodes_processed} nodes to file {outfile_name}.')
<|reserved_special_token_1|>
import io
import json
import sys
import time
from coord_tools import get_elevation
if len(sys.argv) != 3:
print('Wrong number of arguments! Exiting.')
infile_name = sys.argv[1]
outfile_name = sys.argv[2]
node_coords = {}
fail_count = 0
nodes_processed = 0
infile = open(infile_name, 'r')
for line in infile.readlines():
fields = line.split()
node_id = int(fields[0])
lat = float(fields[1])
lon = float(fields[2])
elev = get_elevation(lat, lon)
if elev < 0:
print('Warning: bad elevation result')
fail_count += 1
else:
fail_count = 0
node_coords[node_id] = [lat, lon, elev]
nodes_processed += 1
if nodes_processed % 1000 == 0:
print(f'Processed {nodes_processed} nodes so far...')
time.sleep(10)
if fail_count > 100:
print('Aborting due to 100 consecutive failures')
break
infile.close()
with open(outfile_name, 'w') as outfile:
json.dump(node_coords, outfile)
print(f'Wrote {nodes_processed} nodes to file {outfile_name}.')
<|reserved_special_token_1|>
import io
import json
import sys
import time
from coord_tools import get_elevation
if len(sys.argv) != 3:
print('Wrong number of arguments! Exiting.')
infile_name = sys.argv[1]
outfile_name = sys.argv[2]
# Declare dict to hold coordinates
node_coords = {}
fail_count = 0
nodes_processed = 0
# Read in each node from a file
infile = open(infile_name,'r')
for line in infile.readlines():
fields = line.split()
node_id = int(fields[0])
lat = float(fields[1])
lon = float(fields[2])
elev = get_elevation(lat,lon)
if elev < 0:
print('Warning: bad elevation result')
fail_count += 1
else:
fail_count = 0
node_coords[node_id] = [lat,lon,elev]
nodes_processed += 1
if nodes_processed % 1000 == 0:
print(f'Processed {nodes_processed} nodes so far...')
time.sleep(10)
if fail_count > 100:
print('Aborting due to 100 consecutive failures')
break
#time.sleep(.5)
infile.close()
# Print the 3-coord nodes to the outfile
with open(outfile_name,'w') as outfile:
json.dump(node_coords,outfile)
print(f'Wrote {nodes_processed} nodes to file {outfile_name}.')
|
flexible
|
{
"blob_id": "4744d594c0599f1aa807eefa0cb40a2a2a3c7926",
"index": 6677,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) != 3:\n print('Wrong number of arguments! Exiting.')\n<mask token>\nfor line in infile.readlines():\n fields = line.split()\n node_id = int(fields[0])\n lat = float(fields[1])\n lon = float(fields[2])\n elev = get_elevation(lat, lon)\n if elev < 0:\n print('Warning: bad elevation result')\n fail_count += 1\n else:\n fail_count = 0\n node_coords[node_id] = [lat, lon, elev]\n nodes_processed += 1\n if nodes_processed % 1000 == 0:\n print(f'Processed {nodes_processed} nodes so far...')\n time.sleep(10)\n if fail_count > 100:\n print('Aborting due to 100 consecutive failures')\n break\ninfile.close()\nwith open(outfile_name, 'w') as outfile:\n json.dump(node_coords, outfile)\nprint(f'Wrote {nodes_processed} nodes to file {outfile_name}.')\n",
"step-3": "<mask token>\nif len(sys.argv) != 3:\n print('Wrong number of arguments! Exiting.')\ninfile_name = sys.argv[1]\noutfile_name = sys.argv[2]\nnode_coords = {}\nfail_count = 0\nnodes_processed = 0\ninfile = open(infile_name, 'r')\nfor line in infile.readlines():\n fields = line.split()\n node_id = int(fields[0])\n lat = float(fields[1])\n lon = float(fields[2])\n elev = get_elevation(lat, lon)\n if elev < 0:\n print('Warning: bad elevation result')\n fail_count += 1\n else:\n fail_count = 0\n node_coords[node_id] = [lat, lon, elev]\n nodes_processed += 1\n if nodes_processed % 1000 == 0:\n print(f'Processed {nodes_processed} nodes so far...')\n time.sleep(10)\n if fail_count > 100:\n print('Aborting due to 100 consecutive failures')\n break\ninfile.close()\nwith open(outfile_name, 'w') as outfile:\n json.dump(node_coords, outfile)\nprint(f'Wrote {nodes_processed} nodes to file {outfile_name}.')\n",
"step-4": "import io\nimport json\nimport sys\nimport time\nfrom coord_tools import get_elevation\nif len(sys.argv) != 3:\n print('Wrong number of arguments! Exiting.')\ninfile_name = sys.argv[1]\noutfile_name = sys.argv[2]\nnode_coords = {}\nfail_count = 0\nnodes_processed = 0\ninfile = open(infile_name, 'r')\nfor line in infile.readlines():\n fields = line.split()\n node_id = int(fields[0])\n lat = float(fields[1])\n lon = float(fields[2])\n elev = get_elevation(lat, lon)\n if elev < 0:\n print('Warning: bad elevation result')\n fail_count += 1\n else:\n fail_count = 0\n node_coords[node_id] = [lat, lon, elev]\n nodes_processed += 1\n if nodes_processed % 1000 == 0:\n print(f'Processed {nodes_processed} nodes so far...')\n time.sleep(10)\n if fail_count > 100:\n print('Aborting due to 100 consecutive failures')\n break\ninfile.close()\nwith open(outfile_name, 'w') as outfile:\n json.dump(node_coords, outfile)\nprint(f'Wrote {nodes_processed} nodes to file {outfile_name}.')\n",
"step-5": "import io\nimport json\nimport sys\nimport time\n\nfrom coord_tools import get_elevation\n\nif len(sys.argv) != 3:\n\tprint('Wrong number of arguments! Exiting.')\n\ninfile_name = sys.argv[1]\noutfile_name = sys.argv[2]\n\n# Declare dict to hold coordinates\nnode_coords = {}\nfail_count = 0\nnodes_processed = 0\n\n# Read in each node from a file\ninfile = open(infile_name,'r')\nfor line in infile.readlines():\n\tfields = line.split()\n\tnode_id = int(fields[0])\n\tlat = float(fields[1])\n\tlon = float(fields[2])\n\n\telev = get_elevation(lat,lon)\n\tif elev < 0:\n\t\tprint('Warning: bad elevation result')\n\t\tfail_count += 1\n\telse:\n\t\tfail_count = 0\n\n\tnode_coords[node_id] = [lat,lon,elev]\n\tnodes_processed += 1\n\tif nodes_processed % 1000 == 0:\n\t\tprint(f'Processed {nodes_processed} nodes so far...')\n\t\ttime.sleep(10)\n\tif fail_count > 100:\n\t\tprint('Aborting due to 100 consecutive failures')\n\t\tbreak\n\t#time.sleep(.5)\n\ninfile.close()\n\n# Print the 3-coord nodes to the outfile\nwith open(outfile_name,'w') as outfile:\n\tjson.dump(node_coords,outfile)\n\nprint(f'Wrote {nodes_processed} nodes to file {outfile_name}.')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
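
The script above persists `node_coords` only once, after the loop exits, so a hard crash mid-run discards every elevation fetched up to that point. A small checkpointing helper — a sketch with a hypothetical `checkpoint.json` filename, reusing the `json` module the script already imports — would bound that loss:

import json

def checkpoint(node_coords, nodes_processed, path='checkpoint.json', every=1000):
    """Flush partial results to disk every `every` processed nodes."""
    if nodes_processed % every == 0:
        with open(path, 'w') as f:
            json.dump(node_coords, f)

# In the read loop, immediately after `nodes_processed += 1`:
#     checkpoint(node_coords, nodes_processed)
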
<|reserved_special_token_0|>
def end_num(s):
text = re.compile('.*[0-9]$')
if text.match(s):
return 'Yes!Number is present at the end of string'
else:
return 'No!Number is not present at the end of string'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def end_num(s):
text = re.compile('.*[0-9]$')
if text.match(s):
return 'Yes!Number is present at the end of string'
else:
return 'No!Number is not present at the end of string'
print(end_num(s))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s = input('enter the string:')
def end_num(s):
text = re.compile('.*[0-9]$')
if text.match(s):
return 'Yes!Number is present at the end of string'
else:
return 'No!Number is not present at the end of string'
print(end_num(s))
<|reserved_special_token_1|>
import re
s = input('enter the string:')
def end_num(s):
text = re.compile('.*[0-9]$')
if text.match(s):
return 'Yes!Number is present at the end of string'
else:
return 'No!Number is not present at the end of string'
print(end_num(s))
<|reserved_special_token_1|>
import re
s=input('enter the string:')
def end_num(s):
text = re.compile(r".*[0-9]$")
if text.match(s):
return 'Yes!Number is present at the end of string'
else:
return 'No!Number is not present at the end of string'
print(end_num(s))
|
flexible
|
{
"blob_id": "94334f91b1556c05dce0ed6f23c074bb8875f185",
"index": 2505,
"step-1": "<mask token>\n\n\ndef end_num(s):\n text = re.compile('.*[0-9]$')\n if text.match(s):\n return 'Yes!Number is present at the end of string'\n else:\n return 'No!Number is not present at the end of string'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef end_num(s):\n text = re.compile('.*[0-9]$')\n if text.match(s):\n return 'Yes!Number is present at the end of string'\n else:\n return 'No!Number is not present at the end of string'\n\n\nprint(end_num(s))\n",
"step-3": "<mask token>\ns = input('enter the string:')\n\n\ndef end_num(s):\n text = re.compile('.*[0-9]$')\n if text.match(s):\n return 'Yes!Number is present at the end of string'\n else:\n return 'No!Number is not present at the end of string'\n\n\nprint(end_num(s))\n",
"step-4": "import re\ns = input('enter the string:')\n\n\ndef end_num(s):\n text = re.compile('.*[0-9]$')\n if text.match(s):\n return 'Yes!Number is present at the end of string'\n else:\n return 'No!Number is not present at the end of string'\n\n\nprint(end_num(s))\n",
"step-5": "import re\r\ns=input('enter the string:')\r\ndef end_num(s):\r\n text = re.compile(r\".*[0-9]$\")\r\n if text.match(s):\r\n return 'Yes!Number is present at the end of string'\r\n else:\r\n return 'No!Number is not present at the end of string'\r\n\r\nprint(end_num(s))\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
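
The `.*[0-9]$` pattern with `match` works because `match` anchors at the start of the string and `.*` consumes the rest, but the check reads more directly with `search` anchored only at the end. A sketch of that alternative (not a change to the record above):

import re

def ends_with_digit(s):
    # search() scans the whole string, so only the $-anchored digit matters
    return re.search(r'[0-9]$', s) is not None

assert ends_with_digit('abc1') is True
assert ends_with_digit('abc') is False
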
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def eventualSafeNodes(self, graph: List[List[int]]) ->List[int]:
res = []
d = {}
def dfs(node):
if graph[node] == []:
return True
if node in d:
return d[node]
if node in visit:
return False
visit.add(node)
for nei in graph[node]:
if dfs(nei) == False:
d[node] = False
return False
d[node] = True
return True
visit = set()
for i in range(len(graph)):
if dfs(i):
res.append(i)
return res
|
flexible
|
{
"blob_id": "b815f72e2cad351fd9411361a0e7cc75d39ae826",
"index": 9270,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def eventualSafeNodes(self, graph: List[List[int]]) ->List[int]:\n res = []\n d = {}\n\n def dfs(node):\n if graph[node] == []:\n return True\n if node in d:\n return d[node]\n if node in visit:\n return False\n visit.add(node)\n for nei in graph[node]:\n if dfs(nei) == False:\n d[node] = False\n return False\n d[node] = True\n return True\n visit = set()\n for i in range(len(graph)):\n if dfs(i):\n res.append(i)\n return res\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
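
The solution above memoizes a recursive DFS, which can exceed Python's default recursion limit on long chains. The common iterative alternative for this problem reverses the edges and runs Kahn's topological sort outward from the terminal nodes; a self-contained sketch (plain function rather than the LeetCode `Solution` wrapper):

from collections import deque

def eventual_safe_nodes(graph):
    n = len(graph)
    out_degree = [len(nbrs) for nbrs in graph]
    reverse = [[] for _ in range(n)]          # reverse[v] lists nodes pointing to v
    for u, nbrs in enumerate(graph):
        for v in nbrs:
            reverse[v].append(u)
    queue = deque(u for u in range(n) if out_degree[u] == 0)  # terminal nodes
    safe = [False] * n
    while queue:
        v = queue.popleft()
        safe[v] = True
        for u in reverse[v]:
            out_degree[u] -= 1                # one fewer possibly-unsafe edge out of u
            if out_degree[u] == 0:
                queue.append(u)
    return [u for u in range(n) if safe[u]]

assert eventual_safe_nodes([[1, 2], [2, 3], [5], [0], [5], [], []]) == [2, 4, 5, 6]
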
<|reserved_special_token_0|>
class TestUnwrap(object):
@pytest.fixture
def fn(self):
def fn():
pass
return fn
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TestVisitTask(object):
def test_no_args(self):
def fn():
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':
None, 'keywords': None, 'defaults': None}}
def test_simple_args(self):
def fn(a, b):
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],
'varargs': None, 'keywords': None, 'defaults': None}}
def test_arg_defaults(self):
def fn(a, b=1, c=None):
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],
'varargs': None, 'keywords': None, 'defaults': (1, None)}}
def test_varargs(self):
def fn(*args, **kwargs):
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':
'args', 'keywords': 'kwargs', 'defaults': None}}
def test_docs(self):
def fn(*args, **kwargs):
"""I am a teapot."""
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],
'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}
class TestVisit(object):
def test_single(self):
def fn():
pass
callables = {'fn': fn}
data = visitor.visit(callables)
assert len(data) == 1
assert data[0]['name'] == 'fn'
def test_multi(self):
def fn():
pass
def fn2():
pass
def fn3():
pass
callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}
data = visitor.visit(callables)
assert len(data) == 3
assert data[0]['name'] == 'fn'
assert data[1]['name'] == 'fn2'
assert data[2]['name'] == 'fn3'
def test_nested(self):
def fn():
pass
def fn2():
pass
def fn3():
pass
callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}
data = visitor.visit(callables)
assert len(data) == 3
assert data[0]['name'] == 'fn'
assert data[0]['path'] == ()
assert data[1]['name'] == 'fn2'
assert data[1]['path'] == ('mod',)
assert data[2]['name'] == 'fn3'
assert data[2]['path'] == ('mod',)
class TestVisitFabfile(object):
def test_one(self):
data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))
assert len(data) == 3
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestUnwrap(object):
@pytest.fixture
def fn(self):
def fn():
pass
return fn
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_roles_task(self, fn):
t = roles('foo')(task(fn))
assert visitor.unwrap(t) is fn
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TestVisitTask(object):
def test_no_args(self):
def fn():
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':
None, 'keywords': None, 'defaults': None}}
def test_simple_args(self):
def fn(a, b):
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],
'varargs': None, 'keywords': None, 'defaults': None}}
def test_arg_defaults(self):
def fn(a, b=1, c=None):
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],
'varargs': None, 'keywords': None, 'defaults': (1, None)}}
def test_varargs(self):
def fn(*args, **kwargs):
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':
'args', 'keywords': 'kwargs', 'defaults': None}}
def test_docs(self):
def fn(*args, **kwargs):
"""I am a teapot."""
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],
'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}
class TestVisit(object):
def test_single(self):
def fn():
pass
callables = {'fn': fn}
data = visitor.visit(callables)
assert len(data) == 1
assert data[0]['name'] == 'fn'
def test_multi(self):
def fn():
pass
def fn2():
pass
def fn3():
pass
callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}
data = visitor.visit(callables)
assert len(data) == 3
assert data[0]['name'] == 'fn'
assert data[1]['name'] == 'fn2'
assert data[2]['name'] == 'fn3'
def test_nested(self):
def fn():
pass
def fn2():
pass
def fn3():
pass
callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}
data = visitor.visit(callables)
assert len(data) == 3
assert data[0]['name'] == 'fn'
assert data[0]['path'] == ()
assert data[1]['name'] == 'fn2'
assert data[1]['path'] == ('mod',)
assert data[2]['name'] == 'fn3'
assert data[2]['path'] == ('mod',)
class TestVisitFabfile(object):
def test_one(self):
data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))
assert len(data) == 3
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestUnwrap(object):
@pytest.fixture
def fn(self):
def fn():
pass
return fn
def test_fn(self, fn):
assert visitor.unwrap(fn) is fn
def test_task(self, fn):
t = task(fn)
assert visitor.unwrap(t) is fn
<|reserved_special_token_0|>
def test_task_roles(self, fn):
t = task(roles('foo')(fn))
assert visitor.unwrap(t) is fn
def test_taskcall_roles(self, fn):
t = task()(roles('foo')(fn))
assert visitor.unwrap(t) is fn
def test_roles_task(self, fn):
t = roles('foo')(task(fn))
assert visitor.unwrap(t) is fn
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_lambda_task(self):
fn = lambda : None
t = task(fn)
assert visitor.unwrap(t) is fn
class TestVisitTask(object):
def test_no_args(self):
def fn():
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':
None, 'keywords': None, 'defaults': None}}
def test_simple_args(self):
def fn(a, b):
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],
'varargs': None, 'keywords': None, 'defaults': None}}
def test_arg_defaults(self):
def fn(a, b=1, c=None):
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],
'varargs': None, 'keywords': None, 'defaults': (1, None)}}
def test_varargs(self):
def fn(*args, **kwargs):
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':
'args', 'keywords': 'kwargs', 'defaults': None}}
def test_docs(self):
def fn(*args, **kwargs):
"""I am a teapot."""
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],
'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}
class TestVisit(object):
def test_single(self):
def fn():
pass
callables = {'fn': fn}
data = visitor.visit(callables)
assert len(data) == 1
assert data[0]['name'] == 'fn'
def test_multi(self):
def fn():
pass
def fn2():
pass
def fn3():
pass
callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}
data = visitor.visit(callables)
assert len(data) == 3
assert data[0]['name'] == 'fn'
assert data[1]['name'] == 'fn2'
assert data[2]['name'] == 'fn3'
def test_nested(self):
def fn():
pass
def fn2():
pass
def fn3():
pass
callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}
data = visitor.visit(callables)
assert len(data) == 3
assert data[0]['name'] == 'fn'
assert data[0]['path'] == ()
assert data[1]['name'] == 'fn2'
assert data[1]['path'] == ('mod',)
assert data[2]['name'] == 'fn3'
assert data[2]['path'] == ('mod',)
class TestVisitFabfile(object):
def test_one(self):
data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))
assert len(data) == 3
<|reserved_special_token_1|>
import os
from fabric.api import task, roles
import pytest
from fabric_rundeck import visitor
def fixture_path(*path):
return os.path.join(os.path.dirname(__file__), 'data', *path)
class TestUnwrap(object):
@pytest.fixture
def fn(self):
def fn():
pass
return fn
def test_fn(self, fn):
assert visitor.unwrap(fn) is fn
def test_task(self, fn):
t = task(fn)
assert visitor.unwrap(t) is fn
def test_taskcall(self, fn):
t = task()(fn)
assert visitor.unwrap(t) is fn
def test_task_roles(self, fn):
t = task(roles('foo')(fn))
assert visitor.unwrap(t) is fn
def test_taskcall_roles(self, fn):
t = task()(roles('foo')(fn))
assert visitor.unwrap(t) is fn
def test_roles_task(self, fn):
t = roles('foo')(task(fn))
assert visitor.unwrap(t) is fn
def test_roles_taskcall(self, fn):
t = roles('foo')(task()(fn))
assert visitor.unwrap(t) is fn
def test_lambda(self):
fn = lambda : None
assert visitor.unwrap(fn) is fn
def test_lambda_task(self):
fn = lambda : None
t = task(fn)
assert visitor.unwrap(t) is fn
class TestVisitTask(object):
def test_no_args(self):
def fn():
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':
None, 'keywords': None, 'defaults': None}}
def test_simple_args(self):
def fn(a, b):
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],
'varargs': None, 'keywords': None, 'defaults': None}}
def test_arg_defaults(self):
def fn(a, b=1, c=None):
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],
'varargs': None, 'keywords': None, 'defaults': (1, None)}}
def test_varargs(self):
def fn(*args, **kwargs):
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':
'args', 'keywords': 'kwargs', 'defaults': None}}
def test_docs(self):
def fn(*args, **kwargs):
"""I am a teapot."""
pass
assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),
'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],
'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}
class TestVisit(object):
def test_single(self):
def fn():
pass
callables = {'fn': fn}
data = visitor.visit(callables)
assert len(data) == 1
assert data[0]['name'] == 'fn'
def test_multi(self):
def fn():
pass
def fn2():
pass
def fn3():
pass
callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}
data = visitor.visit(callables)
assert len(data) == 3
assert data[0]['name'] == 'fn'
assert data[1]['name'] == 'fn2'
assert data[2]['name'] == 'fn3'
def test_nested(self):
def fn():
pass
def fn2():
pass
def fn3():
pass
callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}
data = visitor.visit(callables)
assert len(data) == 3
assert data[0]['name'] == 'fn'
assert data[0]['path'] == ()
assert data[1]['name'] == 'fn2'
assert data[1]['path'] == ('mod',)
assert data[2]['name'] == 'fn3'
assert data[2]['path'] == ('mod',)
class TestVisitFabfile(object):
def test_one(self):
data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))
assert len(data) == 3
<|reserved_special_token_1|>
#
# Author:: Noah Kantrowitz <[email protected]>
#
# Copyright 2014, Noah Kantrowitz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from fabric.api import task, roles
import pytest
from fabric_rundeck import visitor
def fixture_path(*path):
return os.path.join(os.path.dirname(__file__), 'data', *path)
class TestUnwrap(object):
@pytest.fixture
def fn(self):
def fn():
pass
return fn
def test_fn(self, fn):
assert visitor.unwrap(fn) is fn
def test_task(self, fn):
t = task(fn)
assert visitor.unwrap(t) is fn
def test_taskcall(self, fn):
t = task()(fn)
assert visitor.unwrap(t) is fn
def test_task_roles(self, fn):
t = task(roles('foo')(fn))
assert visitor.unwrap(t) is fn
def test_taskcall_roles(self, fn):
t = task()(roles('foo')(fn))
assert visitor.unwrap(t) is fn
def test_roles_task(self, fn):
t = roles('foo')(task(fn))
assert visitor.unwrap(t) is fn
def test_roles_taskcall(self, fn):
t = roles('foo')(task()(fn))
assert visitor.unwrap(t) is fn
def test_lambda(self):
fn = lambda: None
assert visitor.unwrap(fn) is fn
def test_lambda_task(self):
fn = lambda: None
t = task(fn)
assert visitor.unwrap(t) is fn
class TestVisitTask(object):
def test_no_args(self):
def fn():
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': None,
'cron': None,
'argspec': {
'args': [],
'varargs': None,
'keywords': None,
'defaults': None,
},
}
def test_simple_args(self):
def fn(a, b):
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': None,
'cron': None,
'argspec': {
'args': ['a', 'b'],
'varargs': None,
'keywords': None,
'defaults': None,
},
}
def test_arg_defaults(self):
def fn(a, b=1, c=None):
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': None,
'cron': None,
'argspec': {
'args': ['a', 'b', 'c'],
'varargs': None,
'keywords': None,
'defaults': (1, None),
},
}
def test_varargs(self):
def fn(*args, **kwargs):
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': None,
'cron': None,
'argspec': {
'args': [],
'varargs': 'args',
'keywords': 'kwargs',
'defaults': None,
},
}
def test_docs(self):
def fn(*args, **kwargs):
"""I am a teapot."""
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': 'I am a teapot.',
'cron': None,
'argspec': {
'args': [],
'varargs': 'args',
'keywords': 'kwargs',
'defaults': None,
},
}
class TestVisit(object):
def test_single(self):
def fn():
pass
callables = {
'fn': fn,
}
data = visitor.visit(callables)
assert len(data) == 1
assert data[0]['name'] == 'fn'
def test_multi(self):
def fn():
pass
def fn2():
pass
def fn3():
pass
callables = {
'fn': fn,
'fn2': fn2,
'fn3': fn3,
}
data = visitor.visit(callables)
assert len(data) == 3
assert data[0]['name'] == 'fn'
assert data[1]['name'] == 'fn2'
assert data[2]['name'] == 'fn3'
def test_nested(self):
def fn():
pass
def fn2():
pass
def fn3():
pass
callables = {
'fn': fn,
'mod': {
'fn2': fn2,
'fn3': fn3,
}
}
data = visitor.visit(callables)
assert len(data) == 3
assert data[0]['name'] == 'fn'
assert data[0]['path'] == ()
assert data[1]['name'] == 'fn2'
assert data[1]['path'] == ('mod',)
assert data[2]['name'] == 'fn3'
assert data[2]['path'] == ('mod',)
class TestVisitFabfile(object):
def test_one(self):
data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))
assert len(data) == 3
|
flexible
|
{
"blob_id": "a1e563f94044ff7cd7e0e55542bc4ca2db81df28",
"index": 9749,
"step-1": "<mask token>\n\n\nclass TestUnwrap(object):\n\n @pytest.fixture\n def fn(self):\n\n def fn():\n pass\n return fn\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestVisitTask(object):\n\n def test_no_args(self):\n\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n None, 'keywords': None, 'defaults': None}}\n\n def test_simple_args(self):\n\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],\n 'varargs': None, 'keywords': None, 'defaults': None}}\n\n def test_arg_defaults(self):\n\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],\n 'varargs': None, 'keywords': None, 'defaults': (1, None)}}\n\n def test_varargs(self):\n\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n def test_docs(self):\n\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],\n 'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n\nclass TestVisit(object):\n\n def test_single(self):\n\n def fn():\n pass\n callables = {'fn': fn}\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-2": "<mask token>\n\n\nclass TestUnwrap(object):\n\n @pytest.fixture\n def fn(self):\n\n def fn():\n pass\n return fn\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_roles_task(self, fn):\n t = roles('foo')(task(fn))\n assert visitor.unwrap(t) is fn\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestVisitTask(object):\n\n def test_no_args(self):\n\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n None, 'keywords': None, 'defaults': None}}\n\n def test_simple_args(self):\n\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],\n 'varargs': None, 'keywords': None, 'defaults': None}}\n\n def test_arg_defaults(self):\n\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],\n 'varargs': None, 'keywords': None, 'defaults': (1, None)}}\n\n def test_varargs(self):\n\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n def test_docs(self):\n\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],\n 'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n\nclass TestVisit(object):\n\n def test_single(self):\n\n def fn():\n pass\n callables = {'fn': fn}\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-3": "<mask token>\n\n\nclass TestUnwrap(object):\n\n @pytest.fixture\n def fn(self):\n\n def fn():\n pass\n return fn\n\n def test_fn(self, fn):\n assert visitor.unwrap(fn) is fn\n\n def test_task(self, fn):\n t = task(fn)\n assert visitor.unwrap(t) is fn\n <mask token>\n\n def test_task_roles(self, fn):\n t = task(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_taskcall_roles(self, fn):\n t = task()(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_task(self, fn):\n t = roles('foo')(task(fn))\n assert visitor.unwrap(t) is fn\n <mask token>\n <mask token>\n\n def test_lambda_task(self):\n fn = lambda : None\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n\nclass TestVisitTask(object):\n\n def test_no_args(self):\n\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n None, 'keywords': None, 'defaults': None}}\n\n def test_simple_args(self):\n\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],\n 'varargs': None, 'keywords': None, 'defaults': None}}\n\n def test_arg_defaults(self):\n\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],\n 'varargs': None, 'keywords': None, 'defaults': (1, None)}}\n\n def test_varargs(self):\n\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n def test_docs(self):\n\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],\n 'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n\nclass TestVisit(object):\n\n def test_single(self):\n\n def fn():\n pass\n callables = {'fn': fn}\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-4": "import os\nfrom fabric.api import task, roles\nimport pytest\nfrom fabric_rundeck import visitor\n\n\ndef fixture_path(*path):\n return os.path.join(os.path.dirname(__file__), 'data', *path)\n\n\nclass TestUnwrap(object):\n\n @pytest.fixture\n def fn(self):\n\n def fn():\n pass\n return fn\n\n def test_fn(self, fn):\n assert visitor.unwrap(fn) is fn\n\n def test_task(self, fn):\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n def test_taskcall(self, fn):\n t = task()(fn)\n assert visitor.unwrap(t) is fn\n\n def test_task_roles(self, fn):\n t = task(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_taskcall_roles(self, fn):\n t = task()(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_task(self, fn):\n t = roles('foo')(task(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_taskcall(self, fn):\n t = roles('foo')(task()(fn))\n assert visitor.unwrap(t) is fn\n\n def test_lambda(self):\n fn = lambda : None\n assert visitor.unwrap(fn) is fn\n\n def test_lambda_task(self):\n fn = lambda : None\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n\nclass TestVisitTask(object):\n\n def test_no_args(self):\n\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n None, 'keywords': None, 'defaults': None}}\n\n def test_simple_args(self):\n\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],\n 'varargs': None, 'keywords': None, 'defaults': None}}\n\n def test_arg_defaults(self):\n\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],\n 'varargs': None, 'keywords': None, 'defaults': (1, None)}}\n\n def test_varargs(self):\n\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n def test_docs(self):\n\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],\n 'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n\nclass TestVisit(object):\n\n def test_single(self):\n\n def fn():\n pass\n callables = {'fn': fn}\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-5": "#\n# Author:: Noah Kantrowitz <[email protected]>\n#\n# Copyright 2014, Noah Kantrowitz\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\n\nfrom fabric.api import task, roles\nimport pytest\n\nfrom fabric_rundeck import visitor\n\n\ndef fixture_path(*path):\n return os.path.join(os.path.dirname(__file__), 'data', *path)\n\n\nclass TestUnwrap(object):\n @pytest.fixture\n def fn(self):\n def fn():\n pass\n return fn\n\n def test_fn(self, fn):\n assert visitor.unwrap(fn) is fn\n\n def test_task(self, fn):\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n def test_taskcall(self, fn):\n t = task()(fn)\n assert visitor.unwrap(t) is fn\n\n def test_task_roles(self, fn):\n t = task(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_taskcall_roles(self, fn):\n t = task()(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_task(self, fn):\n t = roles('foo')(task(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_taskcall(self, fn):\n t = roles('foo')(task()(fn))\n assert visitor.unwrap(t) is fn\n\n def test_lambda(self):\n fn = lambda: None\n assert visitor.unwrap(fn) is fn\n\n def test_lambda_task(self):\n fn = lambda: None\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n\nclass TestVisitTask(object):\n def test_no_args(self):\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': None,\n 'cron': None,\n 'argspec': {\n 'args': [],\n 'varargs': None,\n 'keywords': None,\n 'defaults': None,\n },\n }\n\n def test_simple_args(self):\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': None,\n 'cron': None,\n 'argspec': {\n 'args': ['a', 'b'],\n 'varargs': None,\n 'keywords': None,\n 'defaults': None,\n },\n }\n\n def test_arg_defaults(self):\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': None,\n 'cron': None,\n 'argspec': {\n 'args': ['a', 'b', 'c'],\n 'varargs': None,\n 'keywords': None,\n 'defaults': (1, None),\n },\n }\n\n def test_varargs(self):\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': None,\n 'cron': None,\n 'argspec': {\n 'args': [],\n 'varargs': 'args',\n 'keywords': 'kwargs',\n 'defaults': None,\n },\n }\n\n def test_docs(self):\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': 'I am a teapot.',\n 'cron': None,\n 'argspec': {\n 'args': [],\n 'varargs': 'args',\n 'keywords': 'kwargs',\n 'defaults': None,\n },\n }\n\n\nclass TestVisit(object):\n def test_single(self):\n def fn():\n pass\n callables = {\n 'fn': fn,\n }\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n def fn():\n pass\n def fn2():\n pass\n def fn3():\n pass\n callables = {\n 'fn': fn,\n 'fn2': fn2,\n 'fn3': fn3,\n }\n data = visitor.visit(callables)\n assert len(data) == 3\n 
assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n def fn():\n pass\n def fn2():\n pass\n def fn3():\n pass\n callables = {\n 'fn': fn,\n 'mod': {\n 'fn2': fn2,\n 'fn3': fn3,\n }\n }\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-ids": [
14,
15,
20,
25,
26
]
}
|
[
14,
15,
20,
25,
26
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_fibonacci_number():
global CURRENT_INDEX
while fib_list[CURRENT_INDEX - 1] < NUMBER_TO_BE_CHECKED:
fib_list.append(fib_list[CURRENT_INDEX - 1] + fib_list[
CURRENT_INDEX - 2])
CURRENT_INDEX += 1
if NUMBER_TO_BE_CHECKED not in fib_list:
print('Your number is not a Fibonacci number.')
else:
print('Your number is a Fibonacci number.')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_fibonacci_number():
global CURRENT_INDEX
while fib_list[CURRENT_INDEX - 1] < NUMBER_TO_BE_CHECKED:
fib_list.append(fib_list[CURRENT_INDEX - 1] + fib_list[
CURRENT_INDEX - 2])
CURRENT_INDEX += 1
if NUMBER_TO_BE_CHECKED not in fib_list:
print('Your number is not a Fibonacci number.')
else:
print('Your number is a Fibonacci number.')
while True:
try:
NUMBER_TO_BE_CHECKED = int(input('Please enter the number to check: '))
except ValueError:
print('Your input is not an integer!')
continue
else:
check_fibonacci_number()
break
<|reserved_special_token_1|>
fib_list = [0, 1]
CURRENT_INDEX = 2
def check_fibonacci_number():
global CURRENT_INDEX
while fib_list[CURRENT_INDEX - 1] < NUMBER_TO_BE_CHECKED:
fib_list.append(fib_list[CURRENT_INDEX - 1] + fib_list[
CURRENT_INDEX - 2])
CURRENT_INDEX += 1
if NUMBER_TO_BE_CHECKED not in fib_list:
print('Your number is not a Fibonacci number.')
else:
print('Your number is a Fibonacci number.')
while True:
try:
NUMBER_TO_BE_CHECKED = int(input('Please enter the number to check: '))
except ValueError:
print('Your input is not an integer!')
continue
else:
check_fibonacci_number()
break
<|reserved_special_token_1|>
# Umut Cakan Computer Science S006742
# Fibonacci list. First and second terms are static.
fib_list = [0, 1]
# Current index.
CURRENT_INDEX = 2
# Function for the checking input is a Fibonacci number or not.
def check_fibonacci_number():
global CURRENT_INDEX
# Get the fibonacci numbers that are less or equal to input value.
# Because we will not need to check fib numbers that are higher than our input.
while fib_list[CURRENT_INDEX - 1] < NUMBER_TO_BE_CHECKED:
fib_list.append(fib_list[CURRENT_INDEX - 1] + fib_list[CURRENT_INDEX - 2])
CURRENT_INDEX += 1
# Check if the input value is in that list or not.
if NUMBER_TO_BE_CHECKED not in fib_list:
print("Your number is not a Fibonacci number.")
else:
print("Your number is a Fibonacci number.")
# Get number to be checked from user.
while True:
try:
NUMBER_TO_BE_CHECKED = int(input("Please enter the number to check: "))
# If it is not an integer throw an error and wait for another input.
except ValueError:
print("Your input is not an integer!")
continue
# If it is an integer, proceed.
else:
check_fibonacci_number()
break
|
flexible
|
{
"blob_id": "50fa8852f74f4d2428fb238a86dd1feedb210877",
"index": 3261,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_fibonacci_number():\n global CURRENT_INDEX\n while fib_list[CURRENT_INDEX - 1] < NUMBER_TO_BE_CHECKED:\n fib_list.append(fib_list[CURRENT_INDEX - 1] + fib_list[\n CURRENT_INDEX - 2])\n CURRENT_INDEX += 1\n if NUMBER_TO_BE_CHECKED not in fib_list:\n print('Your number is not a Fibonacci number.')\n else:\n print('Your number is a Fibonacci number.')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef check_fibonacci_number():\n global CURRENT_INDEX\n while fib_list[CURRENT_INDEX - 1] < NUMBER_TO_BE_CHECKED:\n fib_list.append(fib_list[CURRENT_INDEX - 1] + fib_list[\n CURRENT_INDEX - 2])\n CURRENT_INDEX += 1\n if NUMBER_TO_BE_CHECKED not in fib_list:\n print('Your number is not a Fibonacci number.')\n else:\n print('Your number is a Fibonacci number.')\n\n\nwhile True:\n try:\n NUMBER_TO_BE_CHECKED = int(input('Please enter the number to check: '))\n except ValueError:\n print('Your input is not an integer!')\n continue\n else:\n check_fibonacci_number()\n break\n",
"step-4": "fib_list = [0, 1]\nCURRENT_INDEX = 2\n\n\ndef check_fibonacci_number():\n global CURRENT_INDEX\n while fib_list[CURRENT_INDEX - 1] < NUMBER_TO_BE_CHECKED:\n fib_list.append(fib_list[CURRENT_INDEX - 1] + fib_list[\n CURRENT_INDEX - 2])\n CURRENT_INDEX += 1\n if NUMBER_TO_BE_CHECKED not in fib_list:\n print('Your number is not a Fibonacci number.')\n else:\n print('Your number is a Fibonacci number.')\n\n\nwhile True:\n try:\n NUMBER_TO_BE_CHECKED = int(input('Please enter the number to check: '))\n except ValueError:\n print('Your input is not an integer!')\n continue\n else:\n check_fibonacci_number()\n break\n",
"step-5": "# Umut Cakan Computer Science S006742\n\n# Fibonacci list. First and second terms are static.\nfib_list = [0, 1]\n# Current index.\nCURRENT_INDEX = 2\n\n# Function for the checking input is a Fibonacci number or not.\ndef check_fibonacci_number():\n global CURRENT_INDEX\n # Get the fibonacci numbers that are less or equal to input value.\n # Because we will not need to check fib numbers that are higher than our input.\n while fib_list[CURRENT_INDEX - 1] < NUMBER_TO_BE_CHECKED:\n fib_list.append(fib_list[CURRENT_INDEX - 1] + fib_list[CURRENT_INDEX - 2])\n CURRENT_INDEX += 1\n # Check if the input value is in that list or not.\n if NUMBER_TO_BE_CHECKED not in fib_list:\n print(\"Your number is not a Fibonacci number.\")\n else:\n print(\"Your number is a Fibonacci number.\")\n\n\n# Get number to be checked from user.\nwhile True:\n try:\n NUMBER_TO_BE_CHECKED = int(input(\"Please enter the number to check: \"))\n # If it is not an integer throw an error and wait for another input.\n except ValueError:\n print(\"Your input is not an integer!\")\n continue\n # If it is an integer, proceed. \n else:\n check_fibonacci_number()\n break\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loadWavFile(fileName, filePath, savePlot, maxAudioLength, reduceNoise=True
):
data, rate = librosa.load(filePath, sr=None)
if reduceNoise:
noiseRemovedData = noisereduce.reduce_noise(audio_clip=data,
noise_clip=data[0:10000], verbose=False)
noiseRemovedData = noisereduce.reduce_noise(audio_clip=
noiseRemovedData, noise_clip=data[-10000:], verbose=False)
data = noiseRemovedData
maxDataLength = int(maxAudioLength * rate)
padding = []
if data.shape[0] > maxDataLength:
raise ValueError('Max audio length breached')
else:
paddingDataLength = maxDataLength - data.shape[0]
padding = [(0) for i in range(paddingDataLength)]
leftSpeakerSound = data
audioWithPadding = numpy.concatenate((leftSpeakerSound, padding))
if savePlot:
fig, ax = plt.subplots()
ax.plot(audioWithPadding)
fig.suptitle(fileName)
fig.savefig('./output_img/wav/' + fileName + '_wav.png')
plt.close(fig)
return audioWithPadding, rate
<|reserved_special_token_1|>
from scipy.io import wavfile
import numpy
from matplotlib import pyplot as plt
import librosa
import noisereduce
def loadWavFile(fileName, filePath, savePlot, maxAudioLength, reduceNoise=True
):
data, rate = librosa.load(filePath, sr=None)
if reduceNoise:
noiseRemovedData = noisereduce.reduce_noise(audio_clip=data,
noise_clip=data[0:10000], verbose=False)
noiseRemovedData = noisereduce.reduce_noise(audio_clip=
noiseRemovedData, noise_clip=data[-10000:], verbose=False)
data = noiseRemovedData
maxDataLength = int(maxAudioLength * rate)
padding = []
if data.shape[0] > maxDataLength:
raise ValueError('Max audio length breached')
else:
paddingDataLength = maxDataLength - data.shape[0]
padding = [(0) for i in range(paddingDataLength)]
leftSpeakerSound = data
audioWithPadding = numpy.concatenate((leftSpeakerSound, padding))
if savePlot:
fig, ax = plt.subplots()
ax.plot(audioWithPadding)
fig.suptitle(fileName)
fig.savefig('./output_img/wav/' + fileName + '_wav.png')
plt.close(fig)
return audioWithPadding, rate
<|reserved_special_token_1|>
from scipy.io import wavfile
import numpy
from matplotlib import pyplot as plt
import librosa
import noisereduce
def loadWavFile(fileName, filePath, savePlot, maxAudioLength, reduceNoise = True):
# Read file
# rate, data = wavfile.read(filePath)
# print(filePath, rate, data.shape, "audio length", data.shape[0] / rate, data[0])
data, rate = librosa.load(filePath, sr=None)
# print(filePath, rate, data.shape, "librosa audio length", data.shape[0] / rate, data[0])
if reduceNoise:
noiseRemovedData = noisereduce.reduce_noise(audio_clip=data, noise_clip=data[0:10000], verbose=False)
noiseRemovedData = noisereduce.reduce_noise(audio_clip=noiseRemovedData, noise_clip=data[-10000:], verbose=False)
data = noiseRemovedData
maxDataLength = int(maxAudioLength * rate)
padding = []
if data.shape[0] > maxDataLength:
raise ValueError("Max audio length breached")
else:
paddingDataLength = maxDataLength - data.shape[0]
padding = [0 for i in range(paddingDataLength)]
# data is stereo sound. take left speaker only
leftSpeakerSound = data # data[:,0]
# print("leftSpeakerSound.shape", leftSpeakerSound.shape)
audioWithPadding = numpy.concatenate((leftSpeakerSound, padding))
# print("audioWithPadding.shape", audioWithPadding.shape)
if savePlot:
fig, ax = plt.subplots()
ax.plot(audioWithPadding)
fig.suptitle(fileName)
fig.savefig("./output_img/wav/" + fileName + "_wav.png")
plt.close(fig)
return audioWithPadding, rate
|
flexible
|
{
"blob_id": "07ac061d7d1eaf23b6c95fbcbf6753f25e568188",
"index": 157,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef loadWavFile(fileName, filePath, savePlot, maxAudioLength, reduceNoise=True\n ):\n data, rate = librosa.load(filePath, sr=None)\n if reduceNoise:\n noiseRemovedData = noisereduce.reduce_noise(audio_clip=data,\n noise_clip=data[0:10000], verbose=False)\n noiseRemovedData = noisereduce.reduce_noise(audio_clip=\n noiseRemovedData, noise_clip=data[-10000:], verbose=False)\n data = noiseRemovedData\n maxDataLength = int(maxAudioLength * rate)\n padding = []\n if data.shape[0] > maxDataLength:\n raise ValueError('Max audio length breached')\n else:\n paddingDataLength = maxDataLength - data.shape[0]\n padding = [(0) for i in range(paddingDataLength)]\n leftSpeakerSound = data\n audioWithPadding = numpy.concatenate((leftSpeakerSound, padding))\n if savePlot:\n fig, ax = plt.subplots()\n ax.plot(audioWithPadding)\n fig.suptitle(fileName)\n fig.savefig('./output_img/wav/' + fileName + '_wav.png')\n plt.close(fig)\n return audioWithPadding, rate\n",
"step-3": "from scipy.io import wavfile\nimport numpy\nfrom matplotlib import pyplot as plt\nimport librosa\nimport noisereduce\n\n\ndef loadWavFile(fileName, filePath, savePlot, maxAudioLength, reduceNoise=True\n ):\n data, rate = librosa.load(filePath, sr=None)\n if reduceNoise:\n noiseRemovedData = noisereduce.reduce_noise(audio_clip=data,\n noise_clip=data[0:10000], verbose=False)\n noiseRemovedData = noisereduce.reduce_noise(audio_clip=\n noiseRemovedData, noise_clip=data[-10000:], verbose=False)\n data = noiseRemovedData\n maxDataLength = int(maxAudioLength * rate)\n padding = []\n if data.shape[0] > maxDataLength:\n raise ValueError('Max audio length breached')\n else:\n paddingDataLength = maxDataLength - data.shape[0]\n padding = [(0) for i in range(paddingDataLength)]\n leftSpeakerSound = data\n audioWithPadding = numpy.concatenate((leftSpeakerSound, padding))\n if savePlot:\n fig, ax = plt.subplots()\n ax.plot(audioWithPadding)\n fig.suptitle(fileName)\n fig.savefig('./output_img/wav/' + fileName + '_wav.png')\n plt.close(fig)\n return audioWithPadding, rate\n",
"step-4": "from scipy.io import wavfile\nimport numpy\nfrom matplotlib import pyplot as plt\nimport librosa\nimport noisereduce\n\ndef loadWavFile(fileName, filePath, savePlot, maxAudioLength, reduceNoise = True):\n # Read file\n # rate, data = wavfile.read(filePath)\n # print(filePath, rate, data.shape, \"audio length\", data.shape[0] / rate, data[0])\n\n data, rate = librosa.load(filePath, sr=None)\n # print(filePath, rate, data.shape, \"librosa audio length\", data.shape[0] / rate, data[0])\n if reduceNoise:\n noiseRemovedData = noisereduce.reduce_noise(audio_clip=data, noise_clip=data[0:10000], verbose=False)\n noiseRemovedData = noisereduce.reduce_noise(audio_clip=noiseRemovedData, noise_clip=data[-10000:], verbose=False)\n data = noiseRemovedData\n\n\n maxDataLength = int(maxAudioLength * rate)\n padding = []\n if data.shape[0] > maxDataLength:\n raise ValueError(\"Max audio length breached\")\n else:\n paddingDataLength = maxDataLength - data.shape[0]\n padding = [0 for i in range(paddingDataLength)]\n\n # data is stereo sound. take left speaker only\n leftSpeakerSound = data # data[:,0]\n # print(\"leftSpeakerSound.shape\", leftSpeakerSound.shape)\n\n audioWithPadding = numpy.concatenate((leftSpeakerSound, padding))\n # print(\"audioWithPadding.shape\", audioWithPadding.shape)\n\n if savePlot:\n fig, ax = plt.subplots()\n ax.plot(audioWithPadding)\n fig.suptitle(fileName)\n fig.savefig(\"./output_img/wav/\" + fileName + \"_wav.png\")\n plt.close(fig)\n\n return audioWithPadding, rate",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# square environment: there is a wall along each edge
from environment import super_environment
class SquareNormal(super_environment.Environment):
def __init__(self, size_x, size_y):
super().__init__(size_x, size_y)
@staticmethod
def environment_type():
return 'square'
def get_converted_position(self, position_before, position_after, radius):
        # return a valid position: a position past the edge wall is clamped back inside.
x = position_after[0]
if x < radius:
x = radius
elif x + radius > self.screen_size_x:
x = self.screen_size_x - radius
y = position_after[1]
if y < radius:
y = radius
elif y > self.screen_size_y - radius:
y = self.screen_size_y - radius
return x, y
|
normal
|
{
"blob_id": "919f1746bfdec61f5e81e6ce0e17bb3bf040230a",
"index": 2958,
"step-1": "<mask token>\n\n\nclass SquareNormal(super_environment.Environment):\n <mask token>\n\n @staticmethod\n def environment_type():\n return 'square'\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SquareNormal(super_environment.Environment):\n <mask token>\n\n @staticmethod\n def environment_type():\n return 'square'\n\n def get_converted_position(self, position_before, position_after, radius):\n x = position_after[0]\n if x < radius:\n x = radius\n elif x + radius > self.screen_size_x:\n x = self.screen_size_x - radius\n y = position_after[1]\n if y < radius:\n y = radius\n elif y > self.screen_size_y - radius:\n y = self.screen_size_y - radius\n return x, y\n",
"step-3": "<mask token>\n\n\nclass SquareNormal(super_environment.Environment):\n\n def __init__(self, size_x, size_y):\n super().__init__(size_x, size_y)\n\n @staticmethod\n def environment_type():\n return 'square'\n\n def get_converted_position(self, position_before, position_after, radius):\n x = position_after[0]\n if x < radius:\n x = radius\n elif x + radius > self.screen_size_x:\n x = self.screen_size_x - radius\n y = position_after[1]\n if y < radius:\n y = radius\n elif y > self.screen_size_y - radius:\n y = self.screen_size_y - radius\n return x, y\n",
"step-4": "from environment import super_environment\n\n\nclass SquareNormal(super_environment.Environment):\n\n def __init__(self, size_x, size_y):\n super().__init__(size_x, size_y)\n\n @staticmethod\n def environment_type():\n return 'square'\n\n def get_converted_position(self, position_before, position_after, radius):\n x = position_after[0]\n if x < radius:\n x = radius\n elif x + radius > self.screen_size_x:\n x = self.screen_size_x - radius\n y = position_after[1]\n if y < radius:\n y = radius\n elif y > self.screen_size_y - radius:\n y = self.screen_size_y - radius\n return x, y\n",
"step-5": "# square environment. there are the wall at the edge\nfrom environment import super_environment\n\n\nclass SquareNormal(super_environment.Environment):\n def __init__(self, size_x, size_y):\n super().__init__(size_x, size_y)\n\n @staticmethod\n def environment_type():\n return 'square'\n\n def get_converted_position(self, position_before, position_after, radius):\n # return the able position.if the position over the edge wall it is impossible.\n x = position_after[0]\n if x < radius:\n x = radius\n elif x + radius > self.screen_size_x:\n x = self.screen_size_x - radius\n y = position_after[1]\n if y < radius:\n y = radius\n elif y > self.screen_size_y - radius:\n y = self.screen_size_y - radius\n return x, y\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Deck:
def __init__(self, num_cols, front, back):
self.flashcards = []
self.num_cols = num_cols
self.front = front
self.back = back
class Flashcard:
def __init__(self, deck, front, back, column, row):
self.deck = deck
self.front = front
self.back = back
self.column = column
self.row = row
self.correct = False
def show_front(self):
r = '{}: {}'.format(self.deck.front, self.front)
return r
def show_back(self):
return '{}: {}'.format(self.deck.back, self.back)
def show_card(self):
return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.
deck.back, self.back)
def show_reverse(self):
return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck
.front, self.front)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Deck:
def __init__(self, num_cols, front, back):
self.flashcards = []
self.num_cols = num_cols
self.front = front
self.back = back
class Flashcard:
def __init__(self, deck, front, back, column, row):
self.deck = deck
self.front = front
self.back = back
self.column = column
self.row = row
self.correct = False
def show_front(self):
r = '{}: {}'.format(self.deck.front, self.front)
return r
def show_back(self):
return '{}: {}'.format(self.deck.back, self.back)
def show_card(self):
return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.
deck.back, self.back)
def show_reverse(self):
return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck
.front, self.front)
def create_deck(filename, num_cols):
df = pd.read_excel(filename)
front = df.columns.values[0]
back = df.columns.values[1]
deck = Deck(num_cols, front, back)
for i in range(num_cols):
front_column = '{}.{}'.format(front, i) if i else front
back_column = '{}.{}'.format(back, i) if i else back
for row in range(df[front_column].size):
f = df[front_column][row]
b = df[back_column][row]
if not (pd.isnull(f) or pd.isnull(b)):
fc = Flashcard(deck, f.strip(), b.strip(), i, row)
deck.flashcards.append(fc)
return deck
def get_cards_from_deck(deck, first_letter, start_index, number_of_cards):
flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or
first_letter == -1]
return flashcards[start_index:number_of_cards + start_index]
def play_game(deck, mode, first_letter, start_index, number_of_cards):
flashcards = get_cards_from_deck(deck, first_letter, start_index,
number_of_cards)
play_cards(mode, deck, flashcards)
def play_cards(mode, deck, cards):
source = deck.front if mode % 2 == 0 else deck.back
target = deck.back if mode % 2 == 0 else deck.front
if mode >= 2:
random.shuffle(cards)
num_cards = len(cards)
start_time = time.time()
for i, fc in enumerate(cards):
source_word = fc.front if mode % 2 == 0 else fc.back
target_word = fc.back if mode % 2 == 0 else fc.front
quiz(fc, source, source_word, target, target_word, i, num_cards)
print('All Done!')
correct = sum(fc.correct == True for fc in cards)
incorrect = len(cards) - correct
print('Correct: {}'.format(correct))
print('Incorrect: {}'.format(incorrect))
if incorrect:
incorrect_cards = [fc for fc in cards if not fc.correct]
print('\n'.join([fc.show_card() for fc in incorrect_cards]))
again = input('review incorrect words (y/n): ')
if again == 'y' or again == '1' or again == 'да':
play_cards(mode, deck, incorrect_cards)
else:
finish_time = time.time()
time_diff = time.gmtime(finish_time - start_time)
avg_time = time.gmtime((finish_time - start_time) / num_cards)
print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))
print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))
def quiz(fc, source_language, source_word, target_language, target_word, i,
number_of_cards):
print('Card {}/{}'.format(i + 1, number_of_cards))
print('{} word: {}'.format(source_language, source_word))
answer = input('Enter {} translation: '.format(target_language))
if is_correct(answer, target_word):
fc.correct = True
print('Correct!')
else:
print('Incorrect! Correct answer was: {}'.format(target_word))
n = input('Enter {} translation for {}: '.format(target_language,
source_word))
def is_correct(answer, target):
return format_for_comparison(answer) == format_for_comparison(target)
def format_for_comparison(word):
word = word.strip().lower()
word = word.split('(')
word[0] = word[0].split(', ')
word[0].sort()
word[0] = ', '.join(word[0])
word = '('.join(word)
return word
def learn_words(deck, first_letter, start_index, number_of_cards):
flashcards = get_cards_from_deck(deck, first_letter, start_index,
number_of_cards)
for i, card in enumerate(flashcards):
print('Card {}/{}'.format(i + 1, number_of_cards))
input('{}\nPractice: '.format(card.show_card()))
input('{}\nPractice: '.format(card.show_front()))
input('{}\nPractice: '.format(card.show_back()))
print('Done! Review learned words:')
for card in flashcards:
print('{}'.format(card.show_card()))
def main(filename, first_letter, start_index, number_of_cards, mode):
num_cols = 9
deck = create_deck(filename, num_cols)
print('Welcome to The Flashcard Learner!')
print("Okay! Let's play!")
if mode == 4:
learn_words(deck, first_letter, start_index, number_of_cards)
else:
play_game(deck, mode, first_letter, start_index, number_of_cards)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Deck:
def __init__(self, num_cols, front, back):
self.flashcards = []
self.num_cols = num_cols
self.front = front
self.back = back
class Flashcard:
def __init__(self, deck, front, back, column, row):
self.deck = deck
self.front = front
self.back = back
self.column = column
self.row = row
self.correct = False
def show_front(self):
r = '{}: {}'.format(self.deck.front, self.front)
return r
def show_back(self):
return '{}: {}'.format(self.deck.back, self.back)
def show_card(self):
return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.
deck.back, self.back)
def show_reverse(self):
return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck
.front, self.front)
def create_deck(filename, num_cols):
df = pd.read_excel(filename)
front = df.columns.values[0]
back = df.columns.values[1]
deck = Deck(num_cols, front, back)
for i in range(num_cols):
front_column = '{}.{}'.format(front, i) if i else front
back_column = '{}.{}'.format(back, i) if i else back
for row in range(df[front_column].size):
f = df[front_column][row]
b = df[back_column][row]
if not (pd.isnull(f) or pd.isnull(b)):
fc = Flashcard(deck, f.strip(), b.strip(), i, row)
deck.flashcards.append(fc)
return deck
def get_cards_from_deck(deck, first_letter, start_index, number_of_cards):
flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or
first_letter == -1]
return flashcards[start_index:number_of_cards + start_index]
def play_game(deck, mode, first_letter, start_index, number_of_cards):
flashcards = get_cards_from_deck(deck, first_letter, start_index,
number_of_cards)
play_cards(mode, deck, flashcards)
def play_cards(mode, deck, cards):
source = deck.front if mode % 2 == 0 else deck.back
target = deck.back if mode % 2 == 0 else deck.front
if mode >= 2:
random.shuffle(cards)
num_cards = len(cards)
start_time = time.time()
for i, fc in enumerate(cards):
source_word = fc.front if mode % 2 == 0 else fc.back
target_word = fc.back if mode % 2 == 0 else fc.front
quiz(fc, source, source_word, target, target_word, i, num_cards)
print('All Done!')
correct = sum(fc.correct == True for fc in cards)
incorrect = len(cards) - correct
print('Correct: {}'.format(correct))
print('Incorrect: {}'.format(incorrect))
if incorrect:
incorrect_cards = [fc for fc in cards if not fc.correct]
print('\n'.join([fc.show_card() for fc in incorrect_cards]))
again = input('review incorrect words (y/n): ')
if again == 'y' or again == '1' or again == 'да':
play_cards(mode, deck, incorrect_cards)
else:
finish_time = time.time()
time_diff = time.gmtime(finish_time - start_time)
avg_time = time.gmtime((finish_time - start_time) / num_cards)
print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))
print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))
def quiz(fc, source_language, source_word, target_language, target_word, i,
number_of_cards):
print('Card {}/{}'.format(i + 1, number_of_cards))
print('{} word: {}'.format(source_language, source_word))
answer = input('Enter {} translation: '.format(target_language))
if is_correct(answer, target_word):
fc.correct = True
print('Correct!')
else:
print('Incorrect! Correct answer was: {}'.format(target_word))
n = input('Enter {} translation for {}: '.format(target_language,
source_word))
def is_correct(answer, target):
return format_for_comparison(answer) == format_for_comparison(target)
def format_for_comparison(word):
word = word.strip().lower()
word = word.split('(')
word[0] = word[0].split(', ')
word[0].sort()
word[0] = ', '.join(word[0])
word = '('.join(word)
return word
def learn_words(deck, first_letter, start_index, number_of_cards):
flashcards = get_cards_from_deck(deck, first_letter, start_index,
number_of_cards)
for i, card in enumerate(flashcards):
print('Card {}/{}'.format(i + 1, number_of_cards))
input('{}\nPractice: '.format(card.show_card()))
input('{}\nPractice: '.format(card.show_front()))
input('{}\nPractice: '.format(card.show_back()))
print('Done! Review learned words:')
for card in flashcards:
print('{}'.format(card.show_card()))
def main(filename, first_letter, start_index, number_of_cards, mode):
num_cols = 9
deck = create_deck(filename, num_cols)
print('Welcome to The Flashcard Learner!')
print("Okay! Let's play!")
if mode == 4:
learn_words(deck, first_letter, start_index, number_of_cards)
else:
play_game(deck, mode, first_letter, start_index, number_of_cards)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Learn flashcards')
parser.add_argument('filename', help='name of .xlsx file with vocab',
default='RussianVocab.xlsx')
parser.add_argument('category', type=int, help=
'e.g. which letter are you learning? (-1: all, 0:a, 1:б, 2:в, etc.)')
parser.add_argument('start', type=int, help=
'start index (lists are 0-indexed)')
parser.add_argument('num', type=int, help=
"number of cards you'd like to see")
parser.add_argument('mode', type=int)
args = parser.parse_args()
main(args.filename, args.category, args.start, args.num, args.mode)
<|reserved_special_token_1|>
import argparse
import pandas as pd
import random
import time
class Deck:
def __init__(self, num_cols, front, back):
self.flashcards = []
self.num_cols = num_cols
self.front = front
self.back = back
class Flashcard:
def __init__(self, deck, front, back, column, row):
self.deck = deck
self.front = front
self.back = back
self.column = column
self.row = row
self.correct = False
def show_front(self):
r = '{}: {}'.format(self.deck.front, self.front)
return r
def show_back(self):
return '{}: {}'.format(self.deck.back, self.back)
def show_card(self):
return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.
deck.back, self.back)
def show_reverse(self):
return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck
.front, self.front)
def create_deck(filename, num_cols):
df = pd.read_excel(filename)
front = df.columns.values[0]
back = df.columns.values[1]
deck = Deck(num_cols, front, back)
for i in range(num_cols):
front_column = '{}.{}'.format(front, i) if i else front
back_column = '{}.{}'.format(back, i) if i else back
for row in range(df[front_column].size):
f = df[front_column][row]
b = df[back_column][row]
if not (pd.isnull(f) or pd.isnull(b)):
fc = Flashcard(deck, f.strip(), b.strip(), i, row)
deck.flashcards.append(fc)
return deck
def get_cards_from_deck(deck, first_letter, start_index, number_of_cards):
flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or
first_letter == -1]
return flashcards[start_index:number_of_cards + start_index]
def play_game(deck, mode, first_letter, start_index, number_of_cards):
flashcards = get_cards_from_deck(deck, first_letter, start_index,
number_of_cards)
play_cards(mode, deck, flashcards)
def play_cards(mode, deck, cards):
source = deck.front if mode % 2 == 0 else deck.back
target = deck.back if mode % 2 == 0 else deck.front
if mode >= 2:
random.shuffle(cards)
num_cards = len(cards)
start_time = time.time()
for i, fc in enumerate(cards):
source_word = fc.front if mode % 2 == 0 else fc.back
target_word = fc.back if mode % 2 == 0 else fc.front
quiz(fc, source, source_word, target, target_word, i, num_cards)
print('All Done!')
correct = sum(fc.correct == True for fc in cards)
incorrect = len(cards) - correct
print('Correct: {}'.format(correct))
print('Incorrect: {}'.format(incorrect))
if incorrect:
incorrect_cards = [fc for fc in cards if not fc.correct]
print('\n'.join([fc.show_card() for fc in incorrect_cards]))
again = input('review incorrect words (y/n): ')
if again == 'y' or again == '1' or again == 'да':
play_cards(mode, deck, incorrect_cards)
else:
finish_time = time.time()
time_diff = time.gmtime(finish_time - start_time)
avg_time = time.gmtime((finish_time - start_time) / num_cards)
print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))
print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))
def quiz(fc, source_language, source_word, target_language, target_word, i,
number_of_cards):
print('Card {}/{}'.format(i + 1, number_of_cards))
print('{} word: {}'.format(source_language, source_word))
answer = input('Enter {} translation: '.format(target_language))
if is_correct(answer, target_word):
fc.correct = True
print('Correct!')
else:
print('Incorrect! Correct answer was: {}'.format(target_word))
n = input('Enter {} translation for {}: '.format(target_language,
source_word))
def is_correct(answer, target):
return format_for_comparison(answer) == format_for_comparison(target)
def format_for_comparison(word):
word = word.strip().lower()
word = word.split('(')
word[0] = word[0].split(', ')
word[0].sort()
word[0] = ', '.join(word[0])
word = '('.join(word)
return word
def learn_words(deck, first_letter, start_index, number_of_cards):
flashcards = get_cards_from_deck(deck, first_letter, start_index,
number_of_cards)
for i, card in enumerate(flashcards):
print('Card {}/{}'.format(i + 1, number_of_cards))
input('{}\nPractice: '.format(card.show_card()))
input('{}\nPractice: '.format(card.show_front()))
input('{}\nPractice: '.format(card.show_back()))
print('Done! Review learned words:')
for card in flashcards:
print('{}'.format(card.show_card()))
def main(filename, first_letter, start_index, number_of_cards, mode):
num_cols = 9
deck = create_deck(filename, num_cols)
print('Welcome to The Flashcard Learner!')
print("Okay! Let's play!")
if mode == 4:
learn_words(deck, first_letter, start_index, number_of_cards)
else:
play_game(deck, mode, first_letter, start_index, number_of_cards)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Learn flashcards')
parser.add_argument('filename', help='name of .xlsx file with vocab',
default='RussianVocab.xlsx')
parser.add_argument('category', type=int, help=
'e.g. which letter are you learning? (-1: all, 0:a, 1:б, 2:в, etc.)')
parser.add_argument('start', type=int, help=
'start index (lists are 0-indexed)')
parser.add_argument('num', type=int, help=
"number of cards you'd like to see")
parser.add_argument('mode', type=int)
args = parser.parse_args()
main(args.filename, args.category, args.start, args.num, args.mode)
<|reserved_special_token_1|>
import argparse
import pandas as pd
import random
import time
class Deck:
def __init__(self, num_cols, front, back):
self.flashcards = []
self.num_cols = num_cols
self.front = front
self.back = back
class Flashcard:
def __init__(self, deck, front, back, column, row):
self.deck = deck
self.front = front
self.back = back
self.column = column
self.row = row
self.correct = False
def show_front(self):
r = "{}: {}".format(self.deck.front, self.front)
return r
def show_back(self):
return "{}: {}".format(self.deck.back, self.back)
def show_card(self):
return "{}: {}, {}: {}".format(self.deck.front, self.front, self.deck.back, self.back)
def show_reverse(self):
return "{}: {}, {}: {}".format(self.deck.back, self.back, self.deck.front, self.front)
def create_deck(filename, num_cols):
df = pd.read_excel(filename)
front = df.columns.values[0]
back = df.columns.values[1]
deck = Deck(num_cols, front, back)
for i in range(num_cols):
front_column = "{}.{}".format(front, i) if i else front
back_column = "{}.{}".format(back, i) if i else back
for row in range(df[front_column].size):
f = df[front_column][row]
b = df[back_column][row]
if not (pd.isnull(f) or pd.isnull(b)):
fc = Flashcard(deck, f.strip(), b.strip(), i, row)
deck.flashcards.append(fc)
return deck
def get_cards_from_deck(deck, first_letter, start_index, number_of_cards):
flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or first_letter == -1]
return flashcards[start_index:number_of_cards+start_index]
def play_game(deck, mode, first_letter, start_index, number_of_cards):
flashcards = get_cards_from_deck(deck, first_letter, start_index, number_of_cards)
play_cards(mode, deck, flashcards)
def play_cards(mode, deck, cards):
source = deck.front if mode%2 == 0 else deck.back
target = deck.back if mode%2 == 0 else deck.front
if mode >= 2:
random.shuffle(cards)
num_cards = len(cards)
start_time = time.time()
for i, fc in enumerate(cards):
source_word = fc.front if mode%2==0 else fc.back
target_word = fc.back if mode%2==0 else fc.front
quiz(fc, source, source_word, target, target_word, i, num_cards)
print("All Done!")
correct = sum(fc.correct == True for fc in cards)
incorrect = len(cards) - correct
print("Correct: {}".format(correct))
print("Incorrect: {}".format(incorrect))
if (incorrect):
incorrect_cards = [fc for fc in cards if not fc.correct]
print("\n".join([fc.show_card() for fc in incorrect_cards]))
again = input("review incorrect words (y/n): ")
if again == 'y' or again == '1' or again == 'да':
play_cards(mode, deck, incorrect_cards)
else:
finish_time = time.time()
time_diff = time.gmtime(finish_time - start_time)
avg_time = time.gmtime((finish_time - start_time) / num_cards)
print("Total Time: {}".format(time.strftime("%H:%M:%S", time_diff)))
print("Time per card: {}".format(time.strftime("%H:%M:%S", avg_time)))
def quiz(fc, source_language, source_word, target_language, target_word, i, number_of_cards):
print("Card {}/{}".format(i+1, number_of_cards))
print("{} word: {}".format(source_language, source_word))
answer = input("Enter {} translation: ".format(target_language))
if is_correct(answer, target_word):
fc.correct = True
print("Correct!")
else:
print("Incorrect! Correct answer was: {}".format(target_word))
n = input("Enter {} translation for {}: ".format(target_language, source_word))
def is_correct(answer, target):
return format_for_comparison(answer) == format_for_comparison(target)
def format_for_comparison(word):
# strip whitespace and lowercase
word = word.strip().lower()
# pop off the declensions from the end
word = word.split('(')
# sort the list of meanings
word[0] = word[0].split(', ')
word[0].sort()
# join the first part back together:
word[0] = ', '.join(word[0])
# now add the declensions back on
word = '('.join(word)
return word
def learn_words(deck, first_letter, start_index, number_of_cards):
flashcards = get_cards_from_deck(deck, first_letter, start_index, number_of_cards)
for i, card in enumerate(flashcards):
print("Card {}/{}".format(i+1, number_of_cards))
input("{}\nPractice: ".format(card.show_card()))
input("{}\nPractice: ".format(card.show_front()))
input("{}\nPractice: ".format(card.show_back()))
print("Done! Review learned words:")
for card in flashcards:
print("{}".format(card.show_card()))
def main(filename, first_letter, start_index, number_of_cards, mode):
num_cols = 9
deck = create_deck(filename, num_cols)
print("Welcome to The Flashcard Learner!")
# print("Available Modes:")
# print("0: Quiz - Given a word in {}, provide {} translation".format(deck.front.lower(), deck.back.lower()))
# print("1: Quiz - Given a word in {}, provide {} translation".format(deck.back.lower(), deck.front.lower()))
# print("2: Mode 0 with cards given in random order")
# print("3: Mode 1 with cards given in random order")
# print("4: Learning - Shown {} and {} side by side, practice typing both".format(deck.front.lower(), deck.back.lower()))
# mode = int(input("Enter mode: "))
print("Okay! Let's play!")
if mode == 4:
learn_words(deck, first_letter, start_index, number_of_cards)
else:
play_game(deck, mode, first_letter, start_index, number_of_cards)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Learn flashcards")
parser.add_argument("filename", help="name of .xlsx file with vocab", default="RussianVocab.xlsx")
parser.add_argument("category", type=int, help="e.g. which letter are you learning? (-1: all, 0:a, 1:б, 2:в, etc.)")
parser.add_argument("start", type=int, help="start index (lists are 0-indexed)")
parser.add_argument("num", type=int, help="number of cards you'd like to see")
parser.add_argument("mode", type=int)
args = parser.parse_args()
main(args.filename, args.category, args.start, args.num, args.mode)
|
flexible
|
{
"blob_id": "d5903698eb8ed6be531b0cc522d4feff6b79da4e",
"index": 954,
"step-1": "<mask token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\ndef create_deck(filename, num_cols):\n df = pd.read_excel(filename)\n front = df.columns.values[0]\n back = df.columns.values[1]\n deck = Deck(num_cols, front, back)\n for i in range(num_cols):\n front_column = '{}.{}'.format(front, i) if i else front\n back_column = '{}.{}'.format(back, i) if i else back\n for row in range(df[front_column].size):\n f = df[front_column][row]\n b = df[back_column][row]\n if not (pd.isnull(f) or pd.isnull(b)):\n fc = Flashcard(deck, f.strip(), b.strip(), i, row)\n deck.flashcards.append(fc)\n return deck\n\n\ndef get_cards_from_deck(deck, first_letter, start_index, number_of_cards):\n flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or\n first_letter == -1]\n return flashcards[start_index:number_of_cards + start_index]\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\ndef play_cards(mode, deck, cards):\n source = deck.front if mode % 2 == 0 else deck.back\n target = deck.back if mode % 2 == 0 else deck.front\n if mode >= 2:\n random.shuffle(cards)\n num_cards = len(cards)\n start_time = time.time()\n for i, fc in enumerate(cards):\n source_word = fc.front if mode % 2 == 0 else fc.back\n target_word = fc.back if mode % 2 == 0 else fc.front\n quiz(fc, source, source_word, target, target_word, i, num_cards)\n print('All Done!')\n correct = sum(fc.correct == True for fc in cards)\n incorrect = len(cards) - correct\n print('Correct: {}'.format(correct))\n print('Incorrect: {}'.format(incorrect))\n if incorrect:\n incorrect_cards = [fc for fc in cards if not fc.correct]\n print('\\n'.join([fc.show_card() for fc in incorrect_cards]))\n again = input('review incorrect words (y/n): ')\n if again == 'y' or again == '1' or again == 'да':\n play_cards(mode, deck, incorrect_cards)\n else:\n finish_time = time.time()\n time_diff = time.gmtime(finish_time - start_time)\n avg_time = time.gmtime((finish_time - start_time) / num_cards)\n print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))\n print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))\n\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i,\n number_of_cards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n print('{} word: {}'.format(source_language, source_word))\n answer = input('Enter {} translation: '.format(target_language))\n if is_correct(answer, target_word):\n fc.correct = True\n print('Correct!')\n else:\n print('Incorrect! 
Correct answer was: {}'.format(target_word))\n n = input('Enter {} translation for {}: '.format(target_language,\n source_word))\n\n\ndef is_correct(answer, target):\n return format_for_comparison(answer) == format_for_comparison(target)\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n num_cols = 9\n deck = create_deck(filename, num_cols)\n print('Welcome to The Flashcard Learner!')\n print(\"Okay! Let's play!\")\n if mode == 4:\n learn_words(deck, first_letter, start_index, number_of_cards)\n else:\n play_game(deck, mode, first_letter, start_index, number_of_cards)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\ndef create_deck(filename, num_cols):\n df = pd.read_excel(filename)\n front = df.columns.values[0]\n back = df.columns.values[1]\n deck = Deck(num_cols, front, back)\n for i in range(num_cols):\n front_column = '{}.{}'.format(front, i) if i else front\n back_column = '{}.{}'.format(back, i) if i else back\n for row in range(df[front_column].size):\n f = df[front_column][row]\n b = df[back_column][row]\n if not (pd.isnull(f) or pd.isnull(b)):\n fc = Flashcard(deck, f.strip(), b.strip(), i, row)\n deck.flashcards.append(fc)\n return deck\n\n\ndef get_cards_from_deck(deck, first_letter, start_index, number_of_cards):\n flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or\n first_letter == -1]\n return flashcards[start_index:number_of_cards + start_index]\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\ndef play_cards(mode, deck, cards):\n source = deck.front if mode % 2 == 0 else deck.back\n target = deck.back if mode % 2 == 0 else deck.front\n if mode >= 2:\n random.shuffle(cards)\n num_cards = len(cards)\n start_time = time.time()\n for i, fc in enumerate(cards):\n source_word = fc.front if mode % 2 == 0 else fc.back\n target_word = fc.back if mode % 2 == 0 else fc.front\n quiz(fc, source, source_word, target, target_word, i, num_cards)\n print('All Done!')\n correct = sum(fc.correct == True for fc in cards)\n incorrect = len(cards) - correct\n print('Correct: {}'.format(correct))\n print('Incorrect: {}'.format(incorrect))\n if incorrect:\n incorrect_cards = [fc for fc in cards if not fc.correct]\n print('\\n'.join([fc.show_card() for fc in incorrect_cards]))\n again = input('review incorrect words (y/n): ')\n if again == 'y' or again == '1' or again == 'да':\n play_cards(mode, deck, incorrect_cards)\n else:\n finish_time = time.time()\n time_diff = time.gmtime(finish_time - start_time)\n avg_time = time.gmtime((finish_time - start_time) / num_cards)\n print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))\n print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))\n\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i,\n number_of_cards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n print('{} word: {}'.format(source_language, source_word))\n answer = input('Enter {} translation: '.format(target_language))\n if is_correct(answer, target_word):\n fc.correct = True\n print('Correct!')\n else:\n print('Incorrect! 
Correct answer was: {}'.format(target_word))\n n = input('Enter {} translation for {}: '.format(target_language,\n source_word))\n\n\ndef is_correct(answer, target):\n return format_for_comparison(answer) == format_for_comparison(target)\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n num_cols = 9\n deck = create_deck(filename, num_cols)\n print('Welcome to The Flashcard Learner!')\n print(\"Okay! Let's play!\")\n if mode == 4:\n learn_words(deck, first_letter, start_index, number_of_cards)\n else:\n play_game(deck, mode, first_letter, start_index, number_of_cards)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Learn flashcards')\n parser.add_argument('filename', help='name of .xlsx file with vocab',\n default='RussianVocab.xlsx')\n parser.add_argument('category', type=int, help=\n 'e.g. which letter are you learning? (-1: all, 0:a, 1:б, 2:в, etc.)')\n parser.add_argument('start', type=int, help=\n 'start index (lists are 0-indexed)')\n parser.add_argument('num', type=int, help=\n \"number of cards you'd like to see\")\n parser.add_argument('mode', type=int)\n args = parser.parse_args()\n main(args.filename, args.category, args.start, args.num, args.mode)\n",
"step-4": "import argparse\nimport pandas as pd\nimport random\nimport time\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\ndef create_deck(filename, num_cols):\n df = pd.read_excel(filename)\n front = df.columns.values[0]\n back = df.columns.values[1]\n deck = Deck(num_cols, front, back)\n for i in range(num_cols):\n front_column = '{}.{}'.format(front, i) if i else front\n back_column = '{}.{}'.format(back, i) if i else back\n for row in range(df[front_column].size):\n f = df[front_column][row]\n b = df[back_column][row]\n if not (pd.isnull(f) or pd.isnull(b)):\n fc = Flashcard(deck, f.strip(), b.strip(), i, row)\n deck.flashcards.append(fc)\n return deck\n\n\ndef get_cards_from_deck(deck, first_letter, start_index, number_of_cards):\n flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or\n first_letter == -1]\n return flashcards[start_index:number_of_cards + start_index]\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\ndef play_cards(mode, deck, cards):\n source = deck.front if mode % 2 == 0 else deck.back\n target = deck.back if mode % 2 == 0 else deck.front\n if mode >= 2:\n random.shuffle(cards)\n num_cards = len(cards)\n start_time = time.time()\n for i, fc in enumerate(cards):\n source_word = fc.front if mode % 2 == 0 else fc.back\n target_word = fc.back if mode % 2 == 0 else fc.front\n quiz(fc, source, source_word, target, target_word, i, num_cards)\n print('All Done!')\n correct = sum(fc.correct == True for fc in cards)\n incorrect = len(cards) - correct\n print('Correct: {}'.format(correct))\n print('Incorrect: {}'.format(incorrect))\n if incorrect:\n incorrect_cards = [fc for fc in cards if not fc.correct]\n print('\\n'.join([fc.show_card() for fc in incorrect_cards]))\n again = input('review incorrect words (y/n): ')\n if again == 'y' or again == '1' or again == 'да':\n play_cards(mode, deck, incorrect_cards)\n else:\n finish_time = time.time()\n time_diff = time.gmtime(finish_time - start_time)\n avg_time = time.gmtime((finish_time - start_time) / num_cards)\n print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))\n print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))\n\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i,\n number_of_cards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n print('{} word: {}'.format(source_language, source_word))\n answer = input('Enter {} translation: '.format(target_language))\n if is_correct(answer, target_word):\n fc.correct = True\n print('Correct!')\n else:\n print('Incorrect! 
Correct answer was: {}'.format(target_word))\n n = input('Enter {} translation for {}: '.format(target_language,\n source_word))\n\n\ndef is_correct(answer, target):\n return format_for_comparison(answer) == format_for_comparison(target)\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n num_cols = 9\n deck = create_deck(filename, num_cols)\n print('Welcome to The Flashcard Learner!')\n print(\"Okay! Let's play!\")\n if mode == 4:\n learn_words(deck, first_letter, start_index, number_of_cards)\n else:\n play_game(deck, mode, first_letter, start_index, number_of_cards)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Learn flashcards')\n parser.add_argument('filename', help='name of .xlsx file with vocab',\n default='RussianVocab.xlsx')\n parser.add_argument('category', type=int, help=\n 'e.g. which letter are you learning? (-1: all, 0:a, 1:б, 2:в, etc.)')\n parser.add_argument('start', type=int, help=\n 'start index (lists are 0-indexed)')\n parser.add_argument('num', type=int, help=\n \"number of cards you'd like to see\")\n parser.add_argument('mode', type=int)\n args = parser.parse_args()\n main(args.filename, args.category, args.start, args.num, args.mode)\n",
"step-5": "import argparse\nimport pandas as pd\nimport random\nimport time\n\nclass Deck:\n\tdef __init__(self, num_cols, front, back):\n\t\tself.flashcards = []\n\t\tself.num_cols = num_cols\n\t\tself.front = front\n\t\tself.back = back\n\nclass Flashcard:\n\tdef __init__(self, deck, front, back, column, row):\n\t\tself.deck = deck\n\t\tself.front = front\n\t\tself.back = back\n\t\tself.column = column\n\t\tself.row = row\n\t\tself.correct = False\n\n\tdef show_front(self):\n\t\tr = \"{}: {}\".format(self.deck.front, self.front)\n\t\treturn r\n\n\tdef show_back(self):\n\t\treturn \"{}: {}\".format(self.deck.back, self.back)\n\n\tdef show_card(self):\n\t\treturn \"{}: {}, {}: {}\".format(self.deck.front, self.front, self.deck.back, self.back)\n\n\tdef show_reverse(self):\n\t\treturn \"{}: {}, {}: {}\".format(self.deck.back, self.back, self.deck.front, self.front)\n\n\ndef create_deck(filename, num_cols):\n\tdf = pd.read_excel(filename)\n\tfront = df.columns.values[0]\n\tback = df.columns.values[1]\n\n\tdeck = Deck(num_cols, front, back)\n\tfor i in range(num_cols):\n\t\tfront_column = \"{}.{}\".format(front, i) if i else front\n\t\tback_column = \"{}.{}\".format(back, i) if i else back\n\t\tfor row in range(df[front_column].size):\n\t\t\tf = df[front_column][row]\n\t\t\tb = df[back_column][row]\n\t\t\tif not (pd.isnull(f) or pd.isnull(b)):\t\n\t\t\t\tfc = Flashcard(deck, f.strip(), b.strip(), i, row)\n\t\t\t\tdeck.flashcards.append(fc)\n\t\n\treturn deck\n\ndef get_cards_from_deck(deck, first_letter, start_index, number_of_cards):\n\tflashcards = [fc for fc in deck.flashcards if fc.column == first_letter or first_letter == -1]\n\treturn flashcards[start_index:number_of_cards+start_index]\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n\tflashcards = get_cards_from_deck(deck, first_letter, start_index, number_of_cards)\n\tplay_cards(mode, deck, flashcards)\n\ndef play_cards(mode, deck, cards):\n\tsource = deck.front if mode%2 == 0 else deck.back\n\ttarget = deck.back if mode%2 == 0 else deck.front\n\n\tif mode >= 2:\n\t\trandom.shuffle(cards)\n\n\tnum_cards = len(cards)\n\tstart_time = time.time()\n\n\tfor i, fc in enumerate(cards):\n\t\tsource_word = fc.front if mode%2==0 else fc.back\n\t\ttarget_word = fc.back if mode%2==0 else fc.front\n\n\t\tquiz(fc, source, source_word, target, target_word, i, num_cards)\n\n\tprint(\"All Done!\")\n\tcorrect = sum(fc.correct == True for fc in cards)\n\tincorrect = len(cards) - correct\n\tprint(\"Correct: {}\".format(correct))\n\tprint(\"Incorrect: {}\".format(incorrect))\n\n\tif (incorrect):\n\t\tincorrect_cards = [fc for fc in cards if not fc.correct]\n\t\tprint(\"\\n\".join([fc.show_card() for fc in incorrect_cards]))\n\t\tagain = input(\"review incorrect words (y/n): \")\n\t\tif again == 'y' or again == '1' or again == 'да':\n\t\t\tplay_cards(mode, deck, incorrect_cards)\n\telse:\n\t\tfinish_time = time.time()\n\t\ttime_diff = time.gmtime(finish_time - start_time)\n\t\tavg_time = time.gmtime((finish_time - start_time) / num_cards)\n\t\tprint(\"Total Time: {}\".format(time.strftime(\"%H:%M:%S\", time_diff)))\n\t\tprint(\"Time per card: {}\".format(time.strftime(\"%H:%M:%S\", avg_time)))\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i, number_of_cards):\n\t\tprint(\"Card {}/{}\".format(i+1, number_of_cards))\n\t\tprint(\"{} word: {}\".format(source_language, source_word))\n\t\tanswer = input(\"Enter {} translation: \".format(target_language))\n\t\t\n\t\tif is_correct(answer, 
target_word):\n\t\t\tfc.correct = True\n\t\t\tprint(\"Correct!\")\n\t\t\n\t\telse:\n\t\t\tprint(\"Incorrect! Correct answer was: {}\".format(target_word))\n\t\t\tn = input(\"Enter {} translation for {}: \".format(target_language, source_word))\n\n\ndef is_correct(answer, target):\n\treturn format_for_comparison(answer) == format_for_comparison(target)\n\n\ndef format_for_comparison(word):\n\t# strip whitespace and lowercase\n\tword = word.strip().lower()\n\n\t# pop off the declensions from the end\n\tword = word.split('(')\n\n\t# sort the list of meanings\n\tword[0] = word[0].split(', ')\n\tword[0].sort()\n\n\t# join the first part back together:\n\tword[0] = ', '.join(word[0])\n\n\t# now add the declensions back on\n\tword = '('.join(word)\n\t\n\treturn word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n\tflashcards = get_cards_from_deck(deck, first_letter, start_index, number_of_cards)\n\tfor i, card in enumerate(flashcards):\n\t\tprint(\"Card {}/{}\".format(i+1, number_of_cards))\n\t\tinput(\"{}\\nPractice: \".format(card.show_card()))\n\t\tinput(\"{}\\nPractice: \".format(card.show_front()))\n\t\tinput(\"{}\\nPractice: \".format(card.show_back()))\n\t\n\tprint(\"Done! Review learned words:\")\n\tfor card in flashcards:\n\t\tprint(\"{}\".format(card.show_card()))\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n\tnum_cols = 9\n\tdeck = create_deck(filename, num_cols)\n\tprint(\"Welcome to The Flashcard Learner!\")\n\t# print(\"Available Modes:\")\n\t# print(\"0: Quiz - Given a word in {}, provide {} translation\".format(deck.front.lower(), deck.back.lower()))\n\t# print(\"1: Quiz - Given a word in {}, provide {} translation\".format(deck.back.lower(), deck.front.lower()))\n\t# print(\"2: Mode 0 with cards given in random order\")\n\t# print(\"3: Mode 1 with cards given in random order\")\n\t# print(\"4: Learning - Shown {} and {} side by side, practice typing both\".format(deck.front.lower(), deck.back.lower()))\n\t# mode = int(input(\"Enter mode: \"))\n\t\n\tprint(\"Okay! Let's play!\")\n\tif mode == 4:\n\t\tlearn_words(deck, first_letter, start_index, number_of_cards)\n\telse:\n\t\tplay_game(deck, mode, first_letter, start_index, number_of_cards)\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description=\"Learn flashcards\")\n\tparser.add_argument(\"filename\", help=\"name of .xlsx file with vocab\", default=\"RussianVocab.xlsx\")\n\tparser.add_argument(\"category\", type=int, help=\"e.g. which letter are you learning? (-1: all, 0:a, 1:б, 2:в, etc.)\")\n\tparser.add_argument(\"start\", type=int, help=\"start index (lists are 0-indexed)\")\n\tparser.add_argument(\"num\", type=int, help=\"number of cards you'd like to see\")\n\tparser.add_argument(\"mode\", type=int)\n\targs = parser.parse_args()\n\tmain(args.filename, args.category, args.start, args.num, args.mode)\n\n",
"step-ids": [
8,
17,
18,
19,
20
]
}
|
[
8,
17,
18,
19,
20
] |
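
A quick, hypothetical check of the answer-normalization helpers from step-5 above; it assumes the script is saved as flashcards.py (the entry itself gives no filename):

from flashcards import is_correct, format_for_comparison

print(format_for_comparison('TO RUN, to go'))        # 'to go, to run'
print(is_correct('to run, to go', 'To Go, to run'))  # True: order- and case-insensitive
print(is_correct('to walk', 'to go, to run'))        # False
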
class Tienda:
def __init__(self, nombre_tienda, lista_productos = []):
self.nombre_tienda = nombre_tienda
self.lista_productos = lista_productos
def __str__(self):
return f"Nombre de la Tienda: {self.nombre_tienda}\nLista de Productos: {self.lista_productos}\n"
def anhadir_producto(self, producto_nuevo):
self.lista_productos.append(producto_nuevo)
print("# # # # # # # PRODUCTO ANHADIDO # # # # # # #")
producto_nuevo.producto_info()
return self
def vender_producto(self, id):
print("\n# # # # # # # PRODUCTO VENDIDO # # # # # # #")
self.lista_productos.pop(id).producto_info()
return self
def inflacion(self, porcentaje_incremento):
a = 0
for pro in self.lista_productos:
a += 1
print(f"=================Producto 0{a}:=================")
pro.producto_info()
print("AUMENTA su precio a: ")
pro.actualizar_precio(porcentaje_incremento, True).producto_info()
return self
def descuentazo(self, categoria, descuentazo_porcentaje):
a = 0
for product in self.lista_productos:
a += 1
if product.cat_producto == categoria:
print(f"=================Producto 0{a}:=================")
product.producto_info()
print("Se REMATA, y su nuevo precio de remate es: ")
product.actualizar_precio(descuentazo_porcentaje, False).producto_info()
print(f"Descuento de precios a toda la categoria {categoria}, realizado")
return self
#########################################################
##### coso = Tienda("VERDULERIA")
##### print(coso)
##### print("anhadir_P")
##### pera = ("PERA", 1000, "FRUTAS")
##### coco = ("COCO", 1511, "FRUTAS")
##### coso.anhadir_producto(pera)
##### coso.anhadir_producto(coco)
##### print(coso)
##### print("#############################")
##### coso.vender_producto(1)
|
normal
|
{
"blob_id": "0ae5d20b78bf7c23418de55ffd4d81cd5284c6d5",
"index": 8912,
"step-1": "class Tienda:\n\n def __init__(self, nombre_tienda, lista_productos=[]):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n <mask token>\n\n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')\n producto_nuevo.producto_info()\n return self\n <mask token>\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f'=================Producto 0{a}:=================')\n pro.producto_info()\n print('AUMENTA su precio a: ')\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n <mask token>\n",
"step-2": "class Tienda:\n\n def __init__(self, nombre_tienda, lista_productos=[]):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n\n def __str__(self):\n return f\"\"\"Nombre de la Tienda: {self.nombre_tienda}\nLista de Productos: {self.lista_productos}\n\"\"\"\n\n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')\n producto_nuevo.producto_info()\n return self\n <mask token>\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f'=================Producto 0{a}:=================')\n pro.producto_info()\n print('AUMENTA su precio a: ')\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n <mask token>\n",
"step-3": "class Tienda:\n\n def __init__(self, nombre_tienda, lista_productos=[]):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n\n def __str__(self):\n return f\"\"\"Nombre de la Tienda: {self.nombre_tienda}\nLista de Productos: {self.lista_productos}\n\"\"\"\n\n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')\n producto_nuevo.producto_info()\n return self\n\n def vender_producto(self, id):\n print('\\n# # # # # # # PRODUCTO VENDIDO # # # # # # #')\n self.lista_productos.pop(id).producto_info()\n return self\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f'=================Producto 0{a}:=================')\n pro.producto_info()\n print('AUMENTA su precio a: ')\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n <mask token>\n",
"step-4": "class Tienda:\n\n def __init__(self, nombre_tienda, lista_productos=[]):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n\n def __str__(self):\n return f\"\"\"Nombre de la Tienda: {self.nombre_tienda}\nLista de Productos: {self.lista_productos}\n\"\"\"\n\n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')\n producto_nuevo.producto_info()\n return self\n\n def vender_producto(self, id):\n print('\\n# # # # # # # PRODUCTO VENDIDO # # # # # # #')\n self.lista_productos.pop(id).producto_info()\n return self\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f'=================Producto 0{a}:=================')\n pro.producto_info()\n print('AUMENTA su precio a: ')\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n\n def descuentazo(self, categoria, descuentazo_porcentaje):\n a = 0\n for product in self.lista_productos:\n a += 1\n if product.cat_producto == categoria:\n print(f'=================Producto 0{a}:=================')\n product.producto_info()\n print('Se REMATA, y su nuevo precio de remate es: ')\n product.actualizar_precio(descuentazo_porcentaje, False\n ).producto_info()\n print(\n f'Descuento de precios a toda la categoria {categoria}, realizado')\n return self\n",
"step-5": "class Tienda:\n def __init__(self, nombre_tienda, lista_productos = []):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n\n def __str__(self):\n return f\"Nombre de la Tienda: {self.nombre_tienda}\\nLista de Productos: {self.lista_productos}\\n\"\n \n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print(\"# # # # # # # PRODUCTO ANHADIDO # # # # # # #\")\n producto_nuevo.producto_info()\n return self\n\n def vender_producto(self, id):\n print(\"\\n# # # # # # # PRODUCTO VENDIDO # # # # # # #\")\n self.lista_productos.pop(id).producto_info()\n return self\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f\"=================Producto 0{a}:=================\")\n pro.producto_info()\n print(\"AUMENTA su precio a: \")\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n\n def descuentazo(self, categoria, descuentazo_porcentaje):\n a = 0\n for product in self.lista_productos:\n a += 1\n if product.cat_producto == categoria:\n print(f\"=================Producto 0{a}:=================\")\n product.producto_info()\n print(\"Se REMATA, y su nuevo precio de remate es: \")\n product.actualizar_precio(descuentazo_porcentaje, False).producto_info()\n print(f\"Descuento de precios a toda la categoria {categoria}, realizado\")\n return self\n\n#########################################################\n##### coso = Tienda(\"VERDULERIA\")\n##### print(coso)\n##### print(\"anhadir_P\")\n##### pera = (\"PERA\", 1000, \"FRUTAS\")\n##### coco = (\"COCO\", 1511, \"FRUTAS\")\n##### coso.anhadir_producto(pera)\n##### coso.anhadir_producto(coco)\n##### print(coso)\n##### print(\"#############################\")\n##### coso.vender_producto(1)",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
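
The Tienda above calls producto_info(), actualizar_precio() and cat_producto on its products, but no product class ships with this entry (the commented-out test even passes plain tuples, which would fail on those calls). A minimal, hypothetical Producto sketch that satisfies them, plus one usage line:

class Producto:
    def __init__(self, nombre, precio, cat_producto):
        self.nombre = nombre
        self.precio = precio
        self.cat_producto = cat_producto

    def producto_info(self):
        # one-line summary of the product
        print(f"{self.nombre} | {self.precio} | {self.cat_producto}")

    def actualizar_precio(self, porcentaje, aumento):
        # raise (aumento=True) or lower (aumento=False) the price by a percentage
        if aumento:
            self.precio *= 1 + porcentaje / 100
        else:
            self.precio *= 1 - porcentaje / 100
        return self

tienda = Tienda("VERDULERIA")
tienda.anhadir_producto(Producto("PERA", 1000, "FRUTAS"))
tienda.inflacion(10)
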
from unittest import TestCase
from attendance import Member
__author__ = 'colin'
class TestMember(TestCase):
def test_here(self):
member = Member("John", "Doe")
self.assertFalse(member.attended)
member.here()
self.assertTrue(member.attended)
|
normal
|
{
"blob_id": "a6713a4edece14a88bd9c8ddd483ff8e16acdbcc",
"index": 9695,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMember(TestCase):\n\n def test_here(self):\n member = Member('John', 'Doe')\n self.assertFalse(member.attended)\n member.here()\n self.assertTrue(member.attended)\n",
"step-3": "<mask token>\n__author__ = 'colin'\n\n\nclass TestMember(TestCase):\n\n def test_here(self):\n member = Member('John', 'Doe')\n self.assertFalse(member.attended)\n member.here()\n self.assertTrue(member.attended)\n",
"step-4": "from unittest import TestCase\nfrom attendance import Member\n__author__ = 'colin'\n\n\nclass TestMember(TestCase):\n\n def test_here(self):\n member = Member('John', 'Doe')\n self.assertFalse(member.attended)\n member.here()\n self.assertTrue(member.attended)\n",
"step-5": "from unittest import TestCase\nfrom attendance import Member\n\n__author__ = 'colin'\n\n\nclass TestMember(TestCase):\n def test_here(self):\n member = Member(\"John\", \"Doe\")\n self.assertFalse(member.attended)\n member.here()\n self.assertTrue(member.attended)",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
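
The test imports Member from an attendance module that is not part of this entry. A minimal, hypothetical Member that would make the test pass:

class Member:
    def __init__(self, first_name, last_name):
        self.first_name = first_name
        self.last_name = last_name
        self.attended = False  # starts absent

    def here(self):
        self.attended = True  # mark present
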
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
gpio.setmode(gpio.BOARD)
<|reserved_special_token_0|>
gpio.setup(pin, gpio.OUT)
gpio.output(pin, gpio.HIGH)
time.sleep(5)
gpio.output(pin, gpio.LOW)
time.sleep(1)
gpio.cleanup()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
gpio.setmode(gpio.BOARD)
pin = 40
gpio.setup(pin, gpio.OUT)
gpio.output(pin, gpio.HIGH)
time.sleep(5)
gpio.output(pin, gpio.LOW)
time.sleep(1)
gpio.cleanup()
<|reserved_special_token_1|>
import RPi.GPIO as gpio
import time
gpio.setmode(gpio.BOARD)
pin = 40
gpio.setup(pin, gpio.OUT)
gpio.output(pin, gpio.HIGH)
time.sleep(5)
gpio.output(pin, gpio.LOW)
time.sleep(1)
gpio.cleanup()
<|reserved_special_token_1|>
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import RPi.GPIO as gpio # import the RPi.GPIO library, aliased as gpio
import time

gpio.setmode(gpio.BOARD) # use BOARD (physical) pin numbering

pin = 40

gpio.setup(pin, gpio.OUT) # configure pin 40 as an output

gpio.output(pin, gpio.HIGH) # drive pin 40 high
time.sleep(5) # hold high for 5 seconds
gpio.output(pin, gpio.LOW) # drive pin 40 low
time.sleep(1) # wait 1 second

gpio.cleanup() # release the GPIO pins used
|
flexible
|
{
"blob_id": "cfdfc490396546b7af732417b506100357cd9a1f",
"index": 6762,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ngpio.setmode(gpio.BOARD)\n<mask token>\ngpio.setup(pin, gpio.OUT)\ngpio.output(pin, gpio.HIGH)\ntime.sleep(5)\ngpio.output(pin, gpio.LOW)\ntime.sleep(1)\ngpio.cleanup()\n",
"step-3": "<mask token>\ngpio.setmode(gpio.BOARD)\npin = 40\ngpio.setup(pin, gpio.OUT)\ngpio.output(pin, gpio.HIGH)\ntime.sleep(5)\ngpio.output(pin, gpio.LOW)\ntime.sleep(1)\ngpio.cleanup()\n",
"step-4": "import RPi.GPIO as gpio\nimport time\ngpio.setmode(gpio.BOARD)\npin = 40\ngpio.setup(pin, gpio.OUT)\ngpio.output(pin, gpio.HIGH)\ntime.sleep(5)\ngpio.output(pin, gpio.LOW)\ntime.sleep(1)\ngpio.cleanup()\n",
"step-5": "#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n\nimport RPi.GPIO as gpio # 导入Rpi.GPIO库函数命名为GPIO\nimport time\n\ngpio.setmode(gpio.BOARD) #将GPIO编程方式设置为BOARD模式\n\npin = 40\n\ngpio.setup(pin, gpio.OUT) #控制pin号引脚\n\ngpio.output(pin, gpio.HIGH) #11号引脚输出高电平\ntime.sleep(5) #计时0.5秒\ngpio.output(pin, gpio.LOW) #11号引脚输出低电平\ntime.sleep(1) #计时1秒\n\ngpio.cleanup() #释放使用的GPIO引脚",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
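
A common variant of the blink script above, shown only as a sketch (runnable on a Raspberry Pi only): loop the pulse and guarantee cleanup even on Ctrl+C via try/finally.

import RPi.GPIO as gpio
import time

gpio.setmode(gpio.BOARD)
gpio.setup(40, gpio.OUT)
try:
    for _ in range(5):
        gpio.output(40, gpio.HIGH)
        time.sleep(0.5)
        gpio.output(40, gpio.LOW)
        time.sleep(0.5)
finally:
    gpio.cleanup()  # always release the pins
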
number = int(input())
bonus = 0
if number <= 100:
bonus = 5
total_point = number + bonus
elif number > 1000:
bonus = 0.1 * number
total_point = number + bonus
else:
bonus = 0.2 * number
total_point = number + bonus
if number % 2 == 0:
bonus = bonus + 1
total_point = number + bonus
print(bonus)
print(total_point)
elif number % 10 == 5:
bonus = bonus + 2
total_point = number + bonus
print(bonus)
print(total_point)
|
normal
|
{
"blob_id": "7ee3301b55d323d156bd394f8525e37502d19430",
"index": 7669,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif number <= 100:\n bonus = 5\n total_point = number + bonus\nelif number > 1000:\n bonus = 0.1 * number\n total_point = number + bonus\nelse:\n bonus = 0.2 * number\n total_point = number + bonus\nif number % 2 == 0:\n bonus = bonus + 1\n total_point = number + bonus\n print(bonus)\n print(total_point)\nelif number % 10 == 5:\n bonus = bonus + 2\n total_point = number + bonus\n print(bonus)\n print(total_point)\n",
"step-3": "number = int(input())\nbonus = 0\nif number <= 100:\n bonus = 5\n total_point = number + bonus\nelif number > 1000:\n bonus = 0.1 * number\n total_point = number + bonus\nelse:\n bonus = 0.2 * number\n total_point = number + bonus\nif number % 2 == 0:\n bonus = bonus + 1\n total_point = number + bonus\n print(bonus)\n print(total_point)\nelif number % 10 == 5:\n bonus = bonus + 2\n total_point = number + bonus\n print(bonus)\n print(total_point)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
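
The same bonus rules recast as a pure function for quick checking — a sketch only; unlike the stdin script above, it returns in every case, whereas the script prints only in the even / ends-in-5 branches:

def bonus_for(number):
    if number <= 100:
        bonus = 5
    elif number > 1000:
        bonus = 0.1 * number
    else:
        bonus = 0.2 * number
    if number % 2 == 0:
        bonus += 1
    elif number % 10 == 5:
        bonus += 2
    return bonus, number + bonus

print(bonus_for(50))   # (6, 56): <=100 gives 5, plus 1 for being even
print(bonus_for(175))  # (37.0, 212.0): 0.2*175=35, plus 2 for ending in 5
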
import re
# Class with static regex compilations
class RegexCompiles:
# regex for finding product-id in an EMAG link
re_compile_product_id = re.compile('Product-Id=[0-9]*')
# regex for finding the first number
re_compile_id = re.compile('[0-9]+')
# Verifies if a word exists in a text
def find_whole_word(text, word) -> bool:
return re.compile(r'\b({0})\b'.format(word), flags=re.IGNORECASE).search(text)
# Verifies if all the words in a given title (given_title) exist in another title (title)
def verify_card_title(title, given_title) -> bool:
title = title.lower()
given_title = given_title.lower()
for token in given_title.strip().split():
if find_whole_word(title, token) is None:
return False
return True
# Returns the product id from an emag link
def get_product_id(link_to_product) -> int:
s_matched = RegexCompiles.re_compile_product_id.search(link_to_product).group()
id_matched = RegexCompiles.re_compile_id.search(s_matched).group()
return int(id_matched)
|
normal
|
{
"blob_id": "b1c06e9c5516a378c0bbce2ce9e17afaeae01928",
"index": 668,
"step-1": "<mask token>\n\n\nclass RegexCompiles:\n re_compile_product_id = re.compile('Product-Id=[0-9]*')\n re_compile_id = re.compile('[0-9]+')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RegexCompiles:\n re_compile_product_id = re.compile('Product-Id=[0-9]*')\n re_compile_id = re.compile('[0-9]+')\n\n\n<mask token>\n\n\ndef verify_card_title(title, given_title) ->bool:\n title = title.lower()\n given_title = given_title.lower()\n for token in given_title.strip().split():\n if find_whole_word(title, token) is None:\n return False\n return True\n\n\ndef get_product_id(link_to_product) ->int:\n s_matched = RegexCompiles.re_compile_product_id.search(link_to_product\n ).group()\n id_matched = RegexCompiles.re_compile_id.search(s_matched).group()\n return int(id_matched)\n",
"step-3": "<mask token>\n\n\nclass RegexCompiles:\n re_compile_product_id = re.compile('Product-Id=[0-9]*')\n re_compile_id = re.compile('[0-9]+')\n\n\ndef find_whole_word(text, word) ->bool:\n return re.compile('\\\\b({0})\\\\b'.format(word), flags=re.IGNORECASE).search(\n text)\n\n\ndef verify_card_title(title, given_title) ->bool:\n title = title.lower()\n given_title = given_title.lower()\n for token in given_title.strip().split():\n if find_whole_word(title, token) is None:\n return False\n return True\n\n\ndef get_product_id(link_to_product) ->int:\n s_matched = RegexCompiles.re_compile_product_id.search(link_to_product\n ).group()\n id_matched = RegexCompiles.re_compile_id.search(s_matched).group()\n return int(id_matched)\n",
"step-4": "import re\n\n\nclass RegexCompiles:\n re_compile_product_id = re.compile('Product-Id=[0-9]*')\n re_compile_id = re.compile('[0-9]+')\n\n\ndef find_whole_word(text, word) ->bool:\n return re.compile('\\\\b({0})\\\\b'.format(word), flags=re.IGNORECASE).search(\n text)\n\n\ndef verify_card_title(title, given_title) ->bool:\n title = title.lower()\n given_title = given_title.lower()\n for token in given_title.strip().split():\n if find_whole_word(title, token) is None:\n return False\n return True\n\n\ndef get_product_id(link_to_product) ->int:\n s_matched = RegexCompiles.re_compile_product_id.search(link_to_product\n ).group()\n id_matched = RegexCompiles.re_compile_id.search(s_matched).group()\n return int(id_matched)\n",
"step-5": "import re\n\n\n# Class with static regex compilations\nclass RegexCompiles:\n # regex for finding product-id in an EMAG link\n re_compile_product_id = re.compile('Product-Id=[0-9]*')\n # regex for finding the first number\n re_compile_id = re.compile('[0-9]+')\n\n\n# Verifies if a word exists in a text\ndef find_whole_word(text, word) -> bool:\n return re.compile(r'\\b({0})\\b'.format(word), flags=re.IGNORECASE).search(text)\n\n\n# Verifies if all the words in a given title (given_title) exist in another title (title)\ndef verify_card_title(title, given_title) -> bool:\n title = title.lower()\n given_title = given_title.lower()\n for token in given_title.strip().split():\n if find_whole_word(title, token) is None:\n return False\n return True\n\n\n# Returns the product id from an emag link\ndef get_product_id(link_to_product) -> int:\n s_matched = RegexCompiles.re_compile_product_id.search(link_to_product).group()\n id_matched = RegexCompiles.re_compile_id.search(s_matched).group()\n return int(id_matched)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
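
Hypothetical usage of the helpers above; the URL is invented to match the Product-Id pattern the regexes look for:

link = 'https://www.emag.ro/telefon/pd/ABC123/?X-Search=1&Product-Id=12345'
print(get_product_id(link))  # 12345
print(verify_card_title('Telefon mobil Samsung Galaxy S10', 'samsung s10'))  # True
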
<|reserved_special_token_0|>
def get_config(p, section, key, env_var, default, boolean=False, integer=
False, floating=False, islist=False):
""" return a configuration variable with casting """
value = _get_config(p, section, key, env_var, default)
if boolean:
return mk_boolean(value)
if value and integer:
return int(value)
if value and floating:
return float(value)
if value and islist:
return [x.strip() for x in value.split(',')]
return value
def _get_config(p, section, key, env_var, default):
""" helper function for get_config """
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
except:
return default
return default
def load_config_file():
""" Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner """
p = SafeConfigParser()
path0 = os.getenv('SOJOURNER_CONFIG', None)
if path0 is not None:
path0 = os.path.expanduser(path0)
path1 = os.getcwd() + '/sojourner.cfg'
path2 = os.path.expanduser('~/.sojourner.cfg')
path3 = '/etc/sojourner/sojourner.cfg'
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
print('Error reading config file: \n{0}'.format(e))
sys.exit(1)
return p
return None
def shell_expand_path(path):
""" shell_expand_path is needed as os.path.expanduser does not work
when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE """
if path:
path = os.path.expanduser(os.path.expandvars(path))
return path
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mk_boolean(value):
if value is None:
return False
val = str(value)
if val.lower() in ['true', 't', 'y', '1', 'yes']:
return True
else:
return False
def get_config(p, section, key, env_var, default, boolean=False, integer=
False, floating=False, islist=False):
""" return a configuration variable with casting """
value = _get_config(p, section, key, env_var, default)
if boolean:
return mk_boolean(value)
if value and integer:
return int(value)
if value and floating:
return float(value)
if value and islist:
return [x.strip() for x in value.split(',')]
return value
def _get_config(p, section, key, env_var, default):
""" helper function for get_config """
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
except:
return default
return default
def load_config_file():
""" Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner """
p = SafeConfigParser()
path0 = os.getenv('SOJOURNER_CONFIG', None)
if path0 is not None:
path0 = os.path.expanduser(path0)
path1 = os.getcwd() + '/sojourner.cfg'
path2 = os.path.expanduser('~/.sojourner.cfg')
path3 = '/etc/sojourner/sojourner.cfg'
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
print('Error reading config file: \n{0}'.format(e))
sys.exit(1)
return p
return None
def shell_expand_path(path):
""" shell_expand_path is needed as os.path.expanduser does not work
when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE """
if path:
path = os.path.expanduser(os.path.expandvars(path))
return path
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__metaclass__ = type
<|reserved_special_token_0|>
def mk_boolean(value):
if value is None:
return False
val = str(value)
if val.lower() in ['true', 't', 'y', '1', 'yes']:
return True
else:
return False
def get_config(p, section, key, env_var, default, boolean=False, integer=
False, floating=False, islist=False):
""" return a configuration variable with casting """
value = _get_config(p, section, key, env_var, default)
if boolean:
return mk_boolean(value)
if value and integer:
return int(value)
if value and floating:
return float(value)
if value and islist:
return [x.strip() for x in value.split(',')]
return value
def _get_config(p, section, key, env_var, default):
""" helper function for get_config """
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
except:
return default
return default
def load_config_file():
""" Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner """
p = SafeConfigParser()
path0 = os.getenv('SOJOURNER_CONFIG', None)
if path0 is not None:
path0 = os.path.expanduser(path0)
path1 = os.getcwd() + '/sojourner.cfg'
path2 = os.path.expanduser('~/.sojourner.cfg')
path3 = '/etc/sojourner/sojourner.cfg'
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
print('Error reading config file: \n{0}'.format(e))
sys.exit(1)
return p
return None
def shell_expand_path(path):
""" shell_expand_path is needed as os.path.expanduser does not work
when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE """
if path:
path = os.path.expanduser(os.path.expandvars(path))
return path
p = load_config_file()
active_user = pwd.getpwuid(os.geteuid())[0]
DEFAULTS = 'defaults'
DEFAULT_SOJOURNER_HOME = shell_expand_path(get_config(p, DEFAULTS,
'sojourner_home', 'DEFAULT_SOJOURNER_HOME', os.environ['HOME'] +
'/Sojourner'))
DEFAULT_DB_ENGINE = get_config(p, DEFAULTS, 'db_engine',
'SOJOURNER_DB_ENGINE', 'sqlite')
DEFAULT_DB_HOST = get_config(p, DEFAULTS, 'db_host', 'SOJOURNER_DB_HOST',
'localhost')
DEFAULT_DB_PORT = get_config(p, DEFAULTS, 'db_port', 'SOJOURNER_DB_PORT',
'3306')
DEFAULT_DB_USER = get_config(p, DEFAULTS, 'db_user', 'SOJOURNER_DB_USER',
'sojourner')
DEFAULT_DB_PASSWD = get_config(p, DEFAULTS, 'db_passwd',
'SOJOURNER_DB_PASSWD', 'sojourner')
DEFAULT_DB_DBNAME = get_config(p, DEFAULTS, 'db_dbname',
'SOJOURNER_DB_DBNAME', 'sojourner')
SOJOURNER_PROVISIONER = get_config(p, 'sojourner', 'provisioner',
'SOJOURNER_PROVISIONER', 'ansible')
SOJOURNER_ANSIBLE_ROLES = get_config(p, 'ansible', 'ansible_roles',
'SOJOURNER_ANSIBLE_ROLES', DEFAULT_SOJOURNER_HOME + 'Ansible_Roles')
SOJOURNER_CHEF_COOKBOOKS = get_config(p, 'chef', 'chef_cookbooks',
'SOJOURNER_CHEF_COOKBOOKS', DEFAULT_SOJOURNER_HOME + 'Chef_Cookbooks')
<|reserved_special_token_1|>
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import pwd
import sys
from string import ascii_letters, digits
from ConfigParser import SafeConfigParser
def mk_boolean(value):
if value is None:
return False
val = str(value)
if val.lower() in ['true', 't', 'y', '1', 'yes']:
return True
else:
return False
def get_config(p, section, key, env_var, default, boolean=False, integer=
False, floating=False, islist=False):
""" return a configuration variable with casting """
value = _get_config(p, section, key, env_var, default)
if boolean:
return mk_boolean(value)
if value and integer:
return int(value)
if value and floating:
return float(value)
if value and islist:
return [x.strip() for x in value.split(',')]
return value
def _get_config(p, section, key, env_var, default):
""" helper function for get_config """
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
except:
return default
return default
def load_config_file():
""" Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner """
p = SafeConfigParser()
path0 = os.getenv('SOJOURNER_CONFIG', None)
if path0 is not None:
path0 = os.path.expanduser(path0)
path1 = os.getcwd() + '/sojourner.cfg'
path2 = os.path.expanduser('~/.sojourner.cfg')
path3 = '/etc/sojourner/sojourner.cfg'
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
print('Error reading config file: \n{0}'.format(e))
sys.exit(1)
return p
return None
def shell_expand_path(path):
""" shell_expand_path is needed as os.path.expanduser does not work
when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE """
if path:
path = os.path.expanduser(os.path.expandvars(path))
return path
p = load_config_file()
active_user = pwd.getpwuid(os.geteuid())[0]
DEFAULTS = 'defaults'
DEFAULT_SOJOURNER_HOME = shell_expand_path(get_config(p, DEFAULTS,
'sojourner_home', 'DEFAULT_SOJOURNER_HOME', os.environ['HOME'] +
'/Sojourner'))
DEFAULT_DB_ENGINE = get_config(p, DEFAULTS, 'db_engine',
'SOJOURNER_DB_ENGINE', 'sqlite')
DEFAULT_DB_HOST = get_config(p, DEFAULTS, 'db_host', 'SOJOURNER_DB_HOST',
'localhost')
DEFAULT_DB_PORT = get_config(p, DEFAULTS, 'db_port', 'SOJOURNER_DB_PORT',
'3306')
DEFAULT_DB_USER = get_config(p, DEFAULTS, 'db_user', 'SOJOURNER_DB_USER',
'sojourner')
DEFAULT_DB_PASSWD = get_config(p, DEFAULTS, 'db_passwd',
'SOJOURNER_DB_PASSWD', 'sojourner')
DEFAULT_DB_DBNAME = get_config(p, DEFAULTS, 'db_dbname',
'SOJOURNER_DB_DBNAME', 'sojourner')
SOJOURNER_PROVISIONER = get_config(p, 'sojourner', 'provisioner',
'SOJOURNER_PROVISIONER', 'ansible')
SOJOURNER_ANSIBLE_ROLES = get_config(p, 'ansible', 'ansible_roles',
'SOJOURNER_ANSIBLE_ROLES', DEFAULT_SOJOURNER_HOME + 'Ansible_Roles')
SOJOURNER_CHEF_COOKBOOKS = get_config(p, 'chef', 'chef_cookbooks',
'SOJOURNER_CHEF_COOKBOOKS', DEFAULT_SOJOURNER_HOME + 'Chef_Cookbooks')
<|reserved_special_token_1|>
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pwd
import sys
from string import ascii_letters, digits
from ConfigParser import SafeConfigParser
# copied from utils, avoid circular reference fun :)
def mk_boolean(value):
if value is None:
return False
val = str(value)
if val.lower() in [ "true", "t", "y", "1", "yes" ]:
return True
else:
return False
def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False):
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
return mk_boolean(value)
if value and integer:
return int(value)
if value and floating:
return float(value)
if value and islist:
return [x.strip() for x in value.split(',')]
return value
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
except:
return default
return default
def load_config_file():
''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner '''
p = SafeConfigParser()
path0 = os.getenv("SOJOURNER_CONFIG", None)
if path0 is not None:
path0 = os.path.expanduser(path0)
path1 = os.getcwd() + "/sojourner.cfg"
path2 = os.path.expanduser("~/.sojourner.cfg")
path3 = "/etc/sojourner/sojourner.cfg"
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
print("Error reading config file: \n{0}".format(e))
sys.exit(1)
return p
return None
def shell_expand_path(path):
''' shell_expand_path is needed as os.path.expanduser does not work
when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE '''
if path:
path = os.path.expanduser(os.path.expandvars(path))
return path
p = load_config_file()
active_user = pwd.getpwuid(os.geteuid())[0]
# sections in config file
DEFAULTS='defaults'
# configurable things
# def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False):
DEFAULT_SOJOURNER_HOME = shell_expand_path(get_config(p, DEFAULTS, 'sojourner_home','DEFAULT_SOJOURNER_HOME',os.environ['HOME']+'/Sojourner'))
DEFAULT_DB_ENGINE = get_config(p, DEFAULTS, 'db_engine', 'SOJOURNER_DB_ENGINE', 'sqlite')
DEFAULT_DB_HOST = get_config(p, DEFAULTS, 'db_host', 'SOJOURNER_DB_HOST', 'localhost')
DEFAULT_DB_PORT = get_config(p, DEFAULTS, 'db_port', 'SOJOURNER_DB_PORT', '3306')
DEFAULT_DB_USER = get_config(p, DEFAULTS, 'db_user', 'SOJOURNER_DB_USER', 'sojourner')
DEFAULT_DB_PASSWD = get_config(p, DEFAULTS, 'db_passwd', 'SOJOURNER_DB_PASSWD', 'sojourner')
DEFAULT_DB_DBNAME = get_config(p, DEFAULTS, 'db_dbname', 'SOJOURNER_DB_DBNAME', 'sojourner')
SOJOURNER_PROVISIONER = get_config(p, 'sojourner', 'provisioner', 'SOJOURNER_PROVISIONER', 'ansible')
# ANSIBLE RELATED
SOJOURNER_ANSIBLE_ROLES = get_config(p, 'ansible', 'ansible_roles', 'SOJOURNER_ANSIBLE_ROLES', DEFAULT_SOJOURNER_HOME + 'Ansible_Roles')
# CHEF RELATED
SOJOURNER_CHEF_COOKBOOKS = get_config(p, 'chef', 'chef_cookbooks', 'SOJOURNER_CHEF_COOKBOOKS', DEFAULT_SOJOURNER_HOME + 'Chef_Cookbooks')
|
flexible
|
{
"blob_id": "63bd8a15dd489844968f46c4b0ffe157d567537a",
"index": 8044,
"step-1": "<mask token>\n\n\ndef get_config(p, section, key, env_var, default, boolean=False, integer=\n False, floating=False, islist=False):\n \"\"\" return a configuration variable with casting \"\"\"\n value = _get_config(p, section, key, env_var, default)\n if boolean:\n return mk_boolean(value)\n if value and integer:\n return int(value)\n if value and floating:\n return float(value)\n if value and islist:\n return [x.strip() for x in value.split(',')]\n return value\n\n\ndef _get_config(p, section, key, env_var, default):\n \"\"\" helper function for get_config \"\"\"\n if env_var is not None:\n value = os.environ.get(env_var, None)\n if value is not None:\n return value\n if p is not None:\n try:\n return p.get(section, key, raw=True)\n except:\n return default\n return default\n\n\ndef load_config_file():\n \"\"\" Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner \"\"\"\n p = SafeConfigParser()\n path0 = os.getenv('SOJOURNER_CONFIG', None)\n if path0 is not None:\n path0 = os.path.expanduser(path0)\n path1 = os.getcwd() + '/sojourner.cfg'\n path2 = os.path.expanduser('~/.sojourner.cfg')\n path3 = '/etc/sojourner/sojourner.cfg'\n for path in [path0, path1, path2, path3]:\n if path is not None and os.path.exists(path):\n try:\n p.read(path)\n except configparser.Error as e:\n print('Error reading config file: \\n{0}'.format(e))\n sys.exit(1)\n return p\n return None\n\n\ndef shell_expand_path(path):\n \"\"\" shell_expand_path is needed as os.path.expanduser does not work\n when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE \"\"\"\n if path:\n path = os.path.expanduser(os.path.expandvars(path))\n return path\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mk_boolean(value):\n if value is None:\n return False\n val = str(value)\n if val.lower() in ['true', 't', 'y', '1', 'yes']:\n return True\n else:\n return False\n\n\ndef get_config(p, section, key, env_var, default, boolean=False, integer=\n False, floating=False, islist=False):\n \"\"\" return a configuration variable with casting \"\"\"\n value = _get_config(p, section, key, env_var, default)\n if boolean:\n return mk_boolean(value)\n if value and integer:\n return int(value)\n if value and floating:\n return float(value)\n if value and islist:\n return [x.strip() for x in value.split(',')]\n return value\n\n\ndef _get_config(p, section, key, env_var, default):\n \"\"\" helper function for get_config \"\"\"\n if env_var is not None:\n value = os.environ.get(env_var, None)\n if value is not None:\n return value\n if p is not None:\n try:\n return p.get(section, key, raw=True)\n except:\n return default\n return default\n\n\ndef load_config_file():\n \"\"\" Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner \"\"\"\n p = SafeConfigParser()\n path0 = os.getenv('SOJOURNER_CONFIG', None)\n if path0 is not None:\n path0 = os.path.expanduser(path0)\n path1 = os.getcwd() + '/sojourner.cfg'\n path2 = os.path.expanduser('~/.sojourner.cfg')\n path3 = '/etc/sojourner/sojourner.cfg'\n for path in [path0, path1, path2, path3]:\n if path is not None and os.path.exists(path):\n try:\n p.read(path)\n except configparser.Error as e:\n print('Error reading config file: \\n{0}'.format(e))\n sys.exit(1)\n return p\n return None\n\n\ndef shell_expand_path(path):\n \"\"\" shell_expand_path is needed as os.path.expanduser does not work\n when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE \"\"\"\n if path:\n path = os.path.expanduser(os.path.expandvars(path))\n return path\n\n\n<mask token>\n",
"step-3": "<mask token>\n__metaclass__ = type\n<mask token>\n\n\ndef mk_boolean(value):\n if value is None:\n return False\n val = str(value)\n if val.lower() in ['true', 't', 'y', '1', 'yes']:\n return True\n else:\n return False\n\n\ndef get_config(p, section, key, env_var, default, boolean=False, integer=\n False, floating=False, islist=False):\n \"\"\" return a configuration variable with casting \"\"\"\n value = _get_config(p, section, key, env_var, default)\n if boolean:\n return mk_boolean(value)\n if value and integer:\n return int(value)\n if value and floating:\n return float(value)\n if value and islist:\n return [x.strip() for x in value.split(',')]\n return value\n\n\ndef _get_config(p, section, key, env_var, default):\n \"\"\" helper function for get_config \"\"\"\n if env_var is not None:\n value = os.environ.get(env_var, None)\n if value is not None:\n return value\n if p is not None:\n try:\n return p.get(section, key, raw=True)\n except:\n return default\n return default\n\n\ndef load_config_file():\n \"\"\" Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner \"\"\"\n p = SafeConfigParser()\n path0 = os.getenv('SOJOURNER_CONFIG', None)\n if path0 is not None:\n path0 = os.path.expanduser(path0)\n path1 = os.getcwd() + '/sojourner.cfg'\n path2 = os.path.expanduser('~/.sojourner.cfg')\n path3 = '/etc/sojourner/sojourner.cfg'\n for path in [path0, path1, path2, path3]:\n if path is not None and os.path.exists(path):\n try:\n p.read(path)\n except configparser.Error as e:\n print('Error reading config file: \\n{0}'.format(e))\n sys.exit(1)\n return p\n return None\n\n\ndef shell_expand_path(path):\n \"\"\" shell_expand_path is needed as os.path.expanduser does not work\n when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE \"\"\"\n if path:\n path = os.path.expanduser(os.path.expandvars(path))\n return path\n\n\np = load_config_file()\nactive_user = pwd.getpwuid(os.geteuid())[0]\nDEFAULTS = 'defaults'\nDEFAULT_SOJOURNER_HOME = shell_expand_path(get_config(p, DEFAULTS,\n 'sojourner_home', 'DEFAULT_SOJOURNER_HOME', os.environ['HOME'] +\n '/Sojourner'))\nDEFAULT_DB_ENGINE = get_config(p, DEFAULTS, 'db_engine',\n 'SOJOURNER_DB_ENGINE', 'sqlite')\nDEFAULT_DB_HOST = get_config(p, DEFAULTS, 'db_host', 'SOJOURNER_DB_HOST',\n 'localhost')\nDEFAULT_DB_PORT = get_config(p, DEFAULTS, 'db_port', 'SOJOURNER_DB_PORT',\n '3306')\nDEFAULT_DB_USER = get_config(p, DEFAULTS, 'db_user', 'SOJOURNER_DB_USER',\n 'sojourner')\nDEFAULT_DB_PASSWD = get_config(p, DEFAULTS, 'db_passwd',\n 'SOJOURNER_DB_PASSWD', 'sojourner')\nDEFAULT_DB_DBNAME = get_config(p, DEFAULTS, 'db_dbname',\n 'SOJOURNER_DB_DBNAME', 'sojourner')\nSOJOURNER_PROVISIONER = get_config(p, 'sojourner', 'provisioner',\n 'SOJOURNER_PROVISIONER', 'ansible')\nSOJOURNER_ANSIBLE_ROLES = get_config(p, 'ansible', 'ansible_roles',\n 'SOJOURNER_ANSIBLE_ROLES', DEFAULT_SOJOURNER_HOME + 'Ansible_Roles')\nSOJOURNER_CHEF_COOKBOOKS = get_config(p, 'chef', 'chef_cookbooks',\n 'SOJOURNER_CHEF_COOKBOOKS', DEFAULT_SOJOURNER_HOME + 'Chef_Cookbooks')\n",
"step-4": "from __future__ import absolute_import, division, print_function\n__metaclass__ = type\nimport os\nimport pwd\nimport sys\nfrom string import ascii_letters, digits\nfrom ConfigParser import SafeConfigParser\n\n\ndef mk_boolean(value):\n if value is None:\n return False\n val = str(value)\n if val.lower() in ['true', 't', 'y', '1', 'yes']:\n return True\n else:\n return False\n\n\ndef get_config(p, section, key, env_var, default, boolean=False, integer=\n False, floating=False, islist=False):\n \"\"\" return a configuration variable with casting \"\"\"\n value = _get_config(p, section, key, env_var, default)\n if boolean:\n return mk_boolean(value)\n if value and integer:\n return int(value)\n if value and floating:\n return float(value)\n if value and islist:\n return [x.strip() for x in value.split(',')]\n return value\n\n\ndef _get_config(p, section, key, env_var, default):\n \"\"\" helper function for get_config \"\"\"\n if env_var is not None:\n value = os.environ.get(env_var, None)\n if value is not None:\n return value\n if p is not None:\n try:\n return p.get(section, key, raw=True)\n except:\n return default\n return default\n\n\ndef load_config_file():\n \"\"\" Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner \"\"\"\n p = SafeConfigParser()\n path0 = os.getenv('SOJOURNER_CONFIG', None)\n if path0 is not None:\n path0 = os.path.expanduser(path0)\n path1 = os.getcwd() + '/sojourner.cfg'\n path2 = os.path.expanduser('~/.sojourner.cfg')\n path3 = '/etc/sojourner/sojourner.cfg'\n for path in [path0, path1, path2, path3]:\n if path is not None and os.path.exists(path):\n try:\n p.read(path)\n except configparser.Error as e:\n print('Error reading config file: \\n{0}'.format(e))\n sys.exit(1)\n return p\n return None\n\n\ndef shell_expand_path(path):\n \"\"\" shell_expand_path is needed as os.path.expanduser does not work\n when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE \"\"\"\n if path:\n path = os.path.expanduser(os.path.expandvars(path))\n return path\n\n\np = load_config_file()\nactive_user = pwd.getpwuid(os.geteuid())[0]\nDEFAULTS = 'defaults'\nDEFAULT_SOJOURNER_HOME = shell_expand_path(get_config(p, DEFAULTS,\n 'sojourner_home', 'DEFAULT_SOJOURNER_HOME', os.environ['HOME'] +\n '/Sojourner'))\nDEFAULT_DB_ENGINE = get_config(p, DEFAULTS, 'db_engine',\n 'SOJOURNER_DB_ENGINE', 'sqlite')\nDEFAULT_DB_HOST = get_config(p, DEFAULTS, 'db_host', 'SOJOURNER_DB_HOST',\n 'localhost')\nDEFAULT_DB_PORT = get_config(p, DEFAULTS, 'db_port', 'SOJOURNER_DB_PORT',\n '3306')\nDEFAULT_DB_USER = get_config(p, DEFAULTS, 'db_user', 'SOJOURNER_DB_USER',\n 'sojourner')\nDEFAULT_DB_PASSWD = get_config(p, DEFAULTS, 'db_passwd',\n 'SOJOURNER_DB_PASSWD', 'sojourner')\nDEFAULT_DB_DBNAME = get_config(p, DEFAULTS, 'db_dbname',\n 'SOJOURNER_DB_DBNAME', 'sojourner')\nSOJOURNER_PROVISIONER = get_config(p, 'sojourner', 'provisioner',\n 'SOJOURNER_PROVISIONER', 'ansible')\nSOJOURNER_ANSIBLE_ROLES = get_config(p, 'ansible', 'ansible_roles',\n 'SOJOURNER_ANSIBLE_ROLES', DEFAULT_SOJOURNER_HOME + 'Ansible_Roles')\nSOJOURNER_CHEF_COOKBOOKS = get_config(p, 'chef', 'chef_cookbooks',\n 'SOJOURNER_CHEF_COOKBOOKS', DEFAULT_SOJOURNER_HOME + 'Chef_Cookbooks')\n",
"step-5": "\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\nimport pwd\nimport sys\n\nfrom string import ascii_letters, digits\nfrom ConfigParser import SafeConfigParser\n\n# copied from utils, avoid circular reference fun :)\ndef mk_boolean(value):\n if value is None:\n return False\n val = str(value)\n if val.lower() in [ \"true\", \"t\", \"y\", \"1\", \"yes\" ]:\n return True\n else:\n return False\n\ndef get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False):\n ''' return a configuration variable with casting '''\n value = _get_config(p, section, key, env_var, default)\n if boolean:\n return mk_boolean(value)\n if value and integer:\n return int(value)\n if value and floating:\n return float(value)\n if value and islist:\n return [x.strip() for x in value.split(',')]\n return value\n\ndef _get_config(p, section, key, env_var, default):\n ''' helper function for get_config '''\n if env_var is not None:\n value = os.environ.get(env_var, None)\n if value is not None:\n return value\n if p is not None:\n try:\n return p.get(section, key, raw=True)\n except:\n return default\n return default\n\ndef load_config_file():\n ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner '''\n\n p = SafeConfigParser()\n\n path0 = os.getenv(\"SOJOURNER_CONFIG\", None)\n if path0 is not None:\n path0 = os.path.expanduser(path0)\n path1 = os.getcwd() + \"/sojourner.cfg\"\n path2 = os.path.expanduser(\"~/.sojourner.cfg\")\n path3 = \"/etc/sojourner/sojourner.cfg\"\n\n for path in [path0, path1, path2, path3]:\n if path is not None and os.path.exists(path):\n try:\n p.read(path)\n except configparser.Error as e:\n print(\"Error reading config file: \\n{0}\".format(e))\n sys.exit(1)\n return p\n return None\n\ndef shell_expand_path(path):\n ''' shell_expand_path is needed as os.path.expanduser does not work\n when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE '''\n if path:\n path = os.path.expanduser(os.path.expandvars(path))\n return path\n\np = load_config_file()\n\nactive_user = pwd.getpwuid(os.geteuid())[0]\n\n# sections in config file\nDEFAULTS='defaults'\n\n# configurable things\n# \t\t\tdef get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False):\nDEFAULT_SOJOURNER_HOME = shell_expand_path(get_config(p, DEFAULTS, 'sojourner_home','DEFAULT_SOJOURNER_HOME',os.environ['HOME']+'/Sojourner'))\nDEFAULT_DB_ENGINE = get_config(p, DEFAULTS, 'db_engine', 'SOJOURNER_DB_ENGINE', 'sqlite')\nDEFAULT_DB_HOST = get_config(p, DEFAULTS, 'db_host', 'SOJOURNER_DB_HOST', 'localhost')\nDEFAULT_DB_PORT = get_config(p, DEFAULTS, 'db_port', 'SOJOURNER_DB_PORT', '3306')\nDEFAULT_DB_USER = get_config(p, DEFAULTS, 'db_user', 'SOJOURNER_DB_USER', 'sojourner')\nDEFAULT_DB_PASSWD = get_config(p, DEFAULTS, 'db_passwd', 'SOJOURNER_DB_PASSWD', 'sojourner')\nDEFAULT_DB_DBNAME = get_config(p, DEFAULTS, 'db_dbname', 'SOJOURNER_DB_DBNAME', 'sojourner')\n\n\nSOJOURNER_PROVISIONER = get_config(p, 'sojourner', 'provisioner', 'SOJOURNER_PROVISIONER', 'ansible')\n\n# ANSIBLE RELATED\nSOJOURNER_ANSIBLE_ROLES = get_config(p, 'ansible', 'ansible_roles', 'SOJOURNER_ANSIBLE_ROLES', DEFAULT_SOJOURNER_HOME + 'Ansible_Roles')\n\n# CHEF RELATED\nSOJOURNER_CHEF_COOKBOOKS = get_config(p, 'chef', 'chef_cookbooks', 'SOJOURNER_CHEF_COOKBOOKS', DEFAULT_SOJOURNER_HOME + 'Chef_Cookbooks')\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
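
For reference, a sketch of a sojourner.cfg matching the sections and keys the loader above reads; all values are illustrative, not taken from the entry:

; searched at $SOJOURNER_CONFIG, ./sojourner.cfg, ~/.sojourner.cfg,
; /etc/sojourner/sojourner.cfg (first match wins)
[defaults]
sojourner_home = ~/Sojourner
db_engine = mysql
db_host = localhost
db_port = 3306
db_user = sojourner
db_passwd = sojourner
db_dbname = sojourner

[sojourner]
provisioner = ansible

[ansible]
ansible_roles = ~/Sojourner/Ansible_Roles

[chef]
chef_cookbooks = ~/Sojourner/Chef_Cookbooks
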
class Solution(object):
def sortArrayByParityII(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
i = 0
for j in range(1, len(A), 2):
if A[j] % 2 == 1:
continue
else:
while i + 2 < len(A) and A[i] % 2 == 0:
i += 2
A[i], A[j] = A[j], A[i]
i += 2
return A
|
normal
|
{
"blob_id": "429af603bf8f1c003799c3d94c0ce9a2c2f80dfc",
"index": 3835,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def sortArrayByParityII(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: List[int]\n \"\"\"\n i = 0\n for j in range(1, len(A), 2):\n if A[j] % 2 == 1:\n continue\n else:\n while i + 2 < len(A) and A[i] % 2 == 0:\n i += 2\n A[i], A[j] = A[j], A[i]\n i += 2\n return A\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
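
A quick check with a made-up input (LeetCode 922 guarantees the array is half even, half odd):

print(Solution().sortArrayByParityII([4, 2, 5, 7]))
# [4, 5, 2, 7] -- even indices hold evens, odd indices hold odds
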
from os import getenv
config_env = {'api_port': int(getenv('API_PORT')), 'psg_uri': getenv('PSG_URI')
}
|
normal
|
{
"blob_id": "21dd3d1deb00e9bc09803d01f1c05673ea8d25d2",
"index": 3771,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nconfig_env = {'api_port': int(getenv('API_PORT')), 'psg_uri': getenv('PSG_URI')\n }\n",
"step-3": "from os import getenv\nconfig_env = {'api_port': int(getenv('API_PORT')), 'psg_uri': getenv('PSG_URI')\n }\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
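
Note that int(getenv('API_PORT')) raises a TypeError when the variable is unset; a defensive sketch with made-up defaults:

from os import getenv

config_env = {
    'api_port': int(getenv('API_PORT', '8000')),                      # fallback port
    'psg_uri': getenv('PSG_URI', 'postgresql://localhost:5432/app'),  # fallback URI
}
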
class default_locations:
mc_2016_data_directory = "/afs/hephy.at/data/cms06/nanoTuples/"
mc_2016_postProcessing_directory = "stops_2016_nano_v0p23/dilep/"
data_2016_data_directory = "/afs/hephy.at/data/cms07/nanoTuples/"
data_2016_postProcessing_directory = "stops_2016_nano_v0p19/dilep/"
mc_2017_data_directory = "/afs/hephy.at/data/cms06/nanoTuples/"
mc_2017_postProcessing_directory = "stops_2017_nano_v0p23/dilep/"
data_2017_data_directory = "/afs/hephy.at/data/cms07/nanoTuples/"
data_2017_postProcessing_directory = "stops_2017_nano_v0p19/dilep/"
mc_2018_data_directory = "/afs/hephy.at/data/cms06/nanoTuples/"
mc_2018_postProcessing_directory = "stops_2018_nano_v0p23/dilep/"
data_2018_data_directory = "/afs/hephy.at/data/cms07/nanoTuples/"
data_2018_postProcessing_directory = "stops_2018_nano_v0p19/dilep/"
import os
if os.environ['HOSTNAME'].startswith('clip'):
default_locations.mc_2016_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
default_locations.data_2016_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
default_locations.mc_2017_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
default_locations.data_2017_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
default_locations.mc_2018_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
default_locations.data_2018_data_directory = "/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/"
|
normal
|
{
"blob_id": "b6df9414f99294c7986d3eb5332d40288f059cd1",
"index": 1245,
"step-1": "class default_locations:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "class default_locations:\n mc_2016_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2016_postProcessing_directory = 'stops_2016_nano_v0p23/dilep/'\n data_2016_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2016_postProcessing_directory = 'stops_2016_nano_v0p19/dilep/'\n mc_2017_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2017_postProcessing_directory = 'stops_2017_nano_v0p23/dilep/'\n data_2017_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2017_postProcessing_directory = 'stops_2017_nano_v0p19/dilep/'\n mc_2018_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2018_postProcessing_directory = 'stops_2018_nano_v0p23/dilep/'\n data_2018_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2018_postProcessing_directory = 'stops_2018_nano_v0p19/dilep/'\n\n\n<mask token>\n",
"step-3": "class default_locations:\n mc_2016_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2016_postProcessing_directory = 'stops_2016_nano_v0p23/dilep/'\n data_2016_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2016_postProcessing_directory = 'stops_2016_nano_v0p19/dilep/'\n mc_2017_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2017_postProcessing_directory = 'stops_2017_nano_v0p23/dilep/'\n data_2017_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2017_postProcessing_directory = 'stops_2017_nano_v0p19/dilep/'\n mc_2018_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2018_postProcessing_directory = 'stops_2018_nano_v0p23/dilep/'\n data_2018_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2018_postProcessing_directory = 'stops_2018_nano_v0p19/dilep/'\n\n\n<mask token>\nif os.environ['HOSTNAME'].startswith('clip'):\n default_locations.mc_2016_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.data_2016_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.mc_2017_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.data_2017_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.mc_2018_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.data_2018_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n",
"step-4": "class default_locations:\n mc_2016_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2016_postProcessing_directory = 'stops_2016_nano_v0p23/dilep/'\n data_2016_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2016_postProcessing_directory = 'stops_2016_nano_v0p19/dilep/'\n mc_2017_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2017_postProcessing_directory = 'stops_2017_nano_v0p23/dilep/'\n data_2017_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2017_postProcessing_directory = 'stops_2017_nano_v0p19/dilep/'\n mc_2018_data_directory = '/afs/hephy.at/data/cms06/nanoTuples/'\n mc_2018_postProcessing_directory = 'stops_2018_nano_v0p23/dilep/'\n data_2018_data_directory = '/afs/hephy.at/data/cms07/nanoTuples/'\n data_2018_postProcessing_directory = 'stops_2018_nano_v0p19/dilep/'\n\n\nimport os\nif os.environ['HOSTNAME'].startswith('clip'):\n default_locations.mc_2016_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.data_2016_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.mc_2017_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.data_2017_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.mc_2018_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n default_locations.data_2018_data_directory = (\n '/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/')\n",
"step-5": "class default_locations:\n mc_2016_data_directory = \"/afs/hephy.at/data/cms06/nanoTuples/\" \n mc_2016_postProcessing_directory = \"stops_2016_nano_v0p23/dilep/\" \n data_2016_data_directory = \"/afs/hephy.at/data/cms07/nanoTuples/\" \n data_2016_postProcessing_directory = \"stops_2016_nano_v0p19/dilep/\" \n \n mc_2017_data_directory = \"/afs/hephy.at/data/cms06/nanoTuples/\" \n mc_2017_postProcessing_directory = \"stops_2017_nano_v0p23/dilep/\" \n data_2017_data_directory = \"/afs/hephy.at/data/cms07/nanoTuples/\" \n data_2017_postProcessing_directory = \"stops_2017_nano_v0p19/dilep/\" \n \n mc_2018_data_directory = \"/afs/hephy.at/data/cms06/nanoTuples/\" \n mc_2018_postProcessing_directory = \"stops_2018_nano_v0p23/dilep/\" \n data_2018_data_directory = \"/afs/hephy.at/data/cms07/nanoTuples/\" \n data_2018_postProcessing_directory = \"stops_2018_nano_v0p19/dilep/\"\n\nimport os\nif os.environ['HOSTNAME'].startswith('clip'):\n default_locations.mc_2016_data_directory = \"/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/\"\n default_locations.data_2016_data_directory = \"/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/\"\n default_locations.mc_2017_data_directory = \"/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/\"\n default_locations.data_2017_data_directory = \"/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/\"\n default_locations.mc_2018_data_directory = \"/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/\"\n default_locations.data_2018_data_directory = \"/mnt/hephy/cms/robert.schoefbeck/StopsDileptonLegacy/nanoTuples/\"\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class simpleLSTM:
<|reserved_special_token_0|>
def create_dataset(self, dataset, look_back=4):
dataX, dataY = [], []
for i in range(len(dataset) - look_back - 1):
a = dataset.iloc[i:i + look_back]
dataX.append(a)
dataY.append(dataset.iloc[i + look_back])
return np.array(dataX), np.array(dataY)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def LSTM_CNN(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, '2019-01-01')
train, val = self.split_dataset(train, '2014-01-01')
batch_size = 1
look_back = 3
EPOCHS = 100
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,
trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])
)
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.
shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = False
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
print('[INFO] MODEL LOADED...')
else:
input_shape = num_of_features, look_back
model = Sequential()
model.add(LSTM(32, activation='relu', input_shape=input_shape))
model.add(Dropout(0.2))
model.add(Dense(num_of_features, activation='relu'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)
history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,
validation_data=(valX, valY))
model.save(self.MODEL_PATH)
print('[INFO] MODEL SAVED...')
trainPredict = model.predict(trainX)
valPredict = model.predict(valX)
testPredict = model.predict(testX)
testR2 = r2_score(testY, testPredict)
print('Test R2: %.2f ' % testR2)
valR2 = r2_score(valY, valPredict)
print('Val R2: %.2f ' % valR2)
trainR2 = r2_score(trainY, trainPredict)
print('Train R2: %.2f ' % trainR2)
feature_i = 0
plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),
label='Test_obs')
plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].
ravel(), label='Test_pred')
plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),
label='Val_obs')
plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(
), label='Val_pred')
plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),
label='Train_obs')
plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].
ravel(), label='Train_pred')
plt.xticks(rotation=45)
plt.legend()
plt.show()
<|reserved_special_token_0|>
def statefulLSTM(self, stock_h):
dataset = self.get_features(stock_h, num_of_features=1)
train, test = self.split_dataset(dataset, '2017-01-01')
val, test = self.split_dataset(test, '2019-01-01')
batch_size = 1
look_back = 3
EPOCHS = 25
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
if os.path.exists('models\\stateful_lstm.h5'):
model = tensorflow.keras.models.load_model(
'models\\stateful_lstm.h5')
else:
model = Sequential()
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True, return_sequences=True))
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',
metrics=['accuracy'])
for i in range(EPOCHS):
print(f'[INFO] EPOCH: {i}/{EPOCHS}')
model.fit(trainX, trainY, epochs=1, batch_size=batch_size,
verbose=2, shuffle=False, validation_data=(valX, valY))
model.save('models\\stateful_lstm.h5')
trainPredict = model.predict(trainX, batch_size=batch_size)
testPredict = model.predict(testX, batch_size=batch_size)
trainScore = math.sqrt(mean_squared_error(trainY[:, 0],
trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % trainScore)
testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,
0]))
print('Test Score: %.2f RMSE' % testScore)
plt.plot(testY)
plt.plot(testPredict)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class simpleLSTM:
def __init__(self):
self.MODEL_PATH = 'models\\basic_lstm.h5'
def create_dataset(self, dataset, look_back=4):
dataX, dataY = [], []
for i in range(len(dataset) - look_back - 1):
a = dataset.iloc[i:i + look_back]
dataX.append(a)
dataY.append(dataset.iloc[i + look_back])
return np.array(dataX), np.array(dataY)
<|reserved_special_token_0|>
def split_dataset(self, dataset, split_date, initial_data_cut=None,
type='start'):
if initial_data_cut != None:
split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')
if type == 'start':
dataset = dataset.loc[split_date_old:]
if type == 'end':
dataset = dataset.loc[:split_date_old]
split_date = pd.Timestamp(split_date + ' 00:00:00')
train = dataset.loc[:split_date]
test = dataset.loc[split_date:]
print(f'Train: {len(train)}, Test: {len(test)}')
return train, test
def LSTM_CNN(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, '2019-01-01')
train, val = self.split_dataset(train, '2014-01-01')
batch_size = 1
look_back = 3
EPOCHS = 100
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,
trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])
)
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.
shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = False
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
print('[INFO] MODEL LOADED...')
else:
input_shape = num_of_features, look_back
model = Sequential()
model.add(LSTM(32, activation='relu', input_shape=input_shape))
model.add(Dropout(0.2))
model.add(Dense(num_of_features, activation='relu'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)
history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,
validation_data=(valX, valY))
model.save(self.MODEL_PATH)
print('[INFO] MODEL SAVED...')
trainPredict = model.predict(trainX)
valPredict = model.predict(valX)
testPredict = model.predict(testX)
testR2 = r2_score(testY, testPredict)
print('Test R2: %.2f ' % testR2)
valR2 = r2_score(valY, valPredict)
print('Val R2: %.2f ' % valR2)
trainR2 = r2_score(trainY, trainPredict)
print('Train R2: %.2f ' % trainR2)
feature_i = 0
plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),
label='Test_obs')
plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].
ravel(), label='Test_pred')
plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),
label='Val_obs')
plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(
), label='Val_pred')
plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),
label='Train_obs')
plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].
ravel(), label='Train_pred')
plt.xticks(rotation=45)
plt.legend()
plt.show()
def basicLSTM(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, '2016-01-01',
initial_data_cut='2019-01-01')
train, val = self.split_dataset(train, '2012-01-01')
look_back = 5
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,
trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])
)
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.
shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = True
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
else:
model = Sequential()
model.add(LSTM(32, input_shape=(num_of_features, look_back)))
model.add(Dropout(0.3))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',
metrics=['accuracy'])
model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2,
validation_data=(valX, valY), callbacks=[early_stop])
model.save(self.MODEL_PATH)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainScore = r2_score(trainY, trainPredict)
print('R2 Train Score: %.2f' % trainScore)
testScore = r2_score(testY, testPredict)
print('R2 Test Score: %.2f' % testScore)
plt.plot(testY)
plt.plot(testPredict)
plt.show()
def statefulLSTM(self, stock_h):
dataset = self.get_features(stock_h, num_of_features=1)
train, test = self.split_dataset(dataset, '2017-01-01')
val, test = self.split_dataset(test, '2019-01-01')
batch_size = 1
look_back = 3
EPOCHS = 25
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
if os.path.exists('models\\stateful_lstm.h5'):
model = tensorflow.keras.models.load_model(
'models\\stateful_lstm.h5')
else:
model = Sequential()
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True, return_sequences=True))
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',
metrics=['accuracy'])
for i in range(EPOCHS):
print(f'[INFO] EPOCH: {i}/{EPOCHS}')
model.fit(trainX, trainY, epochs=1, batch_size=batch_size,
verbose=2, shuffle=False, validation_data=(valX, valY))
model.save('models\\stateful_lstm.h5')
trainPredict = model.predict(trainX, batch_size=batch_size)
testPredict = model.predict(testX, batch_size=batch_size)
trainScore = math.sqrt(mean_squared_error(trainY[:, 0],
trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % trainScore)
testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,
0]))
print('Test Score: %.2f RMSE' % testScore)
plt.plot(testY)
plt.plot(testPredict)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class simpleLSTM:
def __init__(self):
self.MODEL_PATH = 'models\\basic_lstm.h5'
def create_dataset(self, dataset, look_back=4):
dataX, dataY = [], []
for i in range(len(dataset) - look_back - 1):
a = dataset.iloc[i:i + look_back]
dataX.append(a)
dataY.append(dataset.iloc[i + look_back])
return np.array(dataX), np.array(dataY)
def get_features(self, stock_h, num_of_features=1):
if num_of_features == 1:
dataset = stock_h[['Close']]
elif num_of_features == 2:
dataset = stock_h[['Close', 'Open']]
elif num_of_features == 4:
dataset = stock_h[['Close', 'Open', 'Low', 'High']]
elif num_of_features == 5:
dataset = stock_h[['Close', 'Open', 'Low', 'High', 'Volume']]
return dataset
def split_dataset(self, dataset, split_date, initial_data_cut=None,
type='start'):
if initial_data_cut != None:
split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')
if type == 'start':
dataset = dataset.loc[split_date_old:]
if type == 'end':
dataset = dataset.loc[:split_date_old]
split_date = pd.Timestamp(split_date + ' 00:00:00')
train = dataset.loc[:split_date]
test = dataset.loc[split_date:]
print(f'Train: {len(train)}, Test: {len(test)}')
return train, test
def LSTM_CNN(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, '2019-01-01')
train, val = self.split_dataset(train, '2014-01-01')
batch_size = 1
look_back = 3
EPOCHS = 100
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,
trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])
)
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.
shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = False
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
print('[INFO] MODEL LOADED...')
else:
input_shape = num_of_features, look_back
model = Sequential()
model.add(LSTM(32, activation='relu', input_shape=input_shape))
model.add(Dropout(0.2))
model.add(Dense(num_of_features, activation='relu'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)
history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,
validation_data=(valX, valY))
model.save(self.MODEL_PATH)
print('[INFO] MODEL SAVED...')
trainPredict = model.predict(trainX)
valPredict = model.predict(valX)
testPredict = model.predict(testX)
testR2 = r2_score(testY, testPredict)
print('Test R2: %.2f ' % testR2)
valR2 = r2_score(valY, valPredict)
print('Val R2: %.2f ' % valR2)
trainR2 = r2_score(trainY, trainPredict)
print('Train R2: %.2f ' % trainR2)
feature_i = 0
plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),
label='Test_obs')
plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].
ravel(), label='Test_pred')
plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),
label='Val_obs')
plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(
), label='Val_pred')
plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),
label='Train_obs')
plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].
ravel(), label='Train_pred')
plt.xticks(rotation=45)
plt.legend()
plt.show()
def basicLSTM(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, '2016-01-01',
initial_data_cut='2019-01-01')
train, val = self.split_dataset(train, '2012-01-01')
look_back = 5
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,
trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])
)
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.
shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = True
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
else:
model = Sequential()
model.add(LSTM(32, input_shape=(num_of_features, look_back)))
model.add(Dropout(0.3))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',
metrics=['accuracy'])
model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2,
validation_data=(valX, valY), callbacks=[early_stop])
model.save(self.MODEL_PATH)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainScore = r2_score(trainY, trainPredict)
print('R2 Train Score: %.2f' % trainScore)
testScore = r2_score(testY, testPredict)
print('R2 Test Score: %.2f' % testScore)
plt.plot(testY)
plt.plot(testPredict)
plt.show()
def statefulLSTM(self, stock_h):
dataset = self.get_features(stock_h, num_of_features=1)
train, test = self.split_dataset(dataset, '2017-01-01')
val, test = self.split_dataset(test, '2019-01-01')
batch_size = 1
look_back = 3
EPOCHS = 25
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
if os.path.exists('models\\stateful_lstm.h5'):
model = tensorflow.keras.models.load_model(
'models\\stateful_lstm.h5')
else:
model = Sequential()
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True, return_sequences=True))
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',
metrics=['accuracy'])
for i in range(EPOCHS):
print(f'[INFO] EPOCH: {i}/{EPOCHS}')
model.fit(trainX, trainY, epochs=1, batch_size=batch_size,
verbose=2, shuffle=False, validation_data=(valX, valY))
model.save('models\\stateful_lstm.h5')
trainPredict = model.predict(trainX, batch_size=batch_size)
testPredict = model.predict(testX, batch_size=batch_size)
trainScore = math.sqrt(mean_squared_error(trainY[:, 0],
trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % trainScore)
testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,
0]))
print('Test Score: %.2f RMSE' % testScore)
plt.plot(testY)
plt.plot(testPredict)
plt.show()
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
import math
import sklearn
import sklearn.preprocessing
import datetime
import os
import matplotlib.pyplot as plt
import yfinance as yf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, r2_score
import tensorflow
class simpleLSTM:
def __init__(self):
self.MODEL_PATH = 'models\\basic_lstm.h5'
def create_dataset(self, dataset, look_back=4):
dataX, dataY = [], []
for i in range(len(dataset) - look_back - 1):
a = dataset.iloc[i:i + look_back]
dataX.append(a)
dataY.append(dataset.iloc[i + look_back])
return np.array(dataX), np.array(dataY)
def get_features(self, stock_h, num_of_features=1):
if num_of_features == 1:
dataset = stock_h[['Close']]
elif num_of_features == 2:
dataset = stock_h[['Close', 'Open']]
elif num_of_features == 4:
dataset = stock_h[['Close', 'Open', 'Low', 'High']]
elif num_of_features == 5:
dataset = stock_h[['Close', 'Open', 'Low', 'High', 'Volume']]
return dataset
def split_dataset(self, dataset, split_date, initial_data_cut=None,
type='start'):
if initial_data_cut != None:
split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')
if type == 'start':
dataset = dataset.loc[split_date_old:]
if type == 'end':
dataset = dataset.loc[:split_date_old]
split_date = pd.Timestamp(split_date + ' 00:00:00')
train = dataset.loc[:split_date]
test = dataset.loc[split_date:]
print(f'Train: {len(train)}, Test: {len(test)}')
return train, test
def LSTM_CNN(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, '2019-01-01')
train, val = self.split_dataset(train, '2014-01-01')
batch_size = 1
look_back = 3
EPOCHS = 100
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,
trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])
)
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.
shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = False
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
print('[INFO] MODEL LOADED...')
else:
input_shape = num_of_features, look_back
model = Sequential()
model.add(LSTM(32, activation='relu', input_shape=input_shape))
model.add(Dropout(0.2))
model.add(Dense(num_of_features, activation='relu'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)
history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,
validation_data=(valX, valY))
model.save(self.MODEL_PATH)
print('[INFO] MODEL SAVED...')
trainPredict = model.predict(trainX)
valPredict = model.predict(valX)
testPredict = model.predict(testX)
testR2 = r2_score(testY, testPredict)
print('Test R2: %.2f ' % testR2)
valR2 = r2_score(valY, valPredict)
print('Val R2: %.2f ' % valR2)
trainR2 = r2_score(trainY, trainPredict)
print('Train R2: %.2f ' % trainR2)
feature_i = 0
plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),
label='Test_obs')
plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].
ravel(), label='Test_pred')
plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),
label='Val_obs')
plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(
), label='Val_pred')
plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),
label='Train_obs')
plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].
ravel(), label='Train_pred')
plt.xticks(rotation=45)
plt.legend()
plt.show()
def basicLSTM(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, '2016-01-01',
initial_data_cut='2019-01-01')
train, val = self.split_dataset(train, '2012-01-01')
look_back = 5
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,
trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])
)
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.
shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = True
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
else:
model = Sequential()
model.add(LSTM(32, input_shape=(num_of_features, look_back)))
model.add(Dropout(0.3))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',
metrics=['accuracy'])
model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2,
validation_data=(valX, valY), callbacks=[early_stop])
model.save(self.MODEL_PATH)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainScore = r2_score(trainY, trainPredict)
print('R2 Train Score: %.2f' % trainScore)
testScore = r2_score(testY, testPredict)
print('R2 Test Score: %.2f' % testScore)
plt.plot(testY)
plt.plot(testPredict)
plt.show()
def statefulLSTM(self, stock_h):
dataset = self.get_features(stock_h, num_of_features=1)
train, test = self.split_dataset(dataset, '2017-01-01')
val, test = self.split_dataset(test, '2019-01-01')
batch_size = 1
look_back = 3
EPOCHS = 25
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
if os.path.exists('models\\stateful_lstm.h5'):
model = tensorflow.keras.models.load_model(
'models\\stateful_lstm.h5')
else:
model = Sequential()
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True, return_sequences=True))
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),
stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',
metrics=['accuracy'])
for i in range(EPOCHS):
print(f'[INFO] EPOCH: {i}/{EPOCHS}')
model.fit(trainX, trainY, epochs=1, batch_size=batch_size,
verbose=2, shuffle=False, validation_data=(valX, valY))
model.save('models\\stateful_lstm.h5')
trainPredict = model.predict(trainX, batch_size=batch_size)
testPredict = model.predict(testX, batch_size=batch_size)
trainScore = math.sqrt(mean_squared_error(trainY[:, 0],
trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % trainScore)
testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,
0]))
print('Test Score: %.2f RMSE' % testScore)
plt.plot(testY)
plt.plot(testPredict)
plt.show()
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
import math
import sklearn
import sklearn.preprocessing
import datetime
import os
import matplotlib.pyplot as plt
import yfinance as yf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, r2_score
import tensorflow
class simpleLSTM:
def __init__(self):
self.MODEL_PATH = r"models\basic_lstm.h5"
def create_dataset(self, dataset, look_back=4):
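        # Slide a window over the frame: each X sample holds `look_back`
        # consecutive rows, and y is the row immediately after the window.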
dataX, dataY = [], []
for i in range(len(dataset) - look_back - 1):
a = dataset.iloc[i:(i + look_back)]
dataX.append(a)
dataY.append(dataset.iloc[i + look_back])
# dataY.append(dataset.iloc[i + look_back][0])
return np.array(dataX), np.array(dataY)
def get_features(self, stock_h, num_of_features=1):
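        # Select which OHLCV columns feed the model, keyed by feature count.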
if num_of_features == 1:
dataset = stock_h[["Close"]]
elif num_of_features == 2:
dataset = stock_h[["Close", "Open"]]
elif num_of_features == 4:
dataset = stock_h[["Close", "Open", "Low", "High"]]
elif num_of_features == 5:
dataset = stock_h[["Close", "Open", "Low", "High", "Volume"]]
return dataset
def split_dataset(self, dataset, split_date, initial_data_cut=None, type="start"):
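        # Chronological split: optionally trim the series at initial_data_cut
        # first, then cut into train/test at split_date (no shuffling).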
if initial_data_cut != None:
split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')
if type == "start":
dataset = dataset.loc[split_date_old:]
if type == "end":
dataset = dataset.loc[:split_date_old]
split_date = pd.Timestamp(split_date + ' 00:00:00')
train = dataset.loc[:split_date]
test = dataset.loc[split_date:]
# train_size = int(len(dataset) * 0.67)
# test_size = len(dataset) - train_size
# train = dataset[0:train_size, :]
# test = dataset[train_size:len(dataset), :]
# print(len(train), len(test))
print(f"Train: {len(train)}, Test: {len(test)}")
return train, test
def LSTM_CNN(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
# train, test = self.split_dataset(dataset, "2020-09-01", initial_data_cut="2020-01-01", type="start")
# train, test = self.split_dataset(dataset, "2017-02-01")
# val, test = self.split_dataset(test, "2021-01-01")
# train, test = self.split_dataset(dataset, "2017-01-01", initial_data_cut="2019-01-01", type="end")
train, test = self.split_dataset(dataset, "2019-01-01")
train, val = self.split_dataset(train, "2014-01-01")
batch_size = 1
look_back = 3
EPOCHS = 100
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features, trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.shape[1]))
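        # NOTE: np.reshape reinterprets the flat (samples, look_back, features)
        # buffer as (samples, features, look_back); it does not swap the axes.
        # A true axis swap would be np.transpose(trainX, (0, 2, 1)).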
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = False
        # A previously saved model file can be loaded to reconstruct the model identically.
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
print("[INFO] MODEL LOADED...")
else:
# input_shape = (look_back, 1)
input_shape = (num_of_features, look_back)
model = Sequential()
            model.add(LSTM(32, activation="relu", input_shape=input_shape))
# model.add(
# Conv1D(filters=32, kernel_size=5, strides=1, padding="same", activation="relu",
# input_shape=input_shape))
# lstm_model.add(Dropout(0.1))
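            # The Conv1D front-end above is disabled (and Conv1D is never
            # imported), so this method currently builds a plain LSTM.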
model.add(Dropout(0.2))
model.add(Dense(num_of_features, activation='relu'))
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)
# callbacks=[early_stop]
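            # early_stop is defined here but never passed to fit(), so
            # training always runs the full EPOCHS.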
history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1, validation_data=(valX, valY))
model.save(self.MODEL_PATH)
print("[INFO] MODEL SAVED...")
trainPredict = model.predict(trainX)
valPredict = model.predict(valX)
testPredict = model.predict(testX)
# testR2 = r2_score(testY[:, 0], testPredict[:, 0])
# print('Test R2: %.2f ' % (testR2))
# valR2 = r2_score(valY[:, 0], valPredict[:, 0])
# print('Val R2: %.2f ' % (valR2))
# trainR2 = r2_score(trainY[:, 0], trainPredict[:, 0])
# print('Train R2: %.2f ' % (trainR2))
testR2 = r2_score(testY, testPredict)
print('Test R2: %.2f ' % (testR2))
valR2 = r2_score(valY, valPredict)
print('Val R2: %.2f ' % (valR2))
trainR2 = r2_score(trainY, trainPredict)
print('Train R2: %.2f ' % (trainR2))
feature_i = 0
plt.plot(test.index[look_back+1:], testY[:, feature_i].ravel(), label="Test_obs")
plt.plot(test.index[look_back+1:], testPredict[:, feature_i].ravel(), label="Test_pred")
plt.plot(val.index[look_back+1:], valY[:, feature_i].ravel(), label="Val_obs")
plt.plot(val.index[look_back+1:], valPredict[:, feature_i].ravel(), label="Val_pred")
plt.plot(train.index[look_back+1:], trainY[:, feature_i].ravel(), label="Train_obs")
plt.plot(train.index[look_back+1:], trainPredict[:, feature_i].ravel(), label="Train_pred")
plt.xticks(rotation=45)
plt.legend()
plt.show()
def basicLSTM(self, stock_h):
num_of_features = 4
dataset = self.get_features(stock_h, num_of_features=num_of_features)
train, test = self.split_dataset(dataset, "2016-01-01", initial_data_cut="2019-01-01")
# train, test = self.split_dataset(dataset, "2018-01-01")
train, val = self.split_dataset(train, "2012-01-01")
look_back = 5
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], num_of_features, trainX.shape[1]))
valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
SAVE = True
if os.path.exists(self.MODEL_PATH) and SAVE:
model = tensorflow.keras.models.load_model(self.MODEL_PATH)
else:
model = Sequential()
model.add(LSTM(32, input_shape=(num_of_features, look_back)))
model.add(Dropout(0.3))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2, validation_data=(valX, valY),
callbacks=[early_stop])
model.save(self.MODEL_PATH)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))
# print('Train Score: %.2f RMSE' % (trainScore))
# testScore = math.sqrt(mean_squared_error(testY, testPredict))
# print('Test Score: %.2f RMSE' % (testScore))
trainScore = r2_score(trainY, trainPredict)
print('R2 Train Score: %.2f' % (trainScore))
testScore = r2_score(testY, testPredict)
print('R2 Test Score: %.2f' % (testScore))
plt.plot(testY)
plt.plot(testPredict)
plt.show()
def statefulLSTM(self, stock_h):
dataset = self.get_features(stock_h, num_of_features=1)
# train, test = split_dataset(dataset, "2019-01-01", initial_data_cut="2018-01-01")
train, test = self.split_dataset(dataset, "2017-01-01")
val, test = self.split_dataset(test, "2019-01-01")
batch_size = 1
look_back = 3
EPOCHS = 25
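        # stateful=True carries hidden state across batches, so the batch size
        # is fixed by batch_input_shape and must match again at predict time.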
trainX, trainY = self.create_dataset(train, look_back)
valX, valY = self.create_dataset(val, look_back)
testX, testY = self.create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
# trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
# valX = np.reshape(valX, (valX.shape[0], 1, valX.shape[1]))
# testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)
        # A previously saved model file can be loaded to reconstruct the model identically.
        if os.path.exists("models\\stateful_lstm.h5"):
            model = tensorflow.keras.models.load_model("models\\stateful_lstm.h5")
else:
model = Sequential()
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True, return_sequences=True))
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
for i in range(EPOCHS):
print(f"[INFO] EPOCH: {i}/{EPOCHS}")
model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=2, shuffle=False, validation_data=(valX, valY))
# model.reset_states()
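                # shuffle=False keeps samples in time order; with reset_states()
                # left commented out, the carried state also persists from one
                # epoch into the next.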
            model.save("models\\stateful_lstm.h5")
# model.save("stateful_lstm")
# model.fit(trainX, trainY, epochs=200, batch_size=1, verbose=2, validation_data=(valX, valY),
# callbacks=[early_stop])
trainPredict = model.predict(trainX, batch_size=batch_size)
# model.reset_states()
testPredict = model.predict(testX, batch_size=batch_size)
# trainPredict = model.predict(trainX)
# testPredict = model.predict(testX)
# trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))
# print('Train Score: %.2f RMSE' % (trainScore))
# testScore = math.sqrt(mean_squared_error(testY, testPredict))
# print('Test Score: %.2f RMSE' % (testScore))
#
trainScore = math.sqrt(mean_squared_error(trainY[:, 0], trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:, 0]))
print('Test Score: %.2f RMSE' % (testScore))
plt.plot(testY)
plt.plot(testPredict)
plt.show()
# # shift train predictions for plotting
# trainPredictPlot = np.empty_like(dataset)
# trainPredictPlot[:, :] = np.nan
# trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict
# # shift test predictions for plotting
# testPredictPlot = np.empty_like(dataset)
# testPredictPlot[:, :] = np.nan
# testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(dataset) - 1, :] = testPredict
# # plot baseline and predictions
# # plt.plot(scaler.inverse_transform(dataset))
# plt.plot(trainPredictPlot)
# plt.plot(testPredictPlot)
# plt.show()
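

# Illustrative driver: assumes yfinance's Ticker.history() API and a
# hypothetical ticker symbol ("AAPL"); the methods expect a DataFrame with
# Close/Open/Low/High/Volume columns indexed by date.
if __name__ == "__main__":
    stock_h = yf.Ticker("AAPL").history(period="max")  # hypothetical ticker
    lstm = simpleLSTM()
    lstm.basicLSTM(stock_h)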
|
flexible
|
{
"blob_id": "97ea837961c92b5c92a93ec33ac016de7ff1e876",
"index": 2449,
"step-1": "<mask token>\n\n\nclass simpleLSTM:\n <mask token>\n\n def create_dataset(self, dataset, look_back=4):\n dataX, dataY = [], []\n for i in range(len(dataset) - look_back - 1):\n a = dataset.iloc[i:i + look_back]\n dataX.append(a)\n dataY.append(dataset.iloc[i + look_back])\n return np.array(dataX), np.array(dataY)\n <mask token>\n <mask token>\n\n def LSTM_CNN(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, '2019-01-01')\n train, val = self.split_dataset(train, '2014-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 100\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,\n trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])\n )\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.\n shape[1]))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n SAVE = False\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n print('[INFO] MODEL LOADED...')\n else:\n input_shape = num_of_features, look_back\n model = Sequential()\n model.add(LSTM(32, activation='relu', input_shape=input_shape))\n model.add(Dropout(0.2))\n model.add(Dense(num_of_features, activation='relu'))\n model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)\n history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,\n validation_data=(valX, valY))\n model.save(self.MODEL_PATH)\n print('[INFO] MODEL SAVED...')\n trainPredict = model.predict(trainX)\n valPredict = model.predict(valX)\n testPredict = model.predict(testX)\n testR2 = r2_score(testY, testPredict)\n print('Test R2: %.2f ' % testR2)\n valR2 = r2_score(valY, valPredict)\n print('Val R2: %.2f ' % valR2)\n trainR2 = r2_score(trainY, trainPredict)\n print('Train R2: %.2f ' % trainR2)\n feature_i = 0\n plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),\n label='Test_obs')\n plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].\n ravel(), label='Test_pred')\n plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),\n label='Val_obs')\n plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(\n ), label='Val_pred')\n plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),\n label='Train_obs')\n plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].\n ravel(), label='Train_pred')\n plt.xticks(rotation=45)\n plt.legend()\n plt.show()\n <mask token>\n\n def statefulLSTM(self, stock_h):\n dataset = self.get_features(stock_h, num_of_features=1)\n train, test = self.split_dataset(dataset, '2017-01-01')\n val, test = self.split_dataset(test, '2019-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 25\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\n valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))\n testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n if os.path.exists('models\\\\stateful_lstm.h5'):\n model = tensorflow.keras.models.load_model(\n 
'models\\\\stateful_lstm.h5')\n else:\n model = Sequential()\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True, return_sequences=True))\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam',\n metrics=['accuracy'])\n for i in range(EPOCHS):\n print(f'[INFO] EPOCH: {i}/{EPOCHS}')\n model.fit(trainX, trainY, epochs=1, batch_size=batch_size,\n verbose=2, shuffle=False, validation_data=(valX, valY))\n model.save('models\\\\stateful_lstm.h5')\n trainPredict = model.predict(trainX, batch_size=batch_size)\n testPredict = model.predict(testX, batch_size=batch_size)\n trainScore = math.sqrt(mean_squared_error(trainY[:, 0],\n trainPredict[:, 0]))\n print('Train Score: %.2f RMSE' % trainScore)\n testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,\n 0]))\n print('Test Score: %.2f RMSE' % testScore)\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n",
"step-2": "<mask token>\n\n\nclass simpleLSTM:\n\n def __init__(self):\n self.MODEL_PATH = 'models\\\\basic_lstm.h5'\n\n def create_dataset(self, dataset, look_back=4):\n dataX, dataY = [], []\n for i in range(len(dataset) - look_back - 1):\n a = dataset.iloc[i:i + look_back]\n dataX.append(a)\n dataY.append(dataset.iloc[i + look_back])\n return np.array(dataX), np.array(dataY)\n <mask token>\n\n def split_dataset(self, dataset, split_date, initial_data_cut=None,\n type='start'):\n if initial_data_cut != None:\n split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')\n if type == 'start':\n dataset = dataset.loc[split_date_old:]\n if type == 'end':\n dataset = dataset.loc[:split_date_old]\n split_date = pd.Timestamp(split_date + ' 00:00:00')\n train = dataset.loc[:split_date]\n test = dataset.loc[split_date:]\n print(f'Train: {len(train)}, Test: {len(test)}')\n return train, test\n\n def LSTM_CNN(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, '2019-01-01')\n train, val = self.split_dataset(train, '2014-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 100\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,\n trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])\n )\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.\n shape[1]))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n SAVE = False\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n print('[INFO] MODEL LOADED...')\n else:\n input_shape = num_of_features, look_back\n model = Sequential()\n model.add(LSTM(32, activation='relu', input_shape=input_shape))\n model.add(Dropout(0.2))\n model.add(Dense(num_of_features, activation='relu'))\n model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)\n history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,\n validation_data=(valX, valY))\n model.save(self.MODEL_PATH)\n print('[INFO] MODEL SAVED...')\n trainPredict = model.predict(trainX)\n valPredict = model.predict(valX)\n testPredict = model.predict(testX)\n testR2 = r2_score(testY, testPredict)\n print('Test R2: %.2f ' % testR2)\n valR2 = r2_score(valY, valPredict)\n print('Val R2: %.2f ' % valR2)\n trainR2 = r2_score(trainY, trainPredict)\n print('Train R2: %.2f ' % trainR2)\n feature_i = 0\n plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),\n label='Test_obs')\n plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].\n ravel(), label='Test_pred')\n plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),\n label='Val_obs')\n plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(\n ), label='Val_pred')\n plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),\n label='Train_obs')\n plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].\n ravel(), label='Train_pred')\n plt.xticks(rotation=45)\n plt.legend()\n plt.show()\n\n def basicLSTM(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, '2016-01-01',\n initial_data_cut='2019-01-01')\n train, val = self.split_dataset(train, 
'2012-01-01')\n look_back = 5\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,\n trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])\n )\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.\n shape[1]))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n SAVE = True\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n else:\n model = Sequential()\n model.add(LSTM(32, input_shape=(num_of_features, look_back)))\n model.add(Dropout(0.3))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam',\n metrics=['accuracy'])\n model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2,\n validation_data=(valX, valY), callbacks=[early_stop])\n model.save(self.MODEL_PATH)\n trainPredict = model.predict(trainX)\n testPredict = model.predict(testX)\n trainScore = r2_score(trainY, trainPredict)\n print('R2 Train Score: %.2f' % trainScore)\n testScore = r2_score(testY, testPredict)\n print('R2 Test Score: %.2f' % testScore)\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n\n def statefulLSTM(self, stock_h):\n dataset = self.get_features(stock_h, num_of_features=1)\n train, test = self.split_dataset(dataset, '2017-01-01')\n val, test = self.split_dataset(test, '2019-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 25\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\n valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))\n testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n if os.path.exists('models\\\\stateful_lstm.h5'):\n model = tensorflow.keras.models.load_model(\n 'models\\\\stateful_lstm.h5')\n else:\n model = Sequential()\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True, return_sequences=True))\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam',\n metrics=['accuracy'])\n for i in range(EPOCHS):\n print(f'[INFO] EPOCH: {i}/{EPOCHS}')\n model.fit(trainX, trainY, epochs=1, batch_size=batch_size,\n verbose=2, shuffle=False, validation_data=(valX, valY))\n model.save('models\\\\stateful_lstm.h5')\n trainPredict = model.predict(trainX, batch_size=batch_size)\n testPredict = model.predict(testX, batch_size=batch_size)\n trainScore = math.sqrt(mean_squared_error(trainY[:, 0],\n trainPredict[:, 0]))\n print('Train Score: %.2f RMSE' % trainScore)\n testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,\n 0]))\n print('Test Score: %.2f RMSE' % testScore)\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n",
"step-3": "<mask token>\n\n\nclass simpleLSTM:\n\n def __init__(self):\n self.MODEL_PATH = 'models\\\\basic_lstm.h5'\n\n def create_dataset(self, dataset, look_back=4):\n dataX, dataY = [], []\n for i in range(len(dataset) - look_back - 1):\n a = dataset.iloc[i:i + look_back]\n dataX.append(a)\n dataY.append(dataset.iloc[i + look_back])\n return np.array(dataX), np.array(dataY)\n\n def get_features(self, stock_h, num_of_features=1):\n if num_of_features == 1:\n dataset = stock_h[['Close']]\n elif num_of_features == 2:\n dataset = stock_h[['Close', 'Open']]\n elif num_of_features == 4:\n dataset = stock_h[['Close', 'Open', 'Low', 'High']]\n elif num_of_features == 5:\n dataset = stock_h[['Close', 'Open', 'Low', 'High', 'Volume']]\n return dataset\n\n def split_dataset(self, dataset, split_date, initial_data_cut=None,\n type='start'):\n if initial_data_cut != None:\n split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')\n if type == 'start':\n dataset = dataset.loc[split_date_old:]\n if type == 'end':\n dataset = dataset.loc[:split_date_old]\n split_date = pd.Timestamp(split_date + ' 00:00:00')\n train = dataset.loc[:split_date]\n test = dataset.loc[split_date:]\n print(f'Train: {len(train)}, Test: {len(test)}')\n return train, test\n\n def LSTM_CNN(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, '2019-01-01')\n train, val = self.split_dataset(train, '2014-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 100\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,\n trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])\n )\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.\n shape[1]))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n SAVE = False\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n print('[INFO] MODEL LOADED...')\n else:\n input_shape = num_of_features, look_back\n model = Sequential()\n model.add(LSTM(32, activation='relu', input_shape=input_shape))\n model.add(Dropout(0.2))\n model.add(Dense(num_of_features, activation='relu'))\n model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)\n history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,\n validation_data=(valX, valY))\n model.save(self.MODEL_PATH)\n print('[INFO] MODEL SAVED...')\n trainPredict = model.predict(trainX)\n valPredict = model.predict(valX)\n testPredict = model.predict(testX)\n testR2 = r2_score(testY, testPredict)\n print('Test R2: %.2f ' % testR2)\n valR2 = r2_score(valY, valPredict)\n print('Val R2: %.2f ' % valR2)\n trainR2 = r2_score(trainY, trainPredict)\n print('Train R2: %.2f ' % trainR2)\n feature_i = 0\n plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),\n label='Test_obs')\n plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].\n ravel(), label='Test_pred')\n plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),\n label='Val_obs')\n plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(\n ), label='Val_pred')\n plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),\n label='Train_obs')\n plt.plot(train.index[look_back + 1:], 
trainPredict[:, feature_i].\n ravel(), label='Train_pred')\n plt.xticks(rotation=45)\n plt.legend()\n plt.show()\n\n def basicLSTM(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, '2016-01-01',\n initial_data_cut='2019-01-01')\n train, val = self.split_dataset(train, '2012-01-01')\n look_back = 5\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,\n trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])\n )\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.\n shape[1]))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n SAVE = True\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n else:\n model = Sequential()\n model.add(LSTM(32, input_shape=(num_of_features, look_back)))\n model.add(Dropout(0.3))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam',\n metrics=['accuracy'])\n model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2,\n validation_data=(valX, valY), callbacks=[early_stop])\n model.save(self.MODEL_PATH)\n trainPredict = model.predict(trainX)\n testPredict = model.predict(testX)\n trainScore = r2_score(trainY, trainPredict)\n print('R2 Train Score: %.2f' % trainScore)\n testScore = r2_score(testY, testPredict)\n print('R2 Test Score: %.2f' % testScore)\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n\n def statefulLSTM(self, stock_h):\n dataset = self.get_features(stock_h, num_of_features=1)\n train, test = self.split_dataset(dataset, '2017-01-01')\n val, test = self.split_dataset(test, '2019-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 25\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\n valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))\n testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n if os.path.exists('models\\\\stateful_lstm.h5'):\n model = tensorflow.keras.models.load_model(\n 'models\\\\stateful_lstm.h5')\n else:\n model = Sequential()\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True, return_sequences=True))\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam',\n metrics=['accuracy'])\n for i in range(EPOCHS):\n print(f'[INFO] EPOCH: {i}/{EPOCHS}')\n model.fit(trainX, trainY, epochs=1, batch_size=batch_size,\n verbose=2, shuffle=False, validation_data=(valX, valY))\n model.save('models\\\\stateful_lstm.h5')\n trainPredict = model.predict(trainX, batch_size=batch_size)\n testPredict = model.predict(testX, batch_size=batch_size)\n trainScore = math.sqrt(mean_squared_error(trainY[:, 0],\n trainPredict[:, 0]))\n print('Train Score: %.2f RMSE' % trainScore)\n testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,\n 0]))\n print('Test Score: %.2f RMSE' % testScore)\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport math\nimport sklearn\nimport sklearn.preprocessing\nimport datetime\nimport os\nimport matplotlib.pyplot as plt\nimport yfinance as yf\nimport math\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport tensorflow\n\n\nclass simpleLSTM:\n\n def __init__(self):\n self.MODEL_PATH = 'models\\\\basic_lstm.h5'\n\n def create_dataset(self, dataset, look_back=4):\n dataX, dataY = [], []\n for i in range(len(dataset) - look_back - 1):\n a = dataset.iloc[i:i + look_back]\n dataX.append(a)\n dataY.append(dataset.iloc[i + look_back])\n return np.array(dataX), np.array(dataY)\n\n def get_features(self, stock_h, num_of_features=1):\n if num_of_features == 1:\n dataset = stock_h[['Close']]\n elif num_of_features == 2:\n dataset = stock_h[['Close', 'Open']]\n elif num_of_features == 4:\n dataset = stock_h[['Close', 'Open', 'Low', 'High']]\n elif num_of_features == 5:\n dataset = stock_h[['Close', 'Open', 'Low', 'High', 'Volume']]\n return dataset\n\n def split_dataset(self, dataset, split_date, initial_data_cut=None,\n type='start'):\n if initial_data_cut != None:\n split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')\n if type == 'start':\n dataset = dataset.loc[split_date_old:]\n if type == 'end':\n dataset = dataset.loc[:split_date_old]\n split_date = pd.Timestamp(split_date + ' 00:00:00')\n train = dataset.loc[:split_date]\n test = dataset.loc[split_date:]\n print(f'Train: {len(train)}, Test: {len(test)}')\n return train, test\n\n def LSTM_CNN(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, '2019-01-01')\n train, val = self.split_dataset(train, '2014-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 100\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,\n trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])\n )\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.\n shape[1]))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n SAVE = False\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n print('[INFO] MODEL LOADED...')\n else:\n input_shape = num_of_features, look_back\n model = Sequential()\n model.add(LSTM(32, activation='relu', input_shape=input_shape))\n model.add(Dropout(0.2))\n model.add(Dense(num_of_features, activation='relu'))\n model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)\n history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1,\n validation_data=(valX, valY))\n model.save(self.MODEL_PATH)\n print('[INFO] MODEL SAVED...')\n trainPredict = model.predict(trainX)\n valPredict = model.predict(valX)\n testPredict = model.predict(testX)\n testR2 = r2_score(testY, testPredict)\n print('Test R2: %.2f ' % testR2)\n valR2 = r2_score(valY, valPredict)\n print('Val R2: %.2f ' % valR2)\n trainR2 = r2_score(trainY, trainPredict)\n print('Train R2: %.2f ' % trainR2)\n feature_i = 0\n 
plt.plot(test.index[look_back + 1:], testY[:, feature_i].ravel(),\n label='Test_obs')\n plt.plot(test.index[look_back + 1:], testPredict[:, feature_i].\n ravel(), label='Test_pred')\n plt.plot(val.index[look_back + 1:], valY[:, feature_i].ravel(),\n label='Val_obs')\n plt.plot(val.index[look_back + 1:], valPredict[:, feature_i].ravel(\n ), label='Val_pred')\n plt.plot(train.index[look_back + 1:], trainY[:, feature_i].ravel(),\n label='Train_obs')\n plt.plot(train.index[look_back + 1:], trainPredict[:, feature_i].\n ravel(), label='Train_pred')\n plt.xticks(rotation=45)\n plt.legend()\n plt.show()\n\n def basicLSTM(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, '2016-01-01',\n initial_data_cut='2019-01-01')\n train, val = self.split_dataset(train, '2012-01-01')\n look_back = 5\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features,\n trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1])\n )\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.\n shape[1]))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n SAVE = True\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n else:\n model = Sequential()\n model.add(LSTM(32, input_shape=(num_of_features, look_back)))\n model.add(Dropout(0.3))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam',\n metrics=['accuracy'])\n model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2,\n validation_data=(valX, valY), callbacks=[early_stop])\n model.save(self.MODEL_PATH)\n trainPredict = model.predict(trainX)\n testPredict = model.predict(testX)\n trainScore = r2_score(trainY, trainPredict)\n print('R2 Train Score: %.2f' % trainScore)\n testScore = r2_score(testY, testPredict)\n print('R2 Test Score: %.2f' % testScore)\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n\n def statefulLSTM(self, stock_h):\n dataset = self.get_features(stock_h, num_of_features=1)\n train, test = self.split_dataset(dataset, '2017-01-01')\n val, test = self.split_dataset(test, '2019-01-01')\n batch_size = 1\n look_back = 3\n EPOCHS = 25\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\n valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))\n testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n if os.path.exists('models\\\\stateful_lstm.h5'):\n model = tensorflow.keras.models.load_model(\n 'models\\\\stateful_lstm.h5')\n else:\n model = Sequential()\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True, return_sequences=True))\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1),\n stateful=True))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam',\n metrics=['accuracy'])\n for i in range(EPOCHS):\n print(f'[INFO] EPOCH: {i}/{EPOCHS}')\n model.fit(trainX, trainY, epochs=1, batch_size=batch_size,\n verbose=2, shuffle=False, validation_data=(valX, valY))\n model.save('models\\\\stateful_lstm.h5')\n 
trainPredict = model.predict(trainX, batch_size=batch_size)\n testPredict = model.predict(testX, batch_size=batch_size)\n trainScore = math.sqrt(mean_squared_error(trainY[:, 0],\n trainPredict[:, 0]))\n print('Train Score: %.2f RMSE' % trainScore)\n testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:,\n 0]))\n print('Test Score: %.2f RMSE' % testScore)\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport math\nimport sklearn\nimport sklearn.preprocessing\nimport datetime\nimport os\nimport matplotlib.pyplot as plt\nimport yfinance as yf\n\nimport math\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport tensorflow\n\n\nclass simpleLSTM:\n def __init__(self):\n self.MODEL_PATH = r\"models\\basic_lstm.h5\"\n\n\n def create_dataset(self, dataset, look_back=4):\n dataX, dataY = [], []\n for i in range(len(dataset) - look_back - 1):\n a = dataset.iloc[i:(i + look_back)]\n dataX.append(a)\n dataY.append(dataset.iloc[i + look_back])\n # dataY.append(dataset.iloc[i + look_back][0])\n return np.array(dataX), np.array(dataY)\n\n\n def get_features(self, stock_h, num_of_features=1):\n if num_of_features == 1:\n dataset = stock_h[[\"Close\"]]\n elif num_of_features == 2:\n dataset = stock_h[[\"Close\", \"Open\"]]\n elif num_of_features == 4:\n dataset = stock_h[[\"Close\", \"Open\", \"Low\", \"High\"]]\n elif num_of_features == 5:\n dataset = stock_h[[\"Close\", \"Open\", \"Low\", \"High\", \"Volume\"]]\n return dataset\n\n def split_dataset(self, dataset, split_date, initial_data_cut=None, type=\"start\"):\n if initial_data_cut != None:\n split_date_old = pd.Timestamp(initial_data_cut + ' 00:00:00')\n if type == \"start\":\n dataset = dataset.loc[split_date_old:]\n if type == \"end\":\n dataset = dataset.loc[:split_date_old]\n\n split_date = pd.Timestamp(split_date + ' 00:00:00')\n train = dataset.loc[:split_date]\n test = dataset.loc[split_date:]\n\n # train_size = int(len(dataset) * 0.67)\n # test_size = len(dataset) - train_size\n # train = dataset[0:train_size, :]\n # test = dataset[train_size:len(dataset), :]\n # print(len(train), len(test))\n print(f\"Train: {len(train)}, Test: {len(test)}\")\n return train, test\n\n def LSTM_CNN(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n # train, test = self.split_dataset(dataset, \"2020-09-01\", initial_data_cut=\"2020-01-01\", type=\"start\")\n # train, test = self.split_dataset(dataset, \"2017-02-01\")\n # val, test = self.split_dataset(test, \"2021-01-01\")\n # train, test = self.split_dataset(dataset, \"2017-01-01\", initial_data_cut=\"2019-01-01\", type=\"end\")\n train, test = self.split_dataset(dataset, \"2019-01-01\")\n train, val = self.split_dataset(train, \"2014-01-01\")\n\n batch_size = 1\n look_back = 3\n EPOCHS = 100\n\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features, trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1]))\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.shape[1]))\n\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n\n SAVE = False\n # It can be used to reconstruct the model identically.\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n print(\"[INFO] MODEL LOADED...\")\n else:\n # input_shape = (look_back, 1)\n input_shape = (num_of_features, look_back)\n model = Sequential()\n model.add(\n LSTM(32, activation=\"relu\", 
input_shape=input_shape))\n # model.add(\n # Conv1D(filters=32, kernel_size=5, strides=1, padding=\"same\", activation=\"relu\",\n # input_shape=input_shape))\n # lstm_model.add(Dropout(0.1))\n model.add(Dropout(0.2))\n\n model.add(Dense(num_of_features, activation='relu'))\n\n model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])\n\n early_stop = EarlyStopping(monitor='loss', patience=15, verbose=1)\n # callbacks=[early_stop]\n history = model.fit(trainX, trainY, epochs=EPOCHS, verbose=1, validation_data=(valX, valY))\n\n model.save(self.MODEL_PATH)\n print(\"[INFO] MODEL SAVED...\")\n\n trainPredict = model.predict(trainX)\n\n valPredict = model.predict(valX)\n\n testPredict = model.predict(testX)\n\n\n # testR2 = r2_score(testY[:, 0], testPredict[:, 0])\n # print('Test R2: %.2f ' % (testR2))\n # valR2 = r2_score(valY[:, 0], valPredict[:, 0])\n # print('Val R2: %.2f ' % (valR2))\n # trainR2 = r2_score(trainY[:, 0], trainPredict[:, 0])\n # print('Train R2: %.2f ' % (trainR2))\n\n testR2 = r2_score(testY, testPredict)\n print('Test R2: %.2f ' % (testR2))\n valR2 = r2_score(valY, valPredict)\n print('Val R2: %.2f ' % (valR2))\n trainR2 = r2_score(trainY, trainPredict)\n print('Train R2: %.2f ' % (trainR2))\n\n feature_i = 0\n\n plt.plot(test.index[look_back+1:], testY[:, feature_i].ravel(), label=\"Test_obs\")\n plt.plot(test.index[look_back+1:], testPredict[:, feature_i].ravel(), label=\"Test_pred\")\n plt.plot(val.index[look_back+1:], valY[:, feature_i].ravel(), label=\"Val_obs\")\n plt.plot(val.index[look_back+1:], valPredict[:, feature_i].ravel(), label=\"Val_pred\")\n plt.plot(train.index[look_back+1:], trainY[:, feature_i].ravel(), label=\"Train_obs\")\n plt.plot(train.index[look_back+1:], trainPredict[:, feature_i].ravel(), label=\"Train_pred\")\n plt.xticks(rotation=45)\n plt.legend()\n plt.show()\n\n def basicLSTM(self, stock_h):\n num_of_features = 4\n dataset = self.get_features(stock_h, num_of_features=num_of_features)\n train, test = self.split_dataset(dataset, \"2016-01-01\", initial_data_cut=\"2019-01-01\")\n # train, test = self.split_dataset(dataset, \"2018-01-01\")\n train, val = self.split_dataset(train, \"2012-01-01\")\n\n look_back = 5\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n\n trainX = np.reshape(trainX, (trainX.shape[0], num_of_features, trainX.shape[1]))\n valX = np.reshape(valX, (valX.shape[0], num_of_features, valX.shape[1]))\n testX = np.reshape(testX, (testX.shape[0], num_of_features, testX.shape[1]))\n\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n\n SAVE = True\n\n if os.path.exists(self.MODEL_PATH) and SAVE:\n model = tensorflow.keras.models.load_model(self.MODEL_PATH)\n else:\n model = Sequential()\n model.add(LSTM(32, input_shape=(num_of_features, look_back)))\n model.add(Dropout(0.3))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])\n\n model.fit(trainX, trainY, epochs=25, batch_size=1, verbose=2, validation_data=(valX, valY),\n callbacks=[early_stop])\n\n model.save(self.MODEL_PATH)\n\n trainPredict = model.predict(trainX)\n testPredict = model.predict(testX)\n\n # trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))\n # print('Train Score: %.2f RMSE' % (trainScore))\n # testScore = math.sqrt(mean_squared_error(testY, testPredict))\n # print('Test Score: %.2f RMSE' % (testScore))\n\n\n trainScore = r2_score(trainY, trainPredict)\n 
print('R2 Train Score: %.2f' % (trainScore))\n testScore = r2_score(testY, testPredict)\n print('R2 Test Score: %.2f' % (testScore))\n\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n\n def statefulLSTM(self, stock_h):\n dataset = self.get_features(stock_h, num_of_features=1)\n # train, test = split_dataset(dataset, \"2019-01-01\", initial_data_cut=\"2018-01-01\")\n train, test = self.split_dataset(dataset, \"2017-01-01\")\n val, test = self.split_dataset(test, \"2019-01-01\")\n\n batch_size = 1\n look_back = 3\n EPOCHS = 25\n\n trainX, trainY = self.create_dataset(train, look_back)\n valX, valY = self.create_dataset(val, look_back)\n testX, testY = self.create_dataset(test, look_back)\n\n # reshape input to be [samples, time steps, features]\n trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\n valX = np.reshape(valX, (valX.shape[0], valX.shape[1], 1))\n testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))\n\n # trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))\n # valX = np.reshape(valX, (valX.shape[0], 1, valX.shape[1]))\n # testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))\n\n early_stop = EarlyStopping(monitor='loss', patience=1, verbose=1)\n\n\n # It can be used to reconstruct the model identically.\n if os.path.exists(\"models\\stateful_lstm.h5\"):\n model = tensorflow.keras.models.load_model(\"models\\stateful_lstm.h5\")\n else:\n model = Sequential()\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True, return_sequences=True))\n model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])\n\n for i in range(EPOCHS):\n print(f\"[INFO] EPOCH: {i}/{EPOCHS}\")\n model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=2, shuffle=False, validation_data=(valX, valY))\n # model.reset_states()\n\n model.save(\"models\\stateful_lstm.h5\")\n # model.save(\"stateful_lstm\")\n\n # model.fit(trainX, trainY, epochs=200, batch_size=1, verbose=2, validation_data=(valX, valY),\n # callbacks=[early_stop])\n trainPredict = model.predict(trainX, batch_size=batch_size)\n # model.reset_states()\n testPredict = model.predict(testX, batch_size=batch_size)\n\n # trainPredict = model.predict(trainX)\n # testPredict = model.predict(testX)\n\n # trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))\n # print('Train Score: %.2f RMSE' % (trainScore))\n # testScore = math.sqrt(mean_squared_error(testY, testPredict))\n # print('Test Score: %.2f RMSE' % (testScore))\n #\n\n\n trainScore = math.sqrt(mean_squared_error(trainY[:, 0], trainPredict[:, 0]))\n print('Train Score: %.2f RMSE' % (trainScore))\n testScore = math.sqrt(mean_squared_error(testY[:, 0], testPredict[:, 0]))\n print('Test Score: %.2f RMSE' % (testScore))\n\n plt.plot(testY)\n plt.plot(testPredict)\n plt.show()\n\n # # shift train predictions for plotting\n # trainPredictPlot = np.empty_like(dataset)\n # trainPredictPlot[:, :] = np.nan\n # trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict\n # # shift test predictions for plotting\n # testPredictPlot = np.empty_like(dataset)\n # testPredictPlot[:, :] = np.nan\n # testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(dataset) - 1, :] = testPredict\n # # plot baseline and predictions\n # # plt.plot(scaler.inverse_transform(dataset))\n # plt.plot(trainPredictPlot)\n # plt.plot(testPredictPlot)\n # plt.show()\n\n",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
from typing import List


class Solution:
    def getDescentPeriods(self, prices: List[int]) -> int:
        ans = 1  # counts the single-element period [prices[0]]
dp = 1
for i in range(1, len(prices)):
if prices[i] == prices[i - 1] - 1:
dp += 1
else:
dp = 1
ans += dp
return ans
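

# Illustrative check (hypothetical input, not part of the original snippet):
# prices = [3, 2, 1, 4] has 7 smooth descent periods
# ([3], [2], [1], [4], [3,2], [2,1], [3,2,1]), so
# Solution().getDescentPeriods([3, 2, 1, 4]) == 7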
|
normal
|
{
"blob_id": "d10468d2d0aefa19a7d225bfffad03ec6cb6e082",
"index": 4079,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def getDescentPeriods(self, prices: List[int]) ->int:\n ans = 1\n dp = 1\n for i in range(1, len(prices)):\n if prices[i] == prices[i - 1] - 1:\n dp += 1\n else:\n dp = 1\n ans += dp\n return ans\n",
"step-4": "class Solution:\n def getDescentPeriods(self, prices: List[int]) -> int:\n ans = 1 # prices[0]\n dp = 1\n\n for i in range(1, len(prices)):\n if prices[i] == prices[i - 1] - 1:\n dp += 1\n else:\n dp = 1\n ans += dp\n\n return ans\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = ['JWTClient', 'get_domain', 'authenticated_users_only']
<|reserved_special_token_1|>
from .hailjwt import JWTClient, get_domain, authenticated_users_only
__all__ = ['JWTClient', 'get_domain', 'authenticated_users_only']
|
flexible
|
{
"blob_id": "39fb8d9f93be1e6c1ed2a425d14061737d643ab6",
"index": 9330,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['JWTClient', 'get_domain', 'authenticated_users_only']\n",
"step-3": "from .hailjwt import JWTClient, get_domain, authenticated_users_only\n__all__ = ['JWTClient', 'get_domain', 'authenticated_users_only']\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'):
mockProgramInOutFilePath = os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'
)
else:
mockProgramInOutFilePath = '.mockprogram_inout.txt'
if not os.path.exists(mockProgramInOutFilePath):
print('Error: ' + mockProgramInOutFilePath + ' is missing!')
sys.exit(1)
<|reserved_special_token_0|>
if len(mockprogramInoutArray) and mockprogramInoutArray[-1] == '':
mockprogramInoutArray = mockprogramInoutArray[:-1]
if len(mockprogramInoutArray) < 3:
print('Error: ' + mockProgramInOutFilePath +
' has less than three lines:\n-------------\n' + mockprogramInout +
'-------------')
sys.exit(2)
<|reserved_special_token_0|>
if expectedInputLine.find('MOCK_PROGRAM_INPUT:') != 0:
print("Error, first line = '" + expectedInputLine +
"', does not match ^MOCK_PROGRAM_INPUT:")
sys.exit(3)
<|reserved_special_token_0|>
if inputArgs != expectedInput:
print("Error, input args='" + inputArgs + "' does not match expected='" +
expectedInput + "'")
sys.exit(4)
<|reserved_special_token_0|>
if returnCodeLine.find('MOCK_PROGRAM_RETURN:') != 0:
print("Error, second line = '" + returnCodeLine +
"', does not match ^MOCK_PROGRAM_RETURN:")
sys.exit(5)
<|reserved_special_token_0|>
if outputLine.find('MOCK_PROGRAM_OUTPUT:') != 0:
print("Error, third line = '" + outputLine +
"', does not match ^MOCK_PROGRAM_OUTPUT:")
sys.exit(6)
<|reserved_special_token_0|>
if len(mockprogramInoutArray) > 3:
for line in mockprogramInoutArray[3:]:
if line.find('MOCK_PROGRAM_INPUT:') == 0:
break
outputStr = outputStr + '\n' + line
numLinesOuput = numLinesOuput + 1
print(outputStr)
<|reserved_special_token_0|>
if len(mockprogramInoutArray) > lineLineIndex:
open(mockProgramInOutFilePath, 'w').write('\n'.join(
mockprogramInoutArray[lineLineIndex:]) + '\n')
else:
open(mockProgramInOutFilePath, 'w').write('')
sys.exit(int(returnCode))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
inputArgs = ' '.join(sys.argv[1:])
if os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'):
mockProgramInOutFilePath = os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'
)
else:
mockProgramInOutFilePath = '.mockprogram_inout.txt'
if not os.path.exists(mockProgramInOutFilePath):
print('Error: ' + mockProgramInOutFilePath + ' is missing!')
sys.exit(1)
mockprogramInout = open(mockProgramInOutFilePath, 'r').read()
mockprogramInoutArray = mockprogramInout.splitlines()
if len(mockprogramInoutArray) and mockprogramInoutArray[-1] == '':
mockprogramInoutArray = mockprogramInoutArray[:-1]
if len(mockprogramInoutArray) < 3:
print('Error: ' + mockProgramInOutFilePath +
' has less than three lines:\n-------------\n' + mockprogramInout +
'-------------')
sys.exit(2)
expectedInputLine = mockprogramInoutArray[0]
if expectedInputLine.find('MOCK_PROGRAM_INPUT:') != 0:
print("Error, first line = '" + expectedInputLine +
"', does not match ^MOCK_PROGRAM_INPUT:")
sys.exit(3)
expectedInput = expectedInputLine.replace('MOCK_PROGRAM_INPUT:', '').strip()
if inputArgs != expectedInput:
print("Error, input args='" + inputArgs + "' does not match expected='" +
expectedInput + "'")
sys.exit(4)
returnCodeLine = mockprogramInoutArray[1]
if returnCodeLine.find('MOCK_PROGRAM_RETURN:') != 0:
print("Error, second line = '" + returnCodeLine +
"', does not match ^MOCK_PROGRAM_RETURN:")
sys.exit(5)
returnCode = returnCodeLine.replace('MOCK_PROGRAM_RETURN:', '').strip()
outputLine = mockprogramInoutArray[2]
if outputLine.find('MOCK_PROGRAM_OUTPUT:') != 0:
print("Error, third line = '" + outputLine +
"', does not match ^MOCK_PROGRAM_OUTPUT:")
sys.exit(6)
outputStr = outputLine.replace('MOCK_PROGRAM_OUTPUT: ', '')
numLinesOuput = 1
if len(mockprogramInoutArray) > 3:
for line in mockprogramInoutArray[3:]:
if line.find('MOCK_PROGRAM_INPUT:') == 0:
break
outputStr = outputStr + '\n' + line
numLinesOuput = numLinesOuput + 1
print(outputStr)
lineLineIndex = 2 + numLinesOuput
if len(mockprogramInoutArray) > lineLineIndex:
open(mockProgramInOutFilePath, 'w').write('\n'.join(
mockprogramInoutArray[lineLineIndex:]) + '\n')
else:
open(mockProgramInOutFilePath, 'w').write('')
sys.exit(int(returnCode))
<|reserved_special_token_1|>
import sys
import os
inputArgs = ' '.join(sys.argv[1:])
if os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'):
mockProgramInOutFilePath = os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'
)
else:
mockProgramInOutFilePath = '.mockprogram_inout.txt'
if not os.path.exists(mockProgramInOutFilePath):
print('Error: ' + mockProgramInOutFilePath + ' is missing!')
sys.exit(1)
mockprogramInout = open(mockProgramInOutFilePath, 'r').read()
mockprogramInoutArray = mockprogramInout.splitlines()
if len(mockprogramInoutArray) and mockprogramInoutArray[-1] == '':
mockprogramInoutArray = mockprogramInoutArray[:-1]
if len(mockprogramInoutArray) < 3:
print('Error: ' + mockProgramInOutFilePath +
' has less than three lines:\n-------------\n' + mockprogramInout +
'-------------')
sys.exit(2)
expectedInputLine = mockprogramInoutArray[0]
if expectedInputLine.find('MOCK_PROGRAM_INPUT:') != 0:
print("Error, first line = '" + expectedInputLine +
"', does not match ^MOCK_PROGRAM_INPUT:")
sys.exit(3)
expectedInput = expectedInputLine.replace('MOCK_PROGRAM_INPUT:', '').strip()
if inputArgs != expectedInput:
print("Error, input args='" + inputArgs + "' does not match expected='" +
expectedInput + "'")
sys.exit(4)
returnCodeLine = mockprogramInoutArray[1]
if returnCodeLine.find('MOCK_PROGRAM_RETURN:') != 0:
print("Error, second line = '" + returnCodeLine +
"', does not match ^MOCK_PROGRAM_RETURN:")
sys.exit(5)
returnCode = returnCodeLine.replace('MOCK_PROGRAM_RETURN:', '').strip()
outputLine = mockprogramInoutArray[2]
if outputLine.find('MOCK_PROGRAM_OUTPUT:') != 0:
print("Error, third line = '" + outputLine +
"', does not match ^MOCK_PROGRAM_OUTPUT:")
sys.exit(6)
outputStr = outputLine.replace('MOCK_PROGRAM_OUTPUT: ', '')
numLinesOuput = 1
if len(mockprogramInoutArray) > 3:
for line in mockprogramInoutArray[3:]:
if line.find('MOCK_PROGRAM_INPUT:') == 0:
break
outputStr = outputStr + '\n' + line
numLinesOuput = numLinesOuput + 1
print(outputStr)
lineLineIndex = 2 + numLinesOuput
if len(mockprogramInoutArray) > lineLineIndex:
open(mockProgramInOutFilePath, 'w').write('\n'.join(
mockprogramInoutArray[lineLineIndex:]) + '\n')
else:
open(mockProgramInOutFilePath, 'w').write('')
sys.exit(int(returnCode))
<|reserved_special_token_1|>
#!/usr/bin/env python
# @HEADER
# ************************************************************************
#
# TriBITS: Tribal Build, Integrate, and Test System
# Copyright 2013 Sandia Corporation
#
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Corporation nor the names of the
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ************************************************************************
# @HEADER
#
# Usage: mockprogram.py [any arguments]
#
# Mock program that takes input arguments and produces stdout by reading from
# a file .mockprogram_inout.txt in the current directory or the file specified
# by the env var MOCKPROGRAM_INOUT_FILE_OVERRIDE (which can be in any
# directory). This script is used to take the place of real commands during a
# test that involves calling commands on the commandline.
#
# The file .mockprogram_inout.txt (or pointed to by
# MOCKPROGRAM_INOUT_FILE_OVERRIDE) is of the form:
#
# MOCK_PROGRAM_INPUT: <args_1>
# MOCK_PROGRAM_RETURN: <rtn>
# MOCK_PROGRAM_OUTPUT: <outline_1_line_1>
# <outline_1_line_2>
# ...
# MOCK_PROGRAM_INPUT: <args_2>
#
# The program reads in the blocks starting at the time and removes the block
# from the file after it runs. After all of the blocks are read in, if run
# again it will error out with error code 2.
#
# This program can be used, for example, to simulate git command. For
# example, a couple of git commits might be simulated like:
#
# MOCK_PROGRAM_INPUT: log -1
# MOCK_PROGRAM_RETURN: 0
# MOCK_PROGRAM_OUTPUT: This is the summary line
#
# The is the body of the commit msg
# MOCK_PROGRAM_INPUT: diff --name-only HEAD --not @{u}
# MOCK_PROGRAM_RETURN: 0
# MOCK_PROGRAM_OUTPUT: file_name_1.txt
# file_name_2.txt
# file_name_3.txt
#
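#
# A minimal session sketch (hypothetical shell output, assuming the git-log
# block above is the next unconsumed entry in .mockprogram_inout.txt):
#
#   $ python mockprogram.py log -1
#   This is the summary line
#
#   The is the body of the commit msg
#   $ echo $?
#   0
#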
import sys
import os
inputArgs = ' '.join(sys.argv[1:])
#print("inputArgs = '" + inputArgs + "'"
if os.environ.get("MOCKPROGRAM_INOUT_FILE_OVERRIDE"):
mockProgramInOutFilePath=os.environ.get("MOCKPROGRAM_INOUT_FILE_OVERRIDE")
else:
mockProgramInOutFilePath='.mockprogram_inout.txt'
if not os.path.exists(mockProgramInOutFilePath):
print("Error: "+mockProgramInOutFilePath+" is missing!")
sys.exit(1)
mockprogramInout = open(mockProgramInOutFilePath, 'r').read()
mockprogramInoutArray = mockprogramInout.splitlines()
if len(mockprogramInoutArray) and mockprogramInoutArray[-1] == "":
mockprogramInoutArray = mockprogramInoutArray[:-1]
if len(mockprogramInoutArray) < 3:
print("Error: "+mockProgramInOutFilePath+" has less than three lines:\n"
"-------------\n" + mockprogramInout + "-------------")
sys.exit(2)
# Assert input
expectedInputLine = mockprogramInoutArray[0]
if expectedInputLine.find("MOCK_PROGRAM_INPUT:") != 0:
print("Error, first line = '" + expectedInputLine + "', does not match "
"^MOCK_PROGRAM_INPUT:")
sys.exit(3)
expectedInput = expectedInputLine.replace("MOCK_PROGRAM_INPUT:", "").strip()
if inputArgs != expectedInput:
print("Error, input args='" + inputArgs + "' does not match expected='" +
expectedInput + "'")
sys.exit(4)
# Get return code
returnCodeLine = mockprogramInoutArray[1]
if returnCodeLine.find("MOCK_PROGRAM_RETURN:") != 0:
print("Error, second line = '" + returnCodeLine + "', does not match "
"^MOCK_PROGRAM_RETURN:")
sys.exit(5)
returnCode = returnCodeLine.replace("MOCK_PROGRAM_RETURN:", "").strip()
# Get output (can be multi-line)
outputLine = mockprogramInoutArray[2]
if outputLine.find("MOCK_PROGRAM_OUTPUT:") != 0:
print("Error, third line = '" + outputLine + "', does not match "
"^MOCK_PROGRAM_OUTPUT:")
sys.exit(6)
outputStr = outputLine.replace("MOCK_PROGRAM_OUTPUT: ", "")
numLinesOuput = 1
if len(mockprogramInoutArray) > 3:
for line in mockprogramInoutArray[3:]:
if line.find("MOCK_PROGRAM_INPUT:") == 0:
break
outputStr = outputStr+"\n"+line
numLinesOuput = numLinesOuput + 1
print(outputStr)
# Write the remaining lines back into the file
lineLineIndex = 2 + numLinesOuput
if len(mockprogramInoutArray) > lineLineIndex:
open(mockProgramInOutFilePath, 'w').write(
('\n'.join(mockprogramInoutArray[lineLineIndex:]))+"\n" )
else:
open(mockProgramInOutFilePath, 'w').write("")
# Return exit code
sys.exit(int(returnCode))
|
flexible
|
{
"blob_id": "550f5ad4fef77d5795db0393ae0701f679143e72",
"index": 221,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'):\n mockProgramInOutFilePath = os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'\n )\nelse:\n mockProgramInOutFilePath = '.mockprogram_inout.txt'\nif not os.path.exists(mockProgramInOutFilePath):\n print('Error: ' + mockProgramInOutFilePath + ' is missing!')\n sys.exit(1)\n<mask token>\nif len(mockprogramInoutArray) and mockprogramInoutArray[-1] == '':\n mockprogramInoutArray = mockprogramInoutArray[:-1]\nif len(mockprogramInoutArray) < 3:\n print('Error: ' + mockProgramInOutFilePath +\n ' has less than three lines:\\n-------------\\n' + mockprogramInout +\n '-------------')\n sys.exit(2)\n<mask token>\nif expectedInputLine.find('MOCK_PROGRAM_INPUT:') != 0:\n print(\"Error, first line = '\" + expectedInputLine +\n \"', does not match ^MOCK_PROGRAM_INPUT:\")\n sys.exit(3)\n<mask token>\nif inputArgs != expectedInput:\n print(\"Error, input args='\" + inputArgs + \"' does not match expected='\" +\n expectedInput + \"'\")\n sys.exit(4)\n<mask token>\nif returnCodeLine.find('MOCK_PROGRAM_RETURN:') != 0:\n print(\"Error, second line = '\" + returnCodeLine +\n \"', does not match ^MOCK_PROGRAM_RETURN:\")\n sys.exit(5)\n<mask token>\nif outputLine.find('MOCK_PROGRAM_OUTPUT:') != 0:\n print(\"Error, third line = '\" + outputLine +\n \"', does not match ^MOCK_PROGRAM_OUTPUT:\")\n sys.exit(6)\n<mask token>\nif len(mockprogramInoutArray) > 3:\n for line in mockprogramInoutArray[3:]:\n if line.find('MOCK_PROGRAM_INPUT:') == 0:\n break\n outputStr = outputStr + '\\n' + line\n numLinesOuput = numLinesOuput + 1\nprint(outputStr)\n<mask token>\nif len(mockprogramInoutArray) > lineLineIndex:\n open(mockProgramInOutFilePath, 'w').write('\\n'.join(\n mockprogramInoutArray[lineLineIndex:]) + '\\n')\nelse:\n open(mockProgramInOutFilePath, 'w').write('')\nsys.exit(int(returnCode))\n",
"step-3": "<mask token>\ninputArgs = ' '.join(sys.argv[1:])\nif os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'):\n mockProgramInOutFilePath = os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'\n )\nelse:\n mockProgramInOutFilePath = '.mockprogram_inout.txt'\nif not os.path.exists(mockProgramInOutFilePath):\n print('Error: ' + mockProgramInOutFilePath + ' is missing!')\n sys.exit(1)\nmockprogramInout = open(mockProgramInOutFilePath, 'r').read()\nmockprogramInoutArray = mockprogramInout.splitlines()\nif len(mockprogramInoutArray) and mockprogramInoutArray[-1] == '':\n mockprogramInoutArray = mockprogramInoutArray[:-1]\nif len(mockprogramInoutArray) < 3:\n print('Error: ' + mockProgramInOutFilePath +\n ' has less than three lines:\\n-------------\\n' + mockprogramInout +\n '-------------')\n sys.exit(2)\nexpectedInputLine = mockprogramInoutArray[0]\nif expectedInputLine.find('MOCK_PROGRAM_INPUT:') != 0:\n print(\"Error, first line = '\" + expectedInputLine +\n \"', does not match ^MOCK_PROGRAM_INPUT:\")\n sys.exit(3)\nexpectedInput = expectedInputLine.replace('MOCK_PROGRAM_INPUT:', '').strip()\nif inputArgs != expectedInput:\n print(\"Error, input args='\" + inputArgs + \"' does not match expected='\" +\n expectedInput + \"'\")\n sys.exit(4)\nreturnCodeLine = mockprogramInoutArray[1]\nif returnCodeLine.find('MOCK_PROGRAM_RETURN:') != 0:\n print(\"Error, second line = '\" + returnCodeLine +\n \"', does not match ^MOCK_PROGRAM_RETURN:\")\n sys.exit(5)\nreturnCode = returnCodeLine.replace('MOCK_PROGRAM_RETURN:', '').strip()\noutputLine = mockprogramInoutArray[2]\nif outputLine.find('MOCK_PROGRAM_OUTPUT:') != 0:\n print(\"Error, third line = '\" + outputLine +\n \"', does not match ^MOCK_PROGRAM_OUTPUT:\")\n sys.exit(6)\noutputStr = outputLine.replace('MOCK_PROGRAM_OUTPUT: ', '')\nnumLinesOuput = 1\nif len(mockprogramInoutArray) > 3:\n for line in mockprogramInoutArray[3:]:\n if line.find('MOCK_PROGRAM_INPUT:') == 0:\n break\n outputStr = outputStr + '\\n' + line\n numLinesOuput = numLinesOuput + 1\nprint(outputStr)\nlineLineIndex = 2 + numLinesOuput\nif len(mockprogramInoutArray) > lineLineIndex:\n open(mockProgramInOutFilePath, 'w').write('\\n'.join(\n mockprogramInoutArray[lineLineIndex:]) + '\\n')\nelse:\n open(mockProgramInOutFilePath, 'w').write('')\nsys.exit(int(returnCode))\n",
"step-4": "import sys\nimport os\ninputArgs = ' '.join(sys.argv[1:])\nif os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'):\n mockProgramInOutFilePath = os.environ.get('MOCKPROGRAM_INOUT_FILE_OVERRIDE'\n )\nelse:\n mockProgramInOutFilePath = '.mockprogram_inout.txt'\nif not os.path.exists(mockProgramInOutFilePath):\n print('Error: ' + mockProgramInOutFilePath + ' is missing!')\n sys.exit(1)\nmockprogramInout = open(mockProgramInOutFilePath, 'r').read()\nmockprogramInoutArray = mockprogramInout.splitlines()\nif len(mockprogramInoutArray) and mockprogramInoutArray[-1] == '':\n mockprogramInoutArray = mockprogramInoutArray[:-1]\nif len(mockprogramInoutArray) < 3:\n print('Error: ' + mockProgramInOutFilePath +\n ' has less than three lines:\\n-------------\\n' + mockprogramInout +\n '-------------')\n sys.exit(2)\nexpectedInputLine = mockprogramInoutArray[0]\nif expectedInputLine.find('MOCK_PROGRAM_INPUT:') != 0:\n print(\"Error, first line = '\" + expectedInputLine +\n \"', does not match ^MOCK_PROGRAM_INPUT:\")\n sys.exit(3)\nexpectedInput = expectedInputLine.replace('MOCK_PROGRAM_INPUT:', '').strip()\nif inputArgs != expectedInput:\n print(\"Error, input args='\" + inputArgs + \"' does not match expected='\" +\n expectedInput + \"'\")\n sys.exit(4)\nreturnCodeLine = mockprogramInoutArray[1]\nif returnCodeLine.find('MOCK_PROGRAM_RETURN:') != 0:\n print(\"Error, second line = '\" + returnCodeLine +\n \"', does not match ^MOCK_PROGRAM_RETURN:\")\n sys.exit(5)\nreturnCode = returnCodeLine.replace('MOCK_PROGRAM_RETURN:', '').strip()\noutputLine = mockprogramInoutArray[2]\nif outputLine.find('MOCK_PROGRAM_OUTPUT:') != 0:\n print(\"Error, third line = '\" + outputLine +\n \"', does not match ^MOCK_PROGRAM_OUTPUT:\")\n sys.exit(6)\noutputStr = outputLine.replace('MOCK_PROGRAM_OUTPUT: ', '')\nnumLinesOuput = 1\nif len(mockprogramInoutArray) > 3:\n for line in mockprogramInoutArray[3:]:\n if line.find('MOCK_PROGRAM_INPUT:') == 0:\n break\n outputStr = outputStr + '\\n' + line\n numLinesOuput = numLinesOuput + 1\nprint(outputStr)\nlineLineIndex = 2 + numLinesOuput\nif len(mockprogramInoutArray) > lineLineIndex:\n open(mockProgramInOutFilePath, 'w').write('\\n'.join(\n mockprogramInoutArray[lineLineIndex:]) + '\\n')\nelse:\n open(mockProgramInOutFilePath, 'w').write('')\nsys.exit(int(returnCode))\n",
"step-5": "#!/usr/bin/env python\n\n# @HEADER\n# ************************************************************************\n#\n# TriBITS: Tribal Build, Integrate, and Test System\n# Copyright 2013 Sandia Corporation\n#\n# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,\n# the U.S. Government retains certain rights in this software.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the Corporation nor the names of the\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION \"AS IS\" AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# ************************************************************************\n# @HEADER\n\n#\n# Usage: mockprogram.py [any arguments]\n#\n# Mock program that takes input arguments and produces stdout by reading from\n# a file .mockprogram_inout.txt in the current directory or the file specified\n# by the env var MOCKPROGRAM_INOUT_FILE_OVERRIDE (which can be in any\n# directory). This script is used to take the place of real commands during a\n# test that involves calling commands on the commandline.\n#\n# The file .mockprogram_inout.txt (or pointed to by\n# MOCKPROGRAM_INOUT_FILE_OVERRIDE) is of the form:\n#\n# MOCK_PROGRAM_INPUT: <args_1>\n# MOCK_PROGRAM_RETURN: <rtn>\n# MOCK_PROGRAM_OUTPUT: <outline_1_line_1>\n# <outline_1_line_2>\n# ...\n# MOCK_PROGRAM_INPUT: <args_2>\n#\n# The program reads in the blocks starting at the time and removes the block\n# from the file after it runs. After all of the blocks are read in, if run\n# again it will error out with error code 2.\n#\n# This program can be used, for example, to simulate git command. 
For\n# example, a couple of git commits might be simulated like:\n#\n# MOCK_PROGRAM_INPUT: log -1\n# MOCK_PROGRAM_RETURN: 0\n# MOCK_PROGRAM_OUTPUT: This is the summary line\n#\n# The is the body of the commit msg\n# MOCK_PROGRAM_INPUT: diff --name-only HEAD --not @{u}\n# MOCK_PROGRAM_RETURN: 0\n# MOCK_PROGRAM_OUTPUT: file_name_1.txt\n# file_name_2.txt\n# file_name_3.txt\n\n#\n\nimport sys\nimport os\n\ninputArgs = ' '.join(sys.argv[1:])\n#print(\"inputArgs = '\" + inputArgs + \"'\"\n\nif os.environ.get(\"MOCKPROGRAM_INOUT_FILE_OVERRIDE\"):\n mockProgramInOutFilePath=os.environ.get(\"MOCKPROGRAM_INOUT_FILE_OVERRIDE\")\nelse:\n mockProgramInOutFilePath='.mockprogram_inout.txt'\n\nif not os.path.exists(mockProgramInOutFilePath):\n print(\"Error: \"+mockProgramInOutFilePath+\" is missing!\")\n sys.exit(1)\n\nmockprogramInout = open(mockProgramInOutFilePath, 'r').read()\nmockprogramInoutArray = mockprogramInout.splitlines()\nif len(mockprogramInoutArray) and mockprogramInoutArray[-1] == \"\":\n mockprogramInoutArray = mockprogramInoutArray[:-1]\n\nif len(mockprogramInoutArray) < 3:\n print(\"Error: \"+mockProgramInOutFilePath+\" has less than three lines:\\n\"\n \"-------------\\n\" + mockprogramInout + \"-------------\")\n sys.exit(2)\n\n# Assert input\nexpectedInputLine = mockprogramInoutArray[0]\nif expectedInputLine.find(\"MOCK_PROGRAM_INPUT:\") != 0:\n print(\"Error, first line = '\" + expectedInputLine + \"', does not match \"\n \"^MOCK_PROGRAM_INPUT:\") \n sys.exit(3)\nexpectedInput = expectedInputLine.replace(\"MOCK_PROGRAM_INPUT:\", \"\").strip()\nif inputArgs != expectedInput:\n print(\"Error, input args='\" + inputArgs + \"' does not match expected='\" +\n expectedInput + \"'\")\n sys.exit(4)\n\n# Get return code\nreturnCodeLine = mockprogramInoutArray[1]\nif returnCodeLine.find(\"MOCK_PROGRAM_RETURN:\") != 0:\n print(\"Error, second line = '\" + returnCodeLine + \"', does not match \"\n \"^MOCK_PROGRAM_RETURN:\") \n sys.exit(5)\nreturnCode = returnCodeLine.replace(\"MOCK_PROGRAM_RETURN:\", \"\").strip()\n\n# Get output (can be multi-line)\noutputLine = mockprogramInoutArray[2]\nif outputLine.find(\"MOCK_PROGRAM_OUTPUT:\") != 0:\n print(\"Error, third line = '\" + outputLine + \"', does not match \"\n \"^MOCK_PROGRAM_OUTPUT:\") \n sys.exit(6)\noutputStr = outputLine.replace(\"MOCK_PROGRAM_OUTPUT: \", \"\")\nnumLinesOuput = 1\nif len(mockprogramInoutArray) > 3:\n for line in mockprogramInoutArray[3:]:\n if line.find(\"MOCK_PROGRAM_INPUT:\") == 0:\n break\n outputStr = outputStr+\"\\n\"+line\n numLinesOuput = numLinesOuput + 1\nprint(outputStr)\n\n# Write the remaining lines back into the file\nlineLineIndex = 2 + numLinesOuput\nif len(mockprogramInoutArray) > lineLineIndex:\n open(mockProgramInOutFilePath, 'w').write(\n ('\\n'.join(mockprogramInoutArray[lineLineIndex:]))+\"\\n\" )\nelse:\n open(mockProgramInOutFilePath, 'w').write(\"\")\n\n# Return exit code\nsys.exit(int(returnCode))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import turtle
import math
from tkinter import *
# Active edge table (AET) node:
class AetNode(object):
def __init__(self,x,tx,my):
self.x=x
self.tx=tx
self.my=my
def op(self):
return self.x
class AetList(object):
def __init__(self,y):
self.y=y
self.numy=0
self.l=[]
pass
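# Field sketch (as used below): AetNode.x is the edge's x at its lower endpoint,
# AetNode.tx its inverse slope (dx per unit y), and AetNode.my the edge's top y;
# AetList.numy is consulted when the fill loop meets a shared polygon vertex.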
def findRange(point):
    # Find the maximum and minimum y among the vertices:
maxy = point[0][1]
miny = point[0][1]
for i in point:
if maxy < i[1]:
maxy = i[1]
if miny > i[1]:
miny = i[1]
return (miny,maxy)
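# e.g. findRange([[0, 2], [3, 5], [1, -1]]) returns (-1, 5) (hypothetical input).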
def printNewEegeList(newEdgeTable):
print("新边表是:")
for i in newEdgeTable:
print(i.y)
for j in i.l:
print((j.x,j.tx,j.my))
print("__________________________________")
def createNewEdgeTable(point):
miny,maxy=findRange(point)
    # Collect the y values of all vertices:
Y = []
for i in point:
Y.append(i[1])
Y = set(Y)
Y = list(Y)
    # Build the new edge table:
newEdgeList = []
y=miny
while y <=maxy:
if y in Y:
            # Find the indices of all vertices on this scanline:
print(y)
templist=[]
for i in range(0, 6):
if point[i][1] == y:
templist.append(i)
print(templist)
print("一次创建新边表")
lists = AetList(y)
for temp in templist:
index1 = (temp + 7) % 6
index2 = (temp + 5) % 6
print(point[temp][0],point[temp][1])
print(point[index1][0],point[index1][1])
print(point[index2][0],point[index2][1])
print("+++++++++++++++++++++")
# lists = AetList(y)
if point[index1][1] > y:
lists.numy+=1
if point[index1][1] - point[temp][1]==0:
node = AetNode(point[temp][0],0,point[index1][1])
else:
node = AetNode(point[temp][0],
((point[index1][0] - point[temp][0]) / (point[index1][1] - point[temp][1])),
point[index1][1])
lists.l.append(node)
if point[index2][1] > y:
lists.numy+=1
if point[index2][1] - point[temp][1]==0:
node = AetNode(point[temp][0], 0, point[index2][1])
else:
node = AetNode(point[temp][0],
((point[index2][0] - point[temp][0]) / (point[index2][1] - point[temp][1])),
point[index2][1])
lists.l.append(node)
if len(lists.l)!=0:
lists.l.sort(key=AetNode.op)
if len(templist)>1:
lists.numy-=1
newEdgeList.append(lists)
y+=1
printNewEegeList(newEdgeList)
return (newEdgeList,Y)
def draw(x1,y1,x,y):
turtle.penup()
turtle.goto(x1,y1)
turtle.pendown()
turtle.goto(x,y)
def run():
turtle.screensize(1920,1080)
turtle.penup()
turtle.hideturtle()
point=[]
# point=[[20,20],[50,10],[110,30],[110,80],[50,50],[20,70]]
# point=[[-10,-10],[10,-10],[15,0],[10,10],[-10,10],[-15,0]]
temp = [float(x11.get()), float(y1.get())]
point.append(temp)
temp = [float(x2.get()), float(y2.get())]
point.append(temp)
temp = [float(x3.get()), float(y3.get())]
point.append(temp)
temp = [float(x4.get()), float(y4.get())]
point.append(temp)
temp = [float(x5.get()), float(y5.get())]
point.append(temp)
temp = [float(x6.get()), float(y6.get())]
point.append(temp)
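    # NOTE: the hardcoded vertex list below overrides the values just read from
    # the entry fields, so the GUI input above is currently ignored.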
point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]
    # Draw the original polygon outline:
for i in point:
turtle.goto(i[0],i[1])
turtle.pendown()
turtle.goto(point[0][0],point[0][1])
    # Build the new edge table:
newEdgeTable,Y=createNewEdgeTable(point)
miny,maxy=findRange(point)
y=miny
acativeList=[]
while y<=maxy:
        # Merge this scanline's new edges into the active edge table:
ynum=0
if y in Y:
for i in newEdgeTable:
if i.y==y:
for j in i.l:
acativeList.append(j)
ynum=i.numy
break
acativeList.sort(key=AetNode.op)
for j in acativeList:
print((j.x,j.tx,j.my))
print("****************")
        # Fill between alternating pairs of intersections:
i=0
flag=True
while i<len(acativeList)-1:
x1=acativeList[i].x
temp=[acativeList[i+1].x,y]
if temp in point and ynum>=1:
ynum-=1
else:
i+=1
if flag:
draw(x1,y,temp[0],y)
flag=not flag
        # Update the active edge table (drop finished edges, advance x by tx):
newacativeList=[]
for i in acativeList:
if i.my>y:
i.x+=i.tx
newacativeList.append(i)
acativeList=newacativeList
y+=1
turtle.mainloop()
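# The fill loop in run() applies the even-odd rule: the `flag` toggle draws a
# horizontal span between every other pair of active-edge intersections, while
# `ynum` handles intersections that land exactly on polygon vertices.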
tk=Tk()
tk.title("扫描填充算法:by 高谦")
Label(tk,text="输入顶点:").grid(row=0)
Label(tk,text="1:").grid(row=1)
Label(tk,text="2:").grid(row=2)
Label(tk,text="3:").grid(row=3)
Label(tk,text="4:").grid(row=4)
Label(tk,text="5:").grid(row=5)
Label(tk,text="6:").grid(row=6)
Label(tk,text="例:\n\n").grid(row=9)
Label(tk,text="(20,20),(50,10)\n(110,30),(110,80)\n(50,50),(20,70)").grid(row=9,column=1)
Label(tk,text="(-10,-10),(10,-10)\n(15,0),(10,10)\n(-10,10),(-15,0)").grid(row=9,column=2)
x11=Entry(tk)
x2=Entry(tk)
x3=Entry(tk)
x4=Entry(tk)
x5=Entry(tk)
x6=Entry(tk)
x11.grid(row=1,column=1)
x2.grid(row=2,column=1)
x3.grid(row=3,column=1)
x4.grid(row=4,column=1)
x5.grid(row=5,column=1)
x6.grid(row=6,column=1)
y1=Entry(tk)
y2=Entry(tk)
y3=Entry(tk)
y4=Entry(tk)
y5=Entry(tk)
y6=Entry(tk)
y1.grid(row=1,column=2,padx=5,pady=5)
y2.grid(row=2,column=2,padx=5,pady=5)
y3.grid(row=3,column=2,padx=5,pady=5)
y4.grid(row=4,column=2,padx=5,pady=5)
y5.grid(row=5,column=2,padx=5,pady=5)
y6.grid(row=6,column=2,padx=5,pady=5)
Button(tk,text="扫描填充",width=10,command=run).grid(row=7,column=1)
Button(tk,text="退出程序",width=10,command=tk.quit).grid(row=7,column=2)
tk.mainloop()
|
normal
|
{
"blob_id": "0a7a95755924fd264169286cc5b5b7587d7ee8e4",
"index": 4608,
"step-1": "<mask token>\n\n\nclass AetNode(object):\n\n def __init__(self, x, tx, my):\n self.x = x\n self.tx = tx\n self.my = my\n\n def op(self):\n return self.x\n\n\nclass AetList(object):\n\n def __init__(self, y):\n self.y = y\n self.numy = 0\n self.l = []\n pass\n\n\n<mask token>\n\n\ndef createNewEdgeTable(point):\n miny, maxy = findRange(point)\n Y = []\n for i in point:\n Y.append(i[1])\n Y = set(Y)\n Y = list(Y)\n newEdgeList = []\n y = miny\n while y <= maxy:\n if y in Y:\n print(y)\n templist = []\n for i in range(0, 6):\n if point[i][1] == y:\n templist.append(i)\n print(templist)\n print('一次创建新边表')\n lists = AetList(y)\n for temp in templist:\n index1 = (temp + 7) % 6\n index2 = (temp + 5) % 6\n print(point[temp][0], point[temp][1])\n print(point[index1][0], point[index1][1])\n print(point[index2][0], point[index2][1])\n print('+++++++++++++++++++++')\n if point[index1][1] > y:\n lists.numy += 1\n if point[index1][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index1][1])\n else:\n node = AetNode(point[temp][0], (point[index1][0] -\n point[temp][0]) / (point[index1][1] - point[\n temp][1]), point[index1][1])\n lists.l.append(node)\n if point[index2][1] > y:\n lists.numy += 1\n if point[index2][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index2][1])\n else:\n node = AetNode(point[temp][0], (point[index2][0] -\n point[temp][0]) / (point[index2][1] - point[\n temp][1]), point[index2][1])\n lists.l.append(node)\n if len(lists.l) != 0:\n lists.l.sort(key=AetNode.op)\n if len(templist) > 1:\n lists.numy -= 1\n newEdgeList.append(lists)\n y += 1\n printNewEegeList(newEdgeList)\n return newEdgeList, Y\n\n\ndef draw(x1, y1, x, y):\n turtle.penup()\n turtle.goto(x1, y1)\n turtle.pendown()\n turtle.goto(x, y)\n\n\ndef run():\n turtle.screensize(1920, 1080)\n turtle.penup()\n turtle.hideturtle()\n point = []\n temp = [float(x11.get()), float(y1.get())]\n point.append(temp)\n temp = [float(x2.get()), float(y2.get())]\n point.append(temp)\n temp = [float(x3.get()), float(y3.get())]\n point.append(temp)\n temp = [float(x4.get()), float(y4.get())]\n point.append(temp)\n temp = [float(x5.get()), float(y5.get())]\n point.append(temp)\n temp = [float(x6.get()), float(y6.get())]\n point.append(temp)\n point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]\n for i in point:\n turtle.goto(i[0], i[1])\n turtle.pendown()\n turtle.goto(point[0][0], point[0][1])\n newEdgeTable, Y = createNewEdgeTable(point)\n miny, maxy = findRange(point)\n y = miny\n acativeList = []\n while y <= maxy:\n ynum = 0\n if y in Y:\n for i in newEdgeTable:\n if i.y == y:\n for j in i.l:\n acativeList.append(j)\n ynum = i.numy\n break\n acativeList.sort(key=AetNode.op)\n for j in acativeList:\n print((j.x, j.tx, j.my))\n print('****************')\n i = 0\n flag = True\n while i < len(acativeList) - 1:\n x1 = acativeList[i].x\n temp = [acativeList[i + 1].x, y]\n if temp in point and ynum >= 1:\n ynum -= 1\n else:\n i += 1\n if flag:\n draw(x1, y, temp[0], y)\n flag = not flag\n newacativeList = []\n for i in acativeList:\n if i.my > y:\n i.x += i.tx\n newacativeList.append(i)\n acativeList = newacativeList\n y += 1\n turtle.mainloop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AetNode(object):\n\n def __init__(self, x, tx, my):\n self.x = x\n self.tx = tx\n self.my = my\n\n def op(self):\n return self.x\n\n\nclass AetList(object):\n\n def __init__(self, y):\n self.y = y\n self.numy = 0\n self.l = []\n pass\n\n\ndef findRange(point):\n maxy = point[0][1]\n miny = point[0][1]\n for i in point:\n if maxy < i[1]:\n maxy = i[1]\n if miny > i[1]:\n miny = i[1]\n return miny, maxy\n\n\ndef printNewEegeList(newEdgeTable):\n print('新边表是:')\n for i in newEdgeTable:\n print(i.y)\n for j in i.l:\n print((j.x, j.tx, j.my))\n print('__________________________________')\n\n\ndef createNewEdgeTable(point):\n miny, maxy = findRange(point)\n Y = []\n for i in point:\n Y.append(i[1])\n Y = set(Y)\n Y = list(Y)\n newEdgeList = []\n y = miny\n while y <= maxy:\n if y in Y:\n print(y)\n templist = []\n for i in range(0, 6):\n if point[i][1] == y:\n templist.append(i)\n print(templist)\n print('一次创建新边表')\n lists = AetList(y)\n for temp in templist:\n index1 = (temp + 7) % 6\n index2 = (temp + 5) % 6\n print(point[temp][0], point[temp][1])\n print(point[index1][0], point[index1][1])\n print(point[index2][0], point[index2][1])\n print('+++++++++++++++++++++')\n if point[index1][1] > y:\n lists.numy += 1\n if point[index1][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index1][1])\n else:\n node = AetNode(point[temp][0], (point[index1][0] -\n point[temp][0]) / (point[index1][1] - point[\n temp][1]), point[index1][1])\n lists.l.append(node)\n if point[index2][1] > y:\n lists.numy += 1\n if point[index2][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index2][1])\n else:\n node = AetNode(point[temp][0], (point[index2][0] -\n point[temp][0]) / (point[index2][1] - point[\n temp][1]), point[index2][1])\n lists.l.append(node)\n if len(lists.l) != 0:\n lists.l.sort(key=AetNode.op)\n if len(templist) > 1:\n lists.numy -= 1\n newEdgeList.append(lists)\n y += 1\n printNewEegeList(newEdgeList)\n return newEdgeList, Y\n\n\ndef draw(x1, y1, x, y):\n turtle.penup()\n turtle.goto(x1, y1)\n turtle.pendown()\n turtle.goto(x, y)\n\n\ndef run():\n turtle.screensize(1920, 1080)\n turtle.penup()\n turtle.hideturtle()\n point = []\n temp = [float(x11.get()), float(y1.get())]\n point.append(temp)\n temp = [float(x2.get()), float(y2.get())]\n point.append(temp)\n temp = [float(x3.get()), float(y3.get())]\n point.append(temp)\n temp = [float(x4.get()), float(y4.get())]\n point.append(temp)\n temp = [float(x5.get()), float(y5.get())]\n point.append(temp)\n temp = [float(x6.get()), float(y6.get())]\n point.append(temp)\n point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]\n for i in point:\n turtle.goto(i[0], i[1])\n turtle.pendown()\n turtle.goto(point[0][0], point[0][1])\n newEdgeTable, Y = createNewEdgeTable(point)\n miny, maxy = findRange(point)\n y = miny\n acativeList = []\n while y <= maxy:\n ynum = 0\n if y in Y:\n for i in newEdgeTable:\n if i.y == y:\n for j in i.l:\n acativeList.append(j)\n ynum = i.numy\n break\n acativeList.sort(key=AetNode.op)\n for j in acativeList:\n print((j.x, j.tx, j.my))\n print('****************')\n i = 0\n flag = True\n while i < len(acativeList) - 1:\n x1 = acativeList[i].x\n temp = [acativeList[i + 1].x, y]\n if temp in point and ynum >= 1:\n ynum -= 1\n else:\n i += 1\n if flag:\n draw(x1, y, temp[0], y)\n flag = not flag\n newacativeList = []\n for i in acativeList:\n if i.my > y:\n i.x += i.tx\n newacativeList.append(i)\n acativeList = newacativeList\n y += 1\n 
turtle.mainloop()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AetNode(object):\n\n def __init__(self, x, tx, my):\n self.x = x\n self.tx = tx\n self.my = my\n\n def op(self):\n return self.x\n\n\nclass AetList(object):\n\n def __init__(self, y):\n self.y = y\n self.numy = 0\n self.l = []\n pass\n\n\ndef findRange(point):\n maxy = point[0][1]\n miny = point[0][1]\n for i in point:\n if maxy < i[1]:\n maxy = i[1]\n if miny > i[1]:\n miny = i[1]\n return miny, maxy\n\n\ndef printNewEegeList(newEdgeTable):\n print('新边表是:')\n for i in newEdgeTable:\n print(i.y)\n for j in i.l:\n print((j.x, j.tx, j.my))\n print('__________________________________')\n\n\ndef createNewEdgeTable(point):\n miny, maxy = findRange(point)\n Y = []\n for i in point:\n Y.append(i[1])\n Y = set(Y)\n Y = list(Y)\n newEdgeList = []\n y = miny\n while y <= maxy:\n if y in Y:\n print(y)\n templist = []\n for i in range(0, 6):\n if point[i][1] == y:\n templist.append(i)\n print(templist)\n print('一次创建新边表')\n lists = AetList(y)\n for temp in templist:\n index1 = (temp + 7) % 6\n index2 = (temp + 5) % 6\n print(point[temp][0], point[temp][1])\n print(point[index1][0], point[index1][1])\n print(point[index2][0], point[index2][1])\n print('+++++++++++++++++++++')\n if point[index1][1] > y:\n lists.numy += 1\n if point[index1][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index1][1])\n else:\n node = AetNode(point[temp][0], (point[index1][0] -\n point[temp][0]) / (point[index1][1] - point[\n temp][1]), point[index1][1])\n lists.l.append(node)\n if point[index2][1] > y:\n lists.numy += 1\n if point[index2][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index2][1])\n else:\n node = AetNode(point[temp][0], (point[index2][0] -\n point[temp][0]) / (point[index2][1] - point[\n temp][1]), point[index2][1])\n lists.l.append(node)\n if len(lists.l) != 0:\n lists.l.sort(key=AetNode.op)\n if len(templist) > 1:\n lists.numy -= 1\n newEdgeList.append(lists)\n y += 1\n printNewEegeList(newEdgeList)\n return newEdgeList, Y\n\n\ndef draw(x1, y1, x, y):\n turtle.penup()\n turtle.goto(x1, y1)\n turtle.pendown()\n turtle.goto(x, y)\n\n\ndef run():\n turtle.screensize(1920, 1080)\n turtle.penup()\n turtle.hideturtle()\n point = []\n temp = [float(x11.get()), float(y1.get())]\n point.append(temp)\n temp = [float(x2.get()), float(y2.get())]\n point.append(temp)\n temp = [float(x3.get()), float(y3.get())]\n point.append(temp)\n temp = [float(x4.get()), float(y4.get())]\n point.append(temp)\n temp = [float(x5.get()), float(y5.get())]\n point.append(temp)\n temp = [float(x6.get()), float(y6.get())]\n point.append(temp)\n point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]\n for i in point:\n turtle.goto(i[0], i[1])\n turtle.pendown()\n turtle.goto(point[0][0], point[0][1])\n newEdgeTable, Y = createNewEdgeTable(point)\n miny, maxy = findRange(point)\n y = miny\n acativeList = []\n while y <= maxy:\n ynum = 0\n if y in Y:\n for i in newEdgeTable:\n if i.y == y:\n for j in i.l:\n acativeList.append(j)\n ynum = i.numy\n break\n acativeList.sort(key=AetNode.op)\n for j in acativeList:\n print((j.x, j.tx, j.my))\n print('****************')\n i = 0\n flag = True\n while i < len(acativeList) - 1:\n x1 = acativeList[i].x\n temp = [acativeList[i + 1].x, y]\n if temp in point and ynum >= 1:\n ynum -= 1\n else:\n i += 1\n if flag:\n draw(x1, y, temp[0], y)\n flag = not flag\n newacativeList = []\n for i in acativeList:\n if i.my > y:\n i.x += i.tx\n newacativeList.append(i)\n acativeList = newacativeList\n y += 1\n 
turtle.mainloop()\n\n\n<mask token>\ntk.title('扫描填充算法:by 高谦')\nLabel(tk, text='输入顶点:').grid(row=0)\nLabel(tk, text='1:').grid(row=1)\nLabel(tk, text='2:').grid(row=2)\nLabel(tk, text='3:').grid(row=3)\nLabel(tk, text='4:').grid(row=4)\nLabel(tk, text='5:').grid(row=5)\nLabel(tk, text='6:').grid(row=6)\nLabel(tk, text='例:\\n\\n').grid(row=9)\nLabel(tk, text=\"\"\"(20,20),(50,10)\n(110,30),(110,80)\n(50,50),(20,70)\"\"\").grid(\n row=9, column=1)\nLabel(tk, text=\"\"\"(-10,-10),(10,-10)\n(15,0),(10,10)\n(-10,10),(-15,0)\"\"\").grid(\n row=9, column=2)\n<mask token>\nx11.grid(row=1, column=1)\nx2.grid(row=2, column=1)\nx3.grid(row=3, column=1)\nx4.grid(row=4, column=1)\nx5.grid(row=5, column=1)\nx6.grid(row=6, column=1)\n<mask token>\ny1.grid(row=1, column=2, padx=5, pady=5)\ny2.grid(row=2, column=2, padx=5, pady=5)\ny3.grid(row=3, column=2, padx=5, pady=5)\ny4.grid(row=4, column=2, padx=5, pady=5)\ny5.grid(row=5, column=2, padx=5, pady=5)\ny6.grid(row=6, column=2, padx=5, pady=5)\nButton(tk, text='扫描填充', width=10, command=run).grid(row=7, column=1)\nButton(tk, text='退出程序', width=10, command=tk.quit).grid(row=7, column=2)\ntk.mainloop()\n",
"step-4": "<mask token>\n\n\nclass AetNode(object):\n\n def __init__(self, x, tx, my):\n self.x = x\n self.tx = tx\n self.my = my\n\n def op(self):\n return self.x\n\n\nclass AetList(object):\n\n def __init__(self, y):\n self.y = y\n self.numy = 0\n self.l = []\n pass\n\n\ndef findRange(point):\n maxy = point[0][1]\n miny = point[0][1]\n for i in point:\n if maxy < i[1]:\n maxy = i[1]\n if miny > i[1]:\n miny = i[1]\n return miny, maxy\n\n\ndef printNewEegeList(newEdgeTable):\n print('新边表是:')\n for i in newEdgeTable:\n print(i.y)\n for j in i.l:\n print((j.x, j.tx, j.my))\n print('__________________________________')\n\n\ndef createNewEdgeTable(point):\n miny, maxy = findRange(point)\n Y = []\n for i in point:\n Y.append(i[1])\n Y = set(Y)\n Y = list(Y)\n newEdgeList = []\n y = miny\n while y <= maxy:\n if y in Y:\n print(y)\n templist = []\n for i in range(0, 6):\n if point[i][1] == y:\n templist.append(i)\n print(templist)\n print('一次创建新边表')\n lists = AetList(y)\n for temp in templist:\n index1 = (temp + 7) % 6\n index2 = (temp + 5) % 6\n print(point[temp][0], point[temp][1])\n print(point[index1][0], point[index1][1])\n print(point[index2][0], point[index2][1])\n print('+++++++++++++++++++++')\n if point[index1][1] > y:\n lists.numy += 1\n if point[index1][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index1][1])\n else:\n node = AetNode(point[temp][0], (point[index1][0] -\n point[temp][0]) / (point[index1][1] - point[\n temp][1]), point[index1][1])\n lists.l.append(node)\n if point[index2][1] > y:\n lists.numy += 1\n if point[index2][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index2][1])\n else:\n node = AetNode(point[temp][0], (point[index2][0] -\n point[temp][0]) / (point[index2][1] - point[\n temp][1]), point[index2][1])\n lists.l.append(node)\n if len(lists.l) != 0:\n lists.l.sort(key=AetNode.op)\n if len(templist) > 1:\n lists.numy -= 1\n newEdgeList.append(lists)\n y += 1\n printNewEegeList(newEdgeList)\n return newEdgeList, Y\n\n\ndef draw(x1, y1, x, y):\n turtle.penup()\n turtle.goto(x1, y1)\n turtle.pendown()\n turtle.goto(x, y)\n\n\ndef run():\n turtle.screensize(1920, 1080)\n turtle.penup()\n turtle.hideturtle()\n point = []\n temp = [float(x11.get()), float(y1.get())]\n point.append(temp)\n temp = [float(x2.get()), float(y2.get())]\n point.append(temp)\n temp = [float(x3.get()), float(y3.get())]\n point.append(temp)\n temp = [float(x4.get()), float(y4.get())]\n point.append(temp)\n temp = [float(x5.get()), float(y5.get())]\n point.append(temp)\n temp = [float(x6.get()), float(y6.get())]\n point.append(temp)\n point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]\n for i in point:\n turtle.goto(i[0], i[1])\n turtle.pendown()\n turtle.goto(point[0][0], point[0][1])\n newEdgeTable, Y = createNewEdgeTable(point)\n miny, maxy = findRange(point)\n y = miny\n acativeList = []\n while y <= maxy:\n ynum = 0\n if y in Y:\n for i in newEdgeTable:\n if i.y == y:\n for j in i.l:\n acativeList.append(j)\n ynum = i.numy\n break\n acativeList.sort(key=AetNode.op)\n for j in acativeList:\n print((j.x, j.tx, j.my))\n print('****************')\n i = 0\n flag = True\n while i < len(acativeList) - 1:\n x1 = acativeList[i].x\n temp = [acativeList[i + 1].x, y]\n if temp in point and ynum >= 1:\n ynum -= 1\n else:\n i += 1\n if flag:\n draw(x1, y, temp[0], y)\n flag = not flag\n newacativeList = []\n for i in acativeList:\n if i.my > y:\n i.x += i.tx\n newacativeList.append(i)\n acativeList = newacativeList\n y += 1\n 
turtle.mainloop()\n\n\ntk = Tk()\ntk.title('扫描填充算法:by 高谦')\nLabel(tk, text='输入顶点:').grid(row=0)\nLabel(tk, text='1:').grid(row=1)\nLabel(tk, text='2:').grid(row=2)\nLabel(tk, text='3:').grid(row=3)\nLabel(tk, text='4:').grid(row=4)\nLabel(tk, text='5:').grid(row=5)\nLabel(tk, text='6:').grid(row=6)\nLabel(tk, text='例:\\n\\n').grid(row=9)\nLabel(tk, text=\"\"\"(20,20),(50,10)\n(110,30),(110,80)\n(50,50),(20,70)\"\"\").grid(\n row=9, column=1)\nLabel(tk, text=\"\"\"(-10,-10),(10,-10)\n(15,0),(10,10)\n(-10,10),(-15,0)\"\"\").grid(\n row=9, column=2)\nx11 = Entry(tk)\nx2 = Entry(tk)\nx3 = Entry(tk)\nx4 = Entry(tk)\nx5 = Entry(tk)\nx6 = Entry(tk)\nx11.grid(row=1, column=1)\nx2.grid(row=2, column=1)\nx3.grid(row=3, column=1)\nx4.grid(row=4, column=1)\nx5.grid(row=5, column=1)\nx6.grid(row=6, column=1)\ny1 = Entry(tk)\ny2 = Entry(tk)\ny3 = Entry(tk)\ny4 = Entry(tk)\ny5 = Entry(tk)\ny6 = Entry(tk)\ny1.grid(row=1, column=2, padx=5, pady=5)\ny2.grid(row=2, column=2, padx=5, pady=5)\ny3.grid(row=3, column=2, padx=5, pady=5)\ny4.grid(row=4, column=2, padx=5, pady=5)\ny5.grid(row=5, column=2, padx=5, pady=5)\ny6.grid(row=6, column=2, padx=5, pady=5)\nButton(tk, text='扫描填充', width=10, command=run).grid(row=7, column=1)\nButton(tk, text='退出程序', width=10, command=tk.quit).grid(row=7, column=2)\ntk.mainloop()\n",
"step-5": "import turtle\nimport math\nfrom tkinter import *\n#活性边表节点:\nclass AetNode(object):\n def __init__(self,x,tx,my):\n self.x=x\n self.tx=tx\n self.my=my\n def op(self):\n return self.x\nclass AetList(object):\n def __init__(self,y):\n self.y=y\n self.numy=0\n self.l=[]\n pass\ndef findRange(point):\n # 找到最大y和最小y:\n maxy = point[0][1]\n miny = point[0][1]\n for i in point:\n if maxy < i[1]:\n maxy = i[1]\n if miny > i[1]:\n miny = i[1]\n return (miny,maxy)\ndef printNewEegeList(newEdgeTable):\n print(\"新边表是:\")\n for i in newEdgeTable:\n print(i.y)\n for j in i.l:\n print((j.x,j.tx,j.my))\n print(\"__________________________________\")\ndef createNewEdgeTable(point):\n miny,maxy=findRange(point)\n # 找打所有y的顶点:\n Y = []\n for i in point:\n Y.append(i[1])\n Y = set(Y)\n Y = list(Y)\n # 创建新边表:\n newEdgeList = []\n y=miny\n while y <=maxy:\n if y in Y:\n # 找到所有的X值:\n print(y)\n templist=[]\n for i in range(0, 6):\n if point[i][1] == y:\n templist.append(i)\n print(templist)\n print(\"一次创建新边表\")\n lists = AetList(y)\n for temp in templist:\n index1 = (temp + 7) % 6\n index2 = (temp + 5) % 6\n print(point[temp][0],point[temp][1])\n print(point[index1][0],point[index1][1])\n print(point[index2][0],point[index2][1])\n print(\"+++++++++++++++++++++\")\n # lists = AetList(y)\n if point[index1][1] > y:\n lists.numy+=1\n if point[index1][1] - point[temp][1]==0:\n node = AetNode(point[temp][0],0,point[index1][1])\n else:\n node = AetNode(point[temp][0],\n ((point[index1][0] - point[temp][0]) / (point[index1][1] - point[temp][1])),\n point[index1][1])\n lists.l.append(node)\n if point[index2][1] > y:\n lists.numy+=1\n if point[index2][1] - point[temp][1]==0:\n node = AetNode(point[temp][0], 0, point[index2][1])\n else:\n node = AetNode(point[temp][0],\n ((point[index2][0] - point[temp][0]) / (point[index2][1] - point[temp][1])),\n point[index2][1])\n lists.l.append(node)\n if len(lists.l)!=0:\n lists.l.sort(key=AetNode.op)\n if len(templist)>1:\n lists.numy-=1\n newEdgeList.append(lists)\n y+=1\n printNewEegeList(newEdgeList)\n return (newEdgeList,Y)\ndef draw(x1,y1,x,y):\n turtle.penup()\n turtle.goto(x1,y1)\n turtle.pendown()\n turtle.goto(x,y)\ndef run():\n turtle.screensize(1920,1080)\n turtle.penup()\n turtle.hideturtle()\n point=[]\n # point=[[20,20],[50,10],[110,30],[110,80],[50,50],[20,70]]\n # point=[[-10,-10],[10,-10],[15,0],[10,10],[-10,10],[-15,0]]\n temp = [float(x11.get()), float(y1.get())]\n point.append(temp)\n temp = [float(x2.get()), float(y2.get())]\n point.append(temp)\n temp = [float(x3.get()), float(y3.get())]\n point.append(temp)\n temp = [float(x4.get()), float(y4.get())]\n point.append(temp)\n temp = [float(x5.get()), float(y5.get())]\n point.append(temp)\n temp = [float(x6.get()), float(y6.get())]\n point.append(temp)\n point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]\n #画出原图:\n for i in point:\n turtle.goto(i[0],i[1])\n turtle.pendown()\n turtle.goto(point[0][0],point[0][1])\n #创建新边表:\n newEdgeTable,Y=createNewEdgeTable(point)\n miny,maxy=findRange(point)\n y=miny\n acativeList=[]\n while y<=maxy:\n #把新边表加进来:\n ynum=0\n if y in Y:\n for i in newEdgeTable:\n if i.y==y:\n for j in i.l:\n acativeList.append(j)\n ynum=i.numy\n break\n acativeList.sort(key=AetNode.op)\n for j in acativeList:\n print((j.x,j.tx,j.my))\n print(\"****************\")\n #进行填色:\n i=0\n flag=True\n while i<len(acativeList)-1:\n x1=acativeList[i].x\n temp=[acativeList[i+1].x,y]\n if temp in point and ynum>=1:\n ynum-=1\n else:\n i+=1\n if flag:\n draw(x1,y,temp[0],y)\n flag=not 
flag\n #更新活性边表:\n newacativeList=[]\n for i in acativeList:\n if i.my>y:\n i.x+=i.tx\n newacativeList.append(i)\n acativeList=newacativeList\n y+=1\n turtle.mainloop()\ntk=Tk()\ntk.title(\"扫描填充算法:by 高谦\")\nLabel(tk,text=\"输入顶点:\").grid(row=0)\nLabel(tk,text=\"1:\").grid(row=1)\nLabel(tk,text=\"2:\").grid(row=2)\nLabel(tk,text=\"3:\").grid(row=3)\nLabel(tk,text=\"4:\").grid(row=4)\nLabel(tk,text=\"5:\").grid(row=5)\nLabel(tk,text=\"6:\").grid(row=6)\nLabel(tk,text=\"例:\\n\\n\").grid(row=9)\nLabel(tk,text=\"(20,20),(50,10)\\n(110,30),(110,80)\\n(50,50),(20,70)\").grid(row=9,column=1)\nLabel(tk,text=\"(-10,-10),(10,-10)\\n(15,0),(10,10)\\n(-10,10),(-15,0)\").grid(row=9,column=2)\nx11=Entry(tk)\nx2=Entry(tk)\nx3=Entry(tk)\nx4=Entry(tk)\nx5=Entry(tk)\nx6=Entry(tk)\nx11.grid(row=1,column=1)\nx2.grid(row=2,column=1)\nx3.grid(row=3,column=1)\nx4.grid(row=4,column=1)\nx5.grid(row=5,column=1)\nx6.grid(row=6,column=1)\n\ny1=Entry(tk)\ny2=Entry(tk)\ny3=Entry(tk)\ny4=Entry(tk)\ny5=Entry(tk)\ny6=Entry(tk)\ny1.grid(row=1,column=2,padx=5,pady=5)\ny2.grid(row=2,column=2,padx=5,pady=5)\ny3.grid(row=3,column=2,padx=5,pady=5)\ny4.grid(row=4,column=2,padx=5,pady=5)\ny5.grid(row=5,column=2,padx=5,pady=5)\ny6.grid(row=6,column=2,padx=5,pady=5)\nButton(tk,text=\"扫描填充\",width=10,command=run).grid(row=7,column=1)\nButton(tk,text=\"退出程序\",width=10,command=tk.quit).grid(row=7,column=2)\ntk.mainloop()",
"step-ids": [
8,
10,
11,
12,
14
]
}
|
[
8,
10,
11,
12,
14
] |
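
Editor's note: the record above fills a six-vertex polygon by maintaining an active edge table (AET): at each scanline, edges starting there are added, x intersections are paired into fill spans, and each surviving edge's x advances by its inverse slope. A minimal sketch of the same idea without the AET bookkeeping — the name scanline_spans and the half-open vertex rule are illustrative choices, not taken from the record:

def scanline_spans(points):
    """Yield (y, x_start, x_end) fill spans for a simple polygon.

    For each integer scanline y, intersect it with every non-horizontal
    edge, sort the x intersections, and pair them into inside spans.
    """
    ys = [p[1] for p in points]
    for y in range(min(ys), max(ys)):
        xs = []
        for (x0, y0), (x1, y1) in zip(points, points[1:] + points[:1]):
            if y0 == y1:
                continue  # horizontal edges contribute no crossing
            # half-open test [min_y, max_y) counts each shared vertex exactly once
            if min(y0, y1) <= y < max(y0, y1):
                xs.append(x0 + (y - y0) * (x1 - x0) / (y1 - y0))
        xs.sort()
        for x_start, x_end in zip(xs[::2], xs[1::2]):
            yield y, x_start, x_end


for span in scanline_spans([(20, 20), (50, 10), (110, 30), (110, 80), (50, 50), (20, 70)]):
    print(span)
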
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for banner_span in list_of_banners:
print(f"{banner_span['id']}, {x_count}, {y_count}")
x_count += 1
if x_count == 51:
x_count = 1
y_count += 1
print('\n\n-----------------')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
r = requests.get('https://terraria.fandom.com/wiki/Banners_(enemy)')
soup = BeautifulSoup(r.text, 'html.parser')
list_of_banners = soup.find_all('span', {'id': re.compile('_Banner')})
x_count = 1
y_count = 1
for banner_span in list_of_banners:
print(f"{banner_span['id']}, {x_count}, {y_count}")
x_count += 1
if x_count == 51:
x_count = 1
y_count += 1
print('\n\n-----------------')
<|reserved_special_token_1|>
import requests
import re
from bs4 import BeautifulSoup
r = requests.get('https://terraria.fandom.com/wiki/Banners_(enemy)')
soup = BeautifulSoup(r.text, 'html.parser')
list_of_banners = soup.find_all('span', {'id': re.compile('_Banner')})
x_count = 1
y_count = 1
for banner_span in list_of_banners:
print(f"{banner_span['id']}, {x_count}, {y_count}")
x_count += 1
if x_count == 51:
x_count = 1
y_count += 1
print('\n\n-----------------')
<|reserved_special_token_1|>
import requests
import re
from bs4 import BeautifulSoup
r = requests.get("https://terraria.fandom.com/wiki/Banners_(enemy)")
soup = BeautifulSoup(r.text, 'html.parser')
list_of_banners = soup.find_all('span', {'id': re.compile(r'_Banner')})
x_count = 1
y_count = 1
for banner_span in list_of_banners:
print(f"{banner_span['id']}, {x_count}, {y_count}")
x_count += 1
if x_count == 51:
x_count = 1
y_count += 1
print("\n\n-----------------")
|
flexible
|
{
"blob_id": "e60d57e8884cba8ce50a571e3bd0affcd4dcaf68",
"index": 4056,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor banner_span in list_of_banners:\n print(f\"{banner_span['id']}, {x_count}, {y_count}\")\n x_count += 1\n if x_count == 51:\n x_count = 1\n y_count += 1\n print('\\n\\n-----------------')\n",
"step-3": "<mask token>\nr = requests.get('https://terraria.fandom.com/wiki/Banners_(enemy)')\nsoup = BeautifulSoup(r.text, 'html.parser')\nlist_of_banners = soup.find_all('span', {'id': re.compile('_Banner')})\nx_count = 1\ny_count = 1\nfor banner_span in list_of_banners:\n print(f\"{banner_span['id']}, {x_count}, {y_count}\")\n x_count += 1\n if x_count == 51:\n x_count = 1\n y_count += 1\n print('\\n\\n-----------------')\n",
"step-4": "import requests\nimport re\nfrom bs4 import BeautifulSoup\nr = requests.get('https://terraria.fandom.com/wiki/Banners_(enemy)')\nsoup = BeautifulSoup(r.text, 'html.parser')\nlist_of_banners = soup.find_all('span', {'id': re.compile('_Banner')})\nx_count = 1\ny_count = 1\nfor banner_span in list_of_banners:\n print(f\"{banner_span['id']}, {x_count}, {y_count}\")\n x_count += 1\n if x_count == 51:\n x_count = 1\n y_count += 1\n print('\\n\\n-----------------')\n",
"step-5": "import requests\nimport re\nfrom bs4 import BeautifulSoup\nr = requests.get(\"https://terraria.fandom.com/wiki/Banners_(enemy)\")\nsoup = BeautifulSoup(r.text, 'html.parser')\nlist_of_banners = soup.find_all('span', {'id': re.compile(r'_Banner')})\nx_count = 1\ny_count = 1\nfor banner_span in list_of_banners:\n print(f\"{banner_span['id']}, {x_count}, {y_count}\")\n x_count += 1\n if x_count == 51:\n x_count = 1\n y_count += 1\n print(\"\\n\\n-----------------\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
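
Editor's note: the scraping record above leans on a useful BeautifulSoup feature: find_all accepts a compiled regular expression as an attribute filter. A slightly hardened variant of the same fetch — the timeout and the raise_for_status() call are additions, not part of the record:

import re

import requests
from bs4 import BeautifulSoup

resp = requests.get("https://terraria.fandom.com/wiki/Banners_(enemy)", timeout=30)
resp.raise_for_status()  # fail early on HTTP errors instead of parsing an error page
soup = BeautifulSoup(resp.text, "html.parser")
# the compiled regex is matched against each span's id attribute
banner_ids = [span["id"] for span in soup.find_all("span", {"id": re.compile(r"_Banner")})]
print(len(banner_ids), banner_ids[:5])
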
from functools import reduce
from collections import defaultdict
def memory(count: int, start_numbers: list):
numbers = defaultdict(lambda: tuple(2 * [None]), { el: (idx,None ) for idx,el in enumerate(start_numbers) })
last = start_numbers[-1]
for idx in range(len(numbers), count):
last = 0 if None in numbers[last] else reduce(lambda a,b:a-b, numbers[last])
numbers[last] = ( idx, numbers[last][0] )
print(f"For starting numbers: {start_numbers}, the {count}th number is: {last}")
[ memory(count, [8,0,17,4,1,12]) for count in [ 2020, 30000000 ] ]
|
normal
|
{
"blob_id": "0f0adde7241898d2efe7e2b5cc218e42ed7b73d8",
"index": 5475,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef memory(count: int, start_numbers: list):\n numbers = defaultdict(lambda : tuple(2 * [None]), {el: (idx, None) for \n idx, el in enumerate(start_numbers)})\n last = start_numbers[-1]\n for idx in range(len(numbers), count):\n last = 0 if None in numbers[last] else reduce(lambda a, b: a - b,\n numbers[last])\n numbers[last] = idx, numbers[last][0]\n print(\n f'For starting numbers: {start_numbers}, the {count}th number is: {last}'\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef memory(count: int, start_numbers: list):\n numbers = defaultdict(lambda : tuple(2 * [None]), {el: (idx, None) for \n idx, el in enumerate(start_numbers)})\n last = start_numbers[-1]\n for idx in range(len(numbers), count):\n last = 0 if None in numbers[last] else reduce(lambda a, b: a - b,\n numbers[last])\n numbers[last] = idx, numbers[last][0]\n print(\n f'For starting numbers: {start_numbers}, the {count}th number is: {last}'\n )\n\n\n[memory(count, [8, 0, 17, 4, 1, 12]) for count in [2020, 30000000]]\n",
"step-4": "from functools import reduce\nfrom collections import defaultdict\n\n\ndef memory(count: int, start_numbers: list):\n numbers = defaultdict(lambda : tuple(2 * [None]), {el: (idx, None) for \n idx, el in enumerate(start_numbers)})\n last = start_numbers[-1]\n for idx in range(len(numbers), count):\n last = 0 if None in numbers[last] else reduce(lambda a, b: a - b,\n numbers[last])\n numbers[last] = idx, numbers[last][0]\n print(\n f'For starting numbers: {start_numbers}, the {count}th number is: {last}'\n )\n\n\n[memory(count, [8, 0, 17, 4, 1, 12]) for count in [2020, 30000000]]\n",
"step-5": "from functools import reduce\nfrom collections import defaultdict\n\ndef memory(count: int, start_numbers: list):\n numbers = defaultdict(lambda: tuple(2 * [None]), { el: (idx,None ) for idx,el in enumerate(start_numbers) })\n last = start_numbers[-1]\n for idx in range(len(numbers), count):\n last = 0 if None in numbers[last] else reduce(lambda a,b:a-b, numbers[last])\n numbers[last] = ( idx, numbers[last][0] )\n print(f\"For starting numbers: {start_numbers}, the {count}th number is: {last}\")\n[ memory(count, [8,0,17,4,1,12]) for count in [ 2020, 30000000 ] ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
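
Editor's note: the memory record above is the Advent of Code 2020 day 15 recurrence (a Van Eck-style sequence): each turn speaks 0 if the previous number was new, otherwise the gap between its two most recent occurrences. Keeping a single last-seen index per number, instead of the pair of indices the record stores, is sufficient; a sketch with the illustrative name memory_game:

def memory_game(start, count):
    """Return the count-th number spoken, keeping one last-seen index per number."""
    last_seen = {n: i for i, n in enumerate(start[:-1])}
    last = start[-1]
    for i in range(len(start) - 1, count - 1):
        # 0 if `last` is new, else the distance back to its previous occurrence
        nxt = i - last_seen[last] if last in last_seen else 0
        last_seen[last] = i
        last = nxt
    return last


assert memory_game([0, 3, 6], 2020) == 436  # worked example from the puzzle statement
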
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('system', '0005_location')]
operations = [migrations.AddField(model_name='setting', name=
'runned_locations_initial_data', field=models.BooleanField(blank=
True, default=False)), migrations.AlterField(model_name='location',
name='name', field=models.CharField(max_length=128, unique=True))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('system', '0005_location')]
operations = [migrations.AddField(model_name='setting', name=
'runned_locations_initial_data', field=models.BooleanField(blank=
True, default=False)), migrations.AlterField(model_name='location',
name='name', field=models.CharField(max_length=128, unique=True))]
<|reserved_special_token_1|>
# Generated by Django 2.2.10 on 2020-03-13 14:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('system', '0005_location'),
]
operations = [
migrations.AddField(
model_name='setting',
name='runned_locations_initial_data',
field=models.BooleanField(blank=True, default=False),
),
migrations.AlterField(
model_name='location',
name='name',
field=models.CharField(max_length=128, unique=True),
),
]
|
flexible
|
{
"blob_id": "211ef4c64e42c54423ac8dab2128952874a2cf5a",
"index": 7694,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('system', '0005_location')]\n operations = [migrations.AddField(model_name='setting', name=\n 'runned_locations_initial_data', field=models.BooleanField(blank=\n True, default=False)), migrations.AlterField(model_name='location',\n name='name', field=models.CharField(max_length=128, unique=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('system', '0005_location')]\n operations = [migrations.AddField(model_name='setting', name=\n 'runned_locations_initial_data', field=models.BooleanField(blank=\n True, default=False)), migrations.AlterField(model_name='location',\n name='name', field=models.CharField(max_length=128, unique=True))]\n",
"step-5": "# Generated by Django 2.2.10 on 2020-03-13 14:02\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('system', '0005_location'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='setting',\n name='runned_locations_initial_data',\n field=models.BooleanField(blank=True, default=False),\n ),\n migrations.AlterField(\n model_name='location',\n name='name',\n field=models.CharField(max_length=128, unique=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
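
Editor's note: the migration above adds a boolean flag and tightens a CharField to unique=True. For reference, it corresponds to model changes along these lines — a hedged reconstruction, since the system app's models are not shown and any fields beyond those named in the migration are unknown:

from django.db import models


class Setting(models.Model):
    # added by AddField: defaults to False, may be left blank in forms
    runned_locations_initial_data = models.BooleanField(blank=True, default=False)


class Location(models.Model):
    # AlterField tightens this column: a unique index is created on it
    name = models.CharField(max_length=128, unique=True)
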
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UpdatePurchaseFood(forms.ModelForm):
class Meta:
model = purchase_cards
fields = ['food_name', 'description', 'ss_code', 'calorie', 'fat',
'protein', 'carbs', 'image_path']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UpdateFood(forms.ModelForm):
class Meta:
model = Old_Food_Diary
fields = ['mfg_code', 'food_name', 'description', 'food_type',
'calories', 'fats', 'protein', 'carbohydrates', 'link_of_image',
'link_of_recipie', 'purchasing_link']
class UpdatePurchaseFood(forms.ModelForm):
class Meta:
model = purchase_cards
fields = ['food_name', 'description', 'ss_code', 'calorie', 'fat',
'protein', 'carbs', 'image_path']
<|reserved_special_token_1|>
from django import forms
from basic_app_new.models import *
class UpdateFood(forms.ModelForm):
class Meta:
model = Old_Food_Diary
fields = ['mfg_code', 'food_name', 'description', 'food_type',
'calories', 'fats', 'protein', 'carbohydrates', 'link_of_image',
'link_of_recipie', 'purchasing_link']
class UpdatePurchaseFood(forms.ModelForm):
class Meta:
model = purchase_cards
fields = ['food_name', 'description', 'ss_code', 'calorie', 'fat',
'protein', 'carbs', 'image_path']
|
flexible
|
{
"blob_id": "3a1b0b9891fec7b3d722f77cd2f3f6efa878a7a0",
"index": 4255,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass UpdatePurchaseFood(forms.ModelForm):\n\n\n class Meta:\n model = purchase_cards\n fields = ['food_name', 'description', 'ss_code', 'calorie', 'fat',\n 'protein', 'carbs', 'image_path']\n",
"step-3": "<mask token>\n\n\nclass UpdateFood(forms.ModelForm):\n\n\n class Meta:\n model = Old_Food_Diary\n fields = ['mfg_code', 'food_name', 'description', 'food_type',\n 'calories', 'fats', 'protein', 'carbohydrates', 'link_of_image',\n 'link_of_recipie', 'purchasing_link']\n\n\nclass UpdatePurchaseFood(forms.ModelForm):\n\n\n class Meta:\n model = purchase_cards\n fields = ['food_name', 'description', 'ss_code', 'calorie', 'fat',\n 'protein', 'carbs', 'image_path']\n",
"step-4": "from django import forms\nfrom basic_app_new.models import *\n\n\nclass UpdateFood(forms.ModelForm):\n\n\n class Meta:\n model = Old_Food_Diary\n fields = ['mfg_code', 'food_name', 'description', 'food_type',\n 'calories', 'fats', 'protein', 'carbohydrates', 'link_of_image',\n 'link_of_recipie', 'purchasing_link']\n\n\nclass UpdatePurchaseFood(forms.ModelForm):\n\n\n class Meta:\n model = purchase_cards\n fields = ['food_name', 'description', 'ss_code', 'calorie', 'fat',\n 'protein', 'carbs', 'image_path']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
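
Editor's note: ModelForms like UpdateFood above are typically bound to request data and saved in a view. A minimal hedged usage sketch — the view name, template path, and URL name are illustrative, not from the record:

from django.shortcuts import redirect, render


def update_food(request):
    form = UpdateFood(request.POST or None)
    if request.method == "POST" and form.is_valid():
        form.save()  # writes an Old_Food_Diary row from the whitelisted fields
        return redirect("food_list")  # illustrative URL name
    return render(request, "update_food.html", {"form": form})
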
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.site.register(Hash)
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Hash
admin.site.register(Hash)
|
flexible
|
{
"blob_id": "e2e4adaa8f7f62662e0c2915faff1bed72986351",
"index": 1084,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Hash)\n",
"step-3": "from django.contrib import admin\nfrom .models import Hash\nadmin.site.register(Hash)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
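
Editor's note: bare admin.site.register(Hash) gives the default change list; the usual next step is a ModelAdmin subclass. A hedged sketch — the Hash model's fields are not shown, so list_display uses only the primary key:

from django.contrib import admin

from .models import Hash


@admin.register(Hash)  # equivalent to admin.site.register(Hash, HashAdmin)
class HashAdmin(admin.ModelAdmin):
    list_display = ("id",)  # replace with real Hash fields once known
    ordering = ("-id",)     # newest rows first in the change list
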
<|reserved_special_token_0|>
class GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):
<|reserved_special_token_0|>
id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',
onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)
sha1: str = sql.Column(sql.String(40), nullable=False, index=True)
pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary
(), nullable=False)
<|reserved_special_token_0|>
class Database(sqlutil.Database):
"""A database of GraphTuples."""
def __init__(self, url: str, must_exist: bool=False, ctx: progress.
ProgressContext=progress.NullContext):
super(Database, self).__init__(url, Base, must_exist=must_exist)
self.ctx = ctx
self._graph_tuple_stats = None
self._splits = None
self._split_counts = None
@database_statistic
def graph_count(self) ->int:
"""The number of non-empty graphs in the database."""
return int(self.graph_tuple_stats.graph_count)
@database_statistic
def ir_count(self) ->int:
"""The number of distinct intermediate representations that the non-empty
graphs are constructed from.
"""
return int(self.graph_tuple_stats.ir_count or 0)
@database_statistic
def split_count(self) ->int:
"""The number of distinct splits in the database."""
return int(self.graph_tuple_stats.split_count or 0)
@database_statistic
def node_count(self) ->int:
"""The total node count in non-empty graphs."""
return int(self.graph_tuple_stats.node_count or 0)
@database_statistic
def edge_count(self) ->int:
"""The total edge count in non-empty graphs."""
return int(self.graph_tuple_stats.edge_count or 0)
@database_statistic
def control_edge_count(self) ->int:
"""The total control edge count in non-empty graphs."""
return int(self.graph_tuple_stats.control_edge_count or 0)
@database_statistic
def data_edge_count(self) ->int:
"""The total data edge count in non-empty graphs."""
return int(self.graph_tuple_stats.data_edge_count or 0)
@database_statistic
def call_edge_count(self) ->int:
"""The total call edge count in non-empty graphs."""
return int(self.graph_tuple_stats.call_edge_count or 0)
@database_statistic
def node_count_max(self) ->int:
"""The maximum node count in non-empty graphs."""
return int(self.graph_tuple_stats.node_count_max or 0)
@database_statistic
def edge_count_max(self) ->int:
"""The maximum edge count in non-empty graphs."""
return int(self.graph_tuple_stats.edge_count_max or 0)
@database_statistic
def control_edge_count_max(self) ->int:
"""The maximum control edge count in non-empty graphs."""
return int(self.graph_tuple_stats.control_edge_count_max or 0)
@database_statistic
def data_edge_count_max(self) ->int:
"""The maximum data edge count in non-empty graphs."""
return int(self.graph_tuple_stats.data_edge_count_max or 0)
@database_statistic
def call_edge_count_max(self) ->int:
"""The maximum call edge count in non-empty graphs."""
return int(self.graph_tuple_stats.call_edge_count_max or 0)
@database_statistic
def edge_position_max(self) ->int:
"""The maximum edge position in non-empty graphs."""
return int(self.graph_tuple_stats.edge_position_max or 0)
@database_statistic
def node_x_dimensionality(self) ->int:
"""The node x dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.node_x_dimensionality or 0)
@database_statistic
def node_y_dimensionality(self) ->int:
"""The node y dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.node_y_dimensionality or 0)
@database_statistic
def graph_x_dimensionality(self) ->int:
"""The graph x dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.graph_x_dimensionality or 0)
@database_statistic
def graph_y_dimensionality(self) ->int:
"""The graph y dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.graph_y_dimensionality or 0)
@database_statistic
def graph_data_size(self) ->int:
"""The total size of the non-empty graph data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size or 0)
@database_statistic
def graph_data_size_min(self) ->int:
"""The minimum size of the non-empty graph tuple data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size_min or 0)
@database_statistic
def graph_data_size_avg(self) ->float:
"""The average size of the non-empty graph tuple data, in bytes."""
return float(self.graph_tuple_stats.graph_data_size_avg or 0)
@database_statistic
def graph_data_size_max(self) ->int:
"""The maximum size of the non-empty graph tuple data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size_max or 0)
@database_statistic
def has_data_flow(self) ->bool:
"""Return whether the graph database has data flow annotations.
This is only true if *all* columns have data flow values.
"""
return self.graph_count and not self.data_flow_null_count
@database_statistic
def data_flow_null_count(self) ->int:
"""The number of database rows without data flow information.
If > 0, then has_data_flow is False.
"""
return self.graph_count - int(self.graph_tuple_stats.
data_flow_steps_count or 0)
@database_statistic
def data_flow_steps_min(self) ->Optional[int]:
"""The minimum data flow steps for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_steps_min or 0)
@database_statistic
def data_flow_steps_avg(self) ->Optional[float]:
"""The average data flow steps for non-empty graphs."""
if self.has_data_flow:
return float(self.graph_tuple_stats.data_flow_steps_avg)
@database_statistic
def data_flow_steps_max(self) ->Optional[int]:
"""The maximum data flow steps for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_steps_max or 0)
@database_statistic
def data_flow_positive_node_count_min(self) ->Optional[int]:
"""The minimum data flow positive node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.
data_flow_positive_node_count_min or 0)
@database_statistic
def data_flow_positive_node_count_avg(self) ->Optional[int]:
"""The minimum data flow average node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.
data_flow_positive_node_count_avg or 0)
@database_statistic
def data_flow_positive_node_count_max(self) ->Optional[int]:
"""The minimum data flow max node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.
data_flow_positive_node_count_max or 0)
@database_statistic
def splits(self) ->List[int]:
"""Return a list of unique split values."""
if self._splits is None:
self.RefreshStats()
return self._splits
@database_statistic
def split_counts(self) ->Dict[int, int]:
"""Return a dictionary mapping split to the number of graphs."""
if self._split_counts is None:
self.RefreshStats()
return self._split_counts
def RefreshStats(self):
"""Compute the database stats for access via the instance properties.
Raises:
ValueError: If the database contains invalid entries, e.g. inconsistent
vector dimensionalities.
"""
with self.ctx.Profile(2, lambda t:
f"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})"
), self.Session() as session:
query = session.query(sql.func.count(GraphTuple.id).label(
'graph_count'), sql.func.count(sql.func.distinct(GraphTuple
.ir_id)).label('ir_count'), sql.func.count(sql.func.
distinct(GraphTuple.split)).label('split_count'), sql.func.
sum(GraphTuple.node_count).label('node_count'), sql.func.
sum(GraphTuple.control_edge_count).label(
'control_edge_count'), sql.func.sum(GraphTuple.
data_edge_count).label('data_edge_count'), sql.func.sum(
GraphTuple.call_edge_count).label('call_edge_count'), sql.
func.sum(GraphTuple.control_edge_count + GraphTuple.
data_edge_count + GraphTuple.call_edge_count).label(
'edge_count'), sql.func.max(GraphTuple.node_count).label(
'node_count_max'), sql.func.max(GraphTuple.
control_edge_count).label('control_edge_count_max'), sql.
func.max(GraphTuple.data_edge_count).label(
'data_edge_count_max'), sql.func.max(GraphTuple.
call_edge_count).label('call_edge_count_max'), sql.func.max
(GraphTuple.call_edge_count).label('call_edge_count_max'),
sql.func.max(GraphTuple.control_edge_count + GraphTuple.
data_edge_count + GraphTuple.call_edge_count).label(
'edge_count_max'), sql.func.max(GraphTuple.
edge_position_max).label('edge_position_max'), sql.func.
count(sql.func.distinct(GraphTuple.node_x_dimensionality)).
label('node_x_dimensionality_count'), sql.func.count(sql.
func.distinct(GraphTuple.node_y_dimensionality)).label(
'node_y_dimensionality_count'), sql.func.count(sql.func.
distinct(GraphTuple.graph_x_dimensionality)).label(
'graph_x_dimensionality_count'), sql.func.count(sql.func.
distinct(GraphTuple.graph_y_dimensionality)).label(
'graph_y_dimensionality_count'), sql.func.max(GraphTuple.
node_x_dimensionality).label('node_x_dimensionality'), sql.
func.max(GraphTuple.node_y_dimensionality).label(
'node_y_dimensionality'), sql.func.max(GraphTuple.
graph_x_dimensionality).label('graph_x_dimensionality'),
sql.func.max(GraphTuple.graph_y_dimensionality).label(
'graph_y_dimensionality'), sql.func.sum(GraphTuple.
pickled_graph_tuple_size).label('graph_data_size'), sql.
func.min(GraphTuple.pickled_graph_tuple_size).label(
'graph_data_size_min'), sql.func.avg(GraphTuple.
pickled_graph_tuple_size).label('graph_data_size_avg'), sql
.func.max(GraphTuple.pickled_graph_tuple_size).label(
'graph_data_size_max'), sql.func.count(GraphTuple.
data_flow_steps).label('data_flow_steps_count'), sql.func.
min(GraphTuple.data_flow_steps).label('data_flow_steps_min'
), sql.func.avg(GraphTuple.data_flow_steps).label(
'data_flow_steps_avg'), sql.func.max(GraphTuple.
data_flow_steps).label('data_flow_steps_max'), sql.func.min
(GraphTuple.data_flow_positive_node_count).label(
'data_flow_positive_node_count_min'), sql.func.avg(
GraphTuple.data_flow_positive_node_count).label(
'data_flow_positive_node_count_avg'), sql.func.max(
GraphTuple.data_flow_positive_node_count).label(
'data_flow_positive_node_count_max'))
query = query.filter(GraphTuple.node_count > 1)
stats = query.one()
if stats.node_x_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'
)
if stats.node_y_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'
)
if stats.graph_x_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'
)
if stats.graph_y_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'
)
if not (stats.data_flow_steps_count == 0 or stats.
data_flow_steps_count == stats.graph_count):
raise ValueError(
f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'
)
self._graph_tuple_stats = stats
with self.Session() as session:
self._splits = sorted(set([row.split for row in session.
query(GraphTuple.split).filter(GraphTuple.split != None
).group_by(GraphTuple.split)]))
self._split_counts = {split: session.query(sql.func.count(
GraphTuple.id)).filter(GraphTuple.split == split).
scalar() for split in self._splits}
@property
def graph_tuple_stats(self):
"""Fetch aggregate graph tuple stats, or compute them if not set."""
if self._graph_tuple_stats is None:
self.RefreshStats()
return self._graph_tuple_stats
@property
def stats_json(self) ->Dict[str, Any]:
"""Fetch the database statics as a JSON dictionary."""
return {name: function(self) for name, function in
database_statistics_registry}
def __repr__(self) ->str:
return (
f"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}."
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):
<|reserved_special_token_0|>
id: int = sql.Column(sql.Integer, primary_key=True)
ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)
split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)
node_count: int = sql.Column(sql.Integer, nullable=False)
control_edge_count: int = sql.Column(sql.Integer, nullable=False)
data_edge_count: int = sql.Column(sql.Integer, nullable=False)
call_edge_count: int = sql.Column(sql.Integer, nullable=False)
edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.
FLOAT(), 'sqlite'), nullable=False)
node_x_dimensionality: int = sql.Column(sql.Integer, default=0,
nullable=False)
node_y_dimensionality: int = sql.Column(sql.Integer, default=0,
nullable=False)
graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,
nullable=False)
graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,
nullable=False)
pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)
data_flow_steps: int = sql.Column(sql.Integer, nullable=True)
data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)
data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)
timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()
data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist
=False, cascade='all, delete-orphan')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@decorators.memoized_property
def tuple(self) ->graph_tuple_lib.GraphTuple:
"""Un-pickle the graph tuple and cache the binary results."""
return pickle.loads(self.data.pickled_graph_tuple)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,
ir_id: int, split: Optional[int]=None) ->'GraphTuple':
"""Create a mapped database instance from the given graph tuple.
This is the preferred method of populating databases of graph tuples, as
it contains the boilerplate to extract and set the metadata columns, and
handles the join between the two data/metadata tables invisibly.
Args:
graph_tuple: The graph tuple to map.
ir_id: The intermediate representation ID.
split: The split value of this graph.
Returns:
A GraphTuple instance.
"""
pickled_graph_tuple = pickle.dumps(graph_tuple)
return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.
node_count, control_edge_count=graph_tuple.control_edge_count,
data_edge_count=graph_tuple.data_edge_count, call_edge_count=
graph_tuple.call_edge_count, edge_position_max=graph_tuple.
edge_position_max, node_x_dimensionality=graph_tuple.
node_x_dimensionality, node_y_dimensionality=graph_tuple.
node_y_dimensionality, graph_x_dimensionality=graph_tuple.
graph_x_dimensionality, graph_y_dimensionality=graph_tuple.
graph_y_dimensionality, pickled_graph_tuple_size=len(
pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(
pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):
"""The pickled graph tuple data. See GraphTuple for the parent table."""
id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',
onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)
sha1: str = sql.Column(sql.String(40), nullable=False, index=True)
pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary
(), nullable=False)
<|reserved_special_token_0|>
class Database(sqlutil.Database):
"""A database of GraphTuples."""
def __init__(self, url: str, must_exist: bool=False, ctx: progress.
ProgressContext=progress.NullContext):
super(Database, self).__init__(url, Base, must_exist=must_exist)
self.ctx = ctx
self._graph_tuple_stats = None
self._splits = None
self._split_counts = None
@database_statistic
def graph_count(self) ->int:
"""The number of non-empty graphs in the database."""
return int(self.graph_tuple_stats.graph_count)
@database_statistic
def ir_count(self) ->int:
"""The number of distinct intermediate representations that the non-empty
graphs are constructed from.
"""
return int(self.graph_tuple_stats.ir_count or 0)
@database_statistic
def split_count(self) ->int:
"""The number of distinct splits in the database."""
return int(self.graph_tuple_stats.split_count or 0)
@database_statistic
def node_count(self) ->int:
"""The total node count in non-empty graphs."""
return int(self.graph_tuple_stats.node_count or 0)
@database_statistic
def edge_count(self) ->int:
"""The total edge count in non-empty graphs."""
return int(self.graph_tuple_stats.edge_count or 0)
@database_statistic
def control_edge_count(self) ->int:
"""The total control edge count in non-empty graphs."""
return int(self.graph_tuple_stats.control_edge_count or 0)
@database_statistic
def data_edge_count(self) ->int:
"""The total data edge count in non-empty graphs."""
return int(self.graph_tuple_stats.data_edge_count or 0)
@database_statistic
def call_edge_count(self) ->int:
"""The total call edge count in non-empty graphs."""
return int(self.graph_tuple_stats.call_edge_count or 0)
@database_statistic
def node_count_max(self) ->int:
"""The maximum node count in non-empty graphs."""
return int(self.graph_tuple_stats.node_count_max or 0)
@database_statistic
def edge_count_max(self) ->int:
"""The maximum edge count in non-empty graphs."""
return int(self.graph_tuple_stats.edge_count_max or 0)
@database_statistic
def control_edge_count_max(self) ->int:
"""The maximum control edge count in non-empty graphs."""
return int(self.graph_tuple_stats.control_edge_count_max or 0)
@database_statistic
def data_edge_count_max(self) ->int:
"""The maximum data edge count in non-empty graphs."""
return int(self.graph_tuple_stats.data_edge_count_max or 0)
@database_statistic
def call_edge_count_max(self) ->int:
"""The maximum call edge count in non-empty graphs."""
return int(self.graph_tuple_stats.call_edge_count_max or 0)
@database_statistic
def edge_position_max(self) ->int:
"""The maximum edge position in non-empty graphs."""
return int(self.graph_tuple_stats.edge_position_max or 0)
@database_statistic
def node_x_dimensionality(self) ->int:
"""The node x dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.node_x_dimensionality or 0)
@database_statistic
def node_y_dimensionality(self) ->int:
"""The node y dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.node_y_dimensionality or 0)
@database_statistic
def graph_x_dimensionality(self) ->int:
"""The graph x dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.graph_x_dimensionality or 0)
@database_statistic
def graph_y_dimensionality(self) ->int:
"""The graph y dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.graph_y_dimensionality or 0)
@database_statistic
def graph_data_size(self) ->int:
"""The total size of the non-empty graph data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size or 0)
@database_statistic
def graph_data_size_min(self) ->int:
"""The minimum size of the non-empty graph tuple data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size_min or 0)
@database_statistic
def graph_data_size_avg(self) ->float:
"""The average size of the non-empty graph tuple data, in bytes."""
return float(self.graph_tuple_stats.graph_data_size_avg or 0)
@database_statistic
def graph_data_size_max(self) ->int:
"""The maximum size of the non-empty graph tuple data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size_max or 0)
@database_statistic
def has_data_flow(self) ->bool:
"""Return whether the graph database has data flow annotations.
This is only true if *all* columns have data flow values.
"""
return self.graph_count and not self.data_flow_null_count
@database_statistic
def data_flow_null_count(self) ->int:
"""The number of database rows without data flow information.
If > 0, then has_data_flow is False.
"""
return self.graph_count - int(self.graph_tuple_stats.
data_flow_steps_count or 0)
@database_statistic
def data_flow_steps_min(self) ->Optional[int]:
"""The minimum data flow steps for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_steps_min or 0)
@database_statistic
def data_flow_steps_avg(self) ->Optional[float]:
"""The average data flow steps for non-empty graphs."""
if self.has_data_flow:
return float(self.graph_tuple_stats.data_flow_steps_avg)
@database_statistic
def data_flow_steps_max(self) ->Optional[int]:
"""The maximum data flow steps for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_steps_max or 0)
@database_statistic
def data_flow_positive_node_count_min(self) ->Optional[int]:
"""The minimum data flow positive node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.
data_flow_positive_node_count_min or 0)
@database_statistic
def data_flow_positive_node_count_avg(self) ->Optional[int]:
"""The minimum data flow average node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.
data_flow_positive_node_count_avg or 0)
@database_statistic
def data_flow_positive_node_count_max(self) ->Optional[int]:
"""The minimum data flow max node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.
data_flow_positive_node_count_max or 0)
@database_statistic
def splits(self) ->List[int]:
"""Return a list of unique split values."""
if self._splits is None:
self.RefreshStats()
return self._splits
@database_statistic
def split_counts(self) ->Dict[int, int]:
"""Return a dictionary mapping split to the number of graphs."""
if self._split_counts is None:
self.RefreshStats()
return self._split_counts
def RefreshStats(self):
"""Compute the database stats for access via the instance properties.
Raises:
ValueError: If the database contains invalid entries, e.g. inconsistent
vector dimensionalities.
"""
with self.ctx.Profile(2, lambda t:
f"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})"
), self.Session() as session:
query = session.query(sql.func.count(GraphTuple.id).label(
'graph_count'), sql.func.count(sql.func.distinct(GraphTuple
.ir_id)).label('ir_count'), sql.func.count(sql.func.
distinct(GraphTuple.split)).label('split_count'), sql.func.
sum(GraphTuple.node_count).label('node_count'), sql.func.
sum(GraphTuple.control_edge_count).label(
'control_edge_count'), sql.func.sum(GraphTuple.
data_edge_count).label('data_edge_count'), sql.func.sum(
GraphTuple.call_edge_count).label('call_edge_count'), sql.
func.sum(GraphTuple.control_edge_count + GraphTuple.
data_edge_count + GraphTuple.call_edge_count).label(
'edge_count'), sql.func.max(GraphTuple.node_count).label(
'node_count_max'), sql.func.max(GraphTuple.
control_edge_count).label('control_edge_count_max'), sql.
func.max(GraphTuple.data_edge_count).label(
'data_edge_count_max'), sql.func.max(GraphTuple.
call_edge_count).label('call_edge_count_max'), sql.func.max
(GraphTuple.call_edge_count).label('call_edge_count_max'),
sql.func.max(GraphTuple.control_edge_count + GraphTuple.
data_edge_count + GraphTuple.call_edge_count).label(
'edge_count_max'), sql.func.max(GraphTuple.
edge_position_max).label('edge_position_max'), sql.func.
count(sql.func.distinct(GraphTuple.node_x_dimensionality)).
label('node_x_dimensionality_count'), sql.func.count(sql.
func.distinct(GraphTuple.node_y_dimensionality)).label(
'node_y_dimensionality_count'), sql.func.count(sql.func.
distinct(GraphTuple.graph_x_dimensionality)).label(
'graph_x_dimensionality_count'), sql.func.count(sql.func.
distinct(GraphTuple.graph_y_dimensionality)).label(
'graph_y_dimensionality_count'), sql.func.max(GraphTuple.
node_x_dimensionality).label('node_x_dimensionality'), sql.
func.max(GraphTuple.node_y_dimensionality).label(
'node_y_dimensionality'), sql.func.max(GraphTuple.
graph_x_dimensionality).label('graph_x_dimensionality'),
sql.func.max(GraphTuple.graph_y_dimensionality).label(
'graph_y_dimensionality'), sql.func.sum(GraphTuple.
pickled_graph_tuple_size).label('graph_data_size'), sql.
func.min(GraphTuple.pickled_graph_tuple_size).label(
'graph_data_size_min'), sql.func.avg(GraphTuple.
pickled_graph_tuple_size).label('graph_data_size_avg'), sql
.func.max(GraphTuple.pickled_graph_tuple_size).label(
'graph_data_size_max'), sql.func.count(GraphTuple.
data_flow_steps).label('data_flow_steps_count'), sql.func.
min(GraphTuple.data_flow_steps).label('data_flow_steps_min'
), sql.func.avg(GraphTuple.data_flow_steps).label(
'data_flow_steps_avg'), sql.func.max(GraphTuple.
data_flow_steps).label('data_flow_steps_max'), sql.func.min
(GraphTuple.data_flow_positive_node_count).label(
'data_flow_positive_node_count_min'), sql.func.avg(
GraphTuple.data_flow_positive_node_count).label(
'data_flow_positive_node_count_avg'), sql.func.max(
GraphTuple.data_flow_positive_node_count).label(
'data_flow_positive_node_count_max'))
query = query.filter(GraphTuple.node_count > 1)
stats = query.one()
if stats.node_x_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'
)
if stats.node_y_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'
)
if stats.graph_x_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'
)
if stats.graph_y_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'
)
if not (stats.data_flow_steps_count == 0 or stats.
data_flow_steps_count == stats.graph_count):
raise ValueError(
f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'
)
self._graph_tuple_stats = stats
with self.Session() as session:
self._splits = sorted(set([row.split for row in session.
query(GraphTuple.split).filter(GraphTuple.split != None
).group_by(GraphTuple.split)]))
self._split_counts = {split: session.query(sql.func.count(
GraphTuple.id)).filter(GraphTuple.split == split).
scalar() for split in self._splits}
@property
def graph_tuple_stats(self):
"""Fetch aggregate graph tuple stats, or compute them if not set."""
if self._graph_tuple_stats is None:
self.RefreshStats()
return self._graph_tuple_stats
@property
def stats_json(self) ->Dict[str, Any]:
"""Fetch the database statics as a JSON dictionary."""
return {name: function(self) for name, function in
database_statistics_registry}
def __repr__(self) ->str:
return (
f"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}."
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):
<|reserved_special_token_0|>
id: int = sql.Column(sql.Integer, primary_key=True)
ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)
split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)
node_count: int = sql.Column(sql.Integer, nullable=False)
control_edge_count: int = sql.Column(sql.Integer, nullable=False)
data_edge_count: int = sql.Column(sql.Integer, nullable=False)
call_edge_count: int = sql.Column(sql.Integer, nullable=False)
edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.
FLOAT(), 'sqlite'), nullable=False)
node_x_dimensionality: int = sql.Column(sql.Integer, default=0,
nullable=False)
node_y_dimensionality: int = sql.Column(sql.Integer, default=0,
nullable=False)
graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,
nullable=False)
graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,
nullable=False)
pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)
data_flow_steps: int = sql.Column(sql.Integer, nullable=True)
data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)
data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)
timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()
data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist
=False, cascade='all, delete-orphan')
@property
def has_data_flow(self) ->bool:
"""Returns whether graph tuple has data flow columns."""
return self.data_flow_steps is not None
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@decorators.memoized_property
def tuple(self) ->graph_tuple_lib.GraphTuple:
"""Un-pickle the graph tuple and cache the binary results."""
return pickle.loads(self.data.pickled_graph_tuple)
def ToFile(self, path: pathlib.Path) ->None:
"""Dump the pickled graph tuple to file.
This is lossy, as the ir_id column is not dumped.
Args:
path: The path of the graph tuple to write.
"""
with open(path, 'wb') as f:
pickle.dump(self.tuple, f)
@classmethod
def FromFile(cls, path: pathlib.Path, ir_id: int):
"""Construct a mapped database instance from a file generated by ToFile().
Args:
path: The path of the file to read.
ir_id: The IR id of the graph tuple.
Returns:
A GraphTuple instance.
"""
with open(path, 'rb') as f:
graph_tuple = pickle.load(f)
return cls.CreateFromGraphTuple(graph_tuple, ir_id)
@classmethod
def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,
ir_id: int, split: Optional[int]=None) ->'GraphTuple':
"""Create a mapped database instance from the given graph tuple.
This is the preferred method of populating databases of graph tuples, as
it contains the boilerplate to extract and set the metadata columns, and
handles the join between the two data/metadata tables invisibly.
Args:
graph_tuple: The graph tuple to map.
ir_id: The intermediate representation ID.
split: The split value of this graph.
Returns:
A GraphTuple instance.
"""
pickled_graph_tuple = pickle.dumps(graph_tuple)
return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.
node_count, control_edge_count=graph_tuple.control_edge_count,
data_edge_count=graph_tuple.data_edge_count, call_edge_count=
graph_tuple.call_edge_count, edge_position_max=graph_tuple.
edge_position_max, node_x_dimensionality=graph_tuple.
node_x_dimensionality, node_y_dimensionality=graph_tuple.
node_y_dimensionality, graph_x_dimensionality=graph_tuple.
graph_x_dimensionality, graph_y_dimensionality=graph_tuple.
graph_y_dimensionality, pickled_graph_tuple_size=len(
pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(
pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))
<|reserved_special_token_0|>
@classmethod
def CreateEmpty(cls, ir_id: int) ->'GraphTuple':
"""Create an "empty" graph tuple.
An empty graph tuple can be used to signal that the conversion to GraphTuple
failed, and is signalled by a node_count of 0. An empty graph tuple has
no corresponding GraphTupleData row.
"""
return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,
data_edge_count=0, call_edge_count=0, edge_position_max=0,
pickled_graph_tuple_size=0)
<|reserved_special_token_0|>
class GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):
"""The pickled graph tuple data. See GraphTuple for the parent table."""
id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',
onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)
sha1: str = sql.Column(sql.String(40), nullable=False, index=True)
pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary
(), nullable=False)
<|reserved_special_token_0|>
class Database(sqlutil.Database):
"""A database of GraphTuples."""
def __init__(self, url: str, must_exist: bool=False, ctx: progress.
ProgressContext=progress.NullContext):
super(Database, self).__init__(url, Base, must_exist=must_exist)
self.ctx = ctx
self._graph_tuple_stats = None
self._splits = None
self._split_counts = None
@database_statistic
def graph_count(self) ->int:
"""The number of non-empty graphs in the database."""
return int(self.graph_tuple_stats.graph_count)
@database_statistic
def ir_count(self) ->int:
"""The number of distinct intermediate representations that the non-empty
graphs are constructed from.
"""
return int(self.graph_tuple_stats.ir_count or 0)
@database_statistic
def split_count(self) ->int:
"""The number of distinct splits in the database."""
return int(self.graph_tuple_stats.split_count or 0)
@database_statistic
def node_count(self) ->int:
"""The total node count in non-empty graphs."""
return int(self.graph_tuple_stats.node_count or 0)
@database_statistic
def edge_count(self) ->int:
"""The total edge count in non-empty graphs."""
return int(self.graph_tuple_stats.edge_count or 0)
@database_statistic
def control_edge_count(self) ->int:
"""The total control edge count in non-empty graphs."""
return int(self.graph_tuple_stats.control_edge_count or 0)
@database_statistic
def data_edge_count(self) ->int:
"""The total data edge count in non-empty graphs."""
return int(self.graph_tuple_stats.data_edge_count or 0)
@database_statistic
def call_edge_count(self) ->int:
"""The total call edge count in non-empty graphs."""
return int(self.graph_tuple_stats.call_edge_count or 0)
@database_statistic
def node_count_max(self) ->int:
"""The maximum node count in non-empty graphs."""
return int(self.graph_tuple_stats.node_count_max or 0)
@database_statistic
def edge_count_max(self) ->int:
"""The maximum edge count in non-empty graphs."""
return int(self.graph_tuple_stats.edge_count_max or 0)
@database_statistic
def control_edge_count_max(self) ->int:
"""The maximum control edge count in non-empty graphs."""
return int(self.graph_tuple_stats.control_edge_count_max or 0)
@database_statistic
def data_edge_count_max(self) ->int:
"""The maximum data edge count in non-empty graphs."""
return int(self.graph_tuple_stats.data_edge_count_max or 0)
@database_statistic
def call_edge_count_max(self) ->int:
"""The maximum call edge count in non-empty graphs."""
return int(self.graph_tuple_stats.call_edge_count_max or 0)
@database_statistic
def edge_position_max(self) ->int:
"""The maximum edge position in non-empty graphs."""
return int(self.graph_tuple_stats.edge_position_max or 0)
@database_statistic
def node_x_dimensionality(self) ->int:
"""The node x dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.node_x_dimensionality or 0)
@database_statistic
def node_y_dimensionality(self) ->int:
"""The node y dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.node_y_dimensionality or 0)
@database_statistic
def graph_x_dimensionality(self) ->int:
"""The graph x dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.graph_x_dimensionality or 0)
@database_statistic
def graph_y_dimensionality(self) ->int:
"""The graph y dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.graph_y_dimensionality or 0)
@database_statistic
def graph_data_size(self) ->int:
"""The total size of the non-empty graph data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size or 0)
@database_statistic
def graph_data_size_min(self) ->int:
"""The minimum size of the non-empty graph tuple data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size_min or 0)
@database_statistic
def graph_data_size_avg(self) ->float:
"""The average size of the non-empty graph tuple data, in bytes."""
return float(self.graph_tuple_stats.graph_data_size_avg or 0)
@database_statistic
def graph_data_size_max(self) ->int:
"""The maximum size of the non-empty graph tuple data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size_max or 0)
@database_statistic
def has_data_flow(self) ->bool:
"""Return whether the graph database has data flow annotations.
This is only true if *all* columns have data flow values.
"""
return self.graph_count and not self.data_flow_null_count
@database_statistic
def data_flow_null_count(self) ->int:
"""The number of database rows without data flow information.
If > 0, then has_data_flow is False.
"""
return self.graph_count - int(self.graph_tuple_stats.
data_flow_steps_count or 0)
@database_statistic
def data_flow_steps_min(self) ->Optional[int]:
"""The minimum data flow steps for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_steps_min or 0)
@database_statistic
def data_flow_steps_avg(self) ->Optional[float]:
"""The average data flow steps for non-empty graphs."""
if self.has_data_flow:
return float(self.graph_tuple_stats.data_flow_steps_avg)
@database_statistic
def data_flow_steps_max(self) ->Optional[int]:
"""The maximum data flow steps for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_steps_max or 0)
@database_statistic
def data_flow_positive_node_count_min(self) ->Optional[int]:
"""The minimum data flow positive node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.
data_flow_positive_node_count_min or 0)
@database_statistic
def data_flow_positive_node_count_avg(self) ->Optional[int]:
"""The minimum data flow average node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.
data_flow_positive_node_count_avg or 0)
@database_statistic
def data_flow_positive_node_count_max(self) ->Optional[int]:
"""The minimum data flow max node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.
data_flow_positive_node_count_max or 0)
@database_statistic
def splits(self) ->List[int]:
"""Return a list of unique split values."""
if self._splits is None:
self.RefreshStats()
return self._splits
@database_statistic
def split_counts(self) ->Dict[int, int]:
"""Return a dictionary mapping split to the number of graphs."""
if self._split_counts is None:
self.RefreshStats()
return self._split_counts
def RefreshStats(self):
"""Compute the database stats for access via the instance properties.
Raises:
ValueError: If the database contains invalid entries, e.g. inconsistent
vector dimensionalities.
"""
with self.ctx.Profile(2, lambda t:
f"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})"
), self.Session() as session:
query = session.query(sql.func.count(GraphTuple.id).label(
'graph_count'), sql.func.count(sql.func.distinct(GraphTuple
.ir_id)).label('ir_count'), sql.func.count(sql.func.
distinct(GraphTuple.split)).label('split_count'), sql.func.
sum(GraphTuple.node_count).label('node_count'), sql.func.
sum(GraphTuple.control_edge_count).label(
'control_edge_count'), sql.func.sum(GraphTuple.
data_edge_count).label('data_edge_count'), sql.func.sum(
GraphTuple.call_edge_count).label('call_edge_count'), sql.
func.sum(GraphTuple.control_edge_count + GraphTuple.
data_edge_count + GraphTuple.call_edge_count).label(
'edge_count'), sql.func.max(GraphTuple.node_count).label(
'node_count_max'), sql.func.max(GraphTuple.
control_edge_count).label('control_edge_count_max'), sql.
func.max(GraphTuple.data_edge_count).label(
                'data_edge_count_max'), sql.func.max(GraphTuple.
                call_edge_count).label('call_edge_count_max'),
sql.func.max(GraphTuple.control_edge_count + GraphTuple.
data_edge_count + GraphTuple.call_edge_count).label(
'edge_count_max'), sql.func.max(GraphTuple.
edge_position_max).label('edge_position_max'), sql.func.
count(sql.func.distinct(GraphTuple.node_x_dimensionality)).
label('node_x_dimensionality_count'), sql.func.count(sql.
func.distinct(GraphTuple.node_y_dimensionality)).label(
'node_y_dimensionality_count'), sql.func.count(sql.func.
distinct(GraphTuple.graph_x_dimensionality)).label(
'graph_x_dimensionality_count'), sql.func.count(sql.func.
distinct(GraphTuple.graph_y_dimensionality)).label(
'graph_y_dimensionality_count'), sql.func.max(GraphTuple.
node_x_dimensionality).label('node_x_dimensionality'), sql.
func.max(GraphTuple.node_y_dimensionality).label(
'node_y_dimensionality'), sql.func.max(GraphTuple.
graph_x_dimensionality).label('graph_x_dimensionality'),
sql.func.max(GraphTuple.graph_y_dimensionality).label(
'graph_y_dimensionality'), sql.func.sum(GraphTuple.
pickled_graph_tuple_size).label('graph_data_size'), sql.
func.min(GraphTuple.pickled_graph_tuple_size).label(
'graph_data_size_min'), sql.func.avg(GraphTuple.
pickled_graph_tuple_size).label('graph_data_size_avg'), sql
.func.max(GraphTuple.pickled_graph_tuple_size).label(
'graph_data_size_max'), sql.func.count(GraphTuple.
data_flow_steps).label('data_flow_steps_count'), sql.func.
min(GraphTuple.data_flow_steps).label('data_flow_steps_min'
), sql.func.avg(GraphTuple.data_flow_steps).label(
'data_flow_steps_avg'), sql.func.max(GraphTuple.
data_flow_steps).label('data_flow_steps_max'), sql.func.min
(GraphTuple.data_flow_positive_node_count).label(
'data_flow_positive_node_count_min'), sql.func.avg(
GraphTuple.data_flow_positive_node_count).label(
'data_flow_positive_node_count_avg'), sql.func.max(
GraphTuple.data_flow_positive_node_count).label(
'data_flow_positive_node_count_max'))
query = query.filter(GraphTuple.node_count > 1)
stats = query.one()
if stats.node_x_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'
)
if stats.node_y_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'
)
if stats.graph_x_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'
)
if stats.graph_y_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'
)
if not (stats.data_flow_steps_count == 0 or stats.
data_flow_steps_count == stats.graph_count):
raise ValueError(
f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'
)
self._graph_tuple_stats = stats
with self.Session() as session:
self._splits = sorted(set([row.split for row in session.
query(GraphTuple.split).filter(GraphTuple.split != None
).group_by(GraphTuple.split)]))
self._split_counts = {split: session.query(sql.func.count(
GraphTuple.id)).filter(GraphTuple.split == split).
scalar() for split in self._splits}
@property
def graph_tuple_stats(self):
"""Fetch aggregate graph tuple stats, or compute them if not set."""
if self._graph_tuple_stats is None:
self.RefreshStats()
return self._graph_tuple_stats
@property
def stats_json(self) ->Dict[str, Any]:
"""Fetch the database statics as a JSON dictionary."""
return {name: function(self) for name, function in
database_statistics_registry}
def __repr__(self) ->str:
return (
f"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}."
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Meta(Base, sqlutil.TablenameFromClassNameMixin):
<|reserved_special_token_0|>
id: int = sql.Column(sql.Integer, primary_key=True)
run_id: str = run_id.RunId.SqlStringColumn()
timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()
key: str = sql.Column(sql.String(128), index=True)
pickled_value: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary(),
nullable=False)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):
"""A table of graph tuples.
For every GraphTuple, there should be a corresponding GraphTupleData row
    containing the pickled graph tuple as a binary blob. The reason for dividing
    the data vertically across two tables is to enable fast scanning
of graph metadata, without needing to churn through a table of pickled binary
blobs.
"""
id: int = sql.Column(sql.Integer, primary_key=True)
ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)
split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)
node_count: int = sql.Column(sql.Integer, nullable=False)
control_edge_count: int = sql.Column(sql.Integer, nullable=False)
data_edge_count: int = sql.Column(sql.Integer, nullable=False)
call_edge_count: int = sql.Column(sql.Integer, nullable=False)
edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.
FLOAT(), 'sqlite'), nullable=False)
node_x_dimensionality: int = sql.Column(sql.Integer, default=0,
nullable=False)
node_y_dimensionality: int = sql.Column(sql.Integer, default=0,
nullable=False)
graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,
nullable=False)
graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,
nullable=False)
pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)
data_flow_steps: int = sql.Column(sql.Integer, nullable=True)
data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)
data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)
timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()
data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist
=False, cascade='all, delete-orphan')
@property
def has_data_flow(self) ->bool:
"""Returns whether graph tuple has data flow columns."""
return self.data_flow_steps is not None
@property
def edge_count(self) ->int:
return (self.control_edge_count + self.data_edge_count + self.
call_edge_count)
@property
def sha1(self) ->str:
"""Return the sha1 of the graph tuple."""
return self.data.sha1
@decorators.memoized_property
def tuple(self) ->graph_tuple_lib.GraphTuple:
"""Un-pickle the graph tuple and cache the binary results."""
return pickle.loads(self.data.pickled_graph_tuple)
def ToFile(self, path: pathlib.Path) ->None:
"""Dump the pickled graph tuple to file.
This is lossy, as the ir_id column is not dumped.
Args:
path: The path of the graph tuple to write.
"""
with open(path, 'wb') as f:
pickle.dump(self.tuple, f)
@classmethod
def FromFile(cls, path: pathlib.Path, ir_id: int):
"""Construct a mapped database instance from a file generated by ToFile().
Args:
path: The path of the file to read.
ir_id: The IR id of the graph tuple.
Returns:
A GraphTuple instance.
"""
with open(path, 'rb') as f:
graph_tuple = pickle.load(f)
return cls.CreateFromGraphTuple(graph_tuple, ir_id)
@classmethod
def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,
ir_id: int, split: Optional[int]=None) ->'GraphTuple':
"""Create a mapped database instance from the given graph tuple.
This is the preferred method of populating databases of graph tuples, as
it contains the boilerplate to extract and set the metadata columns, and
handles the join between the two data/metadata tables invisibly.
Args:
graph_tuple: The graph tuple to map.
ir_id: The intermediate representation ID.
split: The split value of this graph.
Returns:
A GraphTuple instance.
"""
pickled_graph_tuple = pickle.dumps(graph_tuple)
return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.
node_count, control_edge_count=graph_tuple.control_edge_count,
data_edge_count=graph_tuple.data_edge_count, call_edge_count=
graph_tuple.call_edge_count, edge_position_max=graph_tuple.
edge_position_max, node_x_dimensionality=graph_tuple.
node_x_dimensionality, node_y_dimensionality=graph_tuple.
node_y_dimensionality, graph_x_dimensionality=graph_tuple.
graph_x_dimensionality, graph_y_dimensionality=graph_tuple.
graph_y_dimensionality, pickled_graph_tuple_size=len(
pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(
pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))
@classmethod
def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:
Optional[int]=None) ->'GraphTuple':
"""Create a mapped database instance from the given networkx graph.
This is the preferred method of populating databases of graph tuples, as
it contains the boilerplate to extract and set the metadata columns, and
handles the join between the two data/metadata tables invisibly.
Args:
g: The networkx graph.
ir_id: The intermediate representation ID.
split: The split value of this graph.
Returns:
A GraphTuple instance.
"""
graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)
mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split
)
mapped.data_flow_steps = g.graph.get('data_flow_steps')
mapped.data_flow_root_node = g.graph.get('data_flow_root_node')
mapped.data_flow_positive_node_count = g.graph.get(
'data_flow_positive_node_count')
return mapped
@classmethod
def CreateEmpty(cls, ir_id: int) ->'GraphTuple':
"""Create an "empty" graph tuple.
An empty graph tuple can be used to signal that the conversion to GraphTuple
failed, and is signalled by a node_count of 0. An empty graph tuple has
no corresponding GraphTupleData row.
"""
return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,
data_edge_count=0, call_edge_count=0, edge_position_max=0,
pickled_graph_tuple_size=0)
@classmethod
def CreateFromProgramGraph(cls, program_graph: programl_pb2.
ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':
"""Create a mapped database instance from the given annotated graph.
This is the preferred method of populating databases of graph tuples, as
it contains the boilerplate to extract and set the metadata columns, and
handles the join between the two data/metadata tables invisibly.
Args:
annotated_graph: A DataFlowAnnotatedGraph instance.
ir_id: The intermediate representation ID.
split: The split value of this graph.
Returns:
A GraphTuple instance.
"""
graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(
program_graph)
mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)
mapped.data_flow_steps = program_graph.data_flow_steps
mapped.data_flow_root_node = program_graph.data_flow_root_node
mapped.data_flow_positive_node_count = (program_graph.
data_flow_positive_node_count)
return mapped
class GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):
"""The pickled graph tuple data. See GraphTuple for the parent table."""
id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',
onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)
sha1: str = sql.Column(sql.String(40), nullable=False, index=True)
pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary
(), nullable=False)
<|reserved_special_token_0|>
class Database(sqlutil.Database):
"""A database of GraphTuples."""
def __init__(self, url: str, must_exist: bool=False, ctx: progress.
ProgressContext=progress.NullContext):
super(Database, self).__init__(url, Base, must_exist=must_exist)
self.ctx = ctx
self._graph_tuple_stats = None
self._splits = None
self._split_counts = None
@database_statistic
def graph_count(self) ->int:
"""The number of non-empty graphs in the database."""
return int(self.graph_tuple_stats.graph_count)
@database_statistic
def ir_count(self) ->int:
"""The number of distinct intermediate representations that the non-empty
graphs are constructed from.
"""
return int(self.graph_tuple_stats.ir_count or 0)
@database_statistic
def split_count(self) ->int:
"""The number of distinct splits in the database."""
return int(self.graph_tuple_stats.split_count or 0)
@database_statistic
def node_count(self) ->int:
"""The total node count in non-empty graphs."""
return int(self.graph_tuple_stats.node_count or 0)
@database_statistic
def edge_count(self) ->int:
"""The total edge count in non-empty graphs."""
return int(self.graph_tuple_stats.edge_count or 0)
@database_statistic
def control_edge_count(self) ->int:
"""The total control edge count in non-empty graphs."""
return int(self.graph_tuple_stats.control_edge_count or 0)
@database_statistic
def data_edge_count(self) ->int:
"""The total data edge count in non-empty graphs."""
return int(self.graph_tuple_stats.data_edge_count or 0)
@database_statistic
def call_edge_count(self) ->int:
"""The total call edge count in non-empty graphs."""
return int(self.graph_tuple_stats.call_edge_count or 0)
@database_statistic
def node_count_max(self) ->int:
"""The maximum node count in non-empty graphs."""
return int(self.graph_tuple_stats.node_count_max or 0)
@database_statistic
def edge_count_max(self) ->int:
"""The maximum edge count in non-empty graphs."""
return int(self.graph_tuple_stats.edge_count_max or 0)
@database_statistic
def control_edge_count_max(self) ->int:
"""The maximum control edge count in non-empty graphs."""
return int(self.graph_tuple_stats.control_edge_count_max or 0)
@database_statistic
def data_edge_count_max(self) ->int:
"""The maximum data edge count in non-empty graphs."""
return int(self.graph_tuple_stats.data_edge_count_max or 0)
@database_statistic
def call_edge_count_max(self) ->int:
"""The maximum call edge count in non-empty graphs."""
return int(self.graph_tuple_stats.call_edge_count_max or 0)
@database_statistic
def edge_position_max(self) ->int:
"""The maximum edge position in non-empty graphs."""
return int(self.graph_tuple_stats.edge_position_max or 0)
@database_statistic
def node_x_dimensionality(self) ->int:
"""The node x dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.node_x_dimensionality or 0)
@database_statistic
def node_y_dimensionality(self) ->int:
"""The node y dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.node_y_dimensionality or 0)
@database_statistic
def graph_x_dimensionality(self) ->int:
"""The graph x dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.graph_x_dimensionality or 0)
@database_statistic
def graph_y_dimensionality(self) ->int:
"""The graph y dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.graph_y_dimensionality or 0)
@database_statistic
def graph_data_size(self) ->int:
"""The total size of the non-empty graph data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size or 0)
@database_statistic
def graph_data_size_min(self) ->int:
"""The minimum size of the non-empty graph tuple data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size_min or 0)
@database_statistic
def graph_data_size_avg(self) ->float:
"""The average size of the non-empty graph tuple data, in bytes."""
return float(self.graph_tuple_stats.graph_data_size_avg or 0)
@database_statistic
def graph_data_size_max(self) ->int:
"""The maximum size of the non-empty graph tuple data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size_max or 0)
@database_statistic
def has_data_flow(self) ->bool:
"""Return whether the graph database has data flow annotations.
        This is only true if *all* rows have data flow values.
"""
return self.graph_count and not self.data_flow_null_count
@database_statistic
def data_flow_null_count(self) ->int:
"""The number of database rows without data flow information.
If > 0, then has_data_flow is False.
"""
return self.graph_count - int(self.graph_tuple_stats.
data_flow_steps_count or 0)
@database_statistic
def data_flow_steps_min(self) ->Optional[int]:
"""The minimum data flow steps for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_steps_min or 0)
@database_statistic
def data_flow_steps_avg(self) ->Optional[float]:
"""The average data flow steps for non-empty graphs."""
if self.has_data_flow:
return float(self.graph_tuple_stats.data_flow_steps_avg)
@database_statistic
def data_flow_steps_max(self) ->Optional[int]:
"""The maximum data flow steps for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_steps_max or 0)
@database_statistic
def data_flow_positive_node_count_min(self) ->Optional[int]:
"""The minimum data flow positive node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.
data_flow_positive_node_count_min or 0)
@database_statistic
def data_flow_positive_node_count_avg(self) ->Optional[int]:
"""The minimum data flow average node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.
data_flow_positive_node_count_avg or 0)
@database_statistic
def data_flow_positive_node_count_max(self) ->Optional[int]:
"""The minimum data flow max node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.
data_flow_positive_node_count_max or 0)
@database_statistic
def splits(self) ->List[int]:
"""Return a list of unique split values."""
if self._splits is None:
self.RefreshStats()
return self._splits
@database_statistic
def split_counts(self) ->Dict[int, int]:
"""Return a dictionary mapping split to the number of graphs."""
if self._split_counts is None:
self.RefreshStats()
return self._split_counts
def RefreshStats(self):
"""Compute the database stats for access via the instance properties.
Raises:
ValueError: If the database contains invalid entries, e.g. inconsistent
vector dimensionalities.
"""
with self.ctx.Profile(2, lambda t:
f"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})"
), self.Session() as session:
query = session.query(sql.func.count(GraphTuple.id).label(
'graph_count'), sql.func.count(sql.func.distinct(GraphTuple
.ir_id)).label('ir_count'), sql.func.count(sql.func.
distinct(GraphTuple.split)).label('split_count'), sql.func.
sum(GraphTuple.node_count).label('node_count'), sql.func.
sum(GraphTuple.control_edge_count).label(
'control_edge_count'), sql.func.sum(GraphTuple.
data_edge_count).label('data_edge_count'), sql.func.sum(
GraphTuple.call_edge_count).label('call_edge_count'), sql.
func.sum(GraphTuple.control_edge_count + GraphTuple.
data_edge_count + GraphTuple.call_edge_count).label(
'edge_count'), sql.func.max(GraphTuple.node_count).label(
'node_count_max'), sql.func.max(GraphTuple.
control_edge_count).label('control_edge_count_max'), sql.
func.max(GraphTuple.data_edge_count).label(
                'data_edge_count_max'), sql.func.max(GraphTuple.
                call_edge_count).label('call_edge_count_max'),
sql.func.max(GraphTuple.control_edge_count + GraphTuple.
data_edge_count + GraphTuple.call_edge_count).label(
'edge_count_max'), sql.func.max(GraphTuple.
edge_position_max).label('edge_position_max'), sql.func.
count(sql.func.distinct(GraphTuple.node_x_dimensionality)).
label('node_x_dimensionality_count'), sql.func.count(sql.
func.distinct(GraphTuple.node_y_dimensionality)).label(
'node_y_dimensionality_count'), sql.func.count(sql.func.
distinct(GraphTuple.graph_x_dimensionality)).label(
'graph_x_dimensionality_count'), sql.func.count(sql.func.
distinct(GraphTuple.graph_y_dimensionality)).label(
'graph_y_dimensionality_count'), sql.func.max(GraphTuple.
node_x_dimensionality).label('node_x_dimensionality'), sql.
func.max(GraphTuple.node_y_dimensionality).label(
'node_y_dimensionality'), sql.func.max(GraphTuple.
graph_x_dimensionality).label('graph_x_dimensionality'),
sql.func.max(GraphTuple.graph_y_dimensionality).label(
'graph_y_dimensionality'), sql.func.sum(GraphTuple.
pickled_graph_tuple_size).label('graph_data_size'), sql.
func.min(GraphTuple.pickled_graph_tuple_size).label(
'graph_data_size_min'), sql.func.avg(GraphTuple.
pickled_graph_tuple_size).label('graph_data_size_avg'), sql
.func.max(GraphTuple.pickled_graph_tuple_size).label(
'graph_data_size_max'), sql.func.count(GraphTuple.
data_flow_steps).label('data_flow_steps_count'), sql.func.
min(GraphTuple.data_flow_steps).label('data_flow_steps_min'
), sql.func.avg(GraphTuple.data_flow_steps).label(
'data_flow_steps_avg'), sql.func.max(GraphTuple.
data_flow_steps).label('data_flow_steps_max'), sql.func.min
(GraphTuple.data_flow_positive_node_count).label(
'data_flow_positive_node_count_min'), sql.func.avg(
GraphTuple.data_flow_positive_node_count).label(
'data_flow_positive_node_count_avg'), sql.func.max(
GraphTuple.data_flow_positive_node_count).label(
'data_flow_positive_node_count_max'))
query = query.filter(GraphTuple.node_count > 1)
stats = query.one()
if stats.node_x_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'
)
if stats.node_y_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'
)
if stats.graph_x_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'
)
if stats.graph_y_dimensionality_count > 1:
raise ValueError(
f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'
)
if not (stats.data_flow_steps_count == 0 or stats.
data_flow_steps_count == stats.graph_count):
raise ValueError(
f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'
)
self._graph_tuple_stats = stats
with self.Session() as session:
self._splits = sorted(set([row.split for row in session.
query(GraphTuple.split).filter(GraphTuple.split != None
).group_by(GraphTuple.split)]))
self._split_counts = {split: session.query(sql.func.count(
GraphTuple.id)).filter(GraphTuple.split == split).
scalar() for split in self._splits}
@property
def graph_tuple_stats(self):
"""Fetch aggregate graph tuple stats, or compute them if not set."""
if self._graph_tuple_stats is None:
self.RefreshStats()
return self._graph_tuple_stats
@property
def stats_json(self) ->Dict[str, Any]:
"""Fetch the database statics as a JSON dictionary."""
return {name: function(self) for name, function in
database_statistics_registry}
def __repr__(self) ->str:
return (
f"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}."
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# Copyright 2019-2020 the ProGraML authors.
#
# Contact Chris Cummins <[email protected]>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines a database for storing graph tuples."""
import datetime
import pathlib
import pickle
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
import networkx as nx
import sqlalchemy as sql
from sqlalchemy.dialects import sqlite
from deeplearning.ml4pl import run_id
from deeplearning.ml4pl.graphs import programl_pb2
from deeplearning.ml4pl.graphs.labelled import graph_tuple as graph_tuple_lib
from labm8.py import app
from labm8.py import crypto
from labm8.py import decorators
from labm8.py import humanize
from labm8.py import jsonutil
from labm8.py import progress
from labm8.py import sqlutil
FLAGS = app.FLAGS
# Note we declare a graph_db flag at the bottom of this file, after declaring
# the Database class.
Base = sql.ext.declarative.declarative_base()
class Meta(Base, sqlutil.TablenameFromClassNameMixin):
"""A key-value database metadata store, with additional run ID."""
# Unused integer ID for this row.
id: int = sql.Column(sql.Integer, primary_key=True)
# The run ID that generated this <key,value> pair.
run_id: str = run_id.RunId.SqlStringColumn()
timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()
# The <key,value> pair.
key: str = sql.Column(sql.String(128), index=True)
pickled_value: bytes = sql.Column(
sqlutil.ColumnTypes.LargeBinary(), nullable=False
)
@property
def value(self) -> Any:
"""De-pickle the column value."""
return pickle.loads(self.pickled_value)
@classmethod
def Create(cls, key: str, value: Any):
"""Construct a table entry."""
return Meta(key=key, pickled_value=pickle.dumps(value))
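# A hedged usage sketch for the Meta key/value store; `db` (an already-open
# Database), the key, and the value below are hypothetical:
#
#   with db.Session(commit=True) as session:
#     session.add(Meta.Create("dataset_source", {"origin": "example"}))
#     row = session.query(Meta).filter(Meta.key == "dataset_source").one()
#     assert row.value == {"origin": "example"}  # pickled_value, de-pickled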
class GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):
"""A table of graph tuples.
For every GraphTuple, there should be a corresponding GraphTupleData row
  containing the pickled graph tuple as a binary blob. The reason for dividing
  the data vertically across two tables is to enable fast scanning
of graph metadata, without needing to churn through a table of pickled binary
blobs.
"""
id: int = sql.Column(sql.Integer, primary_key=True)
# A reference to the 'id' column of a
# deeplearning.ml4pl.ir.ir_database.IntermediateRepresentationFile database
# row. There is no foreign key relationship here because they are separate
# databases.
ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)
# An integer used to split databases of graphs into separate graphs, e.g.
# train/val/test split.
split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)
# The size of the program graph.
node_count: int = sql.Column(sql.Integer, nullable=False)
control_edge_count: int = sql.Column(sql.Integer, nullable=False)
data_edge_count: int = sql.Column(sql.Integer, nullable=False)
call_edge_count: int = sql.Column(sql.Integer, nullable=False)
# The maximum value of the 'position' attribute of edges.
# Although this is an integral value, we store it as a float when using sqlite
# backend because for an unknown reason, sql.func.max(edge_position_max)
# returns a byte array when aggregating over sqlite backend.
edge_position_max: int = sql.Column(
sql.Integer().with_variant(sqlite.FLOAT(), "sqlite"), nullable=False
)
# The dimensionality of node-level features and labels.
node_x_dimensionality: int = sql.Column(
sql.Integer, default=0, nullable=False
)
node_y_dimensionality: int = sql.Column(
sql.Integer, default=0, nullable=False
)
# The dimensionality of graph-level features and labels.
graph_x_dimensionality: int = sql.Column(
sql.Integer, default=0, nullable=False
)
graph_y_dimensionality: int = sql.Column(
sql.Integer, default=0, nullable=False
)
# The size of the pickled graph tuple in bytes.
pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)
# A copy of attributes from the
# deeplearning.ml4pl.graphs.labelled.data_flow_graphs.DataFlowAnnotatedGraph
  # tuple for storing metadata of data flow analysis graphs. If not relevant,
# these columns may be null.
data_flow_steps: int = sql.Column(sql.Integer, nullable=True)
data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)
data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)
timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()
# Create the one-to-one relationship from GraphTuple to GraphTupleData.
data: "GraphTupleData" = sql.orm.relationship(
"GraphTupleData", uselist=False, cascade="all, delete-orphan"
)
@property
def has_data_flow(self) -> bool:
"""Returns whether graph tuple has data flow columns."""
return self.data_flow_steps is not None
@property
def edge_count(self) -> int:
return self.control_edge_count + self.data_edge_count + self.call_edge_count
# Joined table accessors:
@property
def sha1(self) -> str:
"""Return the sha1 of the graph tuple."""
return self.data.sha1
@decorators.memoized_property
def tuple(self) -> graph_tuple_lib.GraphTuple:
"""Un-pickle the graph tuple and cache the binary results."""
return pickle.loads(self.data.pickled_graph_tuple)
def ToFile(self, path: pathlib.Path) -> None:
"""Dump the pickled graph tuple to file.
This is lossy, as the ir_id column is not dumped.
Args:
path: The path of the graph tuple to write.
"""
with open(path, "wb") as f:
pickle.dump(self.tuple, f)
# Factory methods:
@classmethod
def FromFile(cls, path: pathlib.Path, ir_id: int):
"""Construct a mapped database instance from a file generated by ToFile().
Args:
path: The path of the file to read.
ir_id: The IR id of the graph tuple.
Returns:
A GraphTuple instance.
"""
with open(path, "rb") as f:
graph_tuple = pickle.load(f)
return cls.CreateFromGraphTuple(graph_tuple, ir_id)
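  # A hedged ToFile()/FromFile() round-trip sketch; the path and ir_id are
  # hypothetical. ToFile() is lossy, so ir_id must be re-supplied on load:
  #
  #   mapped.ToFile(pathlib.Path("/tmp/graph_tuple.pickle"))
  #   restored = GraphTuple.FromFile(
  #     pathlib.Path("/tmp/graph_tuple.pickle"), ir_id=1
  #   )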
@classmethod
def CreateFromGraphTuple(
cls,
graph_tuple: graph_tuple_lib.GraphTuple,
ir_id: int,
split: Optional[int] = None,
) -> "GraphTuple":
"""Create a mapped database instance from the given graph tuple.
This is the preferred method of populating databases of graph tuples, as
it contains the boilerplate to extract and set the metadata columns, and
handles the join between the two data/metadata tables invisibly.
Args:
graph_tuple: The graph tuple to map.
ir_id: The intermediate representation ID.
split: The split value of this graph.
Returns:
A GraphTuple instance.
"""
pickled_graph_tuple = pickle.dumps(graph_tuple)
return GraphTuple(
ir_id=ir_id,
split=split,
node_count=graph_tuple.node_count,
control_edge_count=graph_tuple.control_edge_count,
data_edge_count=graph_tuple.data_edge_count,
call_edge_count=graph_tuple.call_edge_count,
edge_position_max=graph_tuple.edge_position_max,
node_x_dimensionality=graph_tuple.node_x_dimensionality,
node_y_dimensionality=graph_tuple.node_y_dimensionality,
graph_x_dimensionality=graph_tuple.graph_x_dimensionality,
graph_y_dimensionality=graph_tuple.graph_y_dimensionality,
pickled_graph_tuple_size=len(pickled_graph_tuple),
data=GraphTupleData(
sha1=crypto.sha1(pickled_graph_tuple),
pickled_graph_tuple=pickled_graph_tuple,
),
)
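  # A hedged population sketch; `db` (an open Database), `graph_tuple`, and
  # ir_id=1 are hypothetical:
  #
  #   with db.Session(commit=True) as session:
  #     session.add(GraphTuple.CreateFromGraphTuple(graph_tuple, ir_id=1))
  #   db.RefreshStats()  # aggregate stats are cached; refresh after writes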
@classmethod
def CreateFromNetworkX(
cls, g: nx.MultiDiGraph, ir_id: int, split: Optional[int] = None,
) -> "GraphTuple":
"""Create a mapped database instance from the given networkx graph.
This is the preferred method of populating databases of graph tuples, as
it contains the boilerplate to extract and set the metadata columns, and
handles the join between the two data/metadata tables invisibly.
Args:
g: The networkx graph.
ir_id: The intermediate representation ID.
split: The split value of this graph.
Returns:
A GraphTuple instance.
"""
graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)
mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split)
mapped.data_flow_steps = g.graph.get("data_flow_steps")
mapped.data_flow_root_node = g.graph.get("data_flow_root_node")
mapped.data_flow_positive_node_count = g.graph.get(
"data_flow_positive_node_count"
)
return mapped
@classmethod
def CreateEmpty(cls, ir_id: int) -> "GraphTuple":
"""Create an "empty" graph tuple.
An empty graph tuple can be used to signal that the conversion to GraphTuple
failed, and is signalled by a node_count of 0. An empty graph tuple has
no corresponding GraphTupleData row.
"""
return GraphTuple(
ir_id=ir_id,
node_count=0,
control_edge_count=0,
data_edge_count=0,
call_edge_count=0,
edge_position_max=0,
pickled_graph_tuple_size=0,
)
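  # A hedged sketch of the empty-graph convention; ir_id is hypothetical.
  # Failed conversions are detected by node_count == 0, and no joined
  # GraphTupleData row is created:
  #
  #   failed = GraphTuple.CreateEmpty(ir_id=1)
  #   assert failed.node_count == 0 and failed.data is None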
@classmethod
def CreateFromProgramGraph(
cls,
program_graph: programl_pb2.ProgramGraph,
ir_id: int,
split: Optional[int] = None,
) -> "GraphTuple":
"""Create a mapped database instance from the given annotated graph.
This is the preferred method of populating databases of graph tuples, as
it contains the boilerplate to extract and set the metadata columns, and
handles the join between the two data/metadata tables invisibly.
Args:
annotated_graph: A DataFlowAnnotatedGraph instance.
ir_id: The intermediate representation ID.
split: The split value of this graph.
Returns:
A GraphTuple instance.
"""
graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(
program_graph
)
mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)
mapped.data_flow_steps = program_graph.data_flow_steps
mapped.data_flow_root_node = program_graph.data_flow_root_node
mapped.data_flow_positive_node_count = (
program_graph.data_flow_positive_node_count
)
return mapped
class GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):
"""The pickled graph tuple data. See GraphTuple for the parent table."""
id: int = sql.Column(
sql.Integer,
sql.ForeignKey("graph_tuples.id", onupdate="CASCADE", ondelete="CASCADE"),
primary_key=True,
)
# The sha1sum of the 'pickled_graph_tuple' column. There is no requirement
# that graph tuples be unique, but, should you wish to enforce this,
# you can group by this sha1 column and prune the duplicates.
sha1: str = sql.Column(sql.String(40), nullable=False, index=True)
# The pickled GraphTuple data.
pickled_graph_tuple: bytes = sql.Column(
sqlutil.ColumnTypes.LargeBinary(), nullable=False
)
# A registry of database statistics, where each entry is a <name, property> tuple.
database_statistics_registry: List[Tuple[str, Callable[["Database"], Any]]] = []
def database_statistic(func):
"""A decorator to mark a method on a Database as a database static.
Database statistics can be accessed using Database.stats_json property to
retrieve a <name, vale> dictionary.
"""
global database_statistics_registry
database_statistics_registry.append((func.__name__, func))
return property(func)
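# A hedged illustration of the decorator mechanics: each decorated method is
# appended to database_statistics_registry as a (name, function) pair, then
# exposed as a read-only property, so stats_json can enumerate every statistic
# without hard-coding names. The class and method below are hypothetical:
#
#   class ExampleDatabase(sqlutil.Database):
#     @database_statistic
#     def row_count(self) -> int:  # registered as ("row_count", <function>)
#       ...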
class Database(sqlutil.Database):
"""A database of GraphTuples."""
def __init__(
self,
url: str,
must_exist: bool = False,
ctx: progress.ProgressContext = progress.NullContext,
):
super(Database, self).__init__(url, Base, must_exist=must_exist)
self.ctx = ctx
# Lazily evaluated attributes.
self._graph_tuple_stats = None
self._splits = None
self._split_counts = None
##############################################################################
# Database stats. These are evaluated lazily and the results cached. There is
# no cache invalidation strategy - after modifying the database, you must
# manually call RefreshStats() to ensure that stale stats are re-computed.
##############################################################################
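  # A hedged cache-invalidation sketch; the sqlite URL is hypothetical:
  #
  #   db = Database("sqlite:////tmp/graphs.db")
  #   ...bulk-insert GraphTuple rows...
  #   db.RefreshStats()      # recompute the cached aggregates
  #   print(db.graph_count)  # now reflects the inserted rows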
@database_statistic
def graph_count(self) -> int:
"""The number of non-empty graphs in the database."""
return int(self.graph_tuple_stats.graph_count)
@database_statistic
def ir_count(self) -> int:
"""The number of distinct intermediate representations that the non-empty
graphs are constructed from.
"""
return int(self.graph_tuple_stats.ir_count or 0)
@database_statistic
def split_count(self) -> int:
"""The number of distinct splits in the database."""
return int(self.graph_tuple_stats.split_count or 0)
@database_statistic
def node_count(self) -> int:
"""The total node count in non-empty graphs."""
return int(self.graph_tuple_stats.node_count or 0)
@database_statistic
def edge_count(self) -> int:
"""The total edge count in non-empty graphs."""
return int(self.graph_tuple_stats.edge_count or 0)
@database_statistic
def control_edge_count(self) -> int:
"""The total control edge count in non-empty graphs."""
return int(self.graph_tuple_stats.control_edge_count or 0)
@database_statistic
def data_edge_count(self) -> int:
"""The total data edge count in non-empty graphs."""
return int(self.graph_tuple_stats.data_edge_count or 0)
@database_statistic
def call_edge_count(self) -> int:
"""The total call edge count in non-empty graphs."""
return int(self.graph_tuple_stats.call_edge_count or 0)
@database_statistic
def node_count_max(self) -> int:
"""The maximum node count in non-empty graphs."""
return int(self.graph_tuple_stats.node_count_max or 0)
@database_statistic
def edge_count_max(self) -> int:
"""The maximum edge count in non-empty graphs."""
return int(self.graph_tuple_stats.edge_count_max or 0)
@database_statistic
def control_edge_count_max(self) -> int:
"""The maximum control edge count in non-empty graphs."""
return int(self.graph_tuple_stats.control_edge_count_max or 0)
@database_statistic
def data_edge_count_max(self) -> int:
"""The maximum data edge count in non-empty graphs."""
return int(self.graph_tuple_stats.data_edge_count_max or 0)
@database_statistic
def call_edge_count_max(self) -> int:
"""The maximum call edge count in non-empty graphs."""
return int(self.graph_tuple_stats.call_edge_count_max or 0)
@database_statistic
def edge_position_max(self) -> int:
"""The maximum edge position in non-empty graphs."""
return int(self.graph_tuple_stats.edge_position_max or 0)
@database_statistic
def node_x_dimensionality(self) -> int:
"""The node x dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.node_x_dimensionality or 0)
@database_statistic
def node_y_dimensionality(self) -> int:
"""The node y dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.node_y_dimensionality or 0)
@database_statistic
def graph_x_dimensionality(self) -> int:
"""The graph x dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.graph_x_dimensionality or 0)
@database_statistic
def graph_y_dimensionality(self) -> int:
"""The graph y dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.graph_y_dimensionality or 0)
@database_statistic
def graph_data_size(self) -> int:
"""The total size of the non-empty graph data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size or 0)
@database_statistic
def graph_data_size_min(self) -> int:
"""The minimum size of the non-empty graph tuple data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size_min or 0)
@database_statistic
def graph_data_size_avg(self) -> float:
"""The average size of the non-empty graph tuple data, in bytes."""
return float(self.graph_tuple_stats.graph_data_size_avg or 0)
@database_statistic
def graph_data_size_max(self) -> int:
"""The maximum size of the non-empty graph tuple data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size_max or 0)
@database_statistic
def has_data_flow(self) -> bool:
"""Return whether the graph database has data flow annotations.
    This is only true if *all* rows have data flow values.
"""
return self.graph_count and not self.data_flow_null_count
@database_statistic
def data_flow_null_count(self) -> int:
"""The number of database rows without data flow information.
If > 0, then has_data_flow is False.
"""
return self.graph_count - int(
self.graph_tuple_stats.data_flow_steps_count or 0
)
@database_statistic
def data_flow_steps_min(self) -> Optional[int]:
"""The minimum data flow steps for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_steps_min or 0)
@database_statistic
def data_flow_steps_avg(self) -> Optional[float]:
"""The average data flow steps for non-empty graphs."""
if self.has_data_flow:
return float(self.graph_tuple_stats.data_flow_steps_avg)
@database_statistic
def data_flow_steps_max(self) -> Optional[int]:
"""The maximum data flow steps for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_steps_max or 0)
@database_statistic
def data_flow_positive_node_count_min(self) -> Optional[int]:
"""The minimum data flow positive node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_positive_node_count_min or 0)
@database_statistic
def data_flow_positive_node_count_avg(self) -> Optional[int]:
"""The minimum data flow average node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_positive_node_count_avg or 0)
@database_statistic
def data_flow_positive_node_count_max(self) -> Optional[int]:
"""The minimum data flow max node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_positive_node_count_max or 0)
@database_statistic
def splits(self) -> List[int]:
"""Return a list of unique split values."""
if self._splits is None:
self.RefreshStats()
return self._splits
@database_statistic
def split_counts(self) -> Dict[int, int]:
"""Return a dictionary mapping split to the number of graphs."""
if self._split_counts is None:
self.RefreshStats()
return self._split_counts
def RefreshStats(self):
"""Compute the database stats for access via the instance properties.
Raises:
ValueError: If the database contains invalid entries, e.g. inconsistent
vector dimensionalities.
"""
with self.ctx.Profile(
2,
lambda t: (
"Computed stats over "
f"{humanize.BinaryPrefix(stats.graph_data_size, 'B')} database "
f"({humanize.Plural(stats.graph_count, 'graph')})"
),
), self.Session() as session:
query = session.query(
# Graph and IR counts.
sql.func.count(GraphTuple.id).label("graph_count"),
sql.func.count(sql.func.distinct(GraphTuple.ir_id)).label("ir_count"),
sql.func.count(sql.func.distinct(GraphTuple.split)).label(
"split_count"
),
# Node and edge attribute sums.
sql.func.sum(GraphTuple.node_count).label("node_count"),
sql.func.sum(GraphTuple.control_edge_count).label("control_edge_count"),
sql.func.sum(GraphTuple.data_edge_count).label("data_edge_count"),
sql.func.sum(GraphTuple.call_edge_count).label("call_edge_count"),
sql.func.sum(
GraphTuple.control_edge_count
+ GraphTuple.data_edge_count
+ GraphTuple.call_edge_count
).label("edge_count"),
# Node and edge attribute maximums.
sql.func.max(GraphTuple.node_count).label("node_count_max"),
sql.func.max(GraphTuple.control_edge_count).label(
"control_edge_count_max"
),
sql.func.max(GraphTuple.data_edge_count).label("data_edge_count_max"),
sql.func.max(GraphTuple.call_edge_count).label("call_edge_count_max"),
sql.func.max(
GraphTuple.control_edge_count
+ GraphTuple.data_edge_count
+ GraphTuple.call_edge_count
).label("edge_count_max"),
sql.func.max(GraphTuple.edge_position_max).label("edge_position_max"),
# Feature and label dimensionality counts. Each of these columns
# should be one, showing that there is a single value for all graph
# tuples.
sql.func.count(
sql.func.distinct(GraphTuple.node_x_dimensionality)
).label("node_x_dimensionality_count"),
sql.func.count(
sql.func.distinct(GraphTuple.node_y_dimensionality)
).label("node_y_dimensionality_count"),
sql.func.count(
sql.func.distinct(GraphTuple.graph_x_dimensionality)
).label("graph_x_dimensionality_count"),
sql.func.count(
sql.func.distinct(GraphTuple.graph_y_dimensionality)
).label("graph_y_dimensionality_count"),
# Feature and label dimensionalities.
sql.func.max(GraphTuple.node_x_dimensionality).label(
"node_x_dimensionality"
),
sql.func.max(GraphTuple.node_y_dimensionality).label(
"node_y_dimensionality"
),
sql.func.max(GraphTuple.graph_x_dimensionality).label(
"graph_x_dimensionality"
),
sql.func.max(GraphTuple.graph_y_dimensionality).label(
"graph_y_dimensionality"
),
# Graph tuple sizes.
sql.func.sum(GraphTuple.pickled_graph_tuple_size).label(
"graph_data_size"
),
sql.func.min(GraphTuple.pickled_graph_tuple_size).label(
"graph_data_size_min"
),
sql.func.avg(GraphTuple.pickled_graph_tuple_size).label(
"graph_data_size_avg"
),
sql.func.max(GraphTuple.pickled_graph_tuple_size).label(
"graph_data_size_max"
),
# Data flow column null counts.
sql.func.count(GraphTuple.data_flow_steps).label(
"data_flow_steps_count"
),
# Data flow step counts.
sql.func.min(GraphTuple.data_flow_steps).label("data_flow_steps_min"),
sql.func.avg(GraphTuple.data_flow_steps).label("data_flow_steps_avg"),
sql.func.max(GraphTuple.data_flow_steps).label("data_flow_steps_max"),
# Data flow positive node count.
sql.func.min(GraphTuple.data_flow_positive_node_count).label(
"data_flow_positive_node_count_min"
),
sql.func.avg(GraphTuple.data_flow_positive_node_count).label(
"data_flow_positive_node_count_avg"
),
sql.func.max(GraphTuple.data_flow_positive_node_count).label(
"data_flow_positive_node_count_max"
),
)
# Ignore "empty" graphs.
query = query.filter(GraphTuple.node_count > 1)
# Compute the stats.
stats = query.one()
# Check that databases have a consistent value for dimensionalities.
if stats.node_x_dimensionality_count > 1:
raise ValueError(
f"Database contains {stats.node_x_dimensionality_count} "
"distinct node x dimensionalities"
)
if stats.node_y_dimensionality_count > 1:
raise ValueError(
f"Database contains {stats.node_y_dimensionality_count} "
"distinct node y dimensionalities"
)
if stats.graph_x_dimensionality_count > 1:
raise ValueError(
f"Database contains {stats.graph_x_dimensionality_count} "
"distinct graph x dimensionalities"
)
if stats.graph_y_dimensionality_count > 1:
raise ValueError(
f"Database contains {stats.graph_y_dimensionality_count} "
"distinct graph y dimensionalities"
)
# Check that every graph has data flow attributes, or none of them do.
if not (
stats.data_flow_steps_count == 0
or stats.data_flow_steps_count == stats.graph_count
):
raise ValueError(
f"{stats.graph_count - stats.data_flow_steps_count} of "
f"{stats.graph_count} graphs have no data_flow_steps "
"value"
)
self._graph_tuple_stats = stats
with self.Session() as session:
self._splits = sorted(
set(
[
row.split
for row in session.query(GraphTuple.split)
.filter(GraphTuple.split != None)
.group_by(GraphTuple.split)
]
)
)
self._split_counts = {
split: session.query(sql.func.count(GraphTuple.id))
.filter(GraphTuple.split == split)
.scalar()
for split in self._splits
}
@property
def graph_tuple_stats(self):
"""Fetch aggregate graph tuple stats, or compute them if not set."""
if self._graph_tuple_stats is None:
self.RefreshStats()
return self._graph_tuple_stats
@property
def stats_json(self) -> Dict[str, Any]:
"""Fetch the database statics as a JSON dictionary."""
return {
name: function(self) for name, function in database_statistics_registry
}
def __repr__(self) -> str:
return (
f"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with "
f"dimensionalities: node_x={self.node_x_dimensionality}, "
f"node_y={self.node_y_dimensionality}, "
f"graph_x={self.graph_x_dimensionality}, "
f"graph_y={self.graph_y_dimensionality}."
)
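# A minimal stats-dump sketch, separate from Main() below; the sqlite URL and
# the assumption that the database already exists are hypothetical.
def _example_print_stats():
  db = Database("sqlite:////tmp/example_graphs.db", must_exist=True)
  # Accessing stats_json triggers a lazy RefreshStats() on first use.
  print(jsonutil.format_json(db.stats_json))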
# Deferred declaration of flags because we need to reference Database class.
app.DEFINE_database(
"graph_db", Database, None, "The database to read graph tuples from.",
)
def Main():
"""Main entry point."""
graph_db = FLAGS.graph_db()
print(jsonutil.format_json(graph_db.stats_json))
if __name__ == "__main__":
app.Run(Main)
|
flexible
|
{
"blob_id": "09788cf04ab5190a33b43e3756f4dbd7d78977a5",
"index": 581,
"step-1": "<mask token>\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <mask token>\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<mask token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def 
node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n 
def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 
'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<mask token>\n",
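
The step that ends above exposes a stats_json accessor that builds its dictionary by iterating a module-level database_statistics_registry, while the database_statistic decorator that fills that registry remains masked. A minimal, self-contained sketch of the register-and-expose pattern this implies — the decorator internals below are assumptions, not the module's actual code:

    from typing import Any, Callable, List, Tuple

    database_statistics_registry: List[Tuple[str, Callable[..., Any]]] = []

    def database_statistic(func: Callable[..., Any]) -> property:
        # Record (name, function) so a stats_json-style accessor can evaluate
        # every statistic later, then expose the method as a read-only property.
        database_statistics_registry.append((func.__name__, func))
        return property(func)

    class DemoStats:
        @database_statistic
        def graph_count(self) -> int:
            return 42

    demo = DemoStats()
    print(demo.graph_count)  # 42, read through the property
    print({name: fn(demo) for name, fn in database_statistics_registry})
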
"step-2": "<mask token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <mask token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n <mask token>\n <mask token>\n <mask token>\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n <mask token>\n <mask token>\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n <mask token>\n <mask token>\n <mask token>\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. 
See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<mask token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty 
graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to 
the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n 
stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<mask token>\n",
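
Step-2 above fills in the one-to-one data relationship from GraphTuple to GraphTupleData, splitting cheaply scannable metadata from the pickled blob so that metadata queries never touch the binary column. A runnable sketch of that horizontal split, using illustrative table and column names rather than the module's real schema:

    import pickle

    import sqlalchemy as sql
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship, sessionmaker

    Base = declarative_base()

    class Row(Base):
        __tablename__ = "rows"
        id = sql.Column(sql.Integer, primary_key=True)
        node_count = sql.Column(sql.Integer, nullable=False)
        # One-to-one join to the blob table; deleting a Row deletes its blob.
        data = relationship("RowData", uselist=False, cascade="all, delete-orphan")

    class RowData(Base):
        __tablename__ = "row_data"
        id = sql.Column(sql.Integer, sql.ForeignKey("rows.id", ondelete="CASCADE"),
                        primary_key=True)
        pickled_value = sql.Column(sql.LargeBinary, nullable=False)

    engine = sql.create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(Row(node_count=3, data=RowData(pickled_value=pickle.dumps([1, 2, 3]))))
    session.commit()
    # Metadata-only scan: the LargeBinary column is never deserialized here.
    print(session.query(Row.node_count).all())  # [(3,)]
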
"step-3": "<mask token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <mask token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n <mask token>\n <mask token>\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n\n @classmethod\n def FromFile(cls, path: pathlib.Path, ir_id: int):\n \"\"\"Construct a mapped database instance from a file generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n with open(path, 'rb') as f:\n graph_tuple = pickle.load(f)\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n 
node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n <mask token>\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n <mask token>\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<mask token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n 
\"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n 
\"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), 
sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<mask token>\n",
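
Step-3 above reveals CreateFromGraphTuple, which calls pickle.dumps exactly once and derives both the pickled_graph_tuple_size column and the sha1 content hash from those same bytes. The serialize-once pattern in isolation, with hashlib standing in for labm8's crypto.sha1 helper:

    import hashlib
    import pickle

    def pack(obj):
        """Return (blob, size, sha1_hex) from a single pickle.dumps call."""
        blob = pickle.dumps(obj)
        return blob, len(blob), hashlib.sha1(blob).hexdigest()

    blob, size, digest = pack({"node_count": 7})
    assert pickle.loads(blob) == {"node_count": 7}
    print(size, len(digest))  # payload size in bytes, 40-char hex digest
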
"step-4": "<mask token>\n\n\nclass Meta(Base, sqlutil.TablenameFromClassNameMixin):\n <mask token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n run_id: str = run_id.RunId.SqlStringColumn()\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n key: str = sql.Column(sql.String(128), index=True)\n pickled_value: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary(),\n nullable=False)\n <mask token>\n <mask token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"A table of graph tuples.\n\n For every GraphTuple, there should be a corresponding GraphTupleData row\n containing the pickled graph tuple as a binary blob. The reason for dividing\n the data horizontally across two tables is to enable fast scanning\n of graph metadata, without needing to churn through a table of pickled binary\n blobs.\n \"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) ->int:\n return (self.control_edge_count + self.data_edge_count + self.\n call_edge_count)\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n\n @classmethod\n def FromFile(cls, path: pathlib.Path, ir_id: int):\n \"\"\"Construct a mapped database instance from a file generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n with open(path, 'rb') as f:\n graph_tuple = pickle.load(f)\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n 
@classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. 
An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<mask token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def 
node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if 
self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n 
distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<mask token>\n",
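
Across the steps, GraphTuple.tuple un-pickles the stored blob behind decorators.memoized_property so that repeated access deserializes only once per instance. labm8's actual implementation does not appear in this record; a plausible stand-in that matches the visible usage:

    import functools

    def memoized_property(func):
        attr = "_memoized_" + func.__name__

        @property
        @functools.wraps(func)
        def wrapper(self):
            if not hasattr(self, attr):
                setattr(self, attr, func(self))
            return getattr(self, attr)

        return wrapper

    class Demo:
        calls = 0

        @memoized_property
        def expensive(self):
            Demo.calls += 1
            return sum(range(1000))

    d = Demo()
    assert d.expensive == d.expensive
    assert Demo.calls == 1  # the body ran exactly once
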
"step-5": "# Copyright 2019-2020 the ProGraML authors.\n#\n# Contact Chris Cummins <[email protected]>.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module defines a database for storing graph tuples.\"\"\"\nimport datetime\nimport pathlib\nimport pickle\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\n\nimport networkx as nx\nimport sqlalchemy as sql\nfrom sqlalchemy.dialects import sqlite\n\nfrom deeplearning.ml4pl import run_id\nfrom deeplearning.ml4pl.graphs import programl_pb2\nfrom deeplearning.ml4pl.graphs.labelled import graph_tuple as graph_tuple_lib\nfrom labm8.py import app\nfrom labm8.py import crypto\nfrom labm8.py import decorators\nfrom labm8.py import humanize\nfrom labm8.py import jsonutil\nfrom labm8.py import progress\nfrom labm8.py import sqlutil\n\n\nFLAGS = app.FLAGS\n# Note we declare a graph_db flag at the bottom of this file, after declaring\n# the Database class.\n\nBase = sql.ext.declarative.declarative_base()\n\n\nclass Meta(Base, sqlutil.TablenameFromClassNameMixin):\n \"\"\"A key-value database metadata store, with additional run ID.\"\"\"\n\n # Unused integer ID for this row.\n id: int = sql.Column(sql.Integer, primary_key=True)\n\n # The run ID that generated this <key,value> pair.\n run_id: str = run_id.RunId.SqlStringColumn()\n\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n\n # The <key,value> pair.\n key: str = sql.Column(sql.String(128), index=True)\n pickled_value: bytes = sql.Column(\n sqlutil.ColumnTypes.LargeBinary(), nullable=False\n )\n\n @property\n def value(self) -> Any:\n \"\"\"De-pickle the column value.\"\"\"\n return pickle.loads(self.pickled_value)\n\n @classmethod\n def Create(cls, key: str, value: Any):\n \"\"\"Construct a table entry.\"\"\"\n return Meta(key=key, pickled_value=pickle.dumps(value))\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"A table of graph tuples.\n\n For every GraphTuple, there should be a corresponding GraphTupleData row\n containing the pickled graph tuple as a binary blob. The reason for dividing\n the data horizontally across two tables is to enable fast scanning\n of graph metadata, without needing to churn through a table of pickled binary\n blobs.\n \"\"\"\n\n id: int = sql.Column(sql.Integer, primary_key=True)\n\n # A reference to the 'id' column of a\n # deeplearning.ml4pl.ir.ir_database.IntermediateRepresentationFile database\n # row. 
There is no foreign key relationship here because they are separate\n # databases.\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n\n # An integer used to split databases of graphs into separate graphs, e.g.\n # train/val/test split.\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n\n # The size of the program graph.\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n\n # The maximum value of the 'position' attribute of edges.\n # Although this is an integral value, we store it as a float when using sqlite\n # backend because for an unknown reason, sql.func.max(edge_position_max)\n # returns a byte array when aggregating over sqlite backend.\n edge_position_max: int = sql.Column(\n sql.Integer().with_variant(sqlite.FLOAT(), \"sqlite\"), nullable=False\n )\n\n # The dimensionality of node-level features and labels.\n node_x_dimensionality: int = sql.Column(\n sql.Integer, default=0, nullable=False\n )\n node_y_dimensionality: int = sql.Column(\n sql.Integer, default=0, nullable=False\n )\n\n # The dimensionality of graph-level features and labels.\n graph_x_dimensionality: int = sql.Column(\n sql.Integer, default=0, nullable=False\n )\n graph_y_dimensionality: int = sql.Column(\n sql.Integer, default=0, nullable=False\n )\n\n # The size of the pickled graph tuple in bytes.\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n\n # A copy of attributes from the\n # deeplearning.ml4pl.graphs.labelled.data_flow_graphs.DataFlowAnnotatedGraph\n # tuple for storing metadata of data flow analysis graphs. 
If not relevant ,\n # these columns may be null.\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n\n # Create the one-to-one relationship from GraphTuple to GraphTupleData.\n data: \"GraphTupleData\" = sql.orm.relationship(\n \"GraphTupleData\", uselist=False, cascade=\"all, delete-orphan\"\n )\n\n @property\n def has_data_flow(self) -> bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) -> int:\n return self.control_edge_count + self.data_edge_count + self.call_edge_count\n\n # Joined table accessors:\n\n @property\n def sha1(self) -> str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) -> graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) -> None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, \"wb\") as f:\n pickle.dump(self.tuple, f)\n\n # Factory methods:\n\n @classmethod\n def FromFile(cls, path: pathlib.Path, ir_id: int):\n \"\"\"Construct a mapped database instance from a file generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n with open(path, \"rb\") as f:\n graph_tuple = pickle.load(f)\n\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n @classmethod\n def CreateFromGraphTuple(\n cls,\n graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int,\n split: Optional[int] = None,\n ) -> \"GraphTuple\":\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(\n ir_id=ir_id,\n split=split,\n node_count=graph_tuple.node_count,\n control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count,\n call_edge_count=graph_tuple.call_edge_count,\n edge_position_max=graph_tuple.edge_position_max,\n node_x_dimensionality=graph_tuple.node_x_dimensionality,\n node_y_dimensionality=graph_tuple.node_y_dimensionality,\n graph_x_dimensionality=graph_tuple.graph_x_dimensionality,\n graph_y_dimensionality=graph_tuple.graph_y_dimensionality,\n pickled_graph_tuple_size=len(pickled_graph_tuple),\n data=GraphTupleData(\n sha1=crypto.sha1(pickled_graph_tuple),\n pickled_graph_tuple=pickled_graph_tuple,\n ),\n )\n\n @classmethod\n def CreateFromNetworkX(\n cls, g: nx.MultiDiGraph, ir_id: int, split: Optional[int] = None,\n ) -> \"GraphTuple\":\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the 
boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split)\n mapped.data_flow_steps = g.graph.get(\"data_flow_steps\")\n mapped.data_flow_root_node = g.graph.get(\"data_flow_root_node\")\n mapped.data_flow_positive_node_count = g.graph.get(\n \"data_flow_positive_node_count\"\n )\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) -> \"GraphTuple\":\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(\n ir_id=ir_id,\n node_count=0,\n control_edge_count=0,\n data_edge_count=0,\n call_edge_count=0,\n edge_position_max=0,\n pickled_graph_tuple_size=0,\n )\n\n @classmethod\n def CreateFromProgramGraph(\n cls,\n program_graph: programl_pb2.ProgramGraph,\n ir_id: int,\n split: Optional[int] = None,\n ) -> \"GraphTuple\":\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph\n )\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (\n program_graph.data_flow_positive_node_count\n )\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n\n id: int = sql.Column(\n sql.Integer,\n sql.ForeignKey(\"graph_tuples.id\", onupdate=\"CASCADE\", ondelete=\"CASCADE\"),\n primary_key=True,\n )\n\n # The sha1sum of the 'pickled_graph_tuple' column. 
There is no requirement\n # that graph tuples be unique, but, should you wish to enforce this,\n # you can group by this sha1 column and prune the duplicates.\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n\n # The pickled GraphTuple data.\n pickled_graph_tuple: bytes = sql.Column(\n sqlutil.ColumnTypes.LargeBinary(), nullable=False\n )\n\n\n# A registry of database statics, where each entry is a <name, property> tuple.\ndatabase_statistics_registry: List[Tuple[str, Callable[[\"Database\"], Any]]] = []\n\n\ndef database_statistic(func):\n \"\"\"A decorator to mark a method on a Database as a database static.\n\n Database statistics can be accessed using Database.stats_json property to\n retrieve a <name, vale> dictionary.\n \"\"\"\n global database_statistics_registry\n database_statistics_registry.append((func.__name__, func))\n return property(func)\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(\n self,\n url: str,\n must_exist: bool = False,\n ctx: progress.ProgressContext = progress.NullContext,\n ):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n\n # Lazily evaluated attributes.\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n ##############################################################################\n # Database stats. These are evaluated lazily and the results cached. There is\n # no cache invalidation strategy - after modifying the database, you must\n # manually call RefreshStats() to ensure that stale stats are re-computed.\n ##############################################################################\n\n @database_statistic\n def graph_count(self) -> int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) -> int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) -> int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) -> int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) -> int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) -> int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) -> int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) -> int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) -> int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) -> int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) -> int:\n \"\"\"The 
maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) -> int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) -> int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) -> int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) -> int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) -> int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) -> int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) -> int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) -> int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) -> int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) -> float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) -> int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) -> bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) -> int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(\n self.graph_tuple_stats.data_flow_steps_count or 0\n )\n\n @database_statistic\n def data_flow_steps_min(self) -> Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) -> Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) -> Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) -> 
Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) -> Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) -> Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) -> List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) -> Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(\n 2,\n lambda t: (\n \"Computed stats over \"\n f\"{humanize.BinaryPrefix(stats.graph_data_size, 'B')} database \"\n f\"({humanize.Plural(stats.graph_count, 'graph')})\"\n ),\n ), self.Session() as session:\n query = session.query(\n # Graph and IR counts.\n sql.func.count(GraphTuple.id).label(\"graph_count\"),\n sql.func.count(sql.func.distinct(GraphTuple.ir_id)).label(\"ir_count\"),\n sql.func.count(sql.func.distinct(GraphTuple.split)).label(\n \"split_count\"\n ),\n # Node and edge attribute sums.\n sql.func.sum(GraphTuple.node_count).label(\"node_count\"),\n sql.func.sum(GraphTuple.control_edge_count).label(\"control_edge_count\"),\n sql.func.sum(GraphTuple.data_edge_count).label(\"data_edge_count\"),\n sql.func.sum(GraphTuple.call_edge_count).label(\"call_edge_count\"),\n sql.func.sum(\n GraphTuple.control_edge_count\n + GraphTuple.data_edge_count\n + GraphTuple.call_edge_count\n ).label(\"edge_count\"),\n # Node and edge attribute maximums.\n sql.func.max(GraphTuple.node_count).label(\"node_count_max\"),\n sql.func.max(GraphTuple.control_edge_count).label(\n \"control_edge_count_max\"\n ),\n sql.func.max(GraphTuple.data_edge_count).label(\"data_edge_count_max\"),\n sql.func.max(GraphTuple.call_edge_count).label(\"call_edge_count_max\"),\n sql.func.max(GraphTuple.call_edge_count).label(\"call_edge_count_max\"),\n sql.func.max(\n GraphTuple.control_edge_count\n + GraphTuple.data_edge_count\n + GraphTuple.call_edge_count\n ).label(\"edge_count_max\"),\n sql.func.max(GraphTuple.edge_position_max).label(\"edge_position_max\"),\n # Feature and label dimensionality counts. 
Each of these columns\n # should be one, showing that there is a single value for all graph\n # tuples.\n sql.func.count(\n sql.func.distinct(GraphTuple.node_x_dimensionality)\n ).label(\"node_x_dimensionality_count\"),\n sql.func.count(\n sql.func.distinct(GraphTuple.node_y_dimensionality)\n ).label(\"node_y_dimensionality_count\"),\n sql.func.count(\n sql.func.distinct(GraphTuple.graph_x_dimensionality)\n ).label(\"graph_x_dimensionality_count\"),\n sql.func.count(\n sql.func.distinct(GraphTuple.graph_y_dimensionality)\n ).label(\"graph_y_dimensionality_count\"),\n # Feature and label dimensionalities.\n sql.func.max(GraphTuple.node_x_dimensionality).label(\n \"node_x_dimensionality\"\n ),\n sql.func.max(GraphTuple.node_y_dimensionality).label(\n \"node_y_dimensionality\"\n ),\n sql.func.max(GraphTuple.graph_x_dimensionality).label(\n \"graph_x_dimensionality\"\n ),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n \"graph_y_dimensionality\"\n ),\n # Graph tuple sizes.\n sql.func.sum(GraphTuple.pickled_graph_tuple_size).label(\n \"graph_data_size\"\n ),\n sql.func.min(GraphTuple.pickled_graph_tuple_size).label(\n \"graph_data_size_min\"\n ),\n sql.func.avg(GraphTuple.pickled_graph_tuple_size).label(\n \"graph_data_size_avg\"\n ),\n sql.func.max(GraphTuple.pickled_graph_tuple_size).label(\n \"graph_data_size_max\"\n ),\n # Data flow column null counts.\n sql.func.count(GraphTuple.data_flow_steps).label(\n \"data_flow_steps_count\"\n ),\n # Data flow step counts.\n sql.func.min(GraphTuple.data_flow_steps).label(\"data_flow_steps_min\"),\n sql.func.avg(GraphTuple.data_flow_steps).label(\"data_flow_steps_avg\"),\n sql.func.max(GraphTuple.data_flow_steps).label(\"data_flow_steps_max\"),\n # Data flow positive node count.\n sql.func.min(GraphTuple.data_flow_positive_node_count).label(\n \"data_flow_positive_node_count_min\"\n ),\n sql.func.avg(GraphTuple.data_flow_positive_node_count).label(\n \"data_flow_positive_node_count_avg\"\n ),\n sql.func.max(GraphTuple.data_flow_positive_node_count).label(\n \"data_flow_positive_node_count_max\"\n ),\n )\n\n # Ignore \"empty\" graphs.\n query = query.filter(GraphTuple.node_count > 1)\n\n # Compute the stats.\n stats = query.one()\n\n # Check that databases have a consistent value for dimensionalities.\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f\"Database contains {stats.node_x_dimensionality_count} \"\n \"distinct node x dimensionalities\"\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f\"Database contains {stats.node_y_dimensionality_count} \"\n \"distinct node y dimensionalities\"\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f\"Database contains {stats.graph_x_dimensionality_count} \"\n \"distinct graph x dimensionalities\"\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f\"Database contains {stats.graph_y_dimensionality_count} \"\n \"distinct graph y dimensionalities\"\n )\n\n # Check that every graph has data flow attributes, or none of them do.\n if not (\n stats.data_flow_steps_count == 0\n or stats.data_flow_steps_count == stats.graph_count\n ):\n raise ValueError(\n f\"{stats.graph_count - stats.data_flow_steps_count} of \"\n f\"{stats.graph_count} graphs have no data_flow_steps \"\n \"value\"\n )\n\n self._graph_tuple_stats = stats\n\n with self.Session() as session:\n self._splits = sorted(\n set(\n [\n row.split\n for row in session.query(GraphTuple.split)\n .filter(GraphTuple.split != None)\n .group_by(GraphTuple.split)\n ]\n )\n )\n\n 
self._split_counts = {\n split: session.query(sql.func.count(GraphTuple.id))\n .filter(GraphTuple.split == split)\n .scalar()\n for split in self._splits\n }\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) -> Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {\n name: function(self) for name, function in database_statistics_registry\n }\n\n def __repr__(self) -> str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with \"\n f\"dimensionalities: node_x={self.node_x_dimensionality}, \"\n f\"node_y={self.node_y_dimensionality}, \"\n f\"graph_x={self.graph_x_dimensionality}, \"\n f\"graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n# Deferred declaration of flags because we need to reference Database class.\napp.DEFINE_database(\n \"graph_db\", Database, None, \"The database to read graph tuples from.\",\n)\n\n\ndef Main():\n \"\"\"Main entry point.\"\"\"\n graph_db = FLAGS.graph_db()\n print(jsonutil.format_json(graph_db.stats_json))\n\n\nif __name__ == \"__main__\":\n app.Run(Main)\n",
"step-ids": [
40,
44,
48,
54,
63
]
}
|
[
40,
44,
48,
54,
63
] |
<|reserved_special_token_0|>
def fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict):
search = False
if str(chat_id) + str(keyword_id) + 'db' in shared_dict:
print('%s for group %s already in progress, sleeping for a while' %
(keyword_id, chat_id))
time.sleep(uniform(1, 5))
else:
shared_dict[str(chat_id) + str(keyword_id) + 'db'] = 1
search = True
query = (
'SELECT id,url,type FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s AND requested = FALSE LIMIT 10'
)
data = db.fetch_data(query, (chat_id, keyword_id, keyword_n))
if data is not None:
if len(data) < 3:
query = (
'DELETE FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s'
)
db.execute(query, (chat_id, keyword_id, keyword_n))
return None
for i in data:
query = (
'UPDATE images SET requested = TRUE WHERE chat_id = %s AND keyword_n = %s AND id = %s'
)
db.execute(query, (chat_id, keyword_n, i[0]))
if search is True:
del shared_dict[str(chat_id) + str(keyword_id) + 'db']
return data
return data
def fetch_images_from_google(chat_id, keyword, keyword_id, keyword_n,
header, db, shared_dict, bot):
if str(chat_id) + keyword in shared_dict:
bot.sendMessage(chat_id, 'po etomu slovu poka idet poisk - ' + keyword)
return 1
shared_dict[str(chat_id) + keyword] = 1
query = keyword.split()
query = '+'.join(query)
print('query - ' + query)
url = 'https://www.google.co.in/search?q=' + urllib.parse.quote(query
) + '&source=lnms&tbm=isch'
soup = BeautifulSoup(urllib.request.urlopen(urllib.request.Request(url,
headers=header)), 'html.parser')
ActualImages = []
for a in soup.find_all('div', {'class': 'rg_meta'}):
link, Type = json.loads(a.text)['ou'], json.loads(a.text)['ity']
ActualImages.append((link, Type))
total_images = len(ActualImages)
if total_images == 0:
del shared_dict[str(chat_id) + keyword]
return None
print('there are total', total_images, 'images')
nuran = {}
i = 0
for a, (img, Type) in enumerate(ActualImages):
if Type == 'png' or Type == 'jpg':
nuran[i] = {}
nuran[i]['url'] = img
nuran[i]['type'] = Type
i += 1
if len(nuran) < 3:
del shared_dict[str(chat_id) + keyword]
return None
del shared_dict[str(chat_id) + keyword]
insert_images(chat_id, keyword_id, keyword_n, nuran, db)
return fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict
)
<|reserved_special_token_0|>
def get_image(chat_id, keyword, shared_dict, db, bot, msg=True):
print('keyword - ' + keyword)
header = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36'
}
shared_dict[str(chat_id) + 'n'] += 1
keyword_n = len(keyword) % 10
nuran = fetch_images(chat_id, keyword, keyword_n, header, db,
shared_dict, bot)
if nuran == 1:
shared_dict[str(chat_id) + 'n'] -= 1
return
if nuran is None and msg is True:
shared_dict[str(chat_id) + 'n'] -= 1
bot.sendMessage(chat_id, 'ni4ego ne naydeno(')
return
DIR = '/tmp'
index = 0
num = 0
if msg is True:
bot.sendMessage(chat_id, 'lovi fotki')
while 1:
try:
print('trying to open %s' % nuran[index][1])
url = _convert_to_idn(urllib.parse.unquote(nuran[index][1]))
print('unquoted url %s' % url)
url = urllib.parse.quote(url, safe=':/')
req = urllib.request.Request(url, headers=header)
raw_img = urllib.request.urlopen(req, timeout=5).read()
type = 'jpg' if nuran[index][2] == 'jpg' else 'png'
image_name = ''.join(choice(ascii_letters) for i in range(20))
f = open(os.path.join(DIR, image_name + '.' + type), 'wb')
f.write(raw_img)
f.close()
print('sending %s' % os.path.join(DIR, image_name + '.' + type))
bot.sendPhoto(chat_id, open(os.path.join(DIR, image_name + '.' +
type), 'rb'))
os.unlink(os.path.join(DIR, image_name + '.' + type))
except TelegramError as e:
print('Telegram error - {}'.format(e))
index += 1
continue
except IndexError:
print('index out of range, breaking')
break
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('error - {}'.format(e))
print(exc_type, fname, exc_tb.tb_lineno)
index += 1
continue
num += 1
index += 1
if num >= 3:
break
shared_dict[str(chat_id) + 'n'] -= 1
print('chat id count for %s - %s' % (chat_id, shared_dict[str(chat_id) +
'n']))
<|reserved_special_token_0|>
def handle_msg(msg, bot, shared_dict, db):
chat_id = str(msg['chat']['id'])
if msg['chat']['id'] in master_users and chat_id + 'chat' in shared_dict:
if msg['text'].upper() == 'STOP':
bot.sendMessage(chat_id, 'Chat has been ended', reply_markup={
'hide_keyboard': True})
del shared_dict[chat_id + 'chat']
return
if shared_dict[chat_id + 'chat'] == 0:
if msg['text'] not in chat_groups:
bot.sendMessage(chat_id, 'Incorrect group', reply_markup={
'hide_keyboard': True})
del shared_dict[chat_id + 'chat']
return
shared_dict[chat_id + 'chat'] = chat_groups[msg['text']]
bot.sendMessage(chat_id, "You're talking with group %s" % msg[
'text'], reply_markup={'hide_keyboard': True})
else:
bot.sendMessage(shared_dict[chat_id + 'chat'], msg['text'])
elif msg['chat']['id'
] in master_users and 'forward' in shared_dict and msg['text'].upper(
) == 'STOP FORWARD':
bot.sendMessage(chat_id, 'OK, forwarding has been disabled, bye')
del shared_dict['forward']
elif msg['chat']['id'] in master_users and msg['text'].upper() == 'CHAT':
bot.sendMessage(chat_id, 'Which one?', reply_markup={'keyboard':
generate_bot_array(chat_groups.keys())})
shared_dict[chat_id + 'chat'] = 0
elif msg['chat']['id'] in master_users and msg['text'].upper(
) == 'FORWARD':
bot.sendMessage(chat_id, "OK, I'll forward all msgs to you")
shared_dict['forward'] = msg['chat']['id']
elif msg['chat']['type'] == 'private' and msg['chat']['id'
] in allowed_users or (msg['chat']['type'] == 'supergroup' or msg[
'chat']['type'] == 'group') and msg['chat']['id'] in allowed_users:
if chat_id + 'n' not in shared_dict:
shared_dict[chat_id + 'n'] = 0
if shared_dict[chat_id + 'n'] >= allowed_users[msg['chat']['id']]:
bot.sendMessage(msg['chat']['id'], 'ya poka zanat')
return
if 'text' in msg and re.match('(FOTKI|ФОТКИ) .+', msg['text'].upper
(), re.IGNORECASE):
bot.sendMessage(chat_id, 'pristupayu k poisku fotok')
get_image(chat_id, re.match('^[^\\s]+ (.+)$', msg['text'], re.
IGNORECASE).group(1), shared_dict, db, bot)
elif msg['chat']['type'] == 'private':
bot.sendMessage(msg['from']['id'], 'idi na huy, ya teba ne znayu')
else:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict):
search = False
if str(chat_id) + str(keyword_id) + 'db' in shared_dict:
print('%s for group %s already in progress, sleeping for a while' %
(keyword_id, chat_id))
time.sleep(uniform(1, 5))
else:
shared_dict[str(chat_id) + str(keyword_id) + 'db'] = 1
search = True
query = (
'SELECT id,url,type FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s AND requested = FALSE LIMIT 10'
)
data = db.fetch_data(query, (chat_id, keyword_id, keyword_n))
if data is not None:
if len(data) < 3:
query = (
'DELETE FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s'
)
db.execute(query, (chat_id, keyword_id, keyword_n))
return None
for i in data:
query = (
'UPDATE images SET requested = TRUE WHERE chat_id = %s AND keyword_n = %s AND id = %s'
)
db.execute(query, (chat_id, keyword_n, i[0]))
if search is True:
del shared_dict[str(chat_id) + str(keyword_id) + 'db']
return data
return data
def fetch_images_from_google(chat_id, keyword, keyword_id, keyword_n,
header, db, shared_dict, bot):
if str(chat_id) + keyword in shared_dict:
bot.sendMessage(chat_id, 'po etomu slovu poka idet poisk - ' + keyword)
return 1
shared_dict[str(chat_id) + keyword] = 1
query = keyword.split()
query = '+'.join(query)
print('query - ' + query)
url = 'https://www.google.co.in/search?q=' + urllib.parse.quote(query
) + '&source=lnms&tbm=isch'
soup = BeautifulSoup(urllib.request.urlopen(urllib.request.Request(url,
headers=header)), 'html.parser')
ActualImages = []
for a in soup.find_all('div', {'class': 'rg_meta'}):
link, Type = json.loads(a.text)['ou'], json.loads(a.text)['ity']
ActualImages.append((link, Type))
total_images = len(ActualImages)
if total_images == 0:
del shared_dict[str(chat_id) + keyword]
return None
print('there are total', total_images, 'images')
nuran = {}
i = 0
for a, (img, Type) in enumerate(ActualImages):
if Type == 'png' or Type == 'jpg':
nuran[i] = {}
nuran[i]['url'] = img
nuran[i]['type'] = Type
i += 1
if len(nuran) < 3:
del shared_dict[str(chat_id) + keyword]
return None
del shared_dict[str(chat_id) + keyword]
insert_images(chat_id, keyword_id, keyword_n, nuran, db)
return fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict
)
def fetch_images(chat_id, keyword, keyword_n, header, db, shared_dict, bot):
keyword_id = check_image_request(chat_id, keyword, db)
if keyword_id is not None:
images = fetch_images_from_db(chat_id, keyword_id[0], keyword_n, db,
shared_dict)
return images if images is not None else fetch_images_from_google(
chat_id, keyword, keyword_id[0], keyword_n, header, db,
shared_dict, bot)
keyword_id = insert_image_request(chat_id, keyword, db)
return fetch_images_from_google(chat_id, keyword, keyword_id, keyword_n,
header, db, shared_dict, bot)
def get_image(chat_id, keyword, shared_dict, db, bot, msg=True):
print('keyword - ' + keyword)
header = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36'
}
shared_dict[str(chat_id) + 'n'] += 1
keyword_n = len(keyword) % 10
nuran = fetch_images(chat_id, keyword, keyword_n, header, db,
shared_dict, bot)
if nuran == 1:
shared_dict[str(chat_id) + 'n'] -= 1
return
if nuran is None and msg is True:
shared_dict[str(chat_id) + 'n'] -= 1
bot.sendMessage(chat_id, 'ni4ego ne naydeno(')
return
DIR = '/tmp'
index = 0
num = 0
if msg is True:
bot.sendMessage(chat_id, 'lovi fotki')
while 1:
try:
print('trying to open %s' % nuran[index][1])
url = _convert_to_idn(urllib.parse.unquote(nuran[index][1]))
print('unquoted url %s' % url)
url = urllib.parse.quote(url, safe=':/')
req = urllib.request.Request(url, headers=header)
raw_img = urllib.request.urlopen(req, timeout=5).read()
type = 'jpg' if nuran[index][2] == 'jpg' else 'png'
image_name = ''.join(choice(ascii_letters) for i in range(20))
f = open(os.path.join(DIR, image_name + '.' + type), 'wb')
f.write(raw_img)
f.close()
print('sending %s' % os.path.join(DIR, image_name + '.' + type))
bot.sendPhoto(chat_id, open(os.path.join(DIR, image_name + '.' +
type), 'rb'))
os.unlink(os.path.join(DIR, image_name + '.' + type))
except TelegramError as e:
print('Telegram error - {}'.format(e))
index += 1
continue
except IndexError:
print('index out of range, breaking')
break
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('error - {}'.format(e))
print(exc_type, fname, exc_tb.tb_lineno)
index += 1
continue
num += 1
index += 1
if num >= 3:
break
shared_dict[str(chat_id) + 'n'] -= 1
print('chat id count for %s - %s' % (chat_id, shared_dict[str(chat_id) +
'n']))
<|reserved_special_token_0|>
def handle_msg(msg, bot, shared_dict, db):
chat_id = str(msg['chat']['id'])
if msg['chat']['id'] in master_users and chat_id + 'chat' in shared_dict:
if msg['text'].upper() == 'STOP':
bot.sendMessage(chat_id, 'Chat has been ended', reply_markup={
'hide_keyboard': True})
del shared_dict[chat_id + 'chat']
return
if shared_dict[chat_id + 'chat'] == 0:
if msg['text'] not in chat_groups:
bot.sendMessage(chat_id, 'Incorrect group', reply_markup={
'hide_keyboard': True})
del shared_dict[chat_id + 'chat']
return
shared_dict[chat_id + 'chat'] = chat_groups[msg['text']]
bot.sendMessage(chat_id, "You're talking with group %s" % msg[
'text'], reply_markup={'hide_keyboard': True})
else:
bot.sendMessage(shared_dict[chat_id + 'chat'], msg['text'])
elif msg['chat']['id'
] in master_users and 'forward' in shared_dict and msg['text'].upper(
) == 'STOP FORWARD':
bot.sendMessage(chat_id, 'OK, forwarding has been disabled, bye')
del shared_dict['forward']
elif msg['chat']['id'] in master_users and msg['text'].upper() == 'CHAT':
bot.sendMessage(chat_id, 'Which one?', reply_markup={'keyboard':
generate_bot_array(chat_groups.keys())})
shared_dict[chat_id + 'chat'] = 0
elif msg['chat']['id'] in master_users and msg['text'].upper(
) == 'FORWARD':
bot.sendMessage(chat_id, "OK, I'll forward all msgs to you")
shared_dict['forward'] = msg['chat']['id']
elif msg['chat']['type'] == 'private' and msg['chat']['id'
] in allowed_users or (msg['chat']['type'] == 'supergroup' or msg[
'chat']['type'] == 'group') and msg['chat']['id'] in allowed_users:
if chat_id + 'n' not in shared_dict:
shared_dict[chat_id + 'n'] = 0
if shared_dict[chat_id + 'n'] >= allowed_users[msg['chat']['id']]:
bot.sendMessage(msg['chat']['id'], 'ya poka zanat')
return
if 'text' in msg and re.match('(FOTKI|ФОТКИ) .+', msg['text'].upper
(), re.IGNORECASE):
bot.sendMessage(chat_id, 'pristupayu k poisku fotok')
get_image(chat_id, re.match('^[^\\s]+ (.+)$', msg['text'], re.
IGNORECASE).group(1), shared_dict, db, bot)
elif msg['chat']['type'] == 'private':
bot.sendMessage(msg['from']['id'], 'idi na huy, ya teba ne znayu')
else:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
parts = list(urllib.parse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
host = parts[1].rsplit(':', 1)
newhost = []
port = ''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urllib.parse.urlunsplit(parts)
else:
return url
def fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict):
search = False
if str(chat_id) + str(keyword_id) + 'db' in shared_dict:
print('%s for group %s already in progress, sleeping for a while' %
(keyword_id, chat_id))
time.sleep(uniform(1, 5))
else:
shared_dict[str(chat_id) + str(keyword_id) + 'db'] = 1
search = True
query = (
'SELECT id,url,type FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s AND requested = FALSE LIMIT 10'
)
data = db.fetch_data(query, (chat_id, keyword_id, keyword_n))
if data is not None:
if len(data) < 3:
query = (
'DELETE FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s'
)
db.execute(query, (chat_id, keyword_id, keyword_n))
return None
for i in data:
query = (
'UPDATE images SET requested = TRUE WHERE chat_id = %s AND keyword_n = %s AND id = %s'
)
db.execute(query, (chat_id, keyword_n, i[0]))
if search is True:
del shared_dict[str(chat_id) + str(keyword_id) + 'db']
return data
return data
def fetch_images_from_google(chat_id, keyword, keyword_id, keyword_n,
header, db, shared_dict, bot):
if str(chat_id) + keyword in shared_dict:
bot.sendMessage(chat_id, 'po etomu slovu poka idet poisk - ' + keyword)
return 1
shared_dict[str(chat_id) + keyword] = 1
query = keyword.split()
query = '+'.join(query)
print('query - ' + query)
url = 'https://www.google.co.in/search?q=' + urllib.parse.quote(query
) + '&source=lnms&tbm=isch'
soup = BeautifulSoup(urllib.request.urlopen(urllib.request.Request(url,
headers=header)), 'html.parser')
ActualImages = []
for a in soup.find_all('div', {'class': 'rg_meta'}):
link, Type = json.loads(a.text)['ou'], json.loads(a.text)['ity']
ActualImages.append((link, Type))
total_images = len(ActualImages)
if total_images == 0:
del shared_dict[str(chat_id) + keyword]
return None
print('there are total', total_images, 'images')
nuran = {}
i = 0
for a, (img, Type) in enumerate(ActualImages):
if Type == 'png' or Type == 'jpg':
nuran[i] = {}
nuran[i]['url'] = img
nuran[i]['type'] = Type
i += 1
if len(nuran) < 3:
del shared_dict[str(chat_id) + keyword]
return None
del shared_dict[str(chat_id) + keyword]
insert_images(chat_id, keyword_id, keyword_n, nuran, db)
return fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict
)
def fetch_images(chat_id, keyword, keyword_n, header, db, shared_dict, bot):
keyword_id = check_image_request(chat_id, keyword, db)
if keyword_id is not None:
images = fetch_images_from_db(chat_id, keyword_id[0], keyword_n, db,
shared_dict)
return images if images is not None else fetch_images_from_google(
chat_id, keyword, keyword_id[0], keyword_n, header, db,
shared_dict, bot)
keyword_id = insert_image_request(chat_id, keyword, db)
return fetch_images_from_google(chat_id, keyword, keyword_id, keyword_n,
header, db, shared_dict, bot)
def get_image(chat_id, keyword, shared_dict, db, bot, msg=True):
print('keyword - ' + keyword)
header = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36'
}
shared_dict[str(chat_id) + 'n'] += 1
keyword_n = len(keyword) % 10
nuran = fetch_images(chat_id, keyword, keyword_n, header, db,
shared_dict, bot)
if nuran == 1:
shared_dict[str(chat_id) + 'n'] -= 1
return
if nuran is None and msg is True:
shared_dict[str(chat_id) + 'n'] -= 1
bot.sendMessage(chat_id, 'ni4ego ne naydeno(')
return
DIR = '/tmp'
index = 0
num = 0
if msg is True:
bot.sendMessage(chat_id, 'lovi fotki')
while 1:
try:
print('trying to open %s' % nuran[index][1])
url = _convert_to_idn(urllib.parse.unquote(nuran[index][1]))
print('unquoted url %s' % url)
url = urllib.parse.quote(url, safe=':/')
req = urllib.request.Request(url, headers=header)
raw_img = urllib.request.urlopen(req, timeout=5).read()
type = 'jpg' if nuran[index][2] == 'jpg' else 'png'
image_name = ''.join(choice(ascii_letters) for i in range(20))
f = open(os.path.join(DIR, image_name + '.' + type), 'wb')
f.write(raw_img)
f.close()
print('sending %s' % os.path.join(DIR, image_name + '.' + type))
bot.sendPhoto(chat_id, open(os.path.join(DIR, image_name + '.' +
type), 'rb'))
os.unlink(os.path.join(DIR, image_name + '.' + type))
except TelegramError as e:
print('Telegram error - {}'.format(e))
index += 1
continue
except IndexError:
print('index out of range, breaking')
break
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('error - {}'.format(e))
print(exc_type, fname, exc_tb.tb_lineno)
index += 1
continue
num += 1
index += 1
if num >= 3:
break
shared_dict[str(chat_id) + 'n'] -= 1
print('chat id count for %s - %s' % (chat_id, shared_dict[str(chat_id) +
'n']))
def generate_bot_array(lst):
keyboard_lst = []
tmp_lst = []
i = 0
for val in lst:
i += 1
tmp_lst.append(val)
if i % 3 == 0:
keyboard_lst.append(tmp_lst)
tmp_lst = []
if tmp_lst: keyboard_lst.append(tmp_lst)
return keyboard_lst
def handle_msg(msg, bot, shared_dict, db):
chat_id = str(msg['chat']['id'])
if msg['chat']['id'] in master_users and chat_id + 'chat' in shared_dict:
if msg['text'].upper() == 'STOP':
bot.sendMessage(chat_id, 'Chat has been ended', reply_markup={
'hide_keyboard': True})
del shared_dict[chat_id + 'chat']
return
if shared_dict[chat_id + 'chat'] == 0:
if msg['text'] not in chat_groups:
bot.sendMessage(chat_id, 'Incorrect group', reply_markup={
'hide_keyboard': True})
del shared_dict[chat_id + 'chat']
return
shared_dict[chat_id + 'chat'] = chat_groups[msg['text']]
bot.sendMessage(chat_id, "You're talking with group %s" % msg[
'text'], reply_markup={'hide_keyboard': True})
else:
bot.sendMessage(shared_dict[chat_id + 'chat'], msg['text'])
elif msg['chat']['id'
] in master_users and 'forward' in shared_dict and msg['text'].upper(
) == 'STOP FORWARD':
bot.sendMessage(chat_id, 'OK, forwarding has been disabled, bye')
del shared_dict['forward']
elif msg['chat']['id'] in master_users and msg['text'].upper() == 'CHAT':
bot.sendMessage(chat_id, 'Which one?', reply_markup={'keyboard':
generate_bot_array(chat_groups.keys())})
shared_dict[chat_id + 'chat'] = 0
elif msg['chat']['id'] in master_users and msg['text'].upper(
) == 'FORWARD':
bot.sendMessage(chat_id, "OK, I'll forward all msgs to you")
shared_dict['forward'] = msg['chat']['id']
elif msg['chat']['type'] == 'private' and msg['chat']['id'
] in allowed_users or (msg['chat']['type'] == 'supergroup' or msg[
'chat']['type'] == 'group') and msg['chat']['id'] in allowed_users:
if chat_id + 'n' not in shared_dict:
shared_dict[chat_id + 'n'] = 0
if shared_dict[chat_id + 'n'] >= allowed_users[msg['chat']['id']]:
bot.sendMessage(msg['chat']['id'], 'ya poka zanat')
return
if 'text' in msg and re.match('(FOTKI|ФОТКИ) .+', msg['text'].upper
(), re.IGNORECASE):
bot.sendMessage(chat_id, 'pristupayu k poisku fotok')
get_image(chat_id, re.match('^[^\\s]+ (.+)$', msg['text'], re.
IGNORECASE).group(1), shared_dict, db, bot)
elif msg['chat']['type'] == 'private':
bot.sendMessage(msg['from']['id'], 'idi na huy, ya teba ne znayu')
else:
pass
<|reserved_special_token_1|>
import time
import re
from config import allowed_users, master_users, chat_groups
from bs4 import BeautifulSoup
import requests
import urllib.request, urllib.error, urllib.parse
import http.cookiejar
import json
import os
import sys
from random import uniform, choice
from string import ascii_letters
from image import check_image_request, insert_images, insert_image_request
from telepot.exception import TelegramError
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
parts = list(urllib.parse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
host = parts[1].rsplit(':', 1)
newhost = []
port = ''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urllib.parse.urlunsplit(parts)
else:
return url
def fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict):
search = False
if str(chat_id) + str(keyword_id) + 'db' in shared_dict:
print('%s for group %s already in progress, sleeping for a while' %
(keyword_id, chat_id))
time.sleep(uniform(1, 5))
else:
shared_dict[str(chat_id) + str(keyword_id) + 'db'] = 1
search = True
query = (
'SELECT id,url,type FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s AND requested = FALSE LIMIT 10'
)
data = db.fetch_data(query, (chat_id, keyword_id, keyword_n))
if data is not None:
if len(data) < 3:
query = (
'DELETE FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s'
)
db.execute(query, (chat_id, keyword_id, keyword_n))
return None
for i in data:
query = (
'UPDATE images SET requested = TRUE WHERE chat_id = %s AND keyword_n = %s AND id = %s'
)
db.execute(query, (chat_id, keyword_n, i[0]))
if search is True:
del shared_dict[str(chat_id) + str(keyword_id) + 'db']
return data
return data
def fetch_images_from_google(chat_id, keyword, keyword_id, keyword_n,
header, db, shared_dict, bot):
if str(chat_id) + keyword in shared_dict:
bot.sendMessage(chat_id, 'po etomu slovu poka idet poisk - ' + keyword)
return 1
shared_dict[str(chat_id) + keyword] = 1
query = keyword.split()
query = '+'.join(query)
print('query - ' + query)
url = 'https://www.google.co.in/search?q=' + urllib.parse.quote(query
) + '&source=lnms&tbm=isch'
soup = BeautifulSoup(urllib.request.urlopen(urllib.request.Request(url,
headers=header)), 'html.parser')
ActualImages = []
for a in soup.find_all('div', {'class': 'rg_meta'}):
link, Type = json.loads(a.text)['ou'], json.loads(a.text)['ity']
ActualImages.append((link, Type))
total_images = len(ActualImages)
if total_images == 0:
del shared_dict[str(chat_id) + keyword]
return None
print('there are total', total_images, 'images')
nuran = {}
i = 0
for a, (img, Type) in enumerate(ActualImages):
if Type == 'png' or Type == 'jpg':
nuran[i] = {}
nuran[i]['url'] = img
nuran[i]['type'] = Type
i += 1
if len(nuran) < 3:
del shared_dict[str(chat_id) + keyword]
return None
del shared_dict[str(chat_id) + keyword]
insert_images(chat_id, keyword_id, keyword_n, nuran, db)
return fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict
)
def fetch_images(chat_id, keyword, keyword_n, header, db, shared_dict, bot):
keyword_id = check_image_request(chat_id, keyword, db)
if keyword_id is not None:
images = fetch_images_from_db(chat_id, keyword_id[0], keyword_n, db,
shared_dict)
return images if images is not None else fetch_images_from_google(
chat_id, keyword, keyword_id[0], keyword_n, header, db,
shared_dict, bot)
keyword_id = insert_image_request(chat_id, keyword, db)
return fetch_images_from_google(chat_id, keyword, keyword_id, keyword_n,
header, db, shared_dict, bot)
def get_image(chat_id, keyword, shared_dict, db, bot, msg=True):
print('keyword - ' + keyword)
header = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36'
}
shared_dict[str(chat_id) + 'n'] += 1
keyword_n = len(keyword) % 10
nuran = fetch_images(chat_id, keyword, keyword_n, header, db,
shared_dict, bot)
if nuran == 1:
shared_dict[str(chat_id) + 'n'] -= 1
return
if nuran is None and msg is True:
shared_dict[str(chat_id) + 'n'] -= 1
bot.sendMessage(chat_id, 'ni4ego ne naydeno(')
return
DIR = '/tmp'
index = 0
num = 0
if msg is True:
bot.sendMessage(chat_id, 'lovi fotki')
while 1:
try:
print('trying to open %s' % nuran[index][1])
url = _convert_to_idn(urllib.parse.unquote(nuran[index][1]))
print('unquoted url %s' % url)
url = urllib.parse.quote(url, safe=':/')
req = urllib.request.Request(url, headers=header)
raw_img = urllib.request.urlopen(req, timeout=5).read()
type = 'jpg' if nuran[index][2] == 'jpg' else 'png'
image_name = ''.join(choice(ascii_letters) for i in range(20))
f = open(os.path.join(DIR, image_name + '.' + type), 'wb')
f.write(raw_img)
f.close()
print('sending %s' % os.path.join(DIR, image_name + '.' + type))
bot.sendPhoto(chat_id, open(os.path.join(DIR, image_name + '.' +
type), 'rb'))
os.unlink(os.path.join(DIR, image_name + '.' + type))
except TelegramError as e:
print('Telegram error - {}'.format(e))
index += 1
continue
except IndexError:
print('index out of range, breaking')
break
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('error - {}'.format(e))
print(exc_type, fname, exc_tb.tb_lineno)
index += 1
continue
num += 1
index += 1
if num >= 3:
break
shared_dict[str(chat_id) + 'n'] -= 1
print('chat id count for %s - %s' % (chat_id, shared_dict[str(chat_id) +
'n']))
def generate_bot_array(lst):
keyboard_lst = []
tmp_lst = []
i = 0
for val in lst:
i += 1
tmp_lst.append(val)
if i % 3 == 0:
keyboard_lst.append(tmp_lst)
tmp_lst = []
if tmp_lst: keyboard_lst.append(tmp_lst)
return keyboard_lst
def handle_msg(msg, bot, shared_dict, db):
chat_id = str(msg['chat']['id'])
if msg['chat']['id'] in master_users and chat_id + 'chat' in shared_dict:
if msg['text'].upper() == 'STOP':
bot.sendMessage(chat_id, 'Chat has been ended', reply_markup={
'hide_keyboard': True})
del shared_dict[chat_id + 'chat']
return
if shared_dict[chat_id + 'chat'] == 0:
if msg['text'] not in chat_groups:
bot.sendMessage(chat_id, 'Incorrect group', reply_markup={
'hide_keyboard': True})
del shared_dict[chat_id + 'chat']
return
shared_dict[chat_id + 'chat'] = chat_groups[msg['text']]
bot.sendMessage(chat_id, "You're talking with group %s" % msg[
'text'], reply_markup={'hide_keyboard': True})
else:
bot.sendMessage(shared_dict[chat_id + 'chat'], msg['text'])
elif msg['chat']['id'
] in master_users and 'forward' in shared_dict and msg['text'].upper(
) == 'STOP FORWARD':
bot.sendMessage(chat_id, 'OK, forwarding has been disabled, bye')
del shared_dict['forward']
elif msg['chat']['id'] in master_users and msg['text'].upper() == 'CHAT':
bot.sendMessage(chat_id, 'Which one?', reply_markup={'keyboard':
generate_bot_array(chat_groups.keys())})
shared_dict[chat_id + 'chat'] = 0
elif msg['chat']['id'] in master_users and msg['text'].upper(
) == 'FORWARD':
bot.sendMessage(chat_id, "OK, I'll forward all msgs to you")
shared_dict['forward'] = msg['chat']['id']
elif msg['chat']['type'] == 'private' and msg['chat']['id'
] in allowed_users or (msg['chat']['type'] == 'supergroup' or msg[
'chat']['type'] == 'group') and msg['chat']['id'] in allowed_users:
if chat_id + 'n' not in shared_dict:
shared_dict[chat_id + 'n'] = 0
if shared_dict[chat_id + 'n'] >= allowed_users[msg['chat']['id']]:
bot.sendMessage(msg['chat']['id'], 'ya poka zanat')
return
if 'text' in msg and re.match('(FOTKI|ФОТКИ) .+', msg['text'].upper
(), re.IGNORECASE):
bot.sendMessage(chat_id, 'pristupayu k poisku fotok')
get_image(chat_id, re.match('^[^\\s]+ (.+)$', msg['text'], re.
IGNORECASE).group(1), shared_dict, db, bot)
elif msg['chat']['type'] == 'private':
bot.sendMessage(msg['from']['id'], 'idi na huy, ya teba ne znayu')
else:
pass
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import time
import re
from config import allowed_users, master_users, chat_groups
from bs4 import BeautifulSoup
import requests
import urllib.request, urllib.error, urllib.parse
import http.cookiejar
import json
import os
import sys
#from random import randint, choice
from random import uniform, choice
from string import ascii_letters
from image import check_image_request, insert_images, insert_image_request
from telepot.exception import TelegramError
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urllib.parse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = ''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urllib.parse.urlunsplit(parts)
else:
return url
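# The three fetch_* helpers below form a small cache layer: image URLs scraped
# from Google are stored per (chat_id, keyword) and served from the database
# until fewer than 3 unrequested rows remain, at which point the cache entry
# is purged so the next request triggers a fresh scrape.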
def fetch_images_from_db(chat_id,keyword_id,keyword_n,db,shared_dict):
search = False
if str(chat_id) + str(keyword_id) +'db' in shared_dict:
print("%s for group %s already in progress, sleeping for a while" % (keyword_id,chat_id))
time.sleep(uniform(1,5))
else:
shared_dict[str(chat_id) + str(keyword_id) + 'db'] = 1
search = True
query = "SELECT id,url,type FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s AND requested = FALSE LIMIT 10"
#print 'chat_id - {}, keyword_id - {}, keyword_n - {}'.format(chat_id,keyword_id,keyword_n)
data = db.fetch_data(query,(chat_id,keyword_id,keyword_n,))
if data is not None:
if len(data) < 3:
query = "DELETE FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s"
db.execute(query,(chat_id,keyword_id,keyword_n,))
return None
for i in data:
query = "UPDATE images SET requested = TRUE WHERE chat_id = %s AND keyword_n = %s AND id = %s"
db.execute(query,(chat_id,keyword_n,i[0]))
if search is True:
del shared_dict[str(chat_id) + str(keyword_id) + 'db']
return data
return data
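# Scrapes Google Images by parsing the JSON blobs Google embeds in
# div.rg_meta elements. This is tied to Google's result markup at the time of
# writing and will break if that markup changes.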
def fetch_images_from_google(chat_id,keyword,keyword_id,keyword_n,header,db,shared_dict,bot):
if str(chat_id) + keyword in shared_dict:
bot.sendMessage(chat_id,'po etomu slovu poka idet poisk - ' + keyword)
return 1
shared_dict[str(chat_id) + keyword] = 1
query = keyword.split()
#query = str('+'.join(query).encode('utf-8'))
query = '+'.join(query)
print('query - ' + query)
url="https://www.google.co.in/search?q="+urllib.parse.quote(query)+"&source=lnms&tbm=isch"
soup = BeautifulSoup(urllib.request.urlopen(urllib.request.Request(url,headers=header)),'html.parser')
ActualImages=[]
for a in soup.find_all("div",{"class":"rg_meta"}):
link , Type =json.loads(a.text)["ou"] ,json.loads(a.text)["ity"]
ActualImages.append((link,Type))
total_images = len(ActualImages)
if total_images == 0:
del shared_dict[str(chat_id) + keyword]
return None
print("there are total" , total_images,"images")
nuran = {}
i = 0
for a, (img , Type) in enumerate( ActualImages):
if Type == 'png' or Type == 'jpg':
nuran[i] = {}
nuran[i]['url'] = img
nuran[i]['type'] = Type
i += 1
if len(nuran) < 3:
del shared_dict[str(chat_id) + keyword]
return None
del shared_dict[str(chat_id) + keyword]
#print shared_dict
insert_images(chat_id,keyword_id,keyword_n,nuran,db)
return fetch_images_from_db(chat_id,keyword_id,keyword_n,db,shared_dict)
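# Dispatcher: serve cached rows if the keyword was requested before for this
# chat, otherwise register the request and fall through to the Google scraper.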
def fetch_images(chat_id,keyword,keyword_n,header,db,shared_dict,bot):
keyword_id = check_image_request(chat_id,keyword,db)
if keyword_id is not None:
images = fetch_images_from_db(chat_id,keyword_id[0],keyword_n,db,shared_dict)
return images if images is not None else fetch_images_from_google(chat_id,keyword,keyword_id[0],keyword_n,header,db,shared_dict,bot)
keyword_id = insert_image_request(chat_id,keyword,db)
return fetch_images_from_google(chat_id,keyword,keyword_id,keyword_n,header,db,shared_dict,bot)
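# Downloads up to 3 of the fetched URLs into /tmp under random names, sends
# them with sendPhoto, and uses shared_dict[str(chat_id) + 'n'] as a per-chat
# in-flight counter so handle_msg can enforce the limit from allowed_users.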
def get_image(chat_id,keyword,shared_dict,db,bot,msg=True):
print('keyword - ' + keyword)
header={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
shared_dict[str(chat_id) + 'n'] += 1
keyword_n = len(keyword) % 10
nuran = fetch_images(chat_id,keyword,keyword_n,header,db,shared_dict,bot)
if nuran == 1:
shared_dict[str(chat_id) + 'n'] -= 1
return
if nuran is None and msg is True:
shared_dict[str(chat_id) + 'n'] -= 1
bot.sendMessage(chat_id,'ni4ego ne naydeno(')
return
DIR = '/tmp'
index = 0
num = 0
if msg is True:
bot.sendMessage(chat_id,'lovi fotki')
while 1:
try:
print('trying to open %s' % nuran[index][1])
url = _convert_to_idn(urllib.parse.unquote(nuran[index][1]))
print('unquoted url %s' % url)
url = urllib.parse.quote(url,safe=':/')
req = urllib.request.Request(url, headers=header)
raw_img = urllib.request.urlopen(req,timeout=5).read()
type = 'jpg' if nuran[index][2] == 'jpg' else 'png'
image_name = "".join(choice(ascii_letters) for i in range(20))
f = open(os.path.join(DIR , image_name + "."+type), 'wb')
f.write(raw_img)
f.close()
print('sending %s' % os.path.join(DIR , image_name + "."+type))
bot.sendPhoto(chat_id,open(os.path.join(DIR , image_name + "."+type), 'rb'))
os.unlink(os.path.join(DIR , image_name + "."+type))
except TelegramError as e:
print("Telegram error - {}".format(e))
index += 1
#if e[0] == 'Bad Request: PHOTO_INVALID_DIMENSIONS':
# print('invalid image')
continue
except IndexError:
print("index out of range, breaking")
break
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print("error - {}".format(e))
print(exc_type, fname, exc_tb.tb_lineno)
index += 1
continue
num += 1
index += 1
if num >= 3:
break
shared_dict[str(chat_id) + 'n'] -= 1
print('chat id count for %s - %s' % (chat_id,shared_dict[str(chat_id) + 'n']))
def generate_bot_array(lst):
keyboard_lst = []
tmp_lst = []
i = 0
for val in lst:
i += 1
tmp_lst.append(val)
if i % 3 == 0:
keyboard_lst.append(tmp_lst)
tmp_lst = []
    if tmp_lst:  # avoid appending an empty trailing keyboard row
        keyboard_lst.append(tmp_lst)
return keyboard_lst
def handle_msg(msg,bot,shared_dict,db):
chat_id = str(msg['chat']['id'])
if msg['chat']['id'] in master_users and chat_id + 'chat' in shared_dict:
if msg['text'].upper() == 'STOP':
bot.sendMessage(chat_id,'Chat has been ended',reply_markup={'hide_keyboard': True})
del shared_dict[chat_id + 'chat']
return
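        # a stored value of 0 means the chat session is still waiting for a target group to be picked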
if shared_dict[chat_id + 'chat'] == 0:
if msg['text'] not in chat_groups:
bot.sendMessage(chat_id,'Incorrect group',reply_markup={'hide_keyboard': True})
del shared_dict[chat_id + 'chat']
return
shared_dict[chat_id + 'chat'] = chat_groups[msg['text']]
bot.sendMessage(chat_id,"You're talking with group %s" % msg['text'],reply_markup={'hide_keyboard': True})
else:
bot.sendMessage(shared_dict[chat_id + 'chat'],msg['text'])
elif msg['chat']['id'] in master_users and 'forward' in shared_dict and msg['text'].upper() == 'STOP FORWARD':
bot.sendMessage(chat_id,"OK, forwarding has been disabled, bye")
del shared_dict['forward']
elif msg['chat']['id'] in master_users and msg['text'].upper() == 'CHAT':
bot.sendMessage(chat_id,'Which one?',reply_markup={'keyboard': generate_bot_array(chat_groups.keys())})
shared_dict[chat_id + 'chat'] = 0
elif msg['chat']['id'] in master_users and msg['text'].upper() == 'FORWARD':
bot.sendMessage(chat_id,"OK, I'll forward all msgs to you")
shared_dict['forward'] = msg['chat']['id']
elif ((msg['chat']['type'] == 'private' and msg['chat']['id'] in allowed_users) or
(msg['chat']['type'] == 'supergroup' or msg['chat']['type'] == 'group') and msg['chat']['id'] in allowed_users):
if chat_id + 'n' not in shared_dict:
shared_dict[chat_id + 'n'] = 0
# check shared dict
if shared_dict[chat_id + 'n'] >= allowed_users[msg['chat']['id']]:
bot.sendMessage(msg['chat']['id'],'ya poka zanat')
return
# check msgs
if 'text' in msg and re.match(r'(FOTKI|ФОТКИ) .+',msg['text'].upper(),re.IGNORECASE):
bot.sendMessage(chat_id,'pristupayu k poisku fotok')
get_image(chat_id,re.match(r'^[^\s]+ (.+)$',msg['text'],re.IGNORECASE).group(1),shared_dict,db,bot)
elif msg['chat']['type'] == 'private':
bot.sendMessage(msg['from']['id'],'idi na huy, ya teba ne znayu')
else:
pass
|
flexible
|
{
"blob_id": "98dd7446045f09e6d709f8e5e63b0a94341a796e",
"index": 3158,
"step-1": "<mask token>\n\n\ndef fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict):\n search = False\n if str(chat_id) + str(keyword_id) + 'db' in shared_dict:\n print('%s for group %s already in progress, sleeping for a while' %\n (keyword_id, chat_id))\n time.sleep(uniform(1, 5))\n else:\n shared_dict[str(chat_id) + str(keyword_id) + 'db'] = 1\n search = True\n query = (\n 'SELECT id,url,type FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s AND requested = FALSE LIMIT 10'\n )\n data = db.fetch_data(query, (chat_id, keyword_id, keyword_n))\n if data is not None:\n if len(data) < 3:\n query = (\n 'DELETE FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s'\n )\n db.execute(query, (chat_id, keyword_id, keyword_n))\n return None\n for i in data:\n query = (\n 'UPDATE images SET requested = TRUE WHERE chat_id = %s AND keyword_n = %s AND id = %s'\n )\n db.execute(query, (chat_id, keyword_n, i[0]))\n if search is True:\n del shared_dict[str(chat_id) + str(keyword_id) + 'db']\n return data\n return data\n\n\ndef fetch_images_from_google(chat_id, keyword, keyword_id, keyword_n,\n header, db, shared_dict, bot):\n if str(chat_id) + keyword in shared_dict:\n bot.sendMessage(chat_id, 'po etomu slovu poka idet poisk - ' + keyword)\n return 1\n shared_dict[str(chat_id) + keyword] = 1\n query = keyword.split()\n query = '+'.join(query)\n print('query - ' + query)\n url = 'https://www.google.co.in/search?q=' + urllib.parse.quote(query\n ) + '&source=lnms&tbm=isch'\n soup = BeautifulSoup(urllib.request.urlopen(urllib.request.Request(url,\n headers=header)), 'html.parser')\n ActualImages = []\n for a in soup.find_all('div', {'class': 'rg_meta'}):\n link, Type = json.loads(a.text)['ou'], json.loads(a.text)['ity']\n ActualImages.append((link, Type))\n total_images = len(ActualImages)\n if total_images == 0:\n del shared_dict[str(chat_id) + keyword]\n return None\n print('there are total', total_images, 'images')\n nuran = {}\n i = 0\n for a, (img, Type) in enumerate(ActualImages):\n if Type == 'png' or Type == 'jpg':\n nuran[i] = {}\n nuran[i]['url'] = img\n nuran[i]['type'] = Type\n i += 1\n if len(nuran) < 3:\n del shared_dict[str(chat_id) + keyword]\n return None\n del shared_dict[str(chat_id) + keyword]\n insert_images(chat_id, keyword_id, keyword_n, nuran, db)\n return fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict\n )\n\n\n<mask token>\n\n\ndef get_image(chat_id, keyword, shared_dict, db, bot, msg=True):\n print('keyword - ' + keyword)\n header = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36'\n }\n shared_dict[str(chat_id) + 'n'] += 1\n keyword_n = len(keyword) % 10\n nuran = fetch_images(chat_id, keyword, keyword_n, header, db,\n shared_dict, bot)\n if nuran == 1:\n shared_dict[str(chat_id) + 'n'] -= 1\n return\n if nuran is None and msg is True:\n shared_dict[str(chat_id) + 'n'] -= 1\n bot.sendMessage(chat_id, 'ni4ego ne naydeno(')\n return\n DIR = '/tmp'\n index = 0\n num = 0\n if msg is True:\n bot.sendMessage(chat_id, 'lovi fotki')\n while 1:\n try:\n print('trying to open %s' % nuran[index][1])\n url = _convert_to_idn(urllib.parse.unquote(nuran[index][1]))\n print('unquotted url %s' % url)\n url = urllib.parse.quote(url, safe=':/')\n req = urllib.request.Request(url, headers=header)\n raw_img = urllib.request.urlopen(req, timeout=5).read()\n type = 'jpg' if nuran[index][2] == True else 'png'\n image_name = ''.join(choice(ascii_letters) 
for i in range(20))\n f = open(os.path.join(DIR, image_name + '.' + type), 'wb')\n f.write(raw_img)\n f.close()\n print('sending %s' % os.path.join(DIR, image_name + '.' + type))\n bot.sendPhoto(chat_id, open(os.path.join(DIR, image_name + '.' +\n type), 'rb'))\n os.unlink(os.path.join(DIR, image_name + '.' + type))\n except TelegramError as e:\n print('Telegram error - {}'.format(e))\n index += 1\n continue\n except IndexError:\n print('index out of range, breaking')\n break\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print('error - {}'.format(e))\n print(exc_type, fname, exc_tb.tb_lineno)\n index += 1\n continue\n num += 1\n index += 1\n if num >= 3:\n break\n shared_dict[str(chat_id) + 'n'] -= 1\n print('chat id count for %s - %s' % (chat_id, shared_dict[str(chat_id) +\n 'n']))\n\n\n<mask token>\n\n\ndef handle_msg(msg, bot, shared_dict, db):\n chat_id = str(msg['chat']['id'])\n if msg['chat']['id'] in master_users and chat_id + 'chat' in shared_dict:\n if msg['text'].upper() == 'STOP':\n bot.sendMessage(chat_id, 'Chat has been ended', reply_markup={\n 'hide_keyboard': True})\n del shared_dict[chat_id + 'chat']\n return\n if shared_dict[chat_id + 'chat'] == 0:\n if msg['text'] not in chat_groups:\n bot.sendMessage(chat_id, 'Incorrect group', reply_markup={\n 'hide_keyboard': True})\n del shared_dict[chat_id + 'chat']\n return\n shared_dict[chat_id + 'chat'] = chat_groups[msg['text']]\n bot.sendMessage(chat_id, \"You're talking with group %s\" % msg[\n 'text'], reply_markup={'hide_keyboard': True})\n else:\n bot.sendMessage(shared_dict[chat_id + 'chat'], msg['text'])\n elif msg['chat']['id'\n ] in master_users and 'forward' in shared_dict and msg['text'].upper(\n ) == 'STOP FORWARD':\n bot.sendMessage(chat_id, 'OK, forwarding has been disabled, bye')\n del shared_dict['forward']\n elif msg['chat']['id'] in master_users and msg['text'].upper() == 'CHAT':\n bot.sendMessage(chat_id, 'Which one?', reply_markup={'keyboard':\n generate_bot_array(chat_groups.keys())})\n shared_dict[chat_id + 'chat'] = 0\n elif msg['chat']['id'] in master_users and msg['text'].upper(\n ) == 'FORWARD':\n bot.sendMessage(chat_id, \"OK, I'll forward all msgs to you\")\n shared_dict['forward'] = msg['chat']['id']\n elif msg['chat']['type'] == 'private' and msg['chat']['id'\n ] in allowed_users or (msg['chat']['type'] == 'supergroup' or msg[\n 'chat']['type'] == 'group') and msg['chat']['id'] in allowed_users:\n if chat_id + 'n' not in shared_dict:\n shared_dict[chat_id + 'n'] = 0\n if shared_dict[chat_id + 'n'] >= allowed_users[msg['chat']['id']]:\n bot.sendMessage(msg['chat']['id'], 'ya poka zanat')\n return\n if 'text' in msg and re.match('(FOTKI|ФОТКИ) .+', msg['text'].upper\n (), re.IGNORECASE):\n bot.sendMessage(chat_id, 'pristupayu k poisku fotok')\n get_image(chat_id, re.match('^[^\\\\s]+ (.+)$', msg['text'], re.\n IGNORECASE).group(1), shared_dict, db, bot)\n elif msg['chat']['type'] == 'private':\n bot.sendMessage(msg['from']['id'], 'idi na huy, ya teba ne znayu')\n else:\n pass\n",
"step-2": "<mask token>\n\n\ndef fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict):\n search = False\n if str(chat_id) + str(keyword_id) + 'db' in shared_dict:\n print('%s for group %s already in progress, sleeping for a while' %\n (keyword_id, chat_id))\n time.sleep(uniform(1, 5))\n else:\n shared_dict[str(chat_id) + str(keyword_id) + 'db'] = 1\n search = True\n query = (\n 'SELECT id,url,type FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s AND requested = FALSE LIMIT 10'\n )\n data = db.fetch_data(query, (chat_id, keyword_id, keyword_n))\n if data is not None:\n if len(data) < 3:\n query = (\n 'DELETE FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s'\n )\n db.execute(query, (chat_id, keyword_id, keyword_n))\n return None\n for i in data:\n query = (\n 'UPDATE images SET requested = TRUE WHERE chat_id = %s AND keyword_n = %s AND id = %s'\n )\n db.execute(query, (chat_id, keyword_n, i[0]))\n if search is True:\n del shared_dict[str(chat_id) + str(keyword_id) + 'db']\n return data\n return data\n\n\ndef fetch_images_from_google(chat_id, keyword, keyword_id, keyword_n,\n header, db, shared_dict, bot):\n if str(chat_id) + keyword in shared_dict:\n bot.sendMessage(chat_id, 'po etomu slovu poka idet poisk - ' + keyword)\n return 1\n shared_dict[str(chat_id) + keyword] = 1\n query = keyword.split()\n query = '+'.join(query)\n print('query - ' + query)\n url = 'https://www.google.co.in/search?q=' + urllib.parse.quote(query\n ) + '&source=lnms&tbm=isch'\n soup = BeautifulSoup(urllib.request.urlopen(urllib.request.Request(url,\n headers=header)), 'html.parser')\n ActualImages = []\n for a in soup.find_all('div', {'class': 'rg_meta'}):\n link, Type = json.loads(a.text)['ou'], json.loads(a.text)['ity']\n ActualImages.append((link, Type))\n total_images = len(ActualImages)\n if total_images == 0:\n del shared_dict[str(chat_id) + keyword]\n return None\n print('there are total', total_images, 'images')\n nuran = {}\n i = 0\n for a, (img, Type) in enumerate(ActualImages):\n if Type == 'png' or Type == 'jpg':\n nuran[i] = {}\n nuran[i]['url'] = img\n nuran[i]['type'] = Type\n i += 1\n if len(nuran) < 3:\n del shared_dict[str(chat_id) + keyword]\n return None\n del shared_dict[str(chat_id) + keyword]\n insert_images(chat_id, keyword_id, keyword_n, nuran, db)\n return fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict\n )\n\n\ndef fetch_images(chat_id, keyword, keyword_n, header, db, shared_dict, bot):\n keyword_id = check_image_request(chat_id, keyword, db)\n if keyword_id is not None:\n images = fetch_images_from_db(chat_id, keyword_id[0], keyword_n, db,\n shared_dict)\n return images if images is not None else fetch_images_from_google(\n chat_id, keyword, keyword_id, keyword_n, header, db,\n shared_dict, bot)\n keyword_id = insert_image_request(chat_id, keyword, db)\n return fetch_images_from_google(chat_id, keyword, keyword_id, keyword_n,\n header, db, shared_dict, bot)\n\n\ndef get_image(chat_id, keyword, shared_dict, db, bot, msg=True):\n print('keyword - ' + keyword)\n header = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36'\n }\n shared_dict[str(chat_id) + 'n'] += 1\n keyword_n = len(keyword) % 10\n nuran = fetch_images(chat_id, keyword, keyword_n, header, db,\n shared_dict, bot)\n if nuran == 1:\n shared_dict[str(chat_id) + 'n'] -= 1\n return\n if nuran is None and msg is True:\n shared_dict[str(chat_id) + 'n'] -= 1\n 
bot.sendMessage(chat_id, 'ni4ego ne naydeno(')\n return\n DIR = '/tmp'\n index = 0\n num = 0\n if msg is True:\n bot.sendMessage(chat_id, 'lovi fotki')\n while 1:\n try:\n print('trying to open %s' % nuran[index][1])\n url = _convert_to_idn(urllib.parse.unquote(nuran[index][1]))\n print('unquotted url %s' % url)\n url = urllib.parse.quote(url, safe=':/')\n req = urllib.request.Request(url, headers=header)\n raw_img = urllib.request.urlopen(req, timeout=5).read()\n type = 'jpg' if nuran[index][2] == True else 'png'\n image_name = ''.join(choice(ascii_letters) for i in range(20))\n f = open(os.path.join(DIR, image_name + '.' + type), 'wb')\n f.write(raw_img)\n f.close()\n print('sending %s' % os.path.join(DIR, image_name + '.' + type))\n bot.sendPhoto(chat_id, open(os.path.join(DIR, image_name + '.' +\n type), 'rb'))\n os.unlink(os.path.join(DIR, image_name + '.' + type))\n except TelegramError as e:\n print('Telegram error - {}'.format(e))\n index += 1\n continue\n except IndexError:\n print('index out of range, breaking')\n break\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print('error - {}'.format(e))\n print(exc_type, fname, exc_tb.tb_lineno)\n index += 1\n continue\n num += 1\n index += 1\n if num >= 3:\n break\n shared_dict[str(chat_id) + 'n'] -= 1\n print('chat id count for %s - %s' % (chat_id, shared_dict[str(chat_id) +\n 'n']))\n\n\n<mask token>\n\n\ndef handle_msg(msg, bot, shared_dict, db):\n chat_id = str(msg['chat']['id'])\n if msg['chat']['id'] in master_users and chat_id + 'chat' in shared_dict:\n if msg['text'].upper() == 'STOP':\n bot.sendMessage(chat_id, 'Chat has been ended', reply_markup={\n 'hide_keyboard': True})\n del shared_dict[chat_id + 'chat']\n return\n if shared_dict[chat_id + 'chat'] == 0:\n if msg['text'] not in chat_groups:\n bot.sendMessage(chat_id, 'Incorrect group', reply_markup={\n 'hide_keyboard': True})\n del shared_dict[chat_id + 'chat']\n return\n shared_dict[chat_id + 'chat'] = chat_groups[msg['text']]\n bot.sendMessage(chat_id, \"You're talking with group %s\" % msg[\n 'text'], reply_markup={'hide_keyboard': True})\n else:\n bot.sendMessage(shared_dict[chat_id + 'chat'], msg['text'])\n elif msg['chat']['id'\n ] in master_users and 'forward' in shared_dict and msg['text'].upper(\n ) == 'STOP FORWARD':\n bot.sendMessage(chat_id, 'OK, forwarding has been disabled, bye')\n del shared_dict['forward']\n elif msg['chat']['id'] in master_users and msg['text'].upper() == 'CHAT':\n bot.sendMessage(chat_id, 'Which one?', reply_markup={'keyboard':\n generate_bot_array(chat_groups.keys())})\n shared_dict[chat_id + 'chat'] = 0\n elif msg['chat']['id'] in master_users and msg['text'].upper(\n ) == 'FORWARD':\n bot.sendMessage(chat_id, \"OK, I'll forward all msgs to you\")\n shared_dict['forward'] = msg['chat']['id']\n elif msg['chat']['type'] == 'private' and msg['chat']['id'\n ] in allowed_users or (msg['chat']['type'] == 'supergroup' or msg[\n 'chat']['type'] == 'group') and msg['chat']['id'] in allowed_users:\n if chat_id + 'n' not in shared_dict:\n shared_dict[chat_id + 'n'] = 0\n if shared_dict[chat_id + 'n'] >= allowed_users[msg['chat']['id']]:\n bot.sendMessage(msg['chat']['id'], 'ya poka zanat')\n return\n if 'text' in msg and re.match('(FOTKI|ФОТКИ) .+', msg['text'].upper\n (), re.IGNORECASE):\n bot.sendMessage(chat_id, 'pristupayu k poisku fotok')\n get_image(chat_id, re.match('^[^\\\\s]+ (.+)$', msg['text'], re.\n IGNORECASE).group(1), shared_dict, db, bot)\n elif 
msg['chat']['type'] == 'private':\n bot.sendMessage(msg['from']['id'], 'idi na huy, ya teba ne znayu')\n else:\n pass\n",
"step-3": "<mask token>\n\n\ndef _convert_to_idn(url):\n \"\"\"Convert a URL to IDN notation\"\"\"\n parts = list(urllib.parse.urlsplit(url))\n try:\n parts[1].encode('ascii')\n except UnicodeEncodeError:\n host = parts[1].rsplit(':', 1)\n newhost = []\n port = ''\n if len(host) == 2:\n port = host.pop()\n for h in host[0].split('.'):\n newhost.append(h.encode('idna').decode('utf-8'))\n parts[1] = '.'.join(newhost)\n if port:\n parts[1] += ':' + port\n return urllib.parse.urlunsplit(parts)\n else:\n return url\n\n\ndef fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict):\n search = False\n if str(chat_id) + str(keyword_id) + 'db' in shared_dict:\n print('%s for group %s already in progress, sleeping for a while' %\n (keyword_id, chat_id))\n time.sleep(uniform(1, 5))\n else:\n shared_dict[str(chat_id) + str(keyword_id) + 'db'] = 1\n search = True\n query = (\n 'SELECT id,url,type FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s AND requested = FALSE LIMIT 10'\n )\n data = db.fetch_data(query, (chat_id, keyword_id, keyword_n))\n if data is not None:\n if len(data) < 3:\n query = (\n 'DELETE FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s'\n )\n db.execute(query, (chat_id, keyword_id, keyword_n))\n return None\n for i in data:\n query = (\n 'UPDATE images SET requested = TRUE WHERE chat_id = %s AND keyword_n = %s AND id = %s'\n )\n db.execute(query, (chat_id, keyword_n, i[0]))\n if search is True:\n del shared_dict[str(chat_id) + str(keyword_id) + 'db']\n return data\n return data\n\n\ndef fetch_images_from_google(chat_id, keyword, keyword_id, keyword_n,\n header, db, shared_dict, bot):\n if str(chat_id) + keyword in shared_dict:\n bot.sendMessage(chat_id, 'po etomu slovu poka idet poisk - ' + keyword)\n return 1\n shared_dict[str(chat_id) + keyword] = 1\n query = keyword.split()\n query = '+'.join(query)\n print('query - ' + query)\n url = 'https://www.google.co.in/search?q=' + urllib.parse.quote(query\n ) + '&source=lnms&tbm=isch'\n soup = BeautifulSoup(urllib.request.urlopen(urllib.request.Request(url,\n headers=header)), 'html.parser')\n ActualImages = []\n for a in soup.find_all('div', {'class': 'rg_meta'}):\n link, Type = json.loads(a.text)['ou'], json.loads(a.text)['ity']\n ActualImages.append((link, Type))\n total_images = len(ActualImages)\n if total_images == 0:\n del shared_dict[str(chat_id) + keyword]\n return None\n print('there are total', total_images, 'images')\n nuran = {}\n i = 0\n for a, (img, Type) in enumerate(ActualImages):\n if Type == 'png' or Type == 'jpg':\n nuran[i] = {}\n nuran[i]['url'] = img\n nuran[i]['type'] = Type\n i += 1\n if len(nuran) < 3:\n del shared_dict[str(chat_id) + keyword]\n return None\n del shared_dict[str(chat_id) + keyword]\n insert_images(chat_id, keyword_id, keyword_n, nuran, db)\n return fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict\n )\n\n\ndef fetch_images(chat_id, keyword, keyword_n, header, db, shared_dict, bot):\n keyword_id = check_image_request(chat_id, keyword, db)\n if keyword_id is not None:\n images = fetch_images_from_db(chat_id, keyword_id[0], keyword_n, db,\n shared_dict)\n return images if images is not None else fetch_images_from_google(\n chat_id, keyword, keyword_id, keyword_n, header, db,\n shared_dict, bot)\n keyword_id = insert_image_request(chat_id, keyword, db)\n return fetch_images_from_google(chat_id, keyword, keyword_id, keyword_n,\n header, db, shared_dict, bot)\n\n\ndef get_image(chat_id, keyword, shared_dict, db, bot, msg=True):\n 
print('keyword - ' + keyword)\n header = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36'\n }\n shared_dict[str(chat_id) + 'n'] += 1\n keyword_n = len(keyword) % 10\n nuran = fetch_images(chat_id, keyword, keyword_n, header, db,\n shared_dict, bot)\n if nuran == 1:\n shared_dict[str(chat_id) + 'n'] -= 1\n return\n if nuran is None and msg is True:\n shared_dict[str(chat_id) + 'n'] -= 1\n bot.sendMessage(chat_id, 'ni4ego ne naydeno(')\n return\n DIR = '/tmp'\n index = 0\n num = 0\n if msg is True:\n bot.sendMessage(chat_id, 'lovi fotki')\n while 1:\n try:\n print('trying to open %s' % nuran[index][1])\n url = _convert_to_idn(urllib.parse.unquote(nuran[index][1]))\n print('unquotted url %s' % url)\n url = urllib.parse.quote(url, safe=':/')\n req = urllib.request.Request(url, headers=header)\n raw_img = urllib.request.urlopen(req, timeout=5).read()\n type = 'jpg' if nuran[index][2] == True else 'png'\n image_name = ''.join(choice(ascii_letters) for i in range(20))\n f = open(os.path.join(DIR, image_name + '.' + type), 'wb')\n f.write(raw_img)\n f.close()\n print('sending %s' % os.path.join(DIR, image_name + '.' + type))\n bot.sendPhoto(chat_id, open(os.path.join(DIR, image_name + '.' +\n type), 'rb'))\n os.unlink(os.path.join(DIR, image_name + '.' + type))\n except TelegramError as e:\n print('Telegram error - {}'.format(e))\n index += 1\n continue\n except IndexError:\n print('index out of range, breaking')\n break\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print('error - {}'.format(e))\n print(exc_type, fname, exc_tb.tb_lineno)\n index += 1\n continue\n num += 1\n index += 1\n if num >= 3:\n break\n shared_dict[str(chat_id) + 'n'] -= 1\n print('chat id count for %s - %s' % (chat_id, shared_dict[str(chat_id) +\n 'n']))\n\n\ndef generate_bot_array(lst):\n keyboard_lst = []\n tmp_lst = []\n i = 0\n for val in lst:\n i += 1\n tmp_lst.append(val)\n if i % 3 == 0:\n keyboard_lst.append(tmp_lst)\n tmp_lst = []\n keyboard_lst.append(tmp_lst)\n return keyboard_lst\n\n\ndef handle_msg(msg, bot, shared_dict, db):\n chat_id = str(msg['chat']['id'])\n if msg['chat']['id'] in master_users and chat_id + 'chat' in shared_dict:\n if msg['text'].upper() == 'STOP':\n bot.sendMessage(chat_id, 'Chat has been ended', reply_markup={\n 'hide_keyboard': True})\n del shared_dict[chat_id + 'chat']\n return\n if shared_dict[chat_id + 'chat'] == 0:\n if msg['text'] not in chat_groups:\n bot.sendMessage(chat_id, 'Incorrect group', reply_markup={\n 'hide_keyboard': True})\n del shared_dict[chat_id + 'chat']\n return\n shared_dict[chat_id + 'chat'] = chat_groups[msg['text']]\n bot.sendMessage(chat_id, \"You're talking with group %s\" % msg[\n 'text'], reply_markup={'hide_keyboard': True})\n else:\n bot.sendMessage(shared_dict[chat_id + 'chat'], msg['text'])\n elif msg['chat']['id'\n ] in master_users and 'forward' in shared_dict and msg['text'].upper(\n ) == 'STOP FORWARD':\n bot.sendMessage(chat_id, 'OK, forwarding has been disabled, bye')\n del shared_dict['forward']\n elif msg['chat']['id'] in master_users and msg['text'].upper() == 'CHAT':\n bot.sendMessage(chat_id, 'Which one?', reply_markup={'keyboard':\n generate_bot_array(chat_groups.keys())})\n shared_dict[chat_id + 'chat'] = 0\n elif msg['chat']['id'] in master_users and msg['text'].upper(\n ) == 'FORWARD':\n bot.sendMessage(chat_id, \"OK, I'll forward all msgs to you\")\n 
shared_dict['forward'] = msg['chat']['id']\n elif msg['chat']['type'] == 'private' and msg['chat']['id'\n ] in allowed_users or (msg['chat']['type'] == 'supergroup' or msg[\n 'chat']['type'] == 'group') and msg['chat']['id'] in allowed_users:\n if chat_id + 'n' not in shared_dict:\n shared_dict[chat_id + 'n'] = 0\n if shared_dict[chat_id + 'n'] >= allowed_users[msg['chat']['id']]:\n bot.sendMessage(msg['chat']['id'], 'ya poka zanat')\n return\n if 'text' in msg and re.match('(FOTKI|ФОТКИ) .+', msg['text'].upper\n (), re.IGNORECASE):\n bot.sendMessage(chat_id, 'pristupayu k poisku fotok')\n get_image(chat_id, re.match('^[^\\\\s]+ (.+)$', msg['text'], re.\n IGNORECASE).group(1), shared_dict, db, bot)\n elif msg['chat']['type'] == 'private':\n bot.sendMessage(msg['from']['id'], 'idi na huy, ya teba ne znayu')\n else:\n pass\n",
"step-4": "import time\nimport re\nfrom config import allowed_users, master_users, chat_groups\nfrom bs4 import BeautifulSoup\nimport requests\nimport urllib.request, urllib.error, urllib.parse\nimport http.cookiejar\nimport json\nimport os\nimport sys\nfrom random import uniform, choice\nfrom string import ascii_letters\nfrom image import check_image_request, insert_images, insert_image_request\nfrom telepot.exception import TelegramError\nimport json\n\n\ndef _convert_to_idn(url):\n \"\"\"Convert a URL to IDN notation\"\"\"\n parts = list(urllib.parse.urlsplit(url))\n try:\n parts[1].encode('ascii')\n except UnicodeEncodeError:\n host = parts[1].rsplit(':', 1)\n newhost = []\n port = ''\n if len(host) == 2:\n port = host.pop()\n for h in host[0].split('.'):\n newhost.append(h.encode('idna').decode('utf-8'))\n parts[1] = '.'.join(newhost)\n if port:\n parts[1] += ':' + port\n return urllib.parse.urlunsplit(parts)\n else:\n return url\n\n\ndef fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict):\n search = False\n if str(chat_id) + str(keyword_id) + 'db' in shared_dict:\n print('%s for group %s already in progress, sleeping for a while' %\n (keyword_id, chat_id))\n time.sleep(uniform(1, 5))\n else:\n shared_dict[str(chat_id) + str(keyword_id) + 'db'] = 1\n search = True\n query = (\n 'SELECT id,url,type FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s AND requested = FALSE LIMIT 10'\n )\n data = db.fetch_data(query, (chat_id, keyword_id, keyword_n))\n if data is not None:\n if len(data) < 3:\n query = (\n 'DELETE FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s'\n )\n db.execute(query, (chat_id, keyword_id, keyword_n))\n return None\n for i in data:\n query = (\n 'UPDATE images SET requested = TRUE WHERE chat_id = %s AND keyword_n = %s AND id = %s'\n )\n db.execute(query, (chat_id, keyword_n, i[0]))\n if search is True:\n del shared_dict[str(chat_id) + str(keyword_id) + 'db']\n return data\n return data\n\n\ndef fetch_images_from_google(chat_id, keyword, keyword_id, keyword_n,\n header, db, shared_dict, bot):\n if str(chat_id) + keyword in shared_dict:\n bot.sendMessage(chat_id, 'po etomu slovu poka idet poisk - ' + keyword)\n return 1\n shared_dict[str(chat_id) + keyword] = 1\n query = keyword.split()\n query = '+'.join(query)\n print('query - ' + query)\n url = 'https://www.google.co.in/search?q=' + urllib.parse.quote(query\n ) + '&source=lnms&tbm=isch'\n soup = BeautifulSoup(urllib.request.urlopen(urllib.request.Request(url,\n headers=header)), 'html.parser')\n ActualImages = []\n for a in soup.find_all('div', {'class': 'rg_meta'}):\n link, Type = json.loads(a.text)['ou'], json.loads(a.text)['ity']\n ActualImages.append((link, Type))\n total_images = len(ActualImages)\n if total_images == 0:\n del shared_dict[str(chat_id) + keyword]\n return None\n print('there are total', total_images, 'images')\n nuran = {}\n i = 0\n for a, (img, Type) in enumerate(ActualImages):\n if Type == 'png' or Type == 'jpg':\n nuran[i] = {}\n nuran[i]['url'] = img\n nuran[i]['type'] = Type\n i += 1\n if len(nuran) < 3:\n del shared_dict[str(chat_id) + keyword]\n return None\n del shared_dict[str(chat_id) + keyword]\n insert_images(chat_id, keyword_id, keyword_n, nuran, db)\n return fetch_images_from_db(chat_id, keyword_id, keyword_n, db, shared_dict\n )\n\n\ndef fetch_images(chat_id, keyword, keyword_n, header, db, shared_dict, bot):\n keyword_id = check_image_request(chat_id, keyword, db)\n if keyword_id is not None:\n images = 
fetch_images_from_db(chat_id, keyword_id[0], keyword_n, db,\n shared_dict)\n return images if images is not None else fetch_images_from_google(\n chat_id, keyword, keyword_id, keyword_n, header, db,\n shared_dict, bot)\n keyword_id = insert_image_request(chat_id, keyword, db)\n return fetch_images_from_google(chat_id, keyword, keyword_id, keyword_n,\n header, db, shared_dict, bot)\n\n\ndef get_image(chat_id, keyword, shared_dict, db, bot, msg=True):\n print('keyword - ' + keyword)\n header = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36'\n }\n shared_dict[str(chat_id) + 'n'] += 1\n keyword_n = len(keyword) % 10\n nuran = fetch_images(chat_id, keyword, keyword_n, header, db,\n shared_dict, bot)\n if nuran == 1:\n shared_dict[str(chat_id) + 'n'] -= 1\n return\n if nuran is None and msg is True:\n shared_dict[str(chat_id) + 'n'] -= 1\n bot.sendMessage(chat_id, 'ni4ego ne naydeno(')\n return\n DIR = '/tmp'\n index = 0\n num = 0\n if msg is True:\n bot.sendMessage(chat_id, 'lovi fotki')\n while 1:\n try:\n print('trying to open %s' % nuran[index][1])\n url = _convert_to_idn(urllib.parse.unquote(nuran[index][1]))\n print('unquotted url %s' % url)\n url = urllib.parse.quote(url, safe=':/')\n req = urllib.request.Request(url, headers=header)\n raw_img = urllib.request.urlopen(req, timeout=5).read()\n type = 'jpg' if nuran[index][2] == True else 'png'\n image_name = ''.join(choice(ascii_letters) for i in range(20))\n f = open(os.path.join(DIR, image_name + '.' + type), 'wb')\n f.write(raw_img)\n f.close()\n print('sending %s' % os.path.join(DIR, image_name + '.' + type))\n bot.sendPhoto(chat_id, open(os.path.join(DIR, image_name + '.' +\n type), 'rb'))\n os.unlink(os.path.join(DIR, image_name + '.' 
+ type))\n except TelegramError as e:\n print('Telegram error - {}'.format(e))\n index += 1\n continue\n except IndexError:\n print('index out of range, breaking')\n break\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print('error - {}'.format(e))\n print(exc_type, fname, exc_tb.tb_lineno)\n index += 1\n continue\n num += 1\n index += 1\n if num >= 3:\n break\n shared_dict[str(chat_id) + 'n'] -= 1\n print('chat id count for %s - %s' % (chat_id, shared_dict[str(chat_id) +\n 'n']))\n\n\ndef generate_bot_array(lst):\n keyboard_lst = []\n tmp_lst = []\n i = 0\n for val in lst:\n i += 1\n tmp_lst.append(val)\n if i % 3 == 0:\n keyboard_lst.append(tmp_lst)\n tmp_lst = []\n keyboard_lst.append(tmp_lst)\n return keyboard_lst\n\n\ndef handle_msg(msg, bot, shared_dict, db):\n chat_id = str(msg['chat']['id'])\n if msg['chat']['id'] in master_users and chat_id + 'chat' in shared_dict:\n if msg['text'].upper() == 'STOP':\n bot.sendMessage(chat_id, 'Chat has been ended', reply_markup={\n 'hide_keyboard': True})\n del shared_dict[chat_id + 'chat']\n return\n if shared_dict[chat_id + 'chat'] == 0:\n if msg['text'] not in chat_groups:\n bot.sendMessage(chat_id, 'Incorrect group', reply_markup={\n 'hide_keyboard': True})\n del shared_dict[chat_id + 'chat']\n return\n shared_dict[chat_id + 'chat'] = chat_groups[msg['text']]\n bot.sendMessage(chat_id, \"You're talking with group %s\" % msg[\n 'text'], reply_markup={'hide_keyboard': True})\n else:\n bot.sendMessage(shared_dict[chat_id + 'chat'], msg['text'])\n elif msg['chat']['id'\n ] in master_users and 'forward' in shared_dict and msg['text'].upper(\n ) == 'STOP FORWARD':\n bot.sendMessage(chat_id, 'OK, forwarding has been disabled, bye')\n del shared_dict['forward']\n elif msg['chat']['id'] in master_users and msg['text'].upper() == 'CHAT':\n bot.sendMessage(chat_id, 'Which one?', reply_markup={'keyboard':\n generate_bot_array(chat_groups.keys())})\n shared_dict[chat_id + 'chat'] = 0\n elif msg['chat']['id'] in master_users and msg['text'].upper(\n ) == 'FORWARD':\n bot.sendMessage(chat_id, \"OK, I'll forward all msgs to you\")\n shared_dict['forward'] = msg['chat']['id']\n elif msg['chat']['type'] == 'private' and msg['chat']['id'\n ] in allowed_users or (msg['chat']['type'] == 'supergroup' or msg[\n 'chat']['type'] == 'group') and msg['chat']['id'] in allowed_users:\n if chat_id + 'n' not in shared_dict:\n shared_dict[chat_id + 'n'] = 0\n if shared_dict[chat_id + 'n'] >= allowed_users[msg['chat']['id']]:\n bot.sendMessage(msg['chat']['id'], 'ya poka zanat')\n return\n if 'text' in msg and re.match('(FOTKI|ФОТКИ) .+', msg['text'].upper\n (), re.IGNORECASE):\n bot.sendMessage(chat_id, 'pristupayu k poisku fotok')\n get_image(chat_id, re.match('^[^\\\\s]+ (.+)$', msg['text'], re.\n IGNORECASE).group(1), shared_dict, db, bot)\n elif msg['chat']['type'] == 'private':\n bot.sendMessage(msg['from']['id'], 'idi na huy, ya teba ne znayu')\n else:\n pass\n",
"step-5": "# -*- coding: utf-8 -*-\nimport time\nimport re\nfrom config import allowed_users, master_users, chat_groups\nfrom bs4 import BeautifulSoup\nimport requests\nimport urllib.request, urllib.error, urllib.parse\nimport http.cookiejar\nimport json\nimport os\nimport sys\n#from random import randint, choice\nfrom random import uniform, choice\nfrom string import ascii_letters\nfrom image import check_image_request, insert_images, insert_image_request\nfrom telepot.exception import TelegramError\nimport json\n\ndef _convert_to_idn(url):\n \"\"\"Convert a URL to IDN notation\"\"\"\n # this function should only be called with a unicode string\n # strategy: if the host cannot be encoded in ascii, then\n # it'll be necessary to encode it in idn form\n parts = list(urllib.parse.urlsplit(url))\n try:\n parts[1].encode('ascii')\n except UnicodeEncodeError:\n # the url needs to be converted to idn notation\n host = parts[1].rsplit(':', 1)\n newhost = []\n port = ''\n if len(host) == 2:\n port = host.pop()\n for h in host[0].split('.'):\n newhost.append(h.encode('idna').decode('utf-8'))\n parts[1] = '.'.join(newhost)\n if port:\n parts[1] += ':' + port\n return urllib.parse.urlunsplit(parts)\n else:\n return url \n\ndef fetch_images_from_db(chat_id,keyword_id,keyword_n,db,shared_dict):\n search = False\n if str(chat_id) + str(keyword_id) +'db' in shared_dict:\n print(\"%s for group %s already in progress, sleeping for a while\" % (keyword_id,chat_id))\n time.sleep(uniform(1,5))\n else:\n shared_dict[str(chat_id) + str(keyword_id) + 'db'] = 1\n search = True\n\n query = \"SELECT id,url,type FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s AND requested = FALSE LIMIT 10\"\n #print 'chat_id - {}, keyword_id - {}, keyword_n - {}'.format(chat_id,keyword_id,keyword_n)\n data = db.fetch_data(query,(chat_id,keyword_id,keyword_n,))\n if data is not None:\n if len(data) < 3:\n query = \"DELETE FROM images WHERE chat_id = %s AND keyword_id = %s AND keyword_n = %s\"\n db.execute(query,(chat_id,keyword_id,keyword_n,))\n return None\n for i in data:\n query = \"UPDATE images SET requested = TRUE WHERE chat_id = %s AND keyword_n = %s AND id = %s\"\n db.execute(query,(chat_id,keyword_n,i[0]))\n if search is True:\n del shared_dict[str(chat_id) + str(keyword_id) + 'db']\n return data\n return data\n\ndef fetch_images_from_google(chat_id,keyword,keyword_id,keyword_n,header,db,shared_dict,bot):\n if str(chat_id) + keyword in shared_dict:\n bot.sendMessage(chat_id,'po etomu slovu poka idet poisk - ' + keyword)\n return 1\n\n shared_dict[str(chat_id) + keyword] = 1\n query = keyword.split()\n #query = str('+'.join(query).encode('utf-8'))\n query = '+'.join(query)\n print('query - ' + query)\n\n url=\"https://www.google.co.in/search?q=\"+urllib.parse.quote(query)+\"&source=lnms&tbm=isch\"\n soup = BeautifulSoup(urllib.request.urlopen(urllib.request.Request(url,headers=header)),'html.parser')\n\n ActualImages=[]\n for a in soup.find_all(\"div\",{\"class\":\"rg_meta\"}):\n link , Type =json.loads(a.text)[\"ou\"] ,json.loads(a.text)[\"ity\"]\n ActualImages.append((link,Type))\n\n total_images = len(ActualImages)\n if total_images == 0:\n del shared_dict[str(chat_id) + keyword]\n return None\n\n print(\"there are total\" , total_images,\"images\")\n nuran = {}\n i = 0\n\n for a, (img , Type) in enumerate( ActualImages):\n if Type == 'png' or Type == 'jpg':\n nuran[i] = {}\n nuran[i]['url'] = img\n nuran[i]['type'] = Type\n i += 1\n\n if len(nuran) < 3:\n del shared_dict[str(chat_id) + keyword]\n 
return None\n\n del shared_dict[str(chat_id) + keyword]\n #print shared_dict\n\n insert_images(chat_id,keyword_id,keyword_n,nuran,db)\n return fetch_images_from_db(chat_id,keyword_id,keyword_n,db,shared_dict)\n\ndef fetch_images(chat_id,keyword,keyword_n,header,db,shared_dict,bot):\n keyword_id = check_image_request(chat_id,keyword,db)\n if keyword_id is not None:\n images = fetch_images_from_db(chat_id,keyword_id[0],keyword_n,db,shared_dict)\n return images if images is not None else fetch_images_from_google(chat_id,keyword,keyword_id,keyword_n,header,db,shared_dict,bot)\n\n keyword_id = insert_image_request(chat_id,keyword,db)\n return fetch_images_from_google(chat_id,keyword,keyword_id,keyword_n,header,db,shared_dict,bot)\n\ndef get_image(chat_id,keyword,shared_dict,db,bot,msg=True):\n print('keyword - ' + keyword)\n\n header={'User-Agent':\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36\"}\n shared_dict[str(chat_id) + 'n'] += 1\n\n keyword_n = len(keyword) % 10\n nuran = fetch_images(chat_id,keyword,keyword_n,header,db,shared_dict,bot)\n if nuran == 1:\n shared_dict[str(chat_id) + 'n'] -= 1\n return\n if nuran is None and msg is True:\n shared_dict[str(chat_id) + 'n'] -= 1\n bot.sendMessage(chat_id,'ni4ego ne naydeno(')\n return\n\n DIR = '/tmp'\n index = 0\n num = 0\n\n if msg is True:\n bot.sendMessage(chat_id,'lovi fotki')\n while 1:\n try:\n print('trying to open %s' % nuran[index][1])\n url = _convert_to_idn(urllib.parse.unquote(nuran[index][1]))\n print('unquotted url %s' % url)\n url = urllib.parse.quote(url,safe=':/')\n req = urllib.request.Request(url, headers=header)\n raw_img = urllib.request.urlopen(req,timeout=5).read()\n type = 'jpg' if nuran[index][2] == True else 'png'\n image_name = \"\".join(choice(ascii_letters) for i in range(20))\n f = open(os.path.join(DIR , image_name + \".\"+type), 'wb')\n f.write(raw_img)\n f.close()\n print('sending %s' % os.path.join(DIR , image_name + \".\"+type))\n bot.sendPhoto(chat_id,open(os.path.join(DIR , image_name + \".\"+type), 'rb'))\n os.unlink(os.path.join(DIR , image_name + \".\"+type))\n except TelegramError as e:\n print(\"Telegram error - {}\".format(e))\n index += 1\n #if e[0] == 'Bad Request: PHOTO_INVALID_DIMENSIONS':\n # print('invalid image')\n continue\n except IndexError:\n print(\"index out of range, breaking\")\n break\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"error - {}\".format(e))\n print(exc_type, fname, exc_tb.tb_lineno)\n index += 1\n continue\n num += 1\n index += 1\n if num >= 3:\n break\n\n shared_dict[str(chat_id) + 'n'] -= 1\n print('chat id count for %s - %s' % (chat_id,shared_dict[str(chat_id) + 'n']))\n\ndef generate_bot_array(lst):\n keyboard_lst = []\n tmp_lst = []\n i = 0\n for val in lst:\n i += 1\n tmp_lst.append(val)\n if i % 3 == 0:\n keyboard_lst.append(tmp_lst)\n tmp_lst = []\n\n keyboard_lst.append(tmp_lst)\n return keyboard_lst\n\ndef handle_msg(msg,bot,shared_dict,db):\n chat_id = str(msg['chat']['id'])\n if msg['chat']['id'] in master_users and chat_id + 'chat' in shared_dict:\n if msg['text'].upper() == 'STOP':\n bot.sendMessage(chat_id,'Chat has been ended',reply_markup={'hide_keyboard': True})\n del shared_dict[chat_id + 'chat']\n return\n\n if shared_dict[chat_id + 'chat'] == 0:\n if msg['text'] not in chat_groups:\n bot.sendMessage(chat_id,'Incorrect group',reply_markup={'hide_keyboard': True})\n del shared_dict[chat_id + 
'chat']\n return\n\n shared_dict[chat_id + 'chat'] = chat_groups[msg['text']]\n bot.sendMessage(chat_id,\"You're talking with group %s\" % msg['text'],reply_markup={'hide_keyboard': True})\n else:\n bot.sendMessage(shared_dict[chat_id + 'chat'],msg['text'])\n elif msg['chat']['id'] in master_users and 'forward' in shared_dict and msg['text'].upper() == 'STOP FORWARD':\n bot.sendMessage(chat_id,\"OK, forwarding has been disabled, bye\")\n del shared_dict['forward']\n elif msg['chat']['id'] in master_users and msg['text'].upper() == 'CHAT':\n bot.sendMessage(chat_id,'Which one?',reply_markup={'keyboard': generate_bot_array(chat_groups.keys())})\n shared_dict[chat_id + 'chat'] = 0\n elif msg['chat']['id'] in master_users and msg['text'].upper() == 'FORWARD':\n bot.sendMessage(chat_id,\"OK, I'll forward all msgs to you\")\n shared_dict['forward'] = msg['chat']['id']\n elif ((msg['chat']['type'] == 'private' and msg['chat']['id'] in allowed_users) or\n (msg['chat']['type'] == 'supergroup' or msg['chat']['type'] == 'group') and msg['chat']['id'] in allowed_users):\n\n if chat_id + 'n' not in shared_dict:\n shared_dict[chat_id + 'n'] = 0\n\n # check shared dict\n if shared_dict[chat_id + 'n'] >= allowed_users[msg['chat']['id']]:\n bot.sendMessage(msg['chat']['id'],'ya poka zanat')\n return\n\n # check msgs\n if 'text' in msg and re.match(r'(FOTKI|ФОТКИ) .+',msg['text'].upper(),re.IGNORECASE):\n bot.sendMessage(chat_id,'pristupayu k poisku fotok')\n get_image(chat_id,re.match(r'^[^\\s]+ (.+)$',msg['text'],re.IGNORECASE).group(1),shared_dict,db,bot)\n elif msg['chat']['type'] == 'private':\n bot.sendMessage(msg['from']['id'],'idi na huy, ya teba ne znayu')\n else:\n pass\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def runWeka(wekapath, modelpath, datapath):
os.chdir(wekapath)
proc = subprocess.Popen(['/usr/bin/java', '-classpath', 'weka.jar',
'weka.classifiers.functions.MultilayerPerceptron', '-l', modelpath,
'-T', datapath, '-p', '0'], stdout=subprocess.PIPE, stderr=
subprocess.PIPE)
out, err = proc.communicate()
return out
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def runWeka(wekapath, modelpath, datapath):
os.chdir(wekapath)
proc = subprocess.Popen(['/usr/bin/java', '-classpath', 'weka.jar',
'weka.classifiers.functions.MultilayerPerceptron', '-l', modelpath,
'-T', datapath, '-p', '0'], stdout=subprocess.PIPE, stderr=
subprocess.PIPE)
out, err = proc.communicate()
return out
<|reserved_special_token_0|>
if __name__ == '__main__':
my_arg_parser = argparse.ArgumentParser()
my_arg_parser.add_argument('-p', '--weka-path', help=
'Path to Weka application folder', dest='wekapath')
my_arg_parser.add_argument('-m', '--weka-model', help=
'Path to Weka serialized model', dest='modelpath')
my_arg_parser.add_argument('-d', '--weka-dataset', help=
'Path to testset', default='', dest='datapath')
my_args = my_arg_parser.parse_args()
predictions = runWeka(my_args.wekapath, my_args.modelpath, my_args.datapath
)
k = 1
matrix = []
for row in predictions.split('\n'):
if k < 6:
k = k + 1
continue
else:
if row == '':
continue
instance, actual, predicted, error = row.split()
matrix.append([int(instance), float(actual), float(predicted)])
matrix = np.array(matrix)
matrix[:, 2][matrix[:, 2] < 0] = 0
plt.style.use('ggplot')
f = plt.figure(1)
plt.plot(matrix[:, 0], matrix[:, 1], label='actual', color='red')
plt.plot(matrix[:, 0], matrix[:, 2], label='predicted', color='royalblue')
plt.xlabel('Instance number')
plt.ylabel('Packet Loss Rate')
plt.grid(True)
plt.legend(loc=1)
plt.show()
<|reserved_special_token_1|>
import subprocess
import logging
import time
import argparse
import threading
import os
import matplotlib.pyplot as plt
import numpy as np
def runWeka(wekapath, modelpath, datapath):
os.chdir(wekapath)
proc = subprocess.Popen(['/usr/bin/java', '-classpath', 'weka.jar',
'weka.classifiers.functions.MultilayerPerceptron', '-l', modelpath,
'-T', datapath, '-p', '0'], stdout=subprocess.PIPE, stderr=
subprocess.PIPE)
out, err = proc.communicate()
return out
<|reserved_special_token_0|>
if __name__ == '__main__':
my_arg_parser = argparse.ArgumentParser()
my_arg_parser.add_argument('-p', '--weka-path', help=
'Path to Weka application folder', dest='wekapath')
my_arg_parser.add_argument('-m', '--weka-model', help=
'Path to Weka serialized model', dest='modelpath')
my_arg_parser.add_argument('-d', '--weka-dataset', help=
'Path to testset', default='', dest='datapath')
my_args = my_arg_parser.parse_args()
predictions = runWeka(my_args.wekapath, my_args.modelpath, my_args.datapath
)
k = 1
matrix = []
for row in predictions.split('\n'):
if k < 6:
k = k + 1
continue
else:
if row == '':
continue
instance, actual, predicted, error = row.split()
matrix.append([int(instance), float(actual), float(predicted)])
matrix = np.array(matrix)
matrix[:, 2][matrix[:, 2] < 0] = 0
plt.style.use('ggplot')
f = plt.figure(1)
plt.plot(matrix[:, 0], matrix[:, 1], label='actual', color='red')
plt.plot(matrix[:, 0], matrix[:, 2], label='predicted', color='royalblue')
plt.xlabel('Instance number')
plt.ylabel('Packet Loss Rate')
plt.grid(True)
plt.legend(loc=1)
plt.show()
<|reserved_special_token_1|>
import subprocess
import logging
import time
import argparse
import threading
import os
import matplotlib.pyplot as plt
import numpy as np
def runWeka(wekapath, modelpath, datapath):
os.chdir(wekapath)
proc = subprocess.Popen(['/usr/bin/java', '-classpath', 'weka.jar', 'weka.classifiers.functions.MultilayerPerceptron', '-l', modelpath, '-T', datapath, '-p', '0'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
return out
"""
Test offline trained model in Weka on a test set
"""
if __name__ == '__main__':
#Input arguments
my_arg_parser = argparse.ArgumentParser()
my_arg_parser.add_argument("-p","--weka-path", help="Path to Weka application folder", dest="wekapath")
my_arg_parser.add_argument("-m","--weka-model", help="Path to Weka serialized model", dest="modelpath")
my_arg_parser.add_argument("-d","--weka-dataset", help="Path to testset", default="", dest="datapath")
my_args = my_arg_parser.parse_args()
#wekapath="/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/Weka_stable-3-6/weka/"
#modelpath="/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/Neural_network_MACperf_prediction.model"
#datapath="/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/802_15_4_perf_30s_testset_Weka.csv"
predictions=runWeka(my_args.wekapath, my_args.modelpath, my_args.datapath)
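    # the first five lines of Weka's "-p 0" output are skipped below (k<6) — they hold the header, not predictions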
k=1
matrix = []
for row in predictions.split('\n'):
if k<6:
k=k+1
continue
else:
if row=='':
continue
instance, actual, predicted, error=row.split()
matrix.append([int(instance), float(actual), float(predicted)])
matrix=np.array(matrix)
matrix[:,2][matrix[:,2]<0]=0 #disable negative predictions
#Visualize results
plt.style.use('ggplot')
f=plt.figure(1)
plt.plot(matrix[:,0], matrix[:,1], label='actual', color='red')
plt.plot(matrix[:,0], matrix[:,2], label='predicted', color='royalblue')
plt.xlabel('Instance number')
plt.ylabel('Packet Loss Rate')
plt.grid(True)
plt.legend(loc=1)
plt.show()
|
flexible
|
{
"blob_id": "a1f0eced5d122fe8557ebc4d707c87b4194513e3",
"index": 4976,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef runWeka(wekapath, modelpath, datapath):\n os.chdir(wekapath)\n proc = subprocess.Popen(['/usr/bin/java', '-classpath', 'weka.jar',\n 'weka.classifiers.functions.MultilayerPerceptron', '-l', modelpath,\n '-T', datapath, '-p', '0'], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n out, err = proc.communicate()\n return out\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef runWeka(wekapath, modelpath, datapath):\n os.chdir(wekapath)\n proc = subprocess.Popen(['/usr/bin/java', '-classpath', 'weka.jar',\n 'weka.classifiers.functions.MultilayerPerceptron', '-l', modelpath,\n '-T', datapath, '-p', '0'], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n out, err = proc.communicate()\n return out\n\n\n<mask token>\nif __name__ == '__main__':\n my_arg_parser = argparse.ArgumentParser()\n my_arg_parser.add_argument('-p', '--weka-path', help=\n 'Path to Weka application folder', dest='wekapath')\n my_arg_parser.add_argument('-m', '--weka-model', help=\n 'Path to Weka serialized model', dest='modelpath')\n my_arg_parser.add_argument('-d', '--weka-dataset', help=\n 'Path to testset', default='', dest='datapath')\n my_args = my_arg_parser.parse_args()\n predictions = runWeka(my_args.wekapath, my_args.modelpath, my_args.datapath\n )\n k = 1\n matrix = []\n for row in predictions.split('\\n'):\n if k < 6:\n k = k + 1\n continue\n else:\n if row == '':\n continue\n instance, actual, predicted, error = row.split()\n matrix.append([int(instance), float(actual), float(predicted)])\n matrix = np.array(matrix)\n matrix[:, 2][matrix[:, 2] < 0] = 0\n plt.style.use('ggplot')\n f = plt.figure(1)\n plt.plot(matrix[:, 0], matrix[:, 1], label='actual', color='red')\n plt.plot(matrix[:, 0], matrix[:, 2], label='predicted', color='royalblue')\n plt.xlabel('Instance number')\n plt.ylabel('Packet Loss Rate')\n plt.grid(True)\n plt.legend(loc=1)\n plt.show()\n",
"step-4": "import subprocess\nimport logging\nimport time\nimport argparse\nimport threading\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\n\n\ndef runWeka(wekapath, modelpath, datapath):\n os.chdir(wekapath)\n proc = subprocess.Popen(['/usr/bin/java', '-classpath', 'weka.jar',\n 'weka.classifiers.functions.MultilayerPerceptron', '-l', modelpath,\n '-T', datapath, '-p', '0'], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n out, err = proc.communicate()\n return out\n\n\n<mask token>\nif __name__ == '__main__':\n my_arg_parser = argparse.ArgumentParser()\n my_arg_parser.add_argument('-p', '--weka-path', help=\n 'Path to Weka application folder', dest='wekapath')\n my_arg_parser.add_argument('-m', '--weka-model', help=\n 'Path to Weka serialized model', dest='modelpath')\n my_arg_parser.add_argument('-d', '--weka-dataset', help=\n 'Path to testset', default='', dest='datapath')\n my_args = my_arg_parser.parse_args()\n predictions = runWeka(my_args.wekapath, my_args.modelpath, my_args.datapath\n )\n k = 1\n matrix = []\n for row in predictions.split('\\n'):\n if k < 6:\n k = k + 1\n continue\n else:\n if row == '':\n continue\n instance, actual, predicted, error = row.split()\n matrix.append([int(instance), float(actual), float(predicted)])\n matrix = np.array(matrix)\n matrix[:, 2][matrix[:, 2] < 0] = 0\n plt.style.use('ggplot')\n f = plt.figure(1)\n plt.plot(matrix[:, 0], matrix[:, 1], label='actual', color='red')\n plt.plot(matrix[:, 0], matrix[:, 2], label='predicted', color='royalblue')\n plt.xlabel('Instance number')\n plt.ylabel('Packet Loss Rate')\n plt.grid(True)\n plt.legend(loc=1)\n plt.show()\n",
"step-5": "import subprocess\nimport logging\nimport time\nimport argparse\nimport threading\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\n\ndef runWeka(wekapath, modelpath, datapath):\n os.chdir(wekapath)\n proc = subprocess.Popen(['/usr/bin/java', '-classpath', 'weka.jar', 'weka.classifiers.functions.MultilayerPerceptron', '-l', modelpath, '-T', datapath, '-p', '0'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n return out\n\n\n\"\"\"\nTest offline trained model in Weka on a test set\n\"\"\"\nif __name__ == '__main__':\n \n #Input arguments\n my_arg_parser = argparse.ArgumentParser()\n my_arg_parser.add_argument(\"-p\",\"--weka-path\", help=\"Path to Weka application folder\", dest=\"wekapath\")\n my_arg_parser.add_argument(\"-m\",\"--weka-model\", help=\"Path to Weka serialized model\", dest=\"modelpath\")\n my_arg_parser.add_argument(\"-d\",\"--weka-dataset\", help=\"Path to testset\", default=\"\", dest=\"datapath\")\n\n my_args = my_arg_parser.parse_args()\n \n #wekapath=\"/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/Weka_stable-3-6/weka/\"\n #modelpath=\"/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/Neural_network_MACperf_prediction.model\"\n #datapath=\"/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/802_15_4_perf_30s_testset_Weka.csv\"\n \n predictions=runWeka(my_args.wekapath, my_args.modelpath, my_args.datapath)\n \n k=1\n matrix = []\n for row in predictions.split('\\n'):\n \n if k<6:\n k=k+1\n continue\n else:\n if row=='':\n continue\n instance, actual, predicted, error=row.split()\n matrix.append([int(instance), float(actual), float(predicted)])\n \n matrix=np.array(matrix) \n matrix[:,2][matrix[:,2]<0]=0 #disable negative predictions\n \n #Visualize results \n plt.style.use('ggplot')\n f=plt.figure(1)\n plt.plot(matrix[:,0], matrix[:,1], label='actual', color='red')\n plt.plot(matrix[:,0], matrix[:,2], label='predicted', color='royalblue')\n plt.xlabel('Instance number')\n plt.ylabel('Packet Loss Rate')\n plt.grid(True)\n plt.legend(loc=1)\n \n plt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
ii = [('CookGHP3.py', 1), ('AubePRP2.py', 1), ('WilkJMC3.py', 1), (
'LeakWTI3.py', 1), ('AubePRP.py', 2), ('GellWPT.py', 2), ('AdamWEP.py',
1), ('KiddJAE.py', 1), ('CoolWHM.py', 1), ('WadeJEB.py', 1), (
'SoutRD.py', 2), ('WheeJPT.py', 1), ('HowiWRL2.py', 1), ('WilkJMC.py',
1), ('WestJIT.py', 1), ('DequTKM.py', 2), ('StorJCC.py', 1), (
'DibdTRL.py', 1), ('TaylIF.py', 1), ('ThomWEC.py', 1)]
|
flexible
|
{
"blob_id": "dce496c9ae6605e95ffbbb2885ec15b19fb756ef",
"index": 2799,
"step-1": "<mask token>\n",
"step-2": "ii = [('CookGHP3.py', 1), ('AubePRP2.py', 1), ('WilkJMC3.py', 1), (\n 'LeakWTI3.py', 1), ('AubePRP.py', 2), ('GellWPT.py', 2), ('AdamWEP.py',\n 1), ('KiddJAE.py', 1), ('CoolWHM.py', 1), ('WadeJEB.py', 1), (\n 'SoutRD.py', 2), ('WheeJPT.py', 1), ('HowiWRL2.py', 1), ('WilkJMC.py', \n 1), ('WestJIT.py', 1), ('DequTKM.py', 2), ('StorJCC.py', 1), (\n 'DibdTRL.py', 1), ('TaylIF.py', 1), ('ThomWEC.py', 1)]\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import struct
from coapthon import defines
from coapthon.utils import byte_len, bit_len, parse_blockwise
__author__ = 'Giacomo Tanganelli'
__version__ = "2.0"
class BlockwiseLayer(object):
"""
Handles the Blockwise feature.
"""
def __init__(self, parent):
"""
Initialize a Blockwise Layer.
:type parent: coapserver.CoAP
:param parent: the CoAP server
"""
self._parent = parent
def handle_request(self, request):
"""
Store Blockwise parameter required by clients
:param request: the request message
:return: M bit, request
"""
ret = True
for option in request.options:
if option.number == defines.inv_options["Block2"]:
host, port = request.source
key = hash(str(host) + str(port) + str(request.token))
num, m, size = parse_blockwise(option.raw_value)
# remember choices
if key in self._parent.blockwise:
block, byte, num2, m2, size2 = self._parent.blockwise[key]
if block == 2:
self._parent.blockwise[key] = (2, byte, num, m, size)
else:
self._parent.blockwise[key] = (2, 0, num, m, size)
else:
self._parent.blockwise[key] = (2, 0, num, m, size)
elif option.number == defines.inv_options["Block1"]:
host, port = request.source
key = hash(str(host) + str(port) + str(request.token))
num, m, size = parse_blockwise(option.raw_value)
# remember choices
self._parent.blockwise[key] = (1, 0, num, m, size)
if m == 0:
del self._parent.blockwise[key]
ret = False
return ret, request
def start_block2(self, request):
"""
Initialize a blockwise response. Used if payload > 1024
:param request: the request message
"""
host, port = request.source
key = hash(str(host) + str(port) + str(request.token))
self._parent.blockwise[key] = (2, 0, 0, 1, 1024)
def handle_response(self, key, response, resource):
"""
Handle Blockwise in responses.
:param key: key parameter to search inside the dictionary
:param response: the response message
        :param resource: the resource whose payload is being transferred
:return: the new response
"""
block, byte, num, m, size = self._parent.blockwise[key]
payload = resource.payload
if block == 2:
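            # slice out the next block; a full-size slice means more data may follow, so set the M bit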
ret = payload[byte:byte + size]
if len(ret) == size:
m = 1
else:
m = 0
response.block2 = (num, m, size)
response.payload = ret
byte += size
num += 1
if m == 0:
del self._parent.blockwise[key]
else:
self._parent.blockwise[key] = (2, byte, num, m, size)
elif block == 1:
if m == 1:
response.code = defines.responses["CONTINUE"]
response.block1 = (num, m, size)
return response
|
normal
|
{
"blob_id": "70d740a7003ca3f2d2cde039b2fc470ef2165e77",
"index": 7078,
"step-1": "<mask token>\n\n\nclass BlockwiseLayer(object):\n <mask token>\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options['Block2']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = 2, byte, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n elif option.number == defines.inv_options['Block1']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n self._parent.blockwise[key] = 1, 0, num, m, size\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = 2, 0, 0, 1, 1024\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = num, m, size\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = 2, byte, num, m, size\n elif block == 1:\n if m == 1:\n response.code = defines.responses['CONTINUE']\n response.block1 = num, m, size\n return response\n",
"step-2": "<mask token>\n\n\nclass BlockwiseLayer(object):\n \"\"\"\n Handles the Blockwise feature.\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options['Block2']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = 2, byte, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n elif option.number == defines.inv_options['Block1']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n self._parent.blockwise[key] = 1, 0, num, m, size\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = 2, 0, 0, 1, 1024\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = num, m, size\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = 2, byte, num, m, size\n elif block == 1:\n if m == 1:\n response.code = defines.responses['CONTINUE']\n response.block1 = num, m, size\n return response\n",
"step-3": "<mask token>\n__author__ = 'Giacomo Tanganelli'\n__version__ = '2.0'\n\n\nclass BlockwiseLayer(object):\n \"\"\"\n Handles the Blockwise feature.\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options['Block2']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = 2, byte, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n elif option.number == defines.inv_options['Block1']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n self._parent.blockwise[key] = 1, 0, num, m, size\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = 2, 0, 0, 1, 1024\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = num, m, size\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = 2, byte, num, m, size\n elif block == 1:\n if m == 1:\n response.code = defines.responses['CONTINUE']\n response.block1 = num, m, size\n return response\n",
"step-4": "import struct\nfrom coapthon import defines\nfrom coapthon.utils import byte_len, bit_len, parse_blockwise\n__author__ = 'Giacomo Tanganelli'\n__version__ = '2.0'\n\n\nclass BlockwiseLayer(object):\n \"\"\"\n Handles the Blockwise feature.\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options['Block2']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = 2, byte, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n else:\n self._parent.blockwise[key] = 2, 0, num, m, size\n elif option.number == defines.inv_options['Block1']:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n self._parent.blockwise[key] = 1, 0, num, m, size\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = 2, 0, 0, 1, 1024\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = num, m, size\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = 2, byte, num, m, size\n elif block == 1:\n if m == 1:\n response.code = defines.responses['CONTINUE']\n response.block1 = num, m, size\n return response\n",
"step-5": "import struct\nfrom coapthon import defines\nfrom coapthon.utils import byte_len, bit_len, parse_blockwise\n\n__author__ = 'Giacomo Tanganelli'\n__version__ = \"2.0\"\n\n\nclass BlockwiseLayer(object):\n \"\"\"\n Handles the Blockwise feature.\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Initialize a Blockwise Layer.\n\n :type parent: coapserver.CoAP\n :param parent: the CoAP server\n \"\"\"\n self._parent = parent\n\n def handle_request(self, request):\n \"\"\"\n Store Blockwise parameter required by clients\n\n :param request: the request message\n :return: M bit, request\n \"\"\"\n ret = True\n for option in request.options:\n if option.number == defines.inv_options[\"Block2\"]:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n # remember choices\n if key in self._parent.blockwise:\n block, byte, num2, m2, size2 = self._parent.blockwise[key]\n if block == 2:\n self._parent.blockwise[key] = (2, byte, num, m, size)\n else:\n self._parent.blockwise[key] = (2, 0, num, m, size)\n else:\n self._parent.blockwise[key] = (2, 0, num, m, size)\n elif option.number == defines.inv_options[\"Block1\"]:\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n num, m, size = parse_blockwise(option.raw_value)\n # remember choices\n self._parent.blockwise[key] = (1, 0, num, m, size)\n if m == 0:\n del self._parent.blockwise[key]\n ret = False\n return ret, request\n\n def start_block2(self, request):\n \"\"\"\n Initialize a blockwise response. Used if payload > 1024\n\n :param request: the request message\n \"\"\"\n host, port = request.source\n key = hash(str(host) + str(port) + str(request.token))\n self._parent.blockwise[key] = (2, 0, 0, 1, 1024)\n\n def handle_response(self, key, response, resource):\n \"\"\"\n Handle Blockwise in responses.\n\n :param key: key parameter to search inside the dictionary\n :param response: the response message\n :param resource: the request message\n :return: the new response\n \"\"\"\n block, byte, num, m, size = self._parent.blockwise[key]\n payload = resource.payload\n if block == 2:\n ret = payload[byte:byte + size]\n\n if len(ret) == size:\n m = 1\n else:\n m = 0\n response.block2 = (num, m, size)\n response.payload = ret\n byte += size\n num += 1\n if m == 0:\n del self._parent.blockwise[key]\n else:\n self._parent.blockwise[key] = (2, byte, num, m, size)\n\n elif block == 1:\n if m == 1:\n response.code = defines.responses[\"CONTINUE\"]\n response.block1 = (num, m, size)\n return response\n\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class Migration(migrations.Migration):
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class Migration(migrations.Migration):
dependencies = [('driver', '0001_initial')]
operations = [migrations.RemoveField(model_name='driver', name=
'address'), migrations.AddField(model_name='driver', name='city',
field=models.CharField(default='', max_length=255),
preserve_default=False), migrations.AddField(model_name='driver',
name='image', field=models.ImageField(default='', upload_to=
'mechanic_img'), preserve_default=False), migrations.AddField(
model_name='driver', name='location', field=location_field.models.
plain.PlainLocationField(default='', max_length=63),
preserve_default=False), migrations.AlterField(model_name='driver',
name='first_name', field=models.CharField(max_length=150)),
migrations.AlterField(model_name='driver', name='last_name', field=
models.CharField(max_length=150))]
<|reserved_special_token_1|>
from django.db import migrations, models
import location_field.models.plain


class Migration(migrations.Migration):
dependencies = [('driver', '0001_initial')]
operations = [migrations.RemoveField(model_name='driver', name=
'address'), migrations.AddField(model_name='driver', name='city',
field=models.CharField(default='', max_length=255),
preserve_default=False), migrations.AddField(model_name='driver',
name='image', field=models.ImageField(default='', upload_to=
'mechanic_img'), preserve_default=False), migrations.AddField(
model_name='driver', name='location', field=location_field.models.
plain.PlainLocationField(default='', max_length=63),
preserve_default=False), migrations.AlterField(model_name='driver',
name='first_name', field=models.CharField(max_length=150)),
migrations.AlterField(model_name='driver', name='last_name', field=
models.CharField(max_length=150))]
<|reserved_special_token_1|>
# Generated by Django 3.0.7 on 2020-07-05 07:34

from django.db import migrations, models
import location_field.models.plain


class Migration(migrations.Migration):

    dependencies = [
        ('driver', '0001_initial'),
    ]

    operations = [
migrations.RemoveField(
model_name='driver',
name='address',
),
migrations.AddField(
model_name='driver',
name='city',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='driver',
name='image',
field=models.ImageField(default='', upload_to='mechanic_img'),
preserve_default=False,
),
migrations.AddField(
model_name='driver',
name='location',
field=location_field.models.plain.PlainLocationField(default='', max_length=63),
preserve_default=False,
),
migrations.AlterField(
model_name='driver',
name='first_name',
field=models.CharField(max_length=150),
),
migrations.AlterField(
model_name='driver',
name='last_name',
field=models.CharField(max_length=150),
),
]
|
flexible
|
{
"blob_id": "ea918bdf96572b38461dc1810bd0b8c16efd0f0d",
"index": 5786,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('driver', '0001_initial')]\n operations = [migrations.RemoveField(model_name='driver', name=\n 'address'), migrations.AddField(model_name='driver', name='city',\n field=models.CharField(default='', max_length=255),\n preserve_default=False), migrations.AddField(model_name='driver',\n name='image', field=models.ImageField(default='', upload_to=\n 'mechanic_img'), preserve_default=False), migrations.AddField(\n model_name='driver', name='location', field=location_field.models.\n plain.PlainLocationField(default='', max_length=63),\n preserve_default=False), migrations.AlterField(model_name='driver',\n name='first_name', field=models.CharField(max_length=150)),\n migrations.AlterField(model_name='driver', name='last_name', field=\n models.CharField(max_length=150))]\n",
"step-4": "from django.db import migrations, models\nimport location_field.models.plain\n\n\nclass Migration(migrations.Migration):\n dependencies = [('driver', '0001_initial')]\n operations = [migrations.RemoveField(model_name='driver', name=\n 'address'), migrations.AddField(model_name='driver', name='city',\n field=models.CharField(default='', max_length=255),\n preserve_default=False), migrations.AddField(model_name='driver',\n name='image', field=models.ImageField(default='', upload_to=\n 'mechanic_img'), preserve_default=False), migrations.AddField(\n model_name='driver', name='location', field=location_field.models.\n plain.PlainLocationField(default='', max_length=63),\n preserve_default=False), migrations.AlterField(model_name='driver',\n name='first_name', field=models.CharField(max_length=150)),\n migrations.AlterField(model_name='driver', name='last_name', field=\n models.CharField(max_length=150))]\n",
"step-5": "# Generated by Django 3.0.7 on 2020-07-05 07:34\n\nfrom django.db import migrations, models\nimport location_field.models.plain\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('driver', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='driver',\n name='address',\n ),\n migrations.AddField(\n model_name='driver',\n name='city',\n field=models.CharField(default='', max_length=255),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='driver',\n name='image',\n field=models.ImageField(default='', upload_to='mechanic_img'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='driver',\n name='location',\n field=location_field.models.plain.PlainLocationField(default='', max_length=63),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='driver',\n name='first_name',\n field=models.CharField(max_length=150),\n ),\n migrations.AlterField(\n model_name='driver',\n name='last_name',\n field=models.CharField(max_length=150),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def main():
    tokens = Lexer()


<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def main():
    tokens = Lexer()


if __name__ == '__main__':
    sys.path.append('Lib')
    from lexicalanalyzer import Lexer
    main(sys.argv[1], sys.argv[2])
<|reserved_special_token_1|>
'''
@mainpage Rat15S Compiler

@section intro_sec Introduction
This will become a Rat15S compiler. Currently working on Lexical Analyzer.
@author Reza Nikoopour
@author Eric Roe
'''
def main():
    tokens = Lexer()

if __name__ == '__main__':
sys.path.append('Lib')
from lexicalanalyzer import Lexer
main(sys.argv[1], sys.argv[2])
|
flexible
|
{
"blob_id": "d081abf3cd9bc323486772b4f6235fbbc9022099",
"index": 5498,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n tokens = Lexer()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n tokens = Lexer()\n\n\nif __name__ == '__main__':\n sys.path.append('Lib')\n from lexicalanalyzer import Lexer\n main(sys.argv[1], sys.argv[2])\n",
"step-4": "'''\n@mainpage Rat15S Compiler\n\n@section intro_sec Introduction\nThis will become a Rat15S compiler. Currently working on Lexical Analyzer.\n@author Reza Nikoopour\n@author Eric Roe\n'''\ndef main():\n tokens = Lexer()\n \nif __name__ == '__main__':\n sys.path.append('Lib')\n from lexicalanalyzer import Lexer\n main(sys.argv[1], sys.argv[2])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class Migration(migrations.Migration):
dependencies = [('fotbal', '0008_auto_20210601_2109')]
operations = [migrations.RemoveField(model_name='komenty', name='jmeno'
), migrations.DeleteModel(name='Komenty')]
<|reserved_special_token_1|>
from django.db import migrations


class Migration(migrations.Migration):
dependencies = [('fotbal', '0008_auto_20210601_2109')]
operations = [migrations.RemoveField(model_name='komenty', name='jmeno'
), migrations.DeleteModel(name='Komenty')]
<|reserved_special_token_1|>
# Generated by Django 2.1.5 on 2021-06-01 19:16

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('fotbal', '0008_auto_20210601_2109'),
    ]

    operations = [
migrations.RemoveField(
model_name='komenty',
name='jmeno',
),
migrations.DeleteModel(
name='Komenty',
),
]
|
flexible
|
{
"blob_id": "71ffad81bcbc480dc0a750680bc72e1d5c48556a",
"index": 3619,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('fotbal', '0008_auto_20210601_2109')]\n operations = [migrations.RemoveField(model_name='komenty', name='jmeno'\n ), migrations.DeleteModel(name='Komenty')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('fotbal', '0008_auto_20210601_2109')]\n operations = [migrations.RemoveField(model_name='komenty', name='jmeno'\n ), migrations.DeleteModel(name='Komenty')]\n",
"step-5": "# Generated by Django 2.1.5 on 2021-06-01 19:16\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('fotbal', '0008_auto_20210601_2109'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='komenty',\n name='jmeno',\n ),\n migrations.DeleteModel(\n name='Komenty',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |