code (string, lengths 13 – 6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1 – 5) |
---|---|---|---|
<|reserved_special_token_0|>
def avg_dissim_within_group_element(node, element_list):
max_diameter = -np.inf
sum_dissm = 0
for i in element_list:
sum_dissm += dissimilarity_matrix[node][i]
if dissimilarity_matrix[node][i] > max_diameter:
max_diameter = dissimilarity_matrix[node][i]
if len(element_list) > 1:
avg = sum_dissm / (len(element_list) - 1)
else:
avg = 0
return avg
def avg_dissim_across_group_element(node, main_list, splinter_list):
if len(splinter_list) == 0:
return 0
sum_dissm = 0
for j in splinter_list:
sum_dissm = sum_dissm + dissimilarity_matrix[node][j]
avg = sum_dissm / len(splinter_list)
return avg
def splinter(main_list, splinter_group):
most_dissm_object_value = -np.inf
most_dissm_object_index = None
for node in main_list:
x = avg_dissim_within_group_element(node, main_list)
y = avg_dissim_across_group_element(node, main_list, splinter_group)
diff = x - y
if diff > most_dissm_object_value:
most_dissm_object_value = diff
most_dissm_object_index = node
if most_dissm_object_value > 0:
return most_dissm_object_index, 1
else:
return -1, -1
def split(element_list):
main_list = element_list
splinter_group = []
most_dissm_object_index, flag = splinter(main_list, splinter_group)
while flag > 0:
main_list.remove(most_dissm_object_index)
splinter_group.append(most_dissm_object_index)
most_dissm_object_index, flag = splinter(element_list, splinter_group)
return main_list, splinter_group
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def avg_dissim_within_group_element(node, element_list):
max_diameter = -np.inf
sum_dissm = 0
for i in element_list:
sum_dissm += dissimilarity_matrix[node][i]
if dissimilarity_matrix[node][i] > max_diameter:
max_diameter = dissimilarity_matrix[node][i]
if len(element_list) > 1:
avg = sum_dissm / (len(element_list) - 1)
else:
avg = 0
return avg
def avg_dissim_across_group_element(node, main_list, splinter_list):
if len(splinter_list) == 0:
return 0
sum_dissm = 0
for j in splinter_list:
sum_dissm = sum_dissm + dissimilarity_matrix[node][j]
avg = sum_dissm / len(splinter_list)
return avg
def splinter(main_list, splinter_group):
most_dissm_object_value = -np.inf
most_dissm_object_index = None
for node in main_list:
x = avg_dissim_within_group_element(node, main_list)
y = avg_dissim_across_group_element(node, main_list, splinter_group)
diff = x - y
if diff > most_dissm_object_value:
most_dissm_object_value = diff
most_dissm_object_index = node
if most_dissm_object_value > 0:
return most_dissm_object_index, 1
else:
return -1, -1
def split(element_list):
main_list = element_list
splinter_group = []
most_dissm_object_index, flag = splinter(main_list, splinter_group)
while flag > 0:
main_list.remove(most_dissm_object_index)
splinter_group.append(most_dissm_object_index)
most_dissm_object_index, flag = splinter(element_list, splinter_group)
return main_list, splinter_group
def max_distance(cluster_list):
max_diameter_cluster_index = None
max_diameter_cluster_value = -np.inf
index = 0
for element_list in cluster_list:
for i in element_list:
for j in element_list:
if dissimilarity_matrix[i][j] > max_diameter_cluster_value:
max_diameter_cluster_value = dissimilarity_matrix[i][j]
max_diameter_cluster_index = index
index += 1
if max_diameter_cluster_value <= 0:
return -1
return max_diameter_cluster_index
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(dissimilarity_matrix)
def avg_dissim_within_group_element(node, element_list):
max_diameter = -np.inf
sum_dissm = 0
for i in element_list:
sum_dissm += dissimilarity_matrix[node][i]
if dissimilarity_matrix[node][i] > max_diameter:
max_diameter = dissimilarity_matrix[node][i]
if len(element_list) > 1:
avg = sum_dissm / (len(element_list) - 1)
else:
avg = 0
return avg
def avg_dissim_across_group_element(node, main_list, splinter_list):
if len(splinter_list) == 0:
return 0
sum_dissm = 0
for j in splinter_list:
sum_dissm = sum_dissm + dissimilarity_matrix[node][j]
avg = sum_dissm / len(splinter_list)
return avg
def splinter(main_list, splinter_group):
most_dissm_object_value = -np.inf
most_dissm_object_index = None
for node in main_list:
x = avg_dissim_within_group_element(node, main_list)
y = avg_dissim_across_group_element(node, main_list, splinter_group)
diff = x - y
if diff > most_dissm_object_value:
most_dissm_object_value = diff
most_dissm_object_index = node
if most_dissm_object_value > 0:
return most_dissm_object_index, 1
else:
return -1, -1
def split(element_list):
main_list = element_list
splinter_group = []
most_dissm_object_index, flag = splinter(main_list, splinter_group)
while flag > 0:
main_list.remove(most_dissm_object_index)
splinter_group.append(most_dissm_object_index)
most_dissm_object_index, flag = splinter(element_list, splinter_group)
return main_list, splinter_group
def max_distance(cluster_list):
max_diameter_cluster_index = None
max_diameter_cluster_value = -np.inf
index = 0
for element_list in cluster_list:
for i in element_list:
for j in element_list:
if dissimilarity_matrix[i][j] > max_diameter_cluster_value:
max_diameter_cluster_value = dissimilarity_matrix[i][j]
max_diameter_cluster_index = index
index += 1
if max_diameter_cluster_value <= 0:
return -1
return max_diameter_cluster_index
if __name__ == '__main__':
argv = sys.argv
    num_clusters = int(sys.argv[-1])
current_clusters = [all_elements]
print(current_clusters)
level = 1
index = 0
with tqdm(total=100) as pbar:
while index != -1 and level != num_clusters:
a_clstr, b_clstr = split(current_clusters[index])
del current_clusters[index]
current_clusters.append(a_clstr)
current_clusters.append(b_clstr)
index = max_distance(current_clusters)
level += 1
pbar.update(10)
for i in range(num_clusters):
pd.DataFrame(current_clusters[i], columns=['id']).to_csv(
'%s_cluster_%d.txt' % (sys.argv[1], i), sep='\t')
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import sys
from tqdm import tqdm
import time
from scipy.spatial.distance import pdist, squareform
data = pd.read_csv(sys.argv[1], delimiter='\t')
all_elements = [index for index in data.index]
distance_matrix = pdist(data, metric='euclidean')
dissimilarity_matrix = np.array(squareform(distance_matrix))
print(dissimilarity_matrix)
def avg_dissim_within_group_element(node, element_list):
max_diameter = -np.inf
sum_dissm = 0
for i in element_list:
sum_dissm += dissimilarity_matrix[node][i]
if dissimilarity_matrix[node][i] > max_diameter:
max_diameter = dissimilarity_matrix[node][i]
if len(element_list) > 1:
avg = sum_dissm / (len(element_list) - 1)
else:
avg = 0
return avg
def avg_dissim_across_group_element(node, main_list, splinter_list):
if len(splinter_list) == 0:
return 0
sum_dissm = 0
for j in splinter_list:
sum_dissm = sum_dissm + dissimilarity_matrix[node][j]
avg = sum_dissm / len(splinter_list)
return avg
def splinter(main_list, splinter_group):
most_dissm_object_value = -np.inf
most_dissm_object_index = None
for node in main_list:
x = avg_dissim_within_group_element(node, main_list)
y = avg_dissim_across_group_element(node, main_list, splinter_group)
diff = x - y
if diff > most_dissm_object_value:
most_dissm_object_value = diff
most_dissm_object_index = node
if most_dissm_object_value > 0:
return most_dissm_object_index, 1
else:
return -1, -1
def split(element_list):
main_list = element_list
splinter_group = []
most_dissm_object_index, flag = splinter(main_list, splinter_group)
while flag > 0:
main_list.remove(most_dissm_object_index)
splinter_group.append(most_dissm_object_index)
most_dissm_object_index, flag = splinter(element_list, splinter_group)
return main_list, splinter_group
def max_distance(cluster_list):
max_diameter_cluster_index = None
max_diameter_cluster_value = -np.inf
index = 0
for element_list in cluster_list:
for i in element_list:
for j in element_list:
if dissimilarity_matrix[i][j] > max_diameter_cluster_value:
max_diameter_cluster_value = dissimilarity_matrix[i][j]
max_diameter_cluster_index = index
index += 1
if max_diameter_cluster_value <= 0:
return -1
return max_diameter_cluster_index
if __name__ == '__main__':
argv = sys.argv
    num_clusters = int(sys.argv[-1])
current_clusters = [all_elements]
print(current_clusters)
level = 1
index = 0
with tqdm(total=100) as pbar:
while index != -1 and level != num_clusters:
a_clstr, b_clstr = split(current_clusters[index])
del current_clusters[index]
current_clusters.append(a_clstr)
current_clusters.append(b_clstr)
index = max_distance(current_clusters)
level += 1
pbar.update(10)
for i in range(num_clusters):
pd.DataFrame(current_clusters[i], columns=['id']).to_csv(
'%s_cluster_%d.txt' % (sys.argv[1], i), sep='\t')
<|reserved_special_token_1|>
#library
import pandas as pd
import numpy as np
import sys
from tqdm import tqdm # shows a progress bar for the running process.
import time
from scipy.spatial.distance import pdist, squareform
#0. Data Load
data = pd.read_csv(sys.argv[1], delimiter='\t') # Load the training data (tab-separated input text file)
#1. Data Preprocessing
all_elements = [index for index in data.index] # Save the index labels.
#Make a distance matrix to compute dissimilarity.
distance_matrix = pdist(data, metric='euclidean')
dissimilarity_matrix = np.array(squareform(distance_matrix))
#dissimilarity_matrix = pd.DataFrame(squareform(distance_matrix), columns=all_elements, index=all_elements)
print(dissimilarity_matrix)
#2. Modeling : DIANA Clustering
#2-1. Compute the average dissimilarity of a node within ONE cluster.
def avg_dissim_within_group_element(node, element_list):
    max_diameter = -np.inf
    sum_dissm = 0 # Initialize the sum to zero.
    for i in element_list:
        sum_dissm += dissimilarity_matrix[node][i] # Accumulate the node's dissimilarity to every element in the list.
        if( dissimilarity_matrix[node][i] > max_diameter): # If this value is larger than the current maximum,
            max_diameter = dissimilarity_matrix[node][i] # it becomes the new max_diameter.
    if(len(element_list)>1):
        avg = sum_dissm/(len(element_list)-1) # Average dissimilarity (the node's own zero distance is excluded).
    else:
        avg = 0
    return avg
# 2-2. Compute the average dissimilarity between different groups (e.g. Cluster1 and Cluster2).
# splinter_list holds the ids in the separated new group.
def avg_dissim_across_group_element(node, main_list, splinter_list):
    if len(splinter_list) == 0: # If there is no splinter group yet, return zero.
        return 0
    sum_dissm = 0
    for j in splinter_list:
        sum_dissm = sum_dissm + dissimilarity_matrix[node][j] # Sum the node's dissimilarity to every object
    avg = sum_dissm/(len(splinter_list)) # in the splinter group, then take the average.
    return avg
# 2-3. Cluster Splinter
def splinter(main_list, splinter_group):
    most_dissm_object_value = -np.inf # Initialize with negative infinity.
    most_dissm_object_index = None
    for node in main_list:
        x = avg_dissim_within_group_element(node, main_list) # Average dissimilarity to the rest of the main group.
        y = avg_dissim_across_group_element(node, main_list, splinter_group) # Average dissimilarity to the splinter group.
        diff = x - y # Difference between the two averages.
        if diff > most_dissm_object_value:
            most_dissm_object_value = diff
            most_dissm_object_index = node # Keep the index and value of the most dissimilar object so far.
    if(most_dissm_object_value>0): # If the difference is positive, move the object to a splinter group (flag = 1).
        return (most_dissm_object_index, 1)
    else: # If the difference is non-positive, stop splitting (flag = -1).
        return (-1, -1)
# 2-4. Split
def split(element_list):
main_list = element_list
splinter_group = []
(most_dissm_object_index, flag) = splinter(main_list, splinter_group)
    while(flag > 0): # Iterate the splinter step until the flag becomes negative.
        main_list.remove(most_dissm_object_index) # Remove the object with the largest dissimilarity difference from the main list.
        splinter_group.append(most_dissm_object_index) # Then append it to the new splinter group.
        (most_dissm_object_index, flag) = splinter(element_list, splinter_group)
    return (main_list, splinter_group)
# 2-5. look for maximum distance in the current cluster.
def max_distance(cluster_list):
max_diameter_cluster_index = None
max_diameter_cluster_value = -np.inf
index = 0
for element_list in cluster_list:
for i in element_list: #columns
for j in element_list: #rows
                #Track the cluster (index) with the largest pairwise dissimilarity value.
if dissimilarity_matrix[i][j] > max_diameter_cluster_value:
max_diameter_cluster_value = dissimilarity_matrix[i][j]
max_diameter_cluster_index = index
index +=1
if(max_diameter_cluster_value <= 0):
return -1
return max_diameter_cluster_index
# main
if __name__ == '__main__':
# Save arguments list
argv = sys.argv
    # Set the number of clusters (taken from the last command-line argument).
    num_clusters = int(sys.argv[-1])
current_clusters = ([all_elements])
print(current_clusters)
level = 1
index = 0
with tqdm(total=100) as pbar:
        while((index!=-1) and (level!=num_clusters)): # Proceed until no cluster can be split (index == -1) or the requested number of clusters is reached.
(a_clstr, b_clstr) = split(current_clusters[index])
del current_clusters[index] # Delete current cluster.
current_clusters.append(a_clstr) #original cluster
current_clusters.append(b_clstr) #splinter cluster
index = max_distance(current_clusters)
level +=1
pbar.update(10)
for i in range(num_clusters): # Save the results.
pd.DataFrame(current_clusters[i], columns=['id']).to_csv("%s_cluster_%d.txt" %(sys.argv[1], i), sep='\t')
| flexible |
{
"blob_id": "267695555e876dc2fe5820dc194490aad9e5e344",
"index": 1361,
"step-1": "<mask token>\n\n\ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0\n for i in element_list:\n sum_dissm += dissimilarity_matrix[node][i]\n if dissimilarity_matrix[node][i] > max_diameter:\n max_diameter = dissimilarity_matrix[node][i]\n if len(element_list) > 1:\n avg = sum_dissm / (len(element_list) - 1)\n else:\n avg = 0\n return avg\n\n\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0:\n return 0\n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j]\n avg = sum_dissm / len(splinter_list)\n return avg\n\n\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list)\n y = avg_dissim_across_group_element(node, main_list, splinter_group)\n diff = x - y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node\n if most_dissm_object_value > 0:\n return most_dissm_object_index, 1\n else:\n return -1, -1\n\n\ndef split(element_list):\n main_list = element_list\n splinter_group = []\n most_dissm_object_index, flag = splinter(main_list, splinter_group)\n while flag > 0:\n main_list.remove(most_dissm_object_index)\n splinter_group.append(most_dissm_object_index)\n most_dissm_object_index, flag = splinter(element_list, splinter_group)\n return main_list, splinter_group\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0\n for i in element_list:\n sum_dissm += dissimilarity_matrix[node][i]\n if dissimilarity_matrix[node][i] > max_diameter:\n max_diameter = dissimilarity_matrix[node][i]\n if len(element_list) > 1:\n avg = sum_dissm / (len(element_list) - 1)\n else:\n avg = 0\n return avg\n\n\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0:\n return 0\n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j]\n avg = sum_dissm / len(splinter_list)\n return avg\n\n\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list)\n y = avg_dissim_across_group_element(node, main_list, splinter_group)\n diff = x - y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node\n if most_dissm_object_value > 0:\n return most_dissm_object_index, 1\n else:\n return -1, -1\n\n\ndef split(element_list):\n main_list = element_list\n splinter_group = []\n most_dissm_object_index, flag = splinter(main_list, splinter_group)\n while flag > 0:\n main_list.remove(most_dissm_object_index)\n splinter_group.append(most_dissm_object_index)\n most_dissm_object_index, flag = splinter(element_list, splinter_group)\n return main_list, splinter_group\n\n\ndef max_distance(cluster_list):\n max_diameter_cluster_index = None\n max_diameter_cluster_value = -np.inf\n index = 0\n for element_list in cluster_list:\n for i in element_list:\n for j in element_list:\n if dissimilarity_matrix[i][j] > max_diameter_cluster_value:\n max_diameter_cluster_value = dissimilarity_matrix[i][j]\n max_diameter_cluster_index = index\n index += 1\n if max_diameter_cluster_value <= 0:\n return -1\n return max_diameter_cluster_index\n\n\n<mask token>\n",
"step-3": "<mask token>\nprint(dissimilarity_matrix)\n\n\ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0\n for i in element_list:\n sum_dissm += dissimilarity_matrix[node][i]\n if dissimilarity_matrix[node][i] > max_diameter:\n max_diameter = dissimilarity_matrix[node][i]\n if len(element_list) > 1:\n avg = sum_dissm / (len(element_list) - 1)\n else:\n avg = 0\n return avg\n\n\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0:\n return 0\n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j]\n avg = sum_dissm / len(splinter_list)\n return avg\n\n\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list)\n y = avg_dissim_across_group_element(node, main_list, splinter_group)\n diff = x - y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node\n if most_dissm_object_value > 0:\n return most_dissm_object_index, 1\n else:\n return -1, -1\n\n\ndef split(element_list):\n main_list = element_list\n splinter_group = []\n most_dissm_object_index, flag = splinter(main_list, splinter_group)\n while flag > 0:\n main_list.remove(most_dissm_object_index)\n splinter_group.append(most_dissm_object_index)\n most_dissm_object_index, flag = splinter(element_list, splinter_group)\n return main_list, splinter_group\n\n\ndef max_distance(cluster_list):\n max_diameter_cluster_index = None\n max_diameter_cluster_value = -np.inf\n index = 0\n for element_list in cluster_list:\n for i in element_list:\n for j in element_list:\n if dissimilarity_matrix[i][j] > max_diameter_cluster_value:\n max_diameter_cluster_value = dissimilarity_matrix[i][j]\n max_diameter_cluster_index = index\n index += 1\n if max_diameter_cluster_value <= 0:\n return -1\n return max_diameter_cluster_index\n\n\nif __name__ == '__main__':\n argv = sys.argv\n num_clusters = sys.argv[-1]\n current_clusters = [all_elements]\n print(current_clusters)\n level = 1\n index = 0\n with tqdm(total=100) as pbar:\n while index != -1 and level != num_clusters:\n a_clstr, b_clstr = split(current_clusters[index])\n del current_clusters[index]\n current_clusters.append(a_clstr)\n current_clusters.append(b_clstr)\n index = max_distance(current_clusters)\n level += 1\n pbar.update(10)\n for i in range(num_clusters):\n pd.DataFrame(current_clusters[i], columns=['id']).to_csv(\n '%s_cluster_%d.txt' % (sys.argv[1], i), sep='\\t')\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport sys\nfrom tqdm import tqdm\nimport time\nfrom scipy.spatial.distance import pdist, squareform\ndata = pd.read_csv(sys.argv[1], delimiter='\\t')\nall_elements = [index for index in data.index]\ndistance_matrix = pdist(data, metric='euclidean')\ndissimilarity_matrix = np.array(squareform(distance_matrix))\nprint(dissimilarity_matrix)\n\n\ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0\n for i in element_list:\n sum_dissm += dissimilarity_matrix[node][i]\n if dissimilarity_matrix[node][i] > max_diameter:\n max_diameter = dissimilarity_matrix[node][i]\n if len(element_list) > 1:\n avg = sum_dissm / (len(element_list) - 1)\n else:\n avg = 0\n return avg\n\n\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0:\n return 0\n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j]\n avg = sum_dissm / len(splinter_list)\n return avg\n\n\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list)\n y = avg_dissim_across_group_element(node, main_list, splinter_group)\n diff = x - y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node\n if most_dissm_object_value > 0:\n return most_dissm_object_index, 1\n else:\n return -1, -1\n\n\ndef split(element_list):\n main_list = element_list\n splinter_group = []\n most_dissm_object_index, flag = splinter(main_list, splinter_group)\n while flag > 0:\n main_list.remove(most_dissm_object_index)\n splinter_group.append(most_dissm_object_index)\n most_dissm_object_index, flag = splinter(element_list, splinter_group)\n return main_list, splinter_group\n\n\ndef max_distance(cluster_list):\n max_diameter_cluster_index = None\n max_diameter_cluster_value = -np.inf\n index = 0\n for element_list in cluster_list:\n for i in element_list:\n for j in element_list:\n if dissimilarity_matrix[i][j] > max_diameter_cluster_value:\n max_diameter_cluster_value = dissimilarity_matrix[i][j]\n max_diameter_cluster_index = index\n index += 1\n if max_diameter_cluster_value <= 0:\n return -1\n return max_diameter_cluster_index\n\n\nif __name__ == '__main__':\n argv = sys.argv\n num_clusters = sys.argv[-1]\n current_clusters = [all_elements]\n print(current_clusters)\n level = 1\n index = 0\n with tqdm(total=100) as pbar:\n while index != -1 and level != num_clusters:\n a_clstr, b_clstr = split(current_clusters[index])\n del current_clusters[index]\n current_clusters.append(a_clstr)\n current_clusters.append(b_clstr)\n index = max_distance(current_clusters)\n level += 1\n pbar.update(10)\n for i in range(num_clusters):\n pd.DataFrame(current_clusters[i], columns=['id']).to_csv(\n '%s_cluster_%d.txt' % (sys.argv[1], i), sep='\\t')\n",
"step-5": "#library\nimport pandas as pd\nimport numpy as np\nimport sys\n\nfrom tqdm import tqdm # appear the precess of running situation.\nimport time\n\nfrom scipy.spatial.distance import pdist, squareform\n\n#0. Data Load\ndata = pd.read_csv(sys.argv[1], delimiter='\\t') # Load train (input text file)\n\n#1. Data Preprocessing\nall_elements = [index for index in data.index] # Save index name.\n\n#Make a distance metrix to compute dissimilarity.\ndistance_matrix = pdist(data, metric='euclidean')\ndissimilarity_matrix = np.array(squareform(distance_matrix))\n#dissimilarity_matrix = pd.DataFrame(squareform(distance_matrix), columns=all_elements, index=all_elements)\nprint(dissimilarity_matrix)\n\n#2. Modeling : DIANA Clustering\n#2-1. Compute dissimilarity average in ONE Cluster. \ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0 #Set Sum equal zero.\n for i in element_list: \n sum_dissm += dissimilarity_matrix[node][i] #While iterate element_list, Sum the distance matrix value singly in a node.\n if( dissimilarity_matrix[node][i] > max_diameter): #If distance matrix is bigger than max_distance,\n max_diameter = dissimilarity_matrix[node][i] # that distance matrix value become a max_diameter.\n if(len(element_list)>1):\n avg = sum_dissm/(len(element_list)-1) # Average of distance matrix.\n else: \n avg = 0\n return avg\n\n# 2-2. Compute dissimilarity average between different Group(e.g. Cluster1 and Cluster2) \n# id in sperated new group = splinter_list\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0: #there is no spliter group, return zero.\n return 0 \n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j] #Compute average between Object in splinter group \n avg = sum_dissm/(len(splinter_list)) #and all object dissimilarity matrix.\n return avg\n\n# 2-3. Cluster Splinter\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf #initate minus.\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list) # Previously, a point in main group as a standard.\n y = avg_dissim_across_group_element(node, main_list, splinter_group) # a point in the seperated group.\n diff = x - y # difference between X and Y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node # save index and value which has largest value between two groups.\n if(most_dissm_object_value>0): # differnce is Plus, Create new splinter group. flag = 1\n return (most_dissm_object_index, 1)\n else: # difference is minus, flag = -1\n return (-1, -1)\n\n# 2-4. Split\ndef split(element_list):\n main_list = element_list\n splinter_group = [] \n (most_dissm_object_index, flag) = splinter(main_list, splinter_group)\n while(flag > 0): # Iterate splinter function until a flag become minus.\n main_list.remove(most_dissm_object_index) #Delete the most largest dissimilarity average object index in the main list.\n splinter_group.append(most_dissm_object_index) # Then, append in the new splinter group.\n (most_dissm_object_index, flag) = splinter(element_list, splinter_group)\n \n return (main_list, splinter_group)\n\n# 2-5. 
look for maximum distance in the current cluster.\ndef max_distance(cluster_list):\n max_diameter_cluster_index = None\n max_diameter_cluster_value = -np.inf\n index = 0\n for element_list in cluster_list:\n for i in element_list: #columns\n for j in element_list: #rows\n #Switch the largest dissimilarity average object(index), value. \n if dissimilarity_matrix[i][j] > max_diameter_cluster_value: \n max_diameter_cluster_value = dissimilarity_matrix[i][j]\n max_diameter_cluster_index = index\n \n index +=1\n \n if(max_diameter_cluster_value <= 0):\n return -1\n \n return max_diameter_cluster_index\n\n# main\nif __name__ == '__main__':\n\n # Save arguments list\n argv = sys.argv \n\n # Set the number of cluster.\n num_clusters = sys.argv[-1]\n current_clusters = ([all_elements])\n print(current_clusters)\n level = 1\n index = 0\n\n with tqdm(total=100) as pbar:\n while((index!=-1) and (level!=num_clusters)): #Proceed until the index equal -1 and setting number of cluster.\n (a_clstr, b_clstr) = split(current_clusters[index])\n del current_clusters[index] # Delete current cluster.\n current_clusters.append(a_clstr) #original cluster\n current_clusters.append(b_clstr) #splinter cluster\n index = max_distance(current_clusters)\n level +=1\n pbar.update(10)\n\n for i in range(num_clusters): # Save the results.\n pd.DataFrame(current_clusters[i], columns=['id']).to_csv(\"%s_cluster_%d.txt\" %(sys.argv[1], i), sep='\\t') \n",
"step-ids": [
4,
5,
6,
8,
9
]
}
| [4, 5, 6, 8, 9] |
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for authenticator."""
from absl.testing import absltest
from model import authenticator
class AuthenticatorTest(absltest.TestCase):
"""Tests GetGoogleSheetsService method in authenticator module."""
def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(
self,
):
bad_file_path = './credential.json'
with self.assertRaises(FileNotFoundError):
authenticator.GetGoogleSheetsServiceByCredential(
gcp_credential_path=bad_file_path
)
if __name__ == '__main__':
absltest.main()
| normal |
{
"blob_id": "86b24ddaae0d3477a3f82295224b7e84805eed91",
"index": 1413,
"step-1": "<mask token>\n\n\nclass AuthenticatorTest(absltest.TestCase):\n <mask token>\n\n def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(\n self):\n bad_file_path = './credential.json'\n with self.assertRaises(FileNotFoundError):\n authenticator.GetGoogleSheetsServiceByCredential(\n gcp_credential_path=bad_file_path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AuthenticatorTest(absltest.TestCase):\n \"\"\"Tests GetGoogleSheetsService method in authenticator module.\"\"\"\n\n def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(\n self):\n bad_file_path = './credential.json'\n with self.assertRaises(FileNotFoundError):\n authenticator.GetGoogleSheetsServiceByCredential(\n gcp_credential_path=bad_file_path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AuthenticatorTest(absltest.TestCase):\n \"\"\"Tests GetGoogleSheetsService method in authenticator module.\"\"\"\n\n def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(\n self):\n bad_file_path = './credential.json'\n with self.assertRaises(FileNotFoundError):\n authenticator.GetGoogleSheetsServiceByCredential(\n gcp_credential_path=bad_file_path)\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"step-4": "<mask token>\nfrom absl.testing import absltest\nfrom model import authenticator\n\n\nclass AuthenticatorTest(absltest.TestCase):\n \"\"\"Tests GetGoogleSheetsService method in authenticator module.\"\"\"\n\n def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(\n self):\n bad_file_path = './credential.json'\n with self.assertRaises(FileNotFoundError):\n authenticator.GetGoogleSheetsServiceByCredential(\n gcp_credential_path=bad_file_path)\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"step-5": "# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the License);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an AS IS BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for authenticator.\"\"\"\n\nfrom absl.testing import absltest\n\nfrom model import authenticator\n\n\nclass AuthenticatorTest(absltest.TestCase):\n \"\"\"Tests GetGoogleSheetsService method in authenticator module.\"\"\"\n\n def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(\n self,\n ):\n bad_file_path = './credential.json'\n\n with self.assertRaises(FileNotFoundError):\n authenticator.GetGoogleSheetsServiceByCredential(\n gcp_credential_path=bad_file_path\n )\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
| [2, 3, 4, 5, 6] |
'''
THROW with or without parameters
Which of the following is true about the THROW statement?
Answer the question
50XP
Possible Answers
- The THROW statement without parameters should be placed within a CATCH block.
- The THROW statement with parameters can only be placed within a CATCH block.
- The THROW statement without parameters can't re-throw an original error.
Answer : The THROW statement without parameters should be placed within a CATCH block.
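For illustration, a minimal hypothetical T-SQL sketch (the table and INSERT are made up here, not part of the exercise):

BEGIN TRY
    INSERT INTO dbo.Orders (OrderId) VALUES (1);   -- may fail, e.g. on a constraint violation
END TRY
BEGIN CATCH
    -- logging or cleanup could go here
    THROW;   -- parameterless THROW is only valid inside a CATCH block and re-throws the original error
END CATCH;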
'''
| normal |
{
"blob_id": "75023c7600fcceda0dc225992e7c433291b1a190",
"index": 7254,
"step-1": "<mask token>\n",
"step-2": "'''\nTHROW with or without parameters\n\n\nWhich of the following is true about the THROW statement?\n\nAnswer the question\n50XP\n\nPossible Answers\n\n - The THROW statement without parameters should be placed within a CATCH block.\n\n - The THROW statement with parameters can only be placed within a CATCH block.\n\n - The THROW statement without parameters can't re-throw an original error.\n\nAnswer : The THROW statement without parameters should be placed within a CATCH block.\n\n'''\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
| [0, 1] |
from django.db import models
class Author(models.Model):
author = models.CharField(
"Author",
max_length=30,
blank=False,
null=False
)
biography = models.TextField(
"About author",
max_length=500,
blank=True,
null=True
)
def __str__(self):
return self.author
class Series(models.Model):
title = models.CharField(
"Title of series",
max_length=100,
blank=False,
null=False
)
description = models.TextField(
"About this series",
max_length=500,
blank=True,
null=True
)
def __str__(self):
return self.title
class Genre(models.Model):
genre = models.CharField(
"Genre",
max_length=50,
blank=False,
null=False
)
description = models.TextField(
"About this genre",
max_length=500,
blank=True,
null=True
)
def __str__(self):
return self.genre
class PublishingHouse(models.Model):
house = models.CharField(
"Publishing House",
max_length=40,
blank=False,
null=False
)
history = models.TextField(
"Other books of this house",
max_length=500,
blank=True,
null=True
)
def __str__(self):
return self.house
| normal |
{
"blob_id": "b34ad8d7fc8df0ab86c5930ab2b5aa1f86d13ae3",
"index": 7580,
"step-1": "<mask token>\n\n\nclass Series(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Genre(models.Model):\n genre = models.CharField('Genre', max_length=50, blank=False, null=False)\n description = models.TextField('About this genre', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.genre\n\n\nclass PublishingHouse(models.Model):\n house = models.CharField('Publishing House', max_length=40, blank=False,\n null=False)\n history = models.TextField('Other books of this house', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.house\n",
"step-2": "<mask token>\n\n\nclass Series(models.Model):\n title = models.CharField('Title of series', max_length=100, blank=False,\n null=False)\n description = models.TextField('About this series', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.title\n\n\nclass Genre(models.Model):\n genre = models.CharField('Genre', max_length=50, blank=False, null=False)\n description = models.TextField('About this genre', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.genre\n\n\nclass PublishingHouse(models.Model):\n house = models.CharField('Publishing House', max_length=40, blank=False,\n null=False)\n history = models.TextField('Other books of this house', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.house\n",
"step-3": "<mask token>\n\n\nclass Author(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.author\n\n\nclass Series(models.Model):\n title = models.CharField('Title of series', max_length=100, blank=False,\n null=False)\n description = models.TextField('About this series', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.title\n\n\nclass Genre(models.Model):\n genre = models.CharField('Genre', max_length=50, blank=False, null=False)\n description = models.TextField('About this genre', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.genre\n\n\nclass PublishingHouse(models.Model):\n house = models.CharField('Publishing House', max_length=40, blank=False,\n null=False)\n history = models.TextField('Other books of this house', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.house\n",
"step-4": "<mask token>\n\n\nclass Author(models.Model):\n author = models.CharField('Author', max_length=30, blank=False, null=False)\n biography = models.TextField('About author', max_length=500, blank=True,\n null=True)\n\n def __str__(self):\n return self.author\n\n\nclass Series(models.Model):\n title = models.CharField('Title of series', max_length=100, blank=False,\n null=False)\n description = models.TextField('About this series', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.title\n\n\nclass Genre(models.Model):\n genre = models.CharField('Genre', max_length=50, blank=False, null=False)\n description = models.TextField('About this genre', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.genre\n\n\nclass PublishingHouse(models.Model):\n house = models.CharField('Publishing House', max_length=40, blank=False,\n null=False)\n history = models.TextField('Other books of this house', max_length=500,\n blank=True, null=True)\n\n def __str__(self):\n return self.house\n",
"step-5": "from django.db import models\n\n\nclass Author(models.Model):\n author = models.CharField(\n \"Author\",\n max_length=30,\n blank=False,\n null=False\n )\n\n biography = models.TextField(\n \"About author\",\n max_length=500,\n blank=True,\n null=True\n )\n\n def __str__(self):\n return self.author\n\n\nclass Series(models.Model):\n title = models.CharField(\n \"Title of series\",\n max_length=100,\n blank=False,\n null=False\n )\n\n description = models.TextField(\n \"About this series\",\n max_length=500,\n blank=True,\n null=True\n )\n\n def __str__(self):\n return self.title\n\n\nclass Genre(models.Model):\n genre = models.CharField(\n \"Genre\",\n max_length=50,\n blank=False,\n null=False\n )\n\n description = models.TextField(\n \"About this genre\",\n max_length=500,\n blank=True,\n null=True\n )\n\n def __str__(self):\n return self.genre\n\n\nclass PublishingHouse(models.Model):\n house = models.CharField(\n \"Publishing House\",\n max_length=40,\n blank=False,\n null=False\n\n )\n\n history = models.TextField(\n \"Other books of this house\",\n max_length=500,\n blank=True,\n null=True\n )\n\n def __str__(self):\n return self.house\n",
"step-ids": [
7,
9,
11,
12,
14
]
}
| [7, 9, 11, 12, 14] |
from flask import Flask, render_template , request
import joblib
# importing all the important libraries
import numpy as np
import pandas as pd
import nltk
import string
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
# download the WordNet corpus used by the lemmatizer
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
lemma = WordNetLemmatizer()
# initialise the app
app = Flask(__name__)
#load the model
tfidf = joblib.load('tfidf_vector_model.pkl')
model = joblib.load('netflix_75.pkl')
@app.route('/')
def hello():
return render_template('form.html')
@app.route('/submit' , methods = ["POST"])
def form_data():
user_data = request.form.get('user_data')
user_data1 = [user_data]
vector = tfidf.transform(user_data1)
my_pred = model.predict(vector)
if my_pred[0] == 1:
        out = 'positive review'
else:
out = 'negative review'
return render_template('predict.html' , data = f' {out}')
if __name__ == '__main__':
app.run(debug = True)
| normal |
{
"blob_id": "df92166378c8a8cc0ba02d0ba33d75bbd94510a7",
"index": 4754,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef hello():\n return render_template('form.html')\n\n\[email protected]('/submit', methods=['POST'])\ndef form_data():\n user_data = request.form.get('user_data')\n user_data1 = [user_data]\n vector = tfidf.transform(user_data1)\n my_pred = model.predict(vector)\n if my_pred[0] == 1:\n out = 'positve review'\n else:\n out = 'negative review'\n return render_template('predict.html', data=f' {out}')\n\n\n<mask token>\n",
"step-2": "<mask token>\nnltk.download('wordnet')\n<mask token>\n\n\[email protected]('/')\ndef hello():\n return render_template('form.html')\n\n\[email protected]('/submit', methods=['POST'])\ndef form_data():\n user_data = request.form.get('user_data')\n user_data1 = [user_data]\n vector = tfidf.transform(user_data1)\n my_pred = model.predict(vector)\n if my_pred[0] == 1:\n out = 'positve review'\n else:\n out = 'negative review'\n return render_template('predict.html', data=f' {out}')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\nnltk.download('wordnet')\n<mask token>\nlemma = WordNetLemmatizer()\napp = Flask(__name__)\ntfidf = joblib.load('tfidf_vector_model.pkl')\nmodel = joblib.load('netflix_75.pkl')\n\n\[email protected]('/')\ndef hello():\n return render_template('form.html')\n\n\[email protected]('/submit', methods=['POST'])\ndef form_data():\n user_data = request.form.get('user_data')\n user_data1 = [user_data]\n vector = tfidf.transform(user_data1)\n my_pred = model.predict(vector)\n if my_pred[0] == 1:\n out = 'positve review'\n else:\n out = 'negative review'\n return render_template('predict.html', data=f' {out}')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask, render_template, request\nimport joblib\nimport numpy as np\nimport pandas as pd\nimport nltk\nimport string\nfrom nltk.corpus import stopwords\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nnltk.download('wordnet')\nfrom nltk.stem import WordNetLemmatizer\nlemma = WordNetLemmatizer()\napp = Flask(__name__)\ntfidf = joblib.load('tfidf_vector_model.pkl')\nmodel = joblib.load('netflix_75.pkl')\n\n\[email protected]('/')\ndef hello():\n return render_template('form.html')\n\n\[email protected]('/submit', methods=['POST'])\ndef form_data():\n user_data = request.form.get('user_data')\n user_data1 = [user_data]\n vector = tfidf.transform(user_data1)\n my_pred = model.predict(vector)\n if my_pred[0] == 1:\n out = 'positve review'\n else:\n out = 'negative review'\n return render_template('predict.html', data=f' {out}')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask, render_template , request\r\nimport joblib\r\n\r\n\r\n# importing all the important libraires\r\nimport numpy as np\r\nimport pandas as pd\r\nimport nltk\r\nimport string\r\nfrom nltk.corpus import stopwords\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\r\n# download the model\r\nnltk.download('wordnet')\r\nfrom nltk.stem import WordNetLemmatizer\r\n\r\nlemma = WordNetLemmatizer()\r\n\r\n# initialse the app\r\napp = Flask(__name__)\r\n\r\n#load the model\r\ntfidf = joblib.load('tfidf_vector_model.pkl')\r\nmodel = joblib.load('netflix_75.pkl')\r\n\r\[email protected]('/')\r\ndef hello():\r\n return render_template('form.html')\r\n\r\[email protected]('/submit' , methods = [\"POST\"])\r\ndef form_data():\r\n user_data = request.form.get('user_data')\r\n user_data1 = [user_data]\r\n vector = tfidf.transform(user_data1)\r\n my_pred = model.predict(vector)\r\n\r\n if my_pred[0] == 1:\r\n out = 'positve review'\r\n else:\r\n out = 'negative review'\r\n \r\n \r\n\r\n \r\n\r\n return render_template('predict.html' , data = f' {out}')\r\n\r\nif __name__ == '__main__':\r\n app.run(debug = True)\r\n\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
| [2, 3, 4, 5, 6] |
'''
def Sort(a):
i=1
while i<len(a):
j=i
while j>0 and a[j-1] > a[j]:
temp = a[j-1]
a[j-1] = a[j]
a[j] = temp
j-=1
i+=1
return a
'''
def Sort(a):
    # Insertion sort that prints the array after every shift.
    i = 1
    while i < len(a):
        key = a[i]
        j = i - 1
        # Shift elements larger than the key one position to the right.
        while j >= 0 and a[j] > key:
            a[j+1] = a[j]
            j -= 1
            print(' '.join(list(map(str, a))))
        a[j+1] = key
        print('Key : ', key, ' inserted at: ', j+1, '\t in ', a)
        i += 1
    return a
| normal |
{
"blob_id": "3f8b8b8cfbe712f09734d0fb7302073187d65a73",
"index": 982,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Sort(a):\n i = 1\n n = len(a)\n while i < len(a):\n j = i\n print(i - 1, '\\t', i)\n while a[j - 1] > a[j] and j >= 0:\n j -= 1\n print('Key : ', a[i], ' inserting at: ', j, '\\t in ', a)\n if n > 2:\n j1 = n - 2\n temp = arr[n - 1]\n while arr[j1] > temp and j1 >= 0:\n arr[j1 + 1] = arr[j1]\n j1 -= 1\n print(' '.join(list(map(str, arr))))\n arr[j1 + 1] = temp\n print(' '.join(list(map(str, arr))))\n elif n == 1:\n return arr\n else:\n temp = arr[1]\n arr[1] = arr[0]\n print(' '.join(list(map(str, arr))))\n arr[0] = temp\n print(' '.join(list(map(str, arr))))\n i += 1\n return a\n",
"step-3": "'''\ndef Sort(a):\n i=1\n while i<len(a):\n j=i\n while j>0 and a[j-1] > a[j]:\n temp = a[j-1]\n a[j-1] = a[j]\n a[j] = temp\n j-=1\n i+=1\n return a\n'''\ndef Sort(a):\n i=1\n n=len(a)\n while i<len(a):\n j=i\n print(i-1,'\\t',i)\n while a[j-1]>a[j] and j>=0:\n j-=1\n print('Key : ',a[i],' inserting at: ',j, '\\t in ',a)\n if n>2:\n j1=n-2\n temp = arr[n-1]\n while arr[j1] > temp and j1>=0:\n arr[j1+1] = arr[j1]\n j1-=1\n print(' '.join(list(map(str, arr))))\n arr[j1+1] = temp\n print(' '.join(list(map(str, arr))))\n elif n==1: \n return arr\n else: # len(arr) =2\n temp = arr[1]\n arr[1]=arr[0]\n print(' '.join(list(map(str, arr))))\n arr[0] = temp \n print(' '.join(list(map(str, arr))))\n i+=1\n return a\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
| [0, 1, 2] |
import numpy as np, argparse, sys, itertools, os, errno, warnings
from mpi4py import MPI
from enlib import enmap as en, powspec, utils
from enlib.degrees_of_freedom import DOF, Arg
from enlib.cg import CG
warnings.filterwarnings("ignore")
#from matplotlib.pylab import *
parser = argparse.ArgumentParser()
parser.add_argument("freqs")
parser.add_argument("maps")
parser.add_argument("noise")
parser.add_argument("powspec")
parser.add_argument("posfile")
parser.add_argument("odir")
parser.add_argument("-R", "--radius", type=float, default=30)
parser.add_argument("--burnin", type=int, default=10)
parser.add_argument("-n", "--nsamp", type=int, default=50)
parser.add_argument("--dump", type=int, default=0)
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-i", type=int, default=0)
parser.add_argument("--nmax", type=int, default=0)
parser.add_argument("--mindist-group", type=float, default=10)
parser.add_argument("-c", "--cont", action="store_true")
args = parser.parse_args()
comm = MPI.COMM_WORLD
myid = comm.rank
nproc= comm.size
r2c = 180/np.pi
r2b = r2c*60*(8*np.log(2))**0.5
def read_maps(fmt, n, ntot=4):
try:
maps = en.read_map(fmt)
if maps.ndim == ntot-1: maps = en.enmap([maps]*n,maps.wcs)
if maps.ndim != ntot: raise ValueError("Map %s must have %d dimensions" % (fmt,ntot))
return maps
except (IOError, OSError):
maps = [en.read_map(fmt % i) for i in range(n)]
maps = en.ndmap(maps, maps[0].wcs)
if maps.ndim != ntot: maps = maps.reshape(maps.shape[:-2]+(1,)*(maps.ndim-ntot)+maps.shape[-2:])
return maps
def flat_noise(shape, wcs, sigmas):
res = en.zeros([len(sigmas),shape[-3],shape[-3],shape[-2],shape[-1]], wcs)
for i,s in enumerate(sigmas):
res[i] = (np.eye(shape[-3])*s**2)[:,:,None,None]
return res
def read_noise(info, shape, wcs, n):
try:
nmat = flat_noise(shape, wcs, parse_floats(info))
except ValueError:
nmat = read_maps(info, n, 5)
if len(nmat) != n: raise ValueError("Number of noise maps (%d) != number of signal maps (%d)!" % (len(nmat), n))
if np.any(nmat.shape[-2:] != shape[-2:]): raise ValueError("Noise and maps have inconsistent shape!")
return nmat
def parse_floats(strs): return np.array([float(w) for w in strs.split(",")])
def apodize(m, rad, apod_fun):
scale = m.extent()/m.shape[-2:]
y = np.arange(m.shape[-2])*scale[0]
x = np.arange(m.shape[-1])*scale[1]
yfun = apod_fun(y, rad)*apod_fun(y[-1]-y, rad)
xfun = apod_fun(x, rad)*apod_fun(x[-1]-x, rad)
a = yfun[:,None]*xfun[None,:]
return m*a
def apod_step(x, r): return x>r
def apod_butter(x, r): return (1+(x/r)**-4)**-1
def apod_cos(x,r): return (1-np.cos(np.minimum(1,x/r)*np.pi))/2
# Read our inputs
freqs = parse_floats(args.freqs)
maps = read_maps(args.maps, len(freqs))
ncomp = maps.shape[-3]
nfreq = maps.shape[-4]
noise = read_noise(args.noise, maps.shape, maps.wcs, len(freqs))
ps = powspec.read_spectrum(args.powspec, expand="diag")[:ncomp,:ncomp]
poss = np.loadtxt(args.posfile)[:,:2]/r2c
R = args.radius/r2c/60
beam_fiducial = 1.5/r2b
beam_range = [0.8/r2b,3.0/r2b]
beam_max_asym = 2
apod_rad = R/10
# We will cut out small mini-maps around each source candidate and
# sample the CMB and source parameters jointly. But some candidates
# are so near each other that they aren't independent. These must
# be grouped into groups.
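# (Concretely, two candidates end up in the same group when they are connected by
# a chain of pairs closer than --mindist-group arcminutes, i.e. single-linkage
# grouping, which is what build_groups below implements.)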
def build_groups(poss):
def dist(a,b): return np.sum((poss[a]-poss[b])**2)**0.5*180*60/np.pi
rest = set(range(len(poss)))
groups = []
while len(rest) > 0:
group = []
tocheck = [rest.pop()]
# Find distance to all other points
while len(tocheck) > 0:
current = tocheck.pop()
rnew = set()
while rest:
other = rest.pop()
if dist(current,other) < args.mindist_group:
tocheck.append(other)
else:
rnew.add(other)
rest = rnew
group.append(current)
groups.append(group)
return groups
groups = build_groups(poss)
print "Found %d groups" % len(groups)
# We will sample (cmb,A,pos,ibeam) jointly in gibbs fashion:
# cmb,A <- P(cmb,A|data,pos,ibeam) # direct, but requires a constrained-realization (CR) solve
# pos,ibeam <- P(pos,ibeam|data,cmb,A) # MCMC
# To take into account the nonperiodicity of each submap, we must introduce
# a region of extra noise around the edge.
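# Roughly, one Gibbs iteration as implemented by GibbsSampler.sample() below is:
#   cmb, amp        <- CMBSampler.sample()    # direct conditional draw via a CG solve
#   amp, pos, irads <- ShapeSampler.sample()  # Metropolis updates of amplitude, position and beam shape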
class CMBSampler:
"""Draws samples from P(s,a|d,Cl,N,T), where T[ntemp,nfreq,ncomp,ny,nx] is a set of templates.
a[ntemp] is the set of template amplitudes."""
def __init__(self, maps, inoise, ps, T=None):
self.d = maps
self.iN = inoise
self.hN = en.multi_pow(inoise, 0.5, axes=[1,2])
self.iS = en.spec2flat(maps.shape[-3:], maps.wcs, ps, -1.0)
self.hS = en.spec2flat(maps.shape[-3:], maps.wcs, ps, -0.5)
self.ps = ps
self.b, self.x = None, None
# Prepare the preconditioner. It approximates the noise as the
# same in every pixel, and ignores the cmb-template coupling.
# See M(self,u) for details.
iN_white = np.array(np.sum(np.mean(np.mean(self.iN,-1),-1),0))
# iN_white is now in pixel space, but the preconditioner needs it
        # in harmonic space, which introduces a normalization factor (cf. the commented-out norm lines below).
#norm = np.prod((maps.box[1]-maps.box[0])/maps.shape[-2:])
#norm = 1./np.prod(maps.shape[-2:])
#iN_white /= norm
self.S_prec = en.multi_pow(self.iS + iN_white[:,:,None,None], -1)
# The template
self.set_template(T)
def set_template(self, T):
if T is None: T = np.zeros((0,)+self.d.shape)
self.T = T
self.TT = np.einsum("aijyx,bijyx->ab",self.T,self.T)
self.dof = DOF(Arg(default=self.d[0]), Arg(shape=T.shape[:1]))
def P(self, u):
s, a = self.dof.unzip(u)
return s[None,:,:,:] + np.sum(self.T*a[:,None,None,None,None],0)
def PT(self, d):
return self.dof.zip(np.sum(d,0), np.einsum("qijyx,ijyx->q",self.T, d))
def A(self, u):
s, a = self.dof.unzip(u)
# U"u = [S"s, 0a]
Uu = self.dof.zip(en.harm2map(en.map_mul(self.iS, en.map2harm(s))),a*0)
# P'N"P u
PNPu = self.PT(en.map_mul(self.iN, self.P(u)))
return Uu + PNPu
def M(self, u):
# Multiplying things out, the full expression for A is:
# [ S" + sum(N") sum(N"T) ]
# [ sum(T'N") sum(T'T) ]
# A reasonable approximation for this is
# [ S" + sum(sigma^{-2}) 0 ]
# [ 0 sum(T'T) ]
# which can be directly inverted.
s, a = self.dof.unzip(u)
# Solve for the cmb signal component
res_s = en.harm2map(en.map_mul(self.S_prec,en.map2harm(s)))
res_a = np.linalg.solve(self.TT, a)
return self.dof.zip(res_s, res_a)
def calc_b(self):
PNd = self.PT(en.map_mul(self.iN, self.d))
Uw1_s = en.harm2map(en.map_mul(self.hS, en.rand_gauss_harm(self.d.shape[-3:],self.d.wcs)))
Uw1_a = np.zeros(self.T.shape[0])
Uw1 = self.dof.zip(Uw1_s, Uw1_a)
PNw2 = self.PT(en.map_mul(self.hN, en.rand_gauss(self.d.shape, self.d.wcs)))
return PNd + Uw1 + PNw2
def solve(self, b, x0, verbose=False):
cg = CG(self.A, b, x0=x0*0, M=self.M)
while cg.err > 1e-6:
cg.step()
if verbose:
print "%5d %15.7e %15.7e" % (cg.i, cg.err, cg.err_true) #, self.dof.unzip(cg.x)[1]
#if cg.i % 10 == 0:
# s, a = self.dof.unzip(cg.x)
# matshow(s[0]); colorbar(); show()
return cg.x
def sample(self, verbose=False):
self.b = self.calc_b()
if self.x is None: self.x = self.dof.zip(self.d[0], np.zeros(self.T.shape[0]))
self.x = self.solve(self.b, self.x, verbose)
return self.dof.unzip(self.x)
class PtsrcModel:
"""This class converts from point source shape parameters to amplitude
basis functions."""
def __init__(self, template):
self.pos = template.posmap()
self.nfreq, self.ncomp = template.shape[:2]
self.nparam = self.nfreq*self.ncomp
def get_templates(self, pos, irads):
x = utils.rewind(self.pos - pos[:,None,None],0,2*np.pi)
W = np.array([[irads[0],irads[2]],[irads[2],irads[1]]])
xWx = np.sum(np.einsum("ab,byx->ayx", W, x)*x,0)
profile = np.exp(-0.5*xWx)
bases = np.eye(self.nfreq*self.ncomp).reshape(self.nfreq*self.ncomp,self.nfreq,self.ncomp)
return profile[None,None,None]*bases[:,:,:,None,None]
def get_model(self, amps, pos, irads):
return np.sum((self.get_templates(pos, irads).T*amps.T).T,0)
class ShapeSampler:
def __init__(self, maps, inoise, model, amps, pos, pos0, irads, nsamp=200, stepsize=0.02, maxdist=1.5*np.pi/180/60):
self.maps = maps
self.inoise = inoise
self.model= model
self.nsamp= nsamp
self.stepsize = stepsize
self.amps = amps
self.pos, self.irads = pos, irads
self.pos0 = pos0
self.maxdist=maxdist
self.lik = self.getlik(self.amps, self.pos, self.irads)
def getlik(self, amps, pos, irads):
if irads[0] < 0 or irads[1] < 0: return np.inf
if irads[0]*irads[1]-irads[2]**2 <= 0: return np.inf
sigma, phi = expand_beam(irads)
# The beam has a tendency to run off in unrealistic directions,
# so we need a relatively strong prior on it.
if np.min(sigma) < beam_range[0] or np.max(sigma) > beam_range[1] or np.max(sigma)/np.min(sigma) > beam_max_asym: return np.inf
template = self.model.get_model(amps, pos, irads)
residual = self.maps-template
tmp = np.einsum("fabyx,abyx->fayx",self.inoise, residual)
deviation = np.sum((pos-self.pos0)**2)**0.5/self.maxdist
penalty = 1+max(deviation-1,0)**2
return 0.5*np.sum(tmp*residual)*penalty
def newpos(self, pos):
        # Draw pos with a gaussian prior centered on the previous position,
        # with a width given by the fiducial beam size.
        step = self.stepsize
        if np.random.uniform() < 0.1: step *= 100 # Sometimes try larger steps to break out of ruts
        return pos + np.random.standard_normal(2) * beam_fiducial * step
def newshape(self, irads):
return irads + np.random.standard_normal(3) * 1.0/beam_fiducial**2 * self.stepsize * 0.5
def newamp(self, amps):
return amps + np.random.standard_normal(len(amps)) * 1000 * self.stepsize
def subsample(self, verbose=False):
pos = self.newpos(self.pos)
lik = self.getlik(self.amps, pos, self.irads)
if np.random.uniform() < np.exp(self.lik-lik):
self.pos, self.lik = pos, lik
irads = self.newshape(self.irads)
lik = self.getlik(self.amps, self.pos, irads)
if np.random.uniform() < np.exp(self.lik-lik):
self.irads, self.lik = irads, lik
amps = self.newamp(self.amps)
lik = self.getlik(amps, self.pos, self.irads)
if np.random.uniform() < np.exp(self.lik-lik):
self.amps, self.lik = amps, lik
if verbose:
sigma, phi = expand_beam(self.irads)
print (" %9.2f"*len(self.amps)+" %10.5f %10.5f %8.3f %8.3f %8.3f") % (tuple(self.amps)+tuple(self.pos*r2c)+tuple(sigma*r2b)+(phi*r2c,))
return self.amps, self.pos, self.irads
def sample(self, verbose=False):
"""Draw a new, uncorrelated sample."""
for i in range(self.nsamp): self.subsample(verbose)
return self.amps, self.pos, self.irads
class ShapeSamplerMulti:
def __init__(self, maps, inoise, model, amps, pos, pos0, irads, nsamp=1500, stepsize=0.02, maxdist=1.5*np.pi/180/60):
self.samplers = [ShapeSampler(maps, inoise, model, amp1, pos1, pos01, irads1, nsamp=1, stepsize=stepsize, maxdist=maxdist) for amp1, pos1, pos01, irads1 in zip(amps, pos, pos0, irads)]
self.nsamp = nsamp
def sample(self, verbose=False):
for i in range(self.nsamp):
for sampler in self.samplers:
sampler.sample(verbose)
amps = np.array([s.amps for s in self.samplers])
pos = np.array([s.pos for s in self.samplers])
irads= np.array([s.irads for s in self.samplers])
return amps, pos, irads
class GibbsSampler:
def __init__(self, maps, inoise, ps, pos0, amp0, irads0, cmb0):
self.maps = maps
self.inoise = inoise
self.ps = ps
self.src_model = PtsrcModel(maps)
self.pos, self.amp, self.irads, self.cmb = pos0, amp0, irads0, cmb0
self.pos0 = pos0
self.cmb_sampler = CMBSampler(maps, inoise, ps)
def sample(self, verbose=False):
# First draw cmb,amp <- P(cmb,amp|data,pos,irads)
src_template = self.src_model.get_templates(self.pos, self.irads)
self.cmb_sampler.set_template(src_template)
self.cmb, self.amp = self.cmb_sampler.sample(verbose)
# Then draw pos,irads <- P(pos,irads|data,cmb,amp)
maps_nocmb = self.maps - self.cmb[None,:,:,:]
shape_sampler = ShapeSampler(maps_nocmb, self.inoise, self.src_model, self.amp, self.pos, self.pos0, self.irads)
self.amp, self.pos, self.irads = shape_sampler.sample(verbose)
return self.pos, self.amp, self.irads, self.cmb
class GibbsSamplerMulti:
"""Like GibbsSampler, but samples multiple points jointly.
This means that the source amplitude parameters will be arrays."""
def __init__(self, maps, inoise, ps, pos0, amp0, irads0, cmb0):
self.maps = maps
self.inoise = inoise
self.ps = ps
self.src_model = PtsrcModel(maps)
self.pos, self.amp, self.irads, self.cmb = pos0, amp0, irads0, cmb0
self.pos0 = pos0
self.cmb_sampler = CMBSampler(maps, inoise, ps)
def sample(self, verbose=False):
# First draw cmb,amp <- P(cmb,amp|data,pos,irads)
src_template = np.concatenate([self.src_model.get_templates(pos, irads) for pos,irads in zip(self.pos, self.irads)])
self.cmb_sampler.set_template(src_template)
self.cmb, self.amp = self.cmb_sampler.sample(verbose)
# Separate amps for each source
self.amp = self.amp.reshape(self.pos.shape[0],-1)
# Then draw pos,irads <- P(pos,irads|data,cmb,amp)
maps_nocmb = self.maps - self.cmb[None,:,:,:]
shape_sampler = ShapeSamplerMulti(maps_nocmb, self.inoise, self.src_model, self.amp, self.pos, self.pos0, self.irads)
self.amp, self.pos, self.irads = shape_sampler.sample(verbose)
return self.pos, self.amp, self.irads, self.cmb
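# expand_beam maps the inverse beam covariance irads = (ixx, iyy, ixy) to per-axis
# gaussian widths sigma (major axis first) and a position angle phi in [0, pi).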
def expand_beam(irads):
C = np.array([[irads[0],irads[2]],[irads[2],irads[1]]])
E, V = np.linalg.eigh(C)
phi = np.arctan2(V[1,0],V[0,0])
sigma = E**-0.5
if sigma[1] > sigma[0]:
sigma = sigma[::-1]
phi += np.pi/2
phi %= np.pi
return sigma, phi
def smooth_gauss(m, sigma):
l = np.sum(m.lmap()**2,0)**0.5
return np.real(en.ifft(en.fft(m)*np.exp(-0.5*(l*sigma)**2)))
def get_startpoint(maps, inoise, ps, rad=5):
# Filter away the CMB
sampler = CMBSampler(maps, inoise, ps, maps[None][:0])
cmb, _ = sampler.sample()
residual = maps - cmb[None]
# Smooth based on fiducial beam
residual = smooth_gauss(residual, beam_fiducial)
# Extract best point near center
cpix = np.array(residual.shape[-2:])/2
center = np.sum(np.sum((residual[:,:,cpix[0]-rad:cpix[0]+rad,cpix[1]-rad:cpix[1]+rad])**2,0),0)
I = np.argmax(center)
ipix = np.unravel_index(I, center.shape)
pos = center.posmap()[:,ipix[0],ipix[1]]
return pos
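# B(T, nu) is the Planck blackbody spectral radiance in SI units. uK2mJ converts a CMB
# temperature fluctuation in uK to a flux density in mJy, using the solid angle
# 2*pi*b1*b2 of an elliptical gaussian beam with widths b1, b2 in radians
# (1 mJy = 1e-29 W/m^2/Hz).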
def B(T,nu):
c = 299792458.0
h = 6.62606957e-34
k = 1.3806488e-23
return 2*h*nu**3/c**2/(np.exp(h*nu/k/T)-1)
def uK2mJ(amp,b1,b2):
T0 = 2.73; nu=148e9
dB = B(T0+amp*1e-6,nu)-B(T0,nu)
return dB*2*np.pi*b1*b2/1e-29
def output_dummy(id):
with open(args.odir+"/samps%03d.txt" % id, "w") as ofile:
pass
utils.mkdir(args.odir)
if args.nmax > 0: groups = groups[:args.nmax]
for i in range(myid, len(groups), nproc):
if i < args.i: continue
group = groups[i]
if args.cont:
# If all our members are done, skip to next group
try:
lens = [len(np.loadtxt(args.odir + "/samps%03d.txt" % j)) for j in group]
if np.min(lens) >= args.nsamp:
continue
except (IOError, OSError): pass
print "%5d/%d %3d:" % (i+1, len(groups), myid),
print (" %3d"*len(group)) % tuple(group)
pos0 = np.array([poss[j] for j in group])
# Cut out a relevant region
box = np.array([np.min(pos0,0)-R,np.max(pos0,0)+R])
submap = maps.submap(box)
if submap.size == 0:
for g in group:
output_dummy(g)
continue
subnoise = apodize(noise.submap(box), apod_rad, apod_step)
# Set up initial values for the sampler
irads = np.tile(np.array([1/beam_fiducial**2,1/beam_fiducial**2,0]),(len(group),1))
amp = np.zeros([len(group),ncomp*nfreq])
cmb = submap[0]
sampler = GibbsSamplerMulti(submap, subnoise, ps, pos0, amp, irads, cmb)
# Open ofiles
ofiles = [open(args.odir + "/samps%03d.txt" % j, "w") for j in group]
for j in xrange(-args.burnin, args.nsamp):
pos, amp, irad, cmb = sampler.sample(args.verbose)
if j >= 0:
for mypos, myamp, myirad, ofile, isrc in zip(pos, amp, irad, ofiles,group):
sigma, phi = expand_beam(myirad)
mJ = uK2mJ(myamp,sigma[0],sigma[1])
print >> ofile, (" %10.5f"*2 + " %6.1f"*len(myamp) + "%8.3f %8.3f %8.3f" + " %6.2f"*len(mJ)) % (tuple(mypos*r2c)+tuple(myamp)+tuple(sigma*r2b)+(phi*r2c,)+tuple(mJ))
ofile.flush()
if args.dump > 0 and j % args.dump == 0:
dumpdir = args.odir + "/dump%03d" % isrc
utils.mkdir(dumpdir)
src = sampler.src_model.get_model(myamp, mypos, myirad)
residual = submap - src - cmb[None]
# Cut out our area
mybox = np.array([poss[isrc]-R,poss[isrc]+R])
mycmb, myres, mymod, mysub = [a.submap(mybox) for a in [cmb,residual,src,submap]]
en.write_map(dumpdir + "/cmb%03d.hdf" % j, mycmb)
en.write_map(dumpdir + "/residual%03d.hdf" % j, myres)
en.write_map(dumpdir + "/model%03d.hdf" % j, mymod)
en.write_map(dumpdir + "/submap.hdf", mysub)
# My solution
def solution(prices):
    # prices holds stock prices recorded once per second; return, for each second, how long the price did not fall
answer = [0]*len(prices)
for i in range(len(prices)-1):
for j in range(i+1, len(prices)):
answer[i] += 1
            # the price dropped
if prices[i] > prices[j]:
break
return answer
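
# Minimal sanity check (input values are illustrative, not part of the original problem):
# answer[i] is the number of seconds the price at second i did not fall, counting the
# second at which it first drops.
if __name__ == '__main__':
    print(solution([1, 2, 3, 2, 3]))  # expected: [4, 3, 1, 1, 0]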
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
from OpenGL.constant import Constant as _C
# End users want this...
from OpenGL.raw.GLES2 import _errors
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
_EXTENSION_NAME = 'GLES2_NV_viewport_array'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.GLES2, 'GLES2_NV_viewport_array',
error_checker=_errors._error_checker)
GL_DEPTH_RANGE = _C('GL_DEPTH_RANGE', 0x0B70)
GL_MAX_VIEWPORTS_NV = _C('GL_MAX_VIEWPORTS_NV', 0x825B)
GL_SCISSOR_BOX = _C('GL_SCISSOR_BOX', 0x0C10)
GL_SCISSOR_TEST = _C('GL_SCISSOR_TEST', 0x0C11)
GL_VIEWPORT = _C('GL_VIEWPORT', 0x0BA2)
GL_VIEWPORT_BOUNDS_RANGE_NV = _C('GL_VIEWPORT_BOUNDS_RANGE_NV', 0x825D)
GL_VIEWPORT_INDEX_PROVOKING_VERTEX_NV=_C('GL_VIEWPORT_INDEX_PROVOKING_VERTEX_NV',0x825F)
GL_VIEWPORT_SUBPIXEL_BITS_NV=_C('GL_VIEWPORT_SUBPIXEL_BITS_NV',0x825C)
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLfloatArray)
def glDepthRangeArrayfvNV(first,count,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat)
def glDepthRangeIndexedfNV(index,n,f):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint)
def glDisableiNV(target,index):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint)
def glEnableiNV(target,index):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,arrays.GLfloatArray)
def glGetFloati_vNV(target,index,data):pass
@_f
@_p.types(_cs.GLboolean,_cs.GLenum,_cs.GLuint)
def glIsEnablediNV(target,index):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLintArray)
def glScissorArrayvNV(first,count,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei)
def glScissorIndexedNV(index,left,bottom,width,height):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLintArray)
def glScissorIndexedvNV(index,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLfloatArray)
def glViewportArrayvNV(first,count,v):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat,_cs.GLfloat)
def glViewportIndexedfNV(index,x,y,w,h):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLfloatArray)
def glViewportIndexedfvNV(index,v):pass
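
# Minimal usage sketch (assumes a current OpenGL ES 2 context whose driver exposes
# GL_NV_viewport_array; context/window setup is omitted and the viewport layout below
# is an illustrative assumption):
#
#   import numpy as np
#   viewports = np.array([0, 0, 320, 240,      # viewport 0: x, y, w, h
#                         320, 0, 320, 240],   # viewport 1
#                        dtype=np.float32)
#   glViewportArrayvNV(0, 2, viewports)
#   glScissorIndexedNV(1, 320, 0, 320, 240)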
# Generated by Django 2.2.3 on 2019-07-27 10:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('beerFriends', '0006_auto_20190726_1504'),
]
operations = [
migrations.AlterField(
model_name='beer',
name='rating',
field=models.FloatField(blank=True, null=True),
),
]
from machine import Pin, PWM
import time
# external LED is on pin D1 (GPIO5)
PinNum = 5
# PWM initialisation
pwm1 = PWM(Pin(PinNum))
pwm1.freq(60)
pwm1.duty(0)
step = 100
for i in range(10):
    # brighten
while step < 1000:
pwm1.duty(step)
time.sleep_ms(500)
step+=100
    # fade out
while step > 0:
pwm1.duty(step)
time.sleep_ms(500)
step-=200
# reset PWM
pwm1.deinit()
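
# Note: on ESP8266 MicroPython ports (the D1/GPIO5 naming above suggests a Wemos D1 mini),
# PWM.duty() accepts 0-1023, so duty(1000) is close to full brightness.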
from pymongo import MongoClient
class MongoDB():
def __init__(self, host, port, db, table):
self.host = host
self.port = port
self.client = MongoClient(host=self.host, port=self.port)
self.db = self.client[db]
self.table = self.db[table]
    # fetch a single document
def get_one(self, query):
        return self.table.find_one(query, projection={"_id": False})
    # fetch all matching documents
def get_all(self, query):
return self.table.find(query)
    # insert a document
def add(self, kv_dict):
return self.table.insert_one(kv_dict)
    # delete all matching documents
def delete(self, query):
return self.table.delete_many(query)
    # check whether the collection contains a matching document; returns it (truthy) if so
def check(self, query):
return self.table.find_one(query)
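
# Minimal usage sketch (assumes a MongoDB server reachable on localhost:27017; the
# database, collection and document below are placeholders, not from the source):
if __name__ == '__main__':
    db = MongoDB('127.0.0.1', 27017, 'testdb', 'users')
    db.add({'name': 'alice', 'age': 30})
    print(db.get_one({'name': 'alice'}))
    db.delete({'name': 'alice'})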
lista = [2, 3.2, 4, 52, 6.25]
s = sum(lista)
print(s)
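# expected output: 67.45 (= 2 + 3.2 + 4 + 52 + 6.25)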
# Generated by Django 3.2.5 on 2021-08-05 07:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('organization', '0010_auto_20210801_1623'),
('quote', '0004_auto_20210805_1032'),
]
operations = [
migrations.CreateModel(
name='FollowUp',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True, verbose_name='تاریخ ثبت')),
('text', models.TextField(default=None, verbose_name='متن پیگیری')),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='کاربر ثبت کننده')),
('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organization.organization', verbose_name='سازمان')),
],
),
]
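
# To apply this migration in a standard Django project (command assumed, not part of
# the generated file): python manage.py migrate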
import pandas as pd
import numpy as np
import urllib.request
import urllib.parse
import json
def predict(input_text):
URL = "http://127.0.0.1:8000/api/v1/predict/"
values = {
"format": "json",
"input_text": input_text,
}
data = urllib.parse.urlencode({'input_text': input_text}).encode('utf-8')
request = urllib.request.Request(URL, data)
response = urllib.request.urlopen(request)
result= json.loads(response.read())
return result['neg_pos']
if __name__ == '__main__':
print("Start if __name__ == '__main__'")
print('load csv file ....')
df = pd.read_csv("test.csv", engine="python", encoding="utf-8-sig")
    df["PREDICT"] = np.nan  # add a prediction column
print('Getting prediction results ....')
for index, row in df.iterrows():
df.at[index, "PREDICT"] = predict(row['INPUT'])
print('save results to csv file')
    df.to_csv("predicted_test.csv", encoding="utf-8-sig", index=False)
print('Processing terminated normally.')
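
# Assumed input layout (not specified in the source): test.csv has a text column named
# INPUT; each row's text is POSTed to the local API and the returned neg_pos label is
# stored in the new PREDICT column of the output file.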
import tensorflow as tf

class PolicyEstimator:
def __init__(self, num_ouptuts, reuse=False, trainable=True):
self.num_outputs = num_ouptuts
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.
uint8, name='X')
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')
self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name=
'actions')
x = tf.to_float(self.states) / 255.0
batch_size = tf.shape(self.states)[0]
with tf.variable_scope('shared', reuse=reuse):
fc1 = build_shared_network(x, add_summaries=not reuse)
with tf.variable_scope('policy_net'):
self.logits = tf.layers.dense(fc1, num_ouptuts, activation=None)
self.probs = tf.nn.softmax(self.logits) + 1e-08
self.predictions = {'logits': self.logits, 'probs': self.probs}
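            # Policy entropy, added to the loss below (weight 0.01) as an exploration bonus.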
self.entropy = -tf.reduce_sum(self.probs * tf.log(self.probs),
1, name='entropy')
self.entropy_mean = tf.reduce_mean(self.entropy, name=
'entropy_mean')
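            # Pick out, for each batch element, the probability the policy assigns to the
            # action actually taken; self.targets is expected to carry the corresponding
            # return/advantage estimates.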
gather_indices = tf.range(batch_size) * tf.shape(self.probs)[1
] + self.actions
self.picked_action_probs = tf.gather(tf.reshape(self.probs, [-1
]), gather_indices)
self.losses = -(tf.log(self.picked_action_probs) * self.targets +
0.01 * self.entropy)
self.loss = tf.reduce_sum(self.losses, name='loss')
tf.summary.scalar(self.loss.op.name, self.loss)
tf.summary.scalar(self.entropy_mean.op.name, self.entropy_mean)
tf.summary.histogram(self.entropy.op.name, self.entropy)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99,
0.0, 1e-06)
self.grads_and_vars = self.optimizer.compute_gradients(self
.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.
grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.
grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if 'policy_net' in s.
name or 'shared' in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name
]
self.summaries = tf.summary.merge(summaries)
class ValueEstimator:
def __init__(self, reuse=False, trainable=True):
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.
uint8, name='X')
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')
x = tf.to_float(self.states) / 255.0
with tf.variable_scope('shared', reuse=reuse):
fc1 = build_shared_network(x, add_summaries=not reuse)
with tf.variable_scope('value_net'):
self.logits = tf.layers.dense(fc1, 1, activation=None)
self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name=
'logits')
self.losses = tf.squared_difference(self.logits, self.targets)
self.loss = tf.reduce_sum(self.losses, name='loss')
self.predictions = {'logits': self.logits}
prefix = tf.get_variable_scope().name
tf.summary.scalar(self.loss.name, self.loss)
tf.summary.scalar('{}/max_value'.format(prefix), tf.reduce_max(
self.logits))
tf.summary.scalar('{}/min_value'.format(prefix), tf.reduce_min(
self.logits))
tf.summary.scalar('{}/mean_value'.format(prefix), tf.
reduce_mean(self.logits))
tf.summary.scalar('{}/reward_max'.format(prefix), tf.reduce_max
(self.targets))
tf.summary.scalar('{}/reward_min'.format(prefix), tf.reduce_min
(self.targets))
tf.summary.scalar('{}/reward_mean'.format(prefix), tf.
reduce_mean(self.targets))
tf.summary.histogram('{}/reward_targets'.format(prefix), self.
targets)
tf.summary.histogram('{}/values'.format(prefix), self.logits)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99,
0.0, 1e-06)
self.grads_and_vars = self.optimizer.compute_gradients(self
.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.
grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.
grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if 'policy_net' in s.name or
'shared' in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name]
self.summaries = tf.summary.merge(summaries)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def build_shared_network(x, add_summaries=False):
conv1 = tf.layers.conv2d(x, 16, 8, 4, activation=tf.nn.relu, name='conv1')
conv2 = tf.layers.conv2d(conv1, 32, 4, 2, activation=tf.nn.relu, name=
'conv2')
fc1 = tf.layers.dense(tf.layers.flatten(conv2), 256, name='fc1')
if add_summaries:
tf.contrib.layers.summarize_activation(conv1)
tf.contrib.layers.summarize_activation(conv2)
tf.contrib.layers.summarize_activation(fc1)
return fc1
class PolicyEstimator:
def __init__(self, num_ouptuts, reuse=False, trainable=True):
self.num_outputs = num_ouptuts
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.
uint8, name='X')
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')
self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name=
'actions')
x = tf.to_float(self.states) / 255.0
batch_size = tf.shape(self.states)[0]
with tf.variable_scope('shared', reuse=reuse):
fc1 = build_shared_network(x, add_summaries=not reuse)
with tf.variable_scope('policy_net'):
self.logits = tf.layers.dense(fc1, num_ouptuts, activation=None)
self.probs = tf.nn.softmax(self.logits) + 1e-08
self.predictions = {'logits': self.logits, 'probs': self.probs}
self.entropy = -tf.reduce_sum(self.probs * tf.log(self.probs),
1, name='entropy')
self.entropy_mean = tf.reduce_mean(self.entropy, name=
'entropy_mean')
gather_indices = tf.range(batch_size) * tf.shape(self.probs)[1
] + self.actions
self.picked_action_probs = tf.gather(tf.reshape(self.probs, [-1
]), gather_indices)
self.losses = -(tf.log(self.picked_action_probs) * self.targets +
0.01 * self.entropy)
self.loss = tf.reduce_sum(self.losses, name='loss')
tf.summary.scalar(self.loss.op.name, self.loss)
tf.summary.scalar(self.entropy_mean.op.name, self.entropy_mean)
tf.summary.histogram(self.entropy.op.name, self.entropy)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99,
0.0, 1e-06)
self.grads_and_vars = self.optimizer.compute_gradients(self
.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.
grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.
grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if 'policy_net' in s.
name or 'shared' in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name
]
self.summaries = tf.summary.merge(summaries)
class ValueEstimator:
def __init__(self, reuse=False, trainable=True):
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.
uint8, name='X')
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')
x = tf.to_float(self.states) / 255.0
with tf.variable_scope('shared', reuse=reuse):
fc1 = build_shared_network(x, add_summaries=not reuse)
with tf.variable_scope('value_net'):
self.logits = tf.layers.dense(fc1, 1, activation=None)
self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name=
'logits')
self.losses = tf.squared_difference(self.logits, self.targets)
self.loss = tf.reduce_sum(self.losses, name='loss')
self.predictions = {'logits': self.logits}
prefix = tf.get_variable_scope().name
tf.summary.scalar(self.loss.name, self.loss)
tf.summary.scalar('{}/max_value'.format(prefix), tf.reduce_max(
self.logits))
tf.summary.scalar('{}/min_value'.format(prefix), tf.reduce_min(
self.logits))
tf.summary.scalar('{}/mean_value'.format(prefix), tf.
reduce_mean(self.logits))
tf.summary.scalar('{}/reward_max'.format(prefix), tf.reduce_max
(self.targets))
tf.summary.scalar('{}/reward_min'.format(prefix), tf.reduce_min
(self.targets))
tf.summary.scalar('{}/reward_mean'.format(prefix), tf.
reduce_mean(self.targets))
tf.summary.histogram('{}/reward_targets'.format(prefix), self.
targets)
tf.summary.histogram('{}/values'.format(prefix), self.logits)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99,
0.0, 1e-06)
self.grads_and_vars = self.optimizer.compute_gradients(self
.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.
grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.
grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if 'policy_net' in s.name or
'shared' in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name]
self.summaries = tf.summary.merge(summaries)
<|reserved_special_token_1|>
import tensorflow as tf
def build_shared_network(x, add_summaries=False):
conv1 = tf.layers.conv2d(x, 16, 8, 4, activation=tf.nn.relu, name='conv1')
conv2 = tf.layers.conv2d(conv1, 32, 4, 2, activation=tf.nn.relu, name=
'conv2')
fc1 = tf.layers.dense(tf.layers.flatten(conv2), 256, name='fc1')
if add_summaries:
tf.contrib.layers.summarize_activation(conv1)
tf.contrib.layers.summarize_activation(conv2)
tf.contrib.layers.summarize_activation(fc1)
return fc1
class PolicyEstimator:
def __init__(self, num_ouptuts, reuse=False, trainable=True):
self.num_outputs = num_ouptuts
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.
uint8, name='X')
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')
self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name=
'actions')
x = tf.to_float(self.states) / 255.0
batch_size = tf.shape(self.states)[0]
with tf.variable_scope('shared', reuse=reuse):
fc1 = build_shared_network(x, add_summaries=not reuse)
with tf.variable_scope('policy_net'):
self.logits = tf.layers.dense(fc1, num_ouptuts, activation=None)
self.probs = tf.nn.softmax(self.logits) + 1e-08
self.predictions = {'logits': self.logits, 'probs': self.probs}
self.entropy = -tf.reduce_sum(self.probs * tf.log(self.probs),
1, name='entropy')
self.entropy_mean = tf.reduce_mean(self.entropy, name=
'entropy_mean')
gather_indices = tf.range(batch_size) * tf.shape(self.probs)[1
] + self.actions
self.picked_action_probs = tf.gather(tf.reshape(self.probs, [-1
]), gather_indices)
self.losses = -(tf.log(self.picked_action_probs) * self.targets +
0.01 * self.entropy)
self.loss = tf.reduce_sum(self.losses, name='loss')
tf.summary.scalar(self.loss.op.name, self.loss)
tf.summary.scalar(self.entropy_mean.op.name, self.entropy_mean)
tf.summary.histogram(self.entropy.op.name, self.entropy)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99,
0.0, 1e-06)
self.grads_and_vars = self.optimizer.compute_gradients(self
.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.
grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.
grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if 'policy_net' in s.
name or 'shared' in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name
]
self.summaries = tf.summary.merge(summaries)
class ValueEstimator:
def __init__(self, reuse=False, trainable=True):
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.
uint8, name='X')
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')
x = tf.to_float(self.states) / 255.0
with tf.variable_scope('shared', reuse=reuse):
fc1 = build_shared_network(x, add_summaries=not reuse)
with tf.variable_scope('value_net'):
self.logits = tf.layers.dense(fc1, 1, activation=None)
self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name=
'logits')
self.losses = tf.squared_difference(self.logits, self.targets)
self.loss = tf.reduce_sum(self.losses, name='loss')
self.predictions = {'logits': self.logits}
prefix = tf.get_variable_scope().name
tf.summary.scalar(self.loss.name, self.loss)
tf.summary.scalar('{}/max_value'.format(prefix), tf.reduce_max(
self.logits))
tf.summary.scalar('{}/min_value'.format(prefix), tf.reduce_min(
self.logits))
tf.summary.scalar('{}/mean_value'.format(prefix), tf.
reduce_mean(self.logits))
tf.summary.scalar('{}/reward_max'.format(prefix), tf.reduce_max
(self.targets))
tf.summary.scalar('{}/reward_min'.format(prefix), tf.reduce_min
(self.targets))
tf.summary.scalar('{}/reward_mean'.format(prefix), tf.
reduce_mean(self.targets))
tf.summary.histogram('{}/reward_targets'.format(prefix), self.
targets)
tf.summary.histogram('{}/values'.format(prefix), self.logits)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99,
0.0, 1e-06)
self.grads_and_vars = self.optimizer.compute_gradients(self
.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.
grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.
grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if 'policy_net' in s.name or
'shared' in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name]
self.summaries = tf.summary.merge(summaries)
<|reserved_special_token_1|>
import tensorflow as tf
def build_shared_network(x, add_summaries=False):
conv1 = tf.layers.conv2d(x, 16, 8, 4, activation=tf.nn.relu, name="conv1")
conv2 = tf.layers.conv2d(conv1, 32, 4, 2, activation=tf.nn.relu, name="conv2")
fc1 = tf.layers.dense(tf.layers.flatten(conv2), 256, name="fc1")
if add_summaries:
tf.contrib.layers.summarize_activation(conv1)
tf.contrib.layers.summarize_activation(conv2)
tf.contrib.layers.summarize_activation(fc1)
return fc1
class PolicyEstimator():
def __init__(self, num_ouptuts, reuse=False, trainable=True):
self.num_outputs = num_ouptuts
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name="X")
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name="Y")
self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name="actions")
x = tf.to_float(self.states) / 255.0
batch_size = tf.shape(self.states)[0]
with tf.variable_scope("shared", reuse=reuse):
fc1 = build_shared_network(x, add_summaries=(not reuse))
with tf.variable_scope("policy_net"):
self.logits = tf.layers.dense(fc1, num_ouptuts, activation=None)
self.probs = tf.nn.softmax(self.logits) + 1e-8
self.predictions = {"logits": self.logits, "probs": self.probs}
self.entropy = -tf.reduce_sum(self.probs * tf.log(self.probs), 1, name="entropy")
self.entropy_mean = tf.reduce_mean(self.entropy, name="entropy_mean")
            # Treat the probs matrix as a flat list: each row's start offset plus the action value (not one-hot) gives that action's position.
            # tf.gather is then used to pull out only the probability of the chosen action.
gather_indices = tf.range(batch_size) * tf.shape(self.probs)[1] + self.actions
self.picked_action_probs = tf.gather(tf.reshape(self.probs, [-1]), gather_indices)
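            # Worked example (illustrative numbers only, not from the original file): with batch_size=2,
            # 3 actions and actions=[2, 0], gather_indices = [0*3 + 2, 1*3 + 0] = [2, 3], which picks
            # probs[0, 2] and probs[1, 0] out of the flattened probs vector.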
self.losses = - (tf.log(self.picked_action_probs) * self.targets + 0.01*self.entropy)
self.loss = tf.reduce_sum(self.losses, name="loss")
tf.summary.scalar(self.loss.op.name, self.loss)
tf.summary.scalar(self.entropy_mean.op.name, self.entropy_mean)
tf.summary.histogram(self.entropy.op.name, self.entropy)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)
self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
            # This seems to be done to keep training from breaking when a grad is None.
self.grads_and_vars = [[grad, var] for grad, var in self.grads_and_vars if grad is not None]
            # train_op is not actually used here; the worker applies the gradients itself. It could probably be removed.
self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if "policy_net" in s.name or "shared" in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name]
self.summaries = tf.summary.merge(summaries)
class ValueEstimator():
def __init__(self, reuse=False, trainable=True):
self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name="X")
self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name="Y")
x = tf.to_float(self.states) / 255.0
with tf.variable_scope("shared", reuse=reuse):
fc1 = build_shared_network(x, add_summaries=(not reuse))
with tf.variable_scope("value_net"):
self.logits = tf.layers.dense(fc1, 1, activation=None)
            # squeeze drops size-1 dimensions, e.g. shape [1, 2, 3] -> [2, 3]
self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name="logits")
self.losses = tf.squared_difference(self.logits, self.targets)
self.loss = tf.reduce_sum(self.losses, name="loss")
self.predictions = { "logits": self.logits }
prefix = tf.get_variable_scope().name
tf.summary.scalar(self.loss.name, self.loss)
tf.summary.scalar("{}/max_value".format(prefix), tf.reduce_max(self.logits))
tf.summary.scalar("{}/min_value".format(prefix), tf.reduce_min(self.logits))
tf.summary.scalar("{}/mean_value".format(prefix), tf.reduce_mean(self.logits))
tf.summary.scalar("{}/reward_max".format(prefix), tf.reduce_max(self.targets))
tf.summary.scalar("{}/reward_min".format(prefix), tf.reduce_min(self.targets))
tf.summary.scalar("{}/reward_mean".format(prefix), tf.reduce_mean(self.targets))
tf.summary.histogram("{}/reward_targets".format(prefix), self.targets)
tf.summary.histogram("{}/values".format(prefix), self.logits)
if trainable:
self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)
self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
self.grads_and_vars = [[grad, var] for grad, var in self.grads_and_vars if grad is not None]
self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step=tf.train.get_global_step())
var_scope_name = tf.get_variable_scope().name
summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries = [s for s in summary_ops if "policy_net" in s.name or "shared" in s.name]
summaries = [s for s in summary_ops if var_scope_name in s.name]
self.summaries = tf.summary.merge(summaries)
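
# --- Hypothetical usage sketch (not part of the original file) ---
# A minimal illustration of how the two estimators above might be wired together in a
# TF1 graph/session; the names policy_net, value_net, sess and state are assumptions.
import numpy as np

policy_net = PolicyEstimator(num_ouptuts=4)   # builds the "shared" trunk plus the "policy_net" head (keyword spelled as in the original signature)
value_net = ValueEstimator(reuse=True)        # reuses the already-built "shared" trunk

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    state = np.zeros([1, 84, 84, 4], dtype=np.uint8)   # one dummy 84x84x4 frame stack
    probs = sess.run(policy_net.predictions["probs"], {policy_net.states: state})
    value = sess.run(value_net.predictions["logits"], {value_net.states: state})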
|
flexible
|
{
"blob_id": "0fbf8efd39f583581c46fcd3f84c65a7787145cd",
"index": 847,
"step-1": "<mask token>\n\n\nclass PolicyEstimator:\n <mask token>\n\n\nclass ValueEstimator:\n\n def __init__(self, reuse=False, trainable=True):\n self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.\n uint8, name='X')\n self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')\n x = tf.to_float(self.states) / 255.0\n with tf.variable_scope('shared', reuse=reuse):\n fc1 = build_shared_network(x, add_summaries=not reuse)\n with tf.variable_scope('value_net'):\n self.logits = tf.layers.dense(fc1, 1, activation=None)\n self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name=\n 'logits')\n self.losses = tf.squared_difference(self.logits, self.targets)\n self.loss = tf.reduce_sum(self.losses, name='loss')\n self.predictions = {'logits': self.logits}\n prefix = tf.get_variable_scope().name\n tf.summary.scalar(self.loss.name, self.loss)\n tf.summary.scalar('{}/max_value'.format(prefix), tf.reduce_max(\n self.logits))\n tf.summary.scalar('{}/min_value'.format(prefix), tf.reduce_min(\n self.logits))\n tf.summary.scalar('{}/mean_value'.format(prefix), tf.\n reduce_mean(self.logits))\n tf.summary.scalar('{}/reward_max'.format(prefix), tf.reduce_max\n (self.targets))\n tf.summary.scalar('{}/reward_min'.format(prefix), tf.reduce_min\n (self.targets))\n tf.summary.scalar('{}/reward_mean'.format(prefix), tf.\n reduce_mean(self.targets))\n tf.summary.histogram('{}/reward_targets'.format(prefix), self.\n targets)\n tf.summary.histogram('{}/values'.format(prefix), self.logits)\n if trainable:\n self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, \n 0.0, 1e-06)\n self.grads_and_vars = self.optimizer.compute_gradients(self\n .loss)\n self.grads_and_vars = [[grad, var] for grad, var in self.\n grads_and_vars if grad is not None]\n self.train_op = self.optimizer.apply_gradients(self.\n grads_and_vars, global_step=tf.train.get_global_step())\n var_scope_name = tf.get_variable_scope().name\n summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)\n summaries = [s for s in summary_ops if 'policy_net' in s.name or\n 'shared' in s.name]\n summaries = [s for s in summary_ops if var_scope_name in s.name]\n self.summaries = tf.summary.merge(summaries)\n",
"step-2": "<mask token>\n\n\nclass PolicyEstimator:\n\n def __init__(self, num_ouptuts, reuse=False, trainable=True):\n self.num_outputs = num_ouptuts\n self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.\n uint8, name='X')\n self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')\n self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name=\n 'actions')\n x = tf.to_float(self.states) / 255.0\n batch_size = tf.shape(self.states)[0]\n with tf.variable_scope('shared', reuse=reuse):\n fc1 = build_shared_network(x, add_summaries=not reuse)\n with tf.variable_scope('policy_net'):\n self.logits = tf.layers.dense(fc1, num_ouptuts, activation=None)\n self.probs = tf.nn.softmax(self.logits) + 1e-08\n self.predictions = {'logits': self.logits, 'probs': self.probs}\n self.entropy = -tf.reduce_sum(self.probs * tf.log(self.probs), \n 1, name='entropy')\n self.entropy_mean = tf.reduce_mean(self.entropy, name=\n 'entropy_mean')\n gather_indices = tf.range(batch_size) * tf.shape(self.probs)[1\n ] + self.actions\n self.picked_action_probs = tf.gather(tf.reshape(self.probs, [-1\n ]), gather_indices)\n self.losses = -(tf.log(self.picked_action_probs) * self.targets +\n 0.01 * self.entropy)\n self.loss = tf.reduce_sum(self.losses, name='loss')\n tf.summary.scalar(self.loss.op.name, self.loss)\n tf.summary.scalar(self.entropy_mean.op.name, self.entropy_mean)\n tf.summary.histogram(self.entropy.op.name, self.entropy)\n if trainable:\n self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, \n 0.0, 1e-06)\n self.grads_and_vars = self.optimizer.compute_gradients(self\n .loss)\n self.grads_and_vars = [[grad, var] for grad, var in self.\n grads_and_vars if grad is not None]\n self.train_op = self.optimizer.apply_gradients(self.\n grads_and_vars, global_step=tf.train.get_global_step())\n var_scope_name = tf.get_variable_scope().name\n summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)\n summaries = [s for s in summary_ops if 'policy_net' in s.\n name or 'shared' in s.name]\n summaries = [s for s in summary_ops if var_scope_name in s.name\n ]\n self.summaries = tf.summary.merge(summaries)\n\n\nclass ValueEstimator:\n\n def __init__(self, reuse=False, trainable=True):\n self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.\n uint8, name='X')\n self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')\n x = tf.to_float(self.states) / 255.0\n with tf.variable_scope('shared', reuse=reuse):\n fc1 = build_shared_network(x, add_summaries=not reuse)\n with tf.variable_scope('value_net'):\n self.logits = tf.layers.dense(fc1, 1, activation=None)\n self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name=\n 'logits')\n self.losses = tf.squared_difference(self.logits, self.targets)\n self.loss = tf.reduce_sum(self.losses, name='loss')\n self.predictions = {'logits': self.logits}\n prefix = tf.get_variable_scope().name\n tf.summary.scalar(self.loss.name, self.loss)\n tf.summary.scalar('{}/max_value'.format(prefix), tf.reduce_max(\n self.logits))\n tf.summary.scalar('{}/min_value'.format(prefix), tf.reduce_min(\n self.logits))\n tf.summary.scalar('{}/mean_value'.format(prefix), tf.\n reduce_mean(self.logits))\n tf.summary.scalar('{}/reward_max'.format(prefix), tf.reduce_max\n (self.targets))\n tf.summary.scalar('{}/reward_min'.format(prefix), tf.reduce_min\n (self.targets))\n tf.summary.scalar('{}/reward_mean'.format(prefix), tf.\n reduce_mean(self.targets))\n tf.summary.histogram('{}/reward_targets'.format(prefix), self.\n targets)\n 
tf.summary.histogram('{}/values'.format(prefix), self.logits)\n if trainable:\n self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, \n 0.0, 1e-06)\n self.grads_and_vars = self.optimizer.compute_gradients(self\n .loss)\n self.grads_and_vars = [[grad, var] for grad, var in self.\n grads_and_vars if grad is not None]\n self.train_op = self.optimizer.apply_gradients(self.\n grads_and_vars, global_step=tf.train.get_global_step())\n var_scope_name = tf.get_variable_scope().name\n summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)\n summaries = [s for s in summary_ops if 'policy_net' in s.name or\n 'shared' in s.name]\n summaries = [s for s in summary_ops if var_scope_name in s.name]\n self.summaries = tf.summary.merge(summaries)\n",
"step-3": "<mask token>\n\n\ndef build_shared_network(x, add_summaries=False):\n conv1 = tf.layers.conv2d(x, 16, 8, 4, activation=tf.nn.relu, name='conv1')\n conv2 = tf.layers.conv2d(conv1, 32, 4, 2, activation=tf.nn.relu, name=\n 'conv2')\n fc1 = tf.layers.dense(tf.layers.flatten(conv2), 256, name='fc1')\n if add_summaries:\n tf.contrib.layers.summarize_activation(conv1)\n tf.contrib.layers.summarize_activation(conv2)\n tf.contrib.layers.summarize_activation(fc1)\n return fc1\n\n\nclass PolicyEstimator:\n\n def __init__(self, num_ouptuts, reuse=False, trainable=True):\n self.num_outputs = num_ouptuts\n self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.\n uint8, name='X')\n self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')\n self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name=\n 'actions')\n x = tf.to_float(self.states) / 255.0\n batch_size = tf.shape(self.states)[0]\n with tf.variable_scope('shared', reuse=reuse):\n fc1 = build_shared_network(x, add_summaries=not reuse)\n with tf.variable_scope('policy_net'):\n self.logits = tf.layers.dense(fc1, num_ouptuts, activation=None)\n self.probs = tf.nn.softmax(self.logits) + 1e-08\n self.predictions = {'logits': self.logits, 'probs': self.probs}\n self.entropy = -tf.reduce_sum(self.probs * tf.log(self.probs), \n 1, name='entropy')\n self.entropy_mean = tf.reduce_mean(self.entropy, name=\n 'entropy_mean')\n gather_indices = tf.range(batch_size) * tf.shape(self.probs)[1\n ] + self.actions\n self.picked_action_probs = tf.gather(tf.reshape(self.probs, [-1\n ]), gather_indices)\n self.losses = -(tf.log(self.picked_action_probs) * self.targets +\n 0.01 * self.entropy)\n self.loss = tf.reduce_sum(self.losses, name='loss')\n tf.summary.scalar(self.loss.op.name, self.loss)\n tf.summary.scalar(self.entropy_mean.op.name, self.entropy_mean)\n tf.summary.histogram(self.entropy.op.name, self.entropy)\n if trainable:\n self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, \n 0.0, 1e-06)\n self.grads_and_vars = self.optimizer.compute_gradients(self\n .loss)\n self.grads_and_vars = [[grad, var] for grad, var in self.\n grads_and_vars if grad is not None]\n self.train_op = self.optimizer.apply_gradients(self.\n grads_and_vars, global_step=tf.train.get_global_step())\n var_scope_name = tf.get_variable_scope().name\n summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)\n summaries = [s for s in summary_ops if 'policy_net' in s.\n name or 'shared' in s.name]\n summaries = [s for s in summary_ops if var_scope_name in s.name\n ]\n self.summaries = tf.summary.merge(summaries)\n\n\nclass ValueEstimator:\n\n def __init__(self, reuse=False, trainable=True):\n self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.\n uint8, name='X')\n self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')\n x = tf.to_float(self.states) / 255.0\n with tf.variable_scope('shared', reuse=reuse):\n fc1 = build_shared_network(x, add_summaries=not reuse)\n with tf.variable_scope('value_net'):\n self.logits = tf.layers.dense(fc1, 1, activation=None)\n self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name=\n 'logits')\n self.losses = tf.squared_difference(self.logits, self.targets)\n self.loss = tf.reduce_sum(self.losses, name='loss')\n self.predictions = {'logits': self.logits}\n prefix = tf.get_variable_scope().name\n tf.summary.scalar(self.loss.name, self.loss)\n tf.summary.scalar('{}/max_value'.format(prefix), tf.reduce_max(\n self.logits))\n tf.summary.scalar('{}/min_value'.format(prefix), tf.reduce_min(\n 
self.logits))\n tf.summary.scalar('{}/mean_value'.format(prefix), tf.\n reduce_mean(self.logits))\n tf.summary.scalar('{}/reward_max'.format(prefix), tf.reduce_max\n (self.targets))\n tf.summary.scalar('{}/reward_min'.format(prefix), tf.reduce_min\n (self.targets))\n tf.summary.scalar('{}/reward_mean'.format(prefix), tf.\n reduce_mean(self.targets))\n tf.summary.histogram('{}/reward_targets'.format(prefix), self.\n targets)\n tf.summary.histogram('{}/values'.format(prefix), self.logits)\n if trainable:\n self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, \n 0.0, 1e-06)\n self.grads_and_vars = self.optimizer.compute_gradients(self\n .loss)\n self.grads_and_vars = [[grad, var] for grad, var in self.\n grads_and_vars if grad is not None]\n self.train_op = self.optimizer.apply_gradients(self.\n grads_and_vars, global_step=tf.train.get_global_step())\n var_scope_name = tf.get_variable_scope().name\n summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)\n summaries = [s for s in summary_ops if 'policy_net' in s.name or\n 'shared' in s.name]\n summaries = [s for s in summary_ops if var_scope_name in s.name]\n self.summaries = tf.summary.merge(summaries)\n",
"step-4": "import tensorflow as tf\n\n\ndef build_shared_network(x, add_summaries=False):\n conv1 = tf.layers.conv2d(x, 16, 8, 4, activation=tf.nn.relu, name='conv1')\n conv2 = tf.layers.conv2d(conv1, 32, 4, 2, activation=tf.nn.relu, name=\n 'conv2')\n fc1 = tf.layers.dense(tf.layers.flatten(conv2), 256, name='fc1')\n if add_summaries:\n tf.contrib.layers.summarize_activation(conv1)\n tf.contrib.layers.summarize_activation(conv2)\n tf.contrib.layers.summarize_activation(fc1)\n return fc1\n\n\nclass PolicyEstimator:\n\n def __init__(self, num_ouptuts, reuse=False, trainable=True):\n self.num_outputs = num_ouptuts\n self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.\n uint8, name='X')\n self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')\n self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name=\n 'actions')\n x = tf.to_float(self.states) / 255.0\n batch_size = tf.shape(self.states)[0]\n with tf.variable_scope('shared', reuse=reuse):\n fc1 = build_shared_network(x, add_summaries=not reuse)\n with tf.variable_scope('policy_net'):\n self.logits = tf.layers.dense(fc1, num_ouptuts, activation=None)\n self.probs = tf.nn.softmax(self.logits) + 1e-08\n self.predictions = {'logits': self.logits, 'probs': self.probs}\n self.entropy = -tf.reduce_sum(self.probs * tf.log(self.probs), \n 1, name='entropy')\n self.entropy_mean = tf.reduce_mean(self.entropy, name=\n 'entropy_mean')\n gather_indices = tf.range(batch_size) * tf.shape(self.probs)[1\n ] + self.actions\n self.picked_action_probs = tf.gather(tf.reshape(self.probs, [-1\n ]), gather_indices)\n self.losses = -(tf.log(self.picked_action_probs) * self.targets +\n 0.01 * self.entropy)\n self.loss = tf.reduce_sum(self.losses, name='loss')\n tf.summary.scalar(self.loss.op.name, self.loss)\n tf.summary.scalar(self.entropy_mean.op.name, self.entropy_mean)\n tf.summary.histogram(self.entropy.op.name, self.entropy)\n if trainable:\n self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, \n 0.0, 1e-06)\n self.grads_and_vars = self.optimizer.compute_gradients(self\n .loss)\n self.grads_and_vars = [[grad, var] for grad, var in self.\n grads_and_vars if grad is not None]\n self.train_op = self.optimizer.apply_gradients(self.\n grads_and_vars, global_step=tf.train.get_global_step())\n var_scope_name = tf.get_variable_scope().name\n summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)\n summaries = [s for s in summary_ops if 'policy_net' in s.\n name or 'shared' in s.name]\n summaries = [s for s in summary_ops if var_scope_name in s.name\n ]\n self.summaries = tf.summary.merge(summaries)\n\n\nclass ValueEstimator:\n\n def __init__(self, reuse=False, trainable=True):\n self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.\n uint8, name='X')\n self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name='Y')\n x = tf.to_float(self.states) / 255.0\n with tf.variable_scope('shared', reuse=reuse):\n fc1 = build_shared_network(x, add_summaries=not reuse)\n with tf.variable_scope('value_net'):\n self.logits = tf.layers.dense(fc1, 1, activation=None)\n self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name=\n 'logits')\n self.losses = tf.squared_difference(self.logits, self.targets)\n self.loss = tf.reduce_sum(self.losses, name='loss')\n self.predictions = {'logits': self.logits}\n prefix = tf.get_variable_scope().name\n tf.summary.scalar(self.loss.name, self.loss)\n tf.summary.scalar('{}/max_value'.format(prefix), tf.reduce_max(\n self.logits))\n tf.summary.scalar('{}/min_value'.format(prefix), 
tf.reduce_min(\n self.logits))\n tf.summary.scalar('{}/mean_value'.format(prefix), tf.\n reduce_mean(self.logits))\n tf.summary.scalar('{}/reward_max'.format(prefix), tf.reduce_max\n (self.targets))\n tf.summary.scalar('{}/reward_min'.format(prefix), tf.reduce_min\n (self.targets))\n tf.summary.scalar('{}/reward_mean'.format(prefix), tf.\n reduce_mean(self.targets))\n tf.summary.histogram('{}/reward_targets'.format(prefix), self.\n targets)\n tf.summary.histogram('{}/values'.format(prefix), self.logits)\n if trainable:\n self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, \n 0.0, 1e-06)\n self.grads_and_vars = self.optimizer.compute_gradients(self\n .loss)\n self.grads_and_vars = [[grad, var] for grad, var in self.\n grads_and_vars if grad is not None]\n self.train_op = self.optimizer.apply_gradients(self.\n grads_and_vars, global_step=tf.train.get_global_step())\n var_scope_name = tf.get_variable_scope().name\n summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)\n summaries = [s for s in summary_ops if 'policy_net' in s.name or\n 'shared' in s.name]\n summaries = [s for s in summary_ops if var_scope_name in s.name]\n self.summaries = tf.summary.merge(summaries)\n",
"step-5": "import tensorflow as tf\n\ndef build_shared_network(x, add_summaries=False):\n conv1 = tf.layers.conv2d(x, 16, 8, 4, activation=tf.nn.relu, name=\"conv1\")\n conv2 = tf.layers.conv2d(conv1, 32, 4, 2, activation=tf.nn.relu, name=\"conv2\")\n\n fc1 = tf.layers.dense(tf.layers.flatten(conv2), 256, name=\"fc1\")\n\n if add_summaries:\n tf.contrib.layers.summarize_activation(conv1)\n tf.contrib.layers.summarize_activation(conv2)\n tf.contrib.layers.summarize_activation(fc1)\n\n return fc1\n\nclass PolicyEstimator():\n def __init__(self, num_ouptuts, reuse=False, trainable=True):\n self.num_outputs = num_ouptuts\n\n self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name=\"X\")\n self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name=\"Y\")\n self.actions = tf.placeholder(shape=[None], dtype=tf.int32, name=\"actions\")\n\n x = tf.to_float(self.states) / 255.0\n batch_size = tf.shape(self.states)[0]\n\n with tf.variable_scope(\"shared\", reuse=reuse):\n fc1 = build_shared_network(x, add_summaries=(not reuse))\n\n with tf.variable_scope(\"policy_net\"):\n self.logits = tf.layers.dense(fc1, num_ouptuts, activation=None)\n self.probs = tf.nn.softmax(self.logits) + 1e-8\n\n self.predictions = {\"logits\": self.logits, \"probs\": self.probs}\n\n self.entropy = -tf.reduce_sum(self.probs * tf.log(self.probs), 1, name=\"entropy\")\n self.entropy_mean = tf.reduce_mean(self.entropy, name=\"entropy_mean\")\n\n # 배열을 리스트처럼 만듬 => 각 데이터의 시작 부분(offset) + action값(onehot 아님) = action의 위치\n # 그 후 tf.gather을 이용해 원하는 action에 해당하는 확률값만 뽑아냄\n gather_indices = tf.range(batch_size) * tf.shape(self.probs)[1] + self.actions\n self.picked_action_probs = tf.gather(tf.reshape(self.probs, [-1]), gather_indices)\n\n self.losses = - (tf.log(self.picked_action_probs) * self.targets + 0.01*self.entropy)\n self.loss = tf.reduce_sum(self.losses, name=\"loss\")\n\n tf.summary.scalar(self.loss.op.name, self.loss)\n tf.summary.scalar(self.entropy_mean.op.name, self.entropy_mean)\n tf.summary.histogram(self.entropy.op.name, self.entropy)\n\n if trainable:\n self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)\n self.grads_and_vars = self.optimizer.compute_gradients(self.loss)\n # grad가 None인 경우 학습이 망가지는 것을 막기 위해서 이렇게 만든 듯 하다.\n self.grads_and_vars = [[grad, var] for grad, var in self.grads_and_vars if grad is not None]\n # 여기 train_op 정작 쓰진 않음. worker에서 apply_gradient를 함. 지워도 될 듯\n self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step=tf.train.get_global_step())\n\n var_scope_name = tf.get_variable_scope().name\n summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)\n summaries = [s for s in summary_ops if \"policy_net\" in s.name or \"shared\" in s.name]\n summaries = [s for s in summary_ops if var_scope_name in s.name]\n self.summaries = tf.summary.merge(summaries)\n\nclass ValueEstimator():\n def __init__(self, reuse=False, trainable=True):\n\n self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name=\"X\")\n self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name=\"Y\")\n\n x = tf.to_float(self.states) / 255.0\n\n with tf.variable_scope(\"shared\", reuse=reuse):\n fc1 = build_shared_network(x, add_summaries=(not reuse))\n\n with tf.variable_scope(\"value_net\"):\n self.logits = tf.layers.dense(fc1, 1, activation=None)\n # squeeze는 1인 차원(행렬)을 날림. 
=> [1, 2, 3] squeeze => [2, 3]\n self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name=\"logits\")\n\n self.losses = tf.squared_difference(self.logits, self.targets)\n self.loss = tf.reduce_sum(self.losses, name=\"loss\")\n\n self.predictions = { \"logits\": self.logits }\n\n prefix = tf.get_variable_scope().name\n tf.summary.scalar(self.loss.name, self.loss)\n tf.summary.scalar(\"{}/max_value\".format(prefix), tf.reduce_max(self.logits))\n tf.summary.scalar(\"{}/min_value\".format(prefix), tf.reduce_min(self.logits))\n tf.summary.scalar(\"{}/mean_value\".format(prefix), tf.reduce_mean(self.logits))\n tf.summary.scalar(\"{}/reward_max\".format(prefix), tf.reduce_max(self.targets))\n tf.summary.scalar(\"{}/reward_min\".format(prefix), tf.reduce_min(self.targets))\n tf.summary.scalar(\"{}/reward_mean\".format(prefix), tf.reduce_mean(self.targets))\n tf.summary.histogram(\"{}/reward_targets\".format(prefix), self.targets)\n tf.summary.histogram(\"{}/values\".format(prefix), self.logits)\n\n if trainable:\n self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)\n self.grads_and_vars = self.optimizer.compute_gradients(self.loss)\n self.grads_and_vars = [[grad, var] for grad, var in self.grads_and_vars if grad is not None]\n self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step=tf.train.get_global_step())\n\n var_scope_name = tf.get_variable_scope().name\n summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)\n summaries = [s for s in summary_ops if \"policy_net\" in s.name or \"shared\" in s.name]\n summaries = [s for s in summary_ops if var_scope_name in s.name]\n self.summaries = tf.summary.merge(summaries)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import pandas as pd
import numpy as np
import json
from pprint import pprint
from shapely.geometry import shape, Point
from geopy.geocoders import Nominatim
from geopy.exc import GeocoderTimedOut
from geopy.exc import GeocoderServiceError
import collections
from matplotlib import pyplot as plt
import time
import csv
geolocator = Nominatim(user_agent='Neel')
def get_neighborhoods():
with open('AnalysisNeighborhoods.geojson') as f:
neighborhoods_obj = json.load(f)
return neighborhoods_obj
def get_point_from_loc(location_str):
location_str = location_str.replace('(', '')
location_str = location_str.replace(')', '')
location_str = location_str.replace(',', '')
lat_lon = location_str.split(' ')
return Point(float(lat_lon[1]), float(lat_lon[0]))
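# Example (illustrative only): '(37.77, -122.42)' -> Point(-122.42, 37.77),
# i.e. shapely's (x=longitude, y=latitude) order.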
def get_address_from_block(block_addr):
block_addr = block_addr.replace('Block Of', '')
block_addr_split = block_addr.split(' ')
block_addr = block_addr_split
# make it an address instead of block start
#print block_addr
block_addr[0] = str(int(block_addr[0]) + 1)
block_addr = ' '.join(block_addr) + ' San Francisco CA'
return block_addr
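# Example (illustrative only): '1200 Block Of Market St' -> '1201  Market St San Francisco CA'
# (an extra space is left behind where 'Block Of' was removed).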
# Using latitude longitude location, find the neighborhood the eviction belongs to
def get_neighborhoods_from_locations(evictions, neighborhoods):
num_found = 0
num_total = 0
locations_dict = collections.defaultdict(int)
locations_with_years_dict = collections.defaultdict(lambda: collections.defaultdict(int))
for index, eviction in evictions.iterrows():
point = get_point_from_loc(eviction['Location'])
found_location = False
for feature in neighborhoods['features']:
polygon = shape(feature['geometry'])
if polygon.contains(point):
#print('Found containing polygon:', feature['properties']['nhood']())
num_found += 1
found_location = True
neighborhood = feature['properties']['nhood']
year = int(eviction['File Date'].split('/')[2])
if year > 90: year = year + 1900
else: year = year + 2000
locations_dict[neighborhood] += 1
locations_with_years_dict[neighborhood][str(year)] += 1
break
if not found_location:
print('Location ' + str(eviction['Eviction ID']) + ' not found, Given [location: ' + str(eviction['Neighborhoods - Analysis Boundaries']))
num_total += 1
years = [str(i) for i in range(1997, 2019)]
#years = ['97', '98', '99', '00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18']
with open('Evictions_By_Location.csv', mode='w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"')
csv_writer.writerow(['Location', 'Number of Evictions'])
for k, v in locations_dict.items():
csv_writer.writerow([k, v])
with open('Evictions_By_Year_Location.csv', mode='w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"')
header = ['Location']
for year in years:
header.append(year)
csv_writer.writerow(header)
for k, v in locations_with_years_dict.items():
row = [k]
for year in years:
row.append(v[year])
csv_writer.writerow(row)
for k, v in locations_with_years_dict.items():
        print(k)
evictions = [int(v[year]) for year in years]
# plt.figure()
# plt.plot(years, evictions)
plt.title(k)
for year in years:
            print(year + ': ' + str(v[year]))
        print('')
# plt.show()
return locations_dict, locations_with_years_dict
def get_geocode_address(addr):
try:
return geolocator.geocode(addr)
except (GeocoderTimedOut, GeocoderServiceError) as e:
time.sleep(5)
return get_geocode_address(addr)
#For rows missing latitude longitude location,
# use the block address to add missing lat long to dataframe
# If the block address is incorrect, print it so we can correct it manually
def set_missing_locations(evictions):
missing_location_rows = evictions[evictions['Location'].isnull()]
print('Num missing ' + str(len(missing_location_rows)))
num_not_found = 0
num_found = 0
for index, row in missing_location_rows.iterrows():
#print row['Eviction ID']
addr = get_address_from_block(row['Address'])
location = get_geocode_address(addr)
if location == None:
num_not_found += 1
print('NOT FOUND ' + str(row['Eviction ID']) + ': ' + addr)
else:
evictions.at[index, 'Location'] = '(' + str(location.latitude) + ', ' + str(location.longitude) + ')'
num_found += 1
if (num_found + num_not_found) % 50 == 0:
print('Processed ' + str(num_found + num_not_found) + ' evictions')
    print('Total not found ' + str(num_not_found))
    print('Total found ' + str(num_found))
evictions.to_csv('Eviction_Notices_With_Locations.csv')
evictions = pd.read_csv('Eviction_Notices_With_Locations.csv')
neighborhoods = get_neighborhoods()
#set_missing_locations(evictions)
locations_dict, locations_with_years_dict = get_neighborhoods_from_locations(evictions, neighborhoods)
with open('AnalysisNeighborhoods.geojson') as f:
data = json.loads(f.read())
years = [i for i in range(1997, 2019)]
for neighborhood_obj in data['features']:
neighborhood_name = neighborhood_obj['properties']['nhood']
neighborhood_obj['properties']['evictions'] = {}
neighborhood_obj['properties']['evictions']['total'] = locations_dict[neighborhood_name]
for year in years:
        neighborhood_obj['properties']['evictions'][str(year)] = locations_with_years_dict[neighborhood_name][str(year)]
with open('AnalysisNeighborhoods.geojson', 'w') as f:
json.dump(data, f)
|
normal
|
{
"blob_id": "c1bb2052b3f623c6787ba080dff2dc81f4d6f55e",
"index": 1818,
"step-1": "import pandas as pd\nimport numpy as np\nimport json\nfrom pprint import pprint\nfrom shapely.geometry import shape, Point\nfrom geopy.geocoders import Nominatim\nfrom geopy.exc import GeocoderTimedOut\nfrom geopy.exc import GeocoderServiceError\nimport collections\nfrom matplotlib import pyplot as plt\nimport time\nimport csv\n\n\ngeolocator = Nominatim(user_agent='Neel')\n\ndef get_neighborhoods():\n with open('AnalysisNeighborhoods.geojson') as f:\n neighborhoods_obj = json.load(f)\n return neighborhoods_obj\n\ndef get_point_from_loc(location_str):\n location_str = location_str.replace('(', '')\n location_str = location_str.replace(')', '')\n location_str = location_str.replace(',', '')\n lat_lon = location_str.split(' ')\n return Point(float(lat_lon[1]), float(lat_lon[0]))\n\ndef get_address_from_block(block_addr):\n block_addr = block_addr.replace('Block Of', '')\n block_addr_split = block_addr.split(' ')\n\n block_addr = block_addr_split\n # make it an address instead of block start\n #print block_addr\n block_addr[0] = str(int(block_addr[0]) + 1)\n block_addr = ' '.join(block_addr) + ' San Francisco CA'\n return block_addr\n\n# Using latitude longitude location, find the neighborhood the eviction belongs to\ndef get_neighborhoods_from_locations(evictions, neighborhoods):\n num_found = 0\n num_total = 0\n locations_dict = collections.defaultdict(int)\n locations_with_years_dict = collections.defaultdict(lambda: collections.defaultdict(int))\n for index, eviction in evictions.iterrows():\n point = get_point_from_loc(eviction['Location'])\n found_location = False\n for feature in neighborhoods['features']:\n polygon = shape(feature['geometry'])\n if polygon.contains(point):\n #print('Found containing polygon:', feature['properties']['nhood']())\n num_found += 1\n found_location = True\n neighborhood = feature['properties']['nhood']\n year = int(eviction['File Date'].split('/')[2])\n if year > 90: year = year + 1900\n else: year = year + 2000\n\n locations_dict[neighborhood] += 1\n locations_with_years_dict[neighborhood][str(year)] += 1\n break\n if not found_location:\n print('Location ' + str(eviction['Eviction ID']) + ' not found, Given [location: ' + str(eviction['Neighborhoods - Analysis Boundaries']))\n num_total += 1\n\n years = [str(i) for i in range(1997, 2019)]\n #years = ['97', '98', '99', '00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18']\n with open('Evictions_By_Location.csv', mode='w') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"')\n csv_writer.writerow(['Location', 'Number of Evictions'])\n for k, v in locations_dict.items():\n csv_writer.writerow([k, v])\n\n with open('Evictions_By_Year_Location.csv', mode='w') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"')\n header = ['Location']\n for year in years:\n header.append(year)\n csv_writer.writerow(header)\n for k, v in locations_with_years_dict.items():\n row = [k]\n for year in years:\n row.append(v[year])\n csv_writer.writerow(row)\n\n\n for k, v in locations_with_years_dict.items():\n print k\n evictions = [int(v[year]) for year in years]\n # plt.figure()\n # plt.plot(years, evictions)\n plt.title(k)\n for year in years:\n print year + ': ' + str(v[year])\n print ''\n # plt.show()\n return locations_dict, locations_with_years_dict\n\n\ndef get_geocode_address(addr):\n try:\n return geolocator.geocode(addr)\n except (GeocoderTimedOut, GeocoderServiceError) as e:\n time.sleep(5)\n 
return get_geocode_address(addr)\n\n#For rows missing latitude longitude location,\n# use the block address to add missing lat long to dataframe\n# If the block address is incorrect, print it so we can correct it manually\ndef set_missing_locations(evictions):\n\n missing_location_rows = evictions[evictions['Location'].isnull()]\n print('Num missing ' + str(len(missing_location_rows)))\n num_not_found = 0\n num_found = 0\n for index, row in missing_location_rows.iterrows():\n #print row['Eviction ID']\n addr = get_address_from_block(row['Address'])\n location = get_geocode_address(addr)\n if location == None:\n num_not_found += 1\n print('NOT FOUND ' + str(row['Eviction ID']) + ': ' + addr)\n else:\n evictions.at[index, 'Location'] = '(' + str(location.latitude) + ', ' + str(location.longitude) + ')'\n num_found += 1\n if (num_found + num_not_found) % 50 == 0:\n print('Processed ' + str(num_found + num_not_found) + ' evictions')\n\n print 'Total not found ' + str(num_not_found)\n print 'Total found ' + str(num_found)\n evictions.to_csv('Eviction_Notices_With_Locations.csv')\n\n\nevictions = pd.read_csv('Eviction_Notices_With_Locations.csv')\nneighborhoods = get_neighborhoods()\n#set_missing_locations(evictions)\n\nlocations_dict, locations_with_years_dict = get_neighborhoods_from_locations(evictions, neighborhoods)\n\nwith open('AnalysisNeighborhoods.geojson') as f:\n data = json.loads(f.read())\n\nyears = [i for i in range(1997, 2019)]\n\nfor neighborhood_obj in data['features']:\n neighborhood_name = neighborhood_obj['properties']['nhood']\n neighborhood_obj['properties']['evictions'] = {}\n neighborhood_obj['properties']['evictions']['total'] = locations_dict[neighborhood_name]\n for year in years:\n neighborhood_obj['properties']['evictions'][str(year)] = locations_with_years_dict[neighborhood_name][year]\n\nwith open('AnalysisNeighborhoods.geojson', 'w') as f:\n json.dump(data, f)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
consonant += consonant.upper()
<|reserved_special_token_0|>
for c in message:
if c in vowel:
vowel_count += 1
elif c in consonant:
consonant_count += 1
print(vowel_count, consonant_count)
<|reserved_special_token_1|>
message = input()
vowel = 'aeiouAEIOU'
consonant = 'bcdfghjklmnpqrstvwxyz'
consonant += consonant.upper()
vowel_count = 0
consonant_count = 0
for c in message:
if c in vowel:
vowel_count += 1
elif c in consonant:
consonant_count += 1
print(vowel_count, consonant_count)
|
flexible
|
{
"blob_id": "edf704d720abdb09d176937664c9ba98bcd253a5",
"index": 8320,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nconsonant += consonant.upper()\n<mask token>\nfor c in message:\n if c in vowel:\n vowel_count += 1\n elif c in consonant:\n consonant_count += 1\nprint(vowel_count, consonant_count)\n",
"step-3": "message = input()\nvowel = 'aeiouAEIOU'\nconsonant = 'bcdfghjklmnpqrstvwxyz'\nconsonant += consonant.upper()\nvowel_count = 0\nconsonant_count = 0\nfor c in message:\n if c in vowel:\n vowel_count += 1\n elif c in consonant:\n consonant_count += 1\nprint(vowel_count, consonant_count)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def trigLSQ(data):
noPoints = len(data['x'])
order = int(noPoints / 2) if int(noPoints / 2) < noPoints / 2 else int(
noPoints / 2) - 1
c = lambda a: np.array([np.cos(a * float(data['x'][i])) for i in range(
noPoints)])
s = lambda a: np.array([np.sin(a * float(data['x'][i])) for i in range(
noPoints)])
y = np.array([data['y'][i] for i in range(noPoints)])
matrix = np.array([[np.dot(c(i['index']) if i['cos'] else s(i['index']),
c(j['index']) if j['cos'] else s(j['index'])) for i in
alternatingRange(order)] for j in alternatingRange(order)])
res = [[np.dot(y, c(i['index']) if i['cos'] else s(i['index']))] for i in
alternatingRange(order)]
sol = np.linalg.solve(matrix, res)
F = 0
for j, i in enumerate(alternatingRange(order)):
F += sol[j][0] * sp.sympify(('cos(' if i['cos'] else 'sin(') + str(
i['index']) + ' * 2*pi/12 * x)')
return F
<|reserved_special_token_1|>
<|reserved_special_token_0|>
alternatingRange = lambda m: [{'index': j, 'cos': True if k == 0 else False
} for j in range(m + 1) for k in range(2 if j != 0 else 1)]
def trigLSQ(data):
noPoints = len(data['x'])
order = int(noPoints / 2) if int(noPoints / 2) < noPoints / 2 else int(
noPoints / 2) - 1
c = lambda a: np.array([np.cos(a * float(data['x'][i])) for i in range(
noPoints)])
s = lambda a: np.array([np.sin(a * float(data['x'][i])) for i in range(
noPoints)])
y = np.array([data['y'][i] for i in range(noPoints)])
matrix = np.array([[np.dot(c(i['index']) if i['cos'] else s(i['index']),
c(j['index']) if j['cos'] else s(j['index'])) for i in
alternatingRange(order)] for j in alternatingRange(order)])
res = [[np.dot(y, c(i['index']) if i['cos'] else s(i['index']))] for i in
alternatingRange(order)]
sol = np.linalg.solve(matrix, res)
F = 0
for j, i in enumerate(alternatingRange(order)):
F += sol[j][0] * sp.sympify(('cos(' if i['cos'] else 'sin(') + str(
i['index']) + ' * 2*pi/12 * x)')
return F
<|reserved_special_token_1|>
import numpy as np
import sympy as sp
alternatingRange = lambda m: [{'index': j, 'cos': True if k == 0 else False
} for j in range(m + 1) for k in range(2 if j != 0 else 1)]
def trigLSQ(data):
noPoints = len(data['x'])
order = int(noPoints / 2) if int(noPoints / 2) < noPoints / 2 else int(
noPoints / 2) - 1
c = lambda a: np.array([np.cos(a * float(data['x'][i])) for i in range(
noPoints)])
s = lambda a: np.array([np.sin(a * float(data['x'][i])) for i in range(
noPoints)])
y = np.array([data['y'][i] for i in range(noPoints)])
matrix = np.array([[np.dot(c(i['index']) if i['cos'] else s(i['index']),
c(j['index']) if j['cos'] else s(j['index'])) for i in
alternatingRange(order)] for j in alternatingRange(order)])
res = [[np.dot(y, c(i['index']) if i['cos'] else s(i['index']))] for i in
alternatingRange(order)]
sol = np.linalg.solve(matrix, res)
F = 0
for j, i in enumerate(alternatingRange(order)):
F += sol[j][0] * sp.sympify(('cos(' if i['cos'] else 'sin(') + str(
i['index']) + ' * 2*pi/12 * x)')
return F
<|reserved_special_token_1|>
import numpy as np
import sympy as sp
# (index: int, cos: bool)
# 0 1 1 2 2 3 3 4 4 5 5 ...
# {0, cos}, {1, cos}, {1, sin}, {2, cos}, {2, sin}, ...
alternatingRange = lambda m : [{'index': j, 'cos': True if k == 0 else False} for j in range(m + 1) for k in range(2 if j != 0 else 1)]
# data: "dict"
# data = {'x': [x-points], 'y': [y-points]}
def trigLSQ(data):
noPoints = len(data['x']) # N
order = int(noPoints/2) if int(noPoints/2) < noPoints/2 else int(noPoints/2)-1 # m
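    # e.g. N=12 points -> m=5 (2m+1 = 11 unknowns), N=13 -> m=6 (13 unknowns)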
c = lambda a : np.array([np.cos(a * float(data['x'][i])) for i in range(noPoints)])
s = lambda a : np.array([np.sin(a * float(data['x'][i])) for i in range(noPoints)])
y = np.array([data['y'][i] for i in range(noPoints)])
# matrix * sol = res
matrix = np.array(
[[np.dot(c(i['index']) if i['cos'] else s(i['index']), c(j['index']) if j['cos'] else s(j['index'])) for i in alternatingRange(order)] for j in alternatingRange(order)]
)
res = [[np.dot(y, c(i['index']) if i['cos'] else s(i['index']))] for i in alternatingRange(order)]
sol = np.linalg.solve(matrix, res)
# F is the function approximation
F = 0
for j, i in enumerate(alternatingRange(order)): F += sol[j][0] * sp.sympify(('cos(' if i['cos'] else 'sin(') + str(i['index']) + ' * 2*pi/12 * x)')
return F
# x = 2kpi/N --> k = xN/2pi
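
# --- Hypothetical usage sketch (not part of the original file) ---
# Fits 12 monthly samples; per the comment above, the k-th sample sits at x_k = 2*pi*k/12,
# and the returned sympy expression F takes the month index k as its variable x.
# The y values below are made-up measurements, used only for illustration.
if __name__ == '__main__':
    data = {
        'x': [2 * np.pi * k / 12 for k in range(12)],
        'y': [10, 12, 15, 20, 24, 28, 30, 29, 25, 20, 15, 11],
    }
    F = trigLSQ(data)
    print(float(F.subs(sp.Symbol('x'), 3)))   # approximate value at month k = 3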
|
flexible
|
{
"blob_id": "98c2fdf0dfc9a660a3eb9a359aa9ca14d83c60ce",
"index": 4588,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef trigLSQ(data):\n noPoints = len(data['x'])\n order = int(noPoints / 2) if int(noPoints / 2) < noPoints / 2 else int(\n noPoints / 2) - 1\n c = lambda a: np.array([np.cos(a * float(data['x'][i])) for i in range(\n noPoints)])\n s = lambda a: np.array([np.sin(a * float(data['x'][i])) for i in range(\n noPoints)])\n y = np.array([data['y'][i] for i in range(noPoints)])\n matrix = np.array([[np.dot(c(i['index']) if i['cos'] else s(i['index']),\n c(j['index']) if j['cos'] else s(j['index'])) for i in\n alternatingRange(order)] for j in alternatingRange(order)])\n res = [[np.dot(y, c(i['index']) if i['cos'] else s(i['index']))] for i in\n alternatingRange(order)]\n sol = np.linalg.solve(matrix, res)\n F = 0\n for j, i in enumerate(alternatingRange(order)):\n F += sol[j][0] * sp.sympify(('cos(' if i['cos'] else 'sin(') + str(\n i['index']) + ' * 2*pi/12 * x)')\n return F\n",
"step-3": "<mask token>\nalternatingRange = lambda m: [{'index': j, 'cos': True if k == 0 else False\n } for j in range(m + 1) for k in range(2 if j != 0 else 1)]\n\n\ndef trigLSQ(data):\n noPoints = len(data['x'])\n order = int(noPoints / 2) if int(noPoints / 2) < noPoints / 2 else int(\n noPoints / 2) - 1\n c = lambda a: np.array([np.cos(a * float(data['x'][i])) for i in range(\n noPoints)])\n s = lambda a: np.array([np.sin(a * float(data['x'][i])) for i in range(\n noPoints)])\n y = np.array([data['y'][i] for i in range(noPoints)])\n matrix = np.array([[np.dot(c(i['index']) if i['cos'] else s(i['index']),\n c(j['index']) if j['cos'] else s(j['index'])) for i in\n alternatingRange(order)] for j in alternatingRange(order)])\n res = [[np.dot(y, c(i['index']) if i['cos'] else s(i['index']))] for i in\n alternatingRange(order)]\n sol = np.linalg.solve(matrix, res)\n F = 0\n for j, i in enumerate(alternatingRange(order)):\n F += sol[j][0] * sp.sympify(('cos(' if i['cos'] else 'sin(') + str(\n i['index']) + ' * 2*pi/12 * x)')\n return F\n",
"step-4": "import numpy as np\nimport sympy as sp\nalternatingRange = lambda m: [{'index': j, 'cos': True if k == 0 else False\n } for j in range(m + 1) for k in range(2 if j != 0 else 1)]\n\n\ndef trigLSQ(data):\n noPoints = len(data['x'])\n order = int(noPoints / 2) if int(noPoints / 2) < noPoints / 2 else int(\n noPoints / 2) - 1\n c = lambda a: np.array([np.cos(a * float(data['x'][i])) for i in range(\n noPoints)])\n s = lambda a: np.array([np.sin(a * float(data['x'][i])) for i in range(\n noPoints)])\n y = np.array([data['y'][i] for i in range(noPoints)])\n matrix = np.array([[np.dot(c(i['index']) if i['cos'] else s(i['index']),\n c(j['index']) if j['cos'] else s(j['index'])) for i in\n alternatingRange(order)] for j in alternatingRange(order)])\n res = [[np.dot(y, c(i['index']) if i['cos'] else s(i['index']))] for i in\n alternatingRange(order)]\n sol = np.linalg.solve(matrix, res)\n F = 0\n for j, i in enumerate(alternatingRange(order)):\n F += sol[j][0] * sp.sympify(('cos(' if i['cos'] else 'sin(') + str(\n i['index']) + ' * 2*pi/12 * x)')\n return F\n",
"step-5": "import numpy as np\nimport sympy as sp\n\n# (index: int, cos: bool)\n# 0 1 1 2 2 3 3 4 4 5 5 ...\n# {0, cos}, {1, cos}, {1, sen}, {2, cos}, {2, sen}, ...\nalternatingRange = lambda m : [{'index': j, 'cos': True if k == 0 else False} for j in range(m + 1) for k in range(2 if j != 0 else 1)]\n\n# data: \"dict\"\n# data = {'x': [x-points], 'y': [y-points]}\ndef trigLSQ(data):\n noPoints = len(data['x']) # N\n order = int(noPoints/2) if int(noPoints/2) < noPoints/2 else int(noPoints/2)-1 # m\n\n c = lambda a : np.array([np.cos(a * float(data['x'][i])) for i in range(noPoints)])\n s = lambda a : np.array([np.sin(a * float(data['x'][i])) for i in range(noPoints)])\n\n y = np.array([data['y'][i] for i in range(noPoints)])\n\n # matrix * sol = res\n\n matrix = np.array(\n [[np.dot(c(i['index']) if i['cos'] else s(i['index']), c(j['index']) if j['cos'] else s(j['index'])) for i in alternatingRange(order)] for j in alternatingRange(order)]\n )\n res = [[np.dot(y, c(i['index']) if i['cos'] else s(i['index']))] for i in alternatingRange(order)]\n sol = np.linalg.solve(matrix, res)\n\n # F is the function approximation\n F = 0\n for j, i in enumerate(alternatingRange(order)): F += sol[j][0] * sp.sympify(('cos(' if i['cos'] else 'sin(') + str(i['index']) + ' * 2*pi/12 * x)')\n\n return F\n\n# x = 2kpi/N --> k = xN/2pi",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
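# Reads n and then 2*n integers; prints the index of the maximum element if the next element is 1,
# otherwise retries once with that maximum removed; prints -1 only when no 1 was entered at all.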
a = []
for i in range((2 * int(input()))):
a.append(int(input()))
if 1 in a:
c = a.index(max(a))
if a[c + 1] == 1:
print(c)
else:
del a[c]
s = a.index(max(a))
if a[s + 1] == 1:
print(s)
else:
    print('-1')
|
normal
|
{
"blob_id": "e3e50df47ef074f13382e249832c065ebdce18a6",
"index": 8406,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2 * int(input())):\n a.append(int(input()))\nif 1 in a:\n c = a.index(max(a))\n if a[c + 1] == 1:\n print(c)\n else:\n del a[c]\n s = a.index(max(a))\n if a[s + 1] == 1:\n print(s)\nelse:\n print('-1')\n",
"step-3": "a = []\nfor i in range(2 * int(input())):\n a.append(int(input()))\nif 1 in a:\n c = a.index(max(a))\n if a[c + 1] == 1:\n print(c)\n else:\n del a[c]\n s = a.index(max(a))\n if a[s + 1] == 1:\n print(s)\nelse:\n print('-1')\n",
"step-4": "a = []\nfor i in range((2 * int(input()))):\n a.append(int(input()))\nif 1 in a:\n c = a.index(max(a))\n if a[c + 1] == 1:\n print(c)\n else:\n del a[c]\n s = a.index(max(a))\n if a[s + 1] == 1:\n print(s)\nelse:\n print('-1')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from plone import api
from plone.app.robotframework.testing import AUTOLOGIN_LIBRARY_FIXTURE
from plone.app.testing import applyProfile
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.testing import z2
from zope.configuration import xmlconfig
class OiRAFixture(PloneSandboxLayer):
defaultBases = (PLONE_FIXTURE,)
def setUpZope(self, app, configurationContext):
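        # Install the Zope products and load the ZCML configuration that the Euphorie/OiRA add-ons depend on.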
z2.installProduct(app, 'Products.membrane')
z2.installProduct(app, 'Products.statusmessages')
import Products.statusmessages
xmlconfig.file('configure.zcml',
Products.statusmessages,
context=configurationContext)
import Products.membrane
xmlconfig.file('configure.zcml',
Products.membrane,
context=configurationContext)
import euphorie.client.tests
xmlconfig.file("configure.zcml",
euphorie.client.tests,
context=configurationContext)
import osha.oira
xmlconfig.file('configure.zcml',
osha.oira,
context=configurationContext)
def setUpPloneSite(self, portal):
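        # Switch to the classic plone_workflow and install the Euphorie and OiRA profiles on the test site.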
wftool = api.portal.get_tool(name='portal_workflow')
wftool.setDefaultChain('plone_workflow')
applyProfile(portal, 'euphorie.content:default')
applyProfile(portal, 'euphorie.client:default')
applyProfile(portal, 'euphorie.deployment:default')
applyProfile(portal, 'osha.oira:default')
OIRA_FIXTURE = OiRAFixture()
OIRA_INTEGRATION_TESTING = \
IntegrationTesting(
bases=(OIRA_FIXTURE,),
name="osha.oira:Integration"
)
OIRA_SUITE_ROBOT = FunctionalTesting(
bases=(OIRA_FIXTURE,
AUTOLOGIN_LIBRARY_FIXTURE,
z2.ZSERVER_FIXTURE),
name="OIRA_SUITE_ROBOT")
|
normal
|
{
"blob_id": "eec2b818ea9d50161bad60e8bf83dcb7ce9bf9fa",
"index": 7428,
"step-1": "<mask token>\n\n\nclass OiRAFixture(PloneSandboxLayer):\n <mask token>\n\n def setUpZope(self, app, configurationContext):\n z2.installProduct(app, 'Products.membrane')\n z2.installProduct(app, 'Products.statusmessages')\n import Products.statusmessages\n xmlconfig.file('configure.zcml', Products.statusmessages, context=\n configurationContext)\n import Products.membrane\n xmlconfig.file('configure.zcml', Products.membrane, context=\n configurationContext)\n import euphorie.client.tests\n xmlconfig.file('configure.zcml', euphorie.client.tests, context=\n configurationContext)\n import osha.oira\n xmlconfig.file('configure.zcml', osha.oira, context=\n configurationContext)\n\n def setUpPloneSite(self, portal):\n wftool = api.portal.get_tool(name='portal_workflow')\n wftool.setDefaultChain('plone_workflow')\n applyProfile(portal, 'euphorie.content:default')\n applyProfile(portal, 'euphorie.client:default')\n applyProfile(portal, 'euphorie.deployment:default')\n applyProfile(portal, 'osha.oira:default')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass OiRAFixture(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n z2.installProduct(app, 'Products.membrane')\n z2.installProduct(app, 'Products.statusmessages')\n import Products.statusmessages\n xmlconfig.file('configure.zcml', Products.statusmessages, context=\n configurationContext)\n import Products.membrane\n xmlconfig.file('configure.zcml', Products.membrane, context=\n configurationContext)\n import euphorie.client.tests\n xmlconfig.file('configure.zcml', euphorie.client.tests, context=\n configurationContext)\n import osha.oira\n xmlconfig.file('configure.zcml', osha.oira, context=\n configurationContext)\n\n def setUpPloneSite(self, portal):\n wftool = api.portal.get_tool(name='portal_workflow')\n wftool.setDefaultChain('plone_workflow')\n applyProfile(portal, 'euphorie.content:default')\n applyProfile(portal, 'euphorie.client:default')\n applyProfile(portal, 'euphorie.deployment:default')\n applyProfile(portal, 'osha.oira:default')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass OiRAFixture(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n z2.installProduct(app, 'Products.membrane')\n z2.installProduct(app, 'Products.statusmessages')\n import Products.statusmessages\n xmlconfig.file('configure.zcml', Products.statusmessages, context=\n configurationContext)\n import Products.membrane\n xmlconfig.file('configure.zcml', Products.membrane, context=\n configurationContext)\n import euphorie.client.tests\n xmlconfig.file('configure.zcml', euphorie.client.tests, context=\n configurationContext)\n import osha.oira\n xmlconfig.file('configure.zcml', osha.oira, context=\n configurationContext)\n\n def setUpPloneSite(self, portal):\n wftool = api.portal.get_tool(name='portal_workflow')\n wftool.setDefaultChain('plone_workflow')\n applyProfile(portal, 'euphorie.content:default')\n applyProfile(portal, 'euphorie.client:default')\n applyProfile(portal, 'euphorie.deployment:default')\n applyProfile(portal, 'osha.oira:default')\n\n\nOIRA_FIXTURE = OiRAFixture()\nOIRA_INTEGRATION_TESTING = IntegrationTesting(bases=(OIRA_FIXTURE,), name=\n 'osha.oira:Integration')\nOIRA_SUITE_ROBOT = FunctionalTesting(bases=(OIRA_FIXTURE,\n AUTOLOGIN_LIBRARY_FIXTURE, z2.ZSERVER_FIXTURE), name='OIRA_SUITE_ROBOT')\n",
"step-4": "from plone import api\nfrom plone.app.robotframework.testing import AUTOLOGIN_LIBRARY_FIXTURE\nfrom plone.app.testing import applyProfile\nfrom plone.app.testing import FunctionalTesting\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import PLONE_FIXTURE\nfrom plone.app.testing import PloneSandboxLayer\nfrom plone.testing import z2\nfrom zope.configuration import xmlconfig\n\n\nclass OiRAFixture(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n z2.installProduct(app, 'Products.membrane')\n z2.installProduct(app, 'Products.statusmessages')\n import Products.statusmessages\n xmlconfig.file('configure.zcml', Products.statusmessages, context=\n configurationContext)\n import Products.membrane\n xmlconfig.file('configure.zcml', Products.membrane, context=\n configurationContext)\n import euphorie.client.tests\n xmlconfig.file('configure.zcml', euphorie.client.tests, context=\n configurationContext)\n import osha.oira\n xmlconfig.file('configure.zcml', osha.oira, context=\n configurationContext)\n\n def setUpPloneSite(self, portal):\n wftool = api.portal.get_tool(name='portal_workflow')\n wftool.setDefaultChain('plone_workflow')\n applyProfile(portal, 'euphorie.content:default')\n applyProfile(portal, 'euphorie.client:default')\n applyProfile(portal, 'euphorie.deployment:default')\n applyProfile(portal, 'osha.oira:default')\n\n\nOIRA_FIXTURE = OiRAFixture()\nOIRA_INTEGRATION_TESTING = IntegrationTesting(bases=(OIRA_FIXTURE,), name=\n 'osha.oira:Integration')\nOIRA_SUITE_ROBOT = FunctionalTesting(bases=(OIRA_FIXTURE,\n AUTOLOGIN_LIBRARY_FIXTURE, z2.ZSERVER_FIXTURE), name='OIRA_SUITE_ROBOT')\n",
"step-5": "from plone import api\nfrom plone.app.robotframework.testing import AUTOLOGIN_LIBRARY_FIXTURE\nfrom plone.app.testing import applyProfile\nfrom plone.app.testing import FunctionalTesting\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import PLONE_FIXTURE\nfrom plone.app.testing import PloneSandboxLayer\nfrom plone.testing import z2\nfrom zope.configuration import xmlconfig\n\n\nclass OiRAFixture(PloneSandboxLayer):\n defaultBases = (PLONE_FIXTURE,)\n\n def setUpZope(self, app, configurationContext):\n z2.installProduct(app, 'Products.membrane')\n z2.installProduct(app, 'Products.statusmessages')\n import Products.statusmessages\n xmlconfig.file('configure.zcml',\n Products.statusmessages,\n context=configurationContext)\n import Products.membrane\n xmlconfig.file('configure.zcml',\n Products.membrane,\n context=configurationContext)\n import euphorie.client.tests\n xmlconfig.file(\"configure.zcml\",\n euphorie.client.tests,\n context=configurationContext)\n import osha.oira\n xmlconfig.file('configure.zcml',\n osha.oira,\n context=configurationContext)\n\n def setUpPloneSite(self, portal):\n wftool = api.portal.get_tool(name='portal_workflow')\n wftool.setDefaultChain('plone_workflow')\n applyProfile(portal, 'euphorie.content:default')\n applyProfile(portal, 'euphorie.client:default')\n applyProfile(portal, 'euphorie.deployment:default')\n applyProfile(portal, 'osha.oira:default')\n\nOIRA_FIXTURE = OiRAFixture()\nOIRA_INTEGRATION_TESTING = \\\n IntegrationTesting(\n bases=(OIRA_FIXTURE,),\n name=\"osha.oira:Integration\"\n )\n\nOIRA_SUITE_ROBOT = FunctionalTesting(\n bases=(OIRA_FIXTURE,\n AUTOLOGIN_LIBRARY_FIXTURE,\n z2.ZSERVER_FIXTURE),\n name=\"OIRA_SUITE_ROBOT\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def precision_recall_fscore(nodes, y_true, y_pred):
df = pd.DataFrame({'true': y_true, 'pred': y_pred}, index=nodes)
n_predicted_nodes = len(df[df['pred'] != 0])
n_corrects = len(df[df['pred'] == df['true']])
n_test = len(nodes)
p = n_corrects / n_predicted_nodes
r = n_corrects / n_test
f = 2 * p * r / (p + r)
return p, r, f, n_predicted_nodes, n_corrects, n_test
def main(args):
labeled_nodes, labels = util.load_labellist(args.labelfile)
if args.random_state:
random_state = args.random_state
n_splits = args.n_splits if args.n_splits else 10
cv = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)
for i, (train, test) in enumerate(cv.split(labeled_nodes)):
filepath = os.path.join(args.resultfile_or_dir, '{}_{}.{}'.
format(args.stem, i, args.ext))
test_nodes, y_pred = util.load_result(filepath)
y_test = labels[test]
assert np.all(labeled_nodes[test] == test_nodes
), '分割後のノードID集合が異なる。random_stateが違うのでは?'
print(args.resultfile_or_dir, i, *precision_recall_fscore(
test_nodes, y_test, y_pred), sep='\t')
else:
resultfile = args.resultfile_or_dir
if args.resultfile_or_dir == '-':
resultfile = sys.stdin
test_nodes, y_pred = util.load_result(resultfile)
assert np.all(labeled_nodes == test_nodes)
print(args.resultfile_or_dir, *precision_recall_fscore(test_nodes,
labels, y_pred), sep='\t')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def precision_recall_fscore(nodes, y_true, y_pred):
df = pd.DataFrame({'true': y_true, 'pred': y_pred}, index=nodes)
n_predicted_nodes = len(df[df['pred'] != 0])
n_corrects = len(df[df['pred'] == df['true']])
n_test = len(nodes)
p = n_corrects / n_predicted_nodes
r = n_corrects / n_test
f = 2 * p * r / (p + r)
return p, r, f, n_predicted_nodes, n_corrects, n_test
def main(args):
labeled_nodes, labels = util.load_labellist(args.labelfile)
if args.random_state:
random_state = args.random_state
n_splits = args.n_splits if args.n_splits else 10
cv = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)
for i, (train, test) in enumerate(cv.split(labeled_nodes)):
filepath = os.path.join(args.resultfile_or_dir, '{}_{}.{}'.
format(args.stem, i, args.ext))
test_nodes, y_pred = util.load_result(filepath)
y_test = labels[test]
assert np.all(labeled_nodes[test] == test_nodes
), '分割後のノードID集合が異なる。random_stateが違うのでは?'
print(args.resultfile_or_dir, i, *precision_recall_fscore(
test_nodes, y_test, y_pred), sep='\t')
else:
resultfile = args.resultfile_or_dir
if args.resultfile_or_dir == '-':
resultfile = sys.stdin
test_nodes, y_pred = util.load_result(resultfile)
assert np.all(labeled_nodes == test_nodes)
print(args.resultfile_or_dir, *precision_recall_fscore(test_nodes,
labels, y_pred), sep='\t')
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('labelfile')
parser.add_argument('resultfile_or_dir')
parser.add_argument('--random-state', type=int)
parser.add_argument('--n-splits', type=int, default=10)
parser.add_argument('--stem', default='result', help='結果ファイル名の文字列')
parser.add_argument('--ext', default='tsv', help='結果ファイルの拡張子')
return parser.parse_args()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def precision_recall_fscore(nodes, y_true, y_pred):
df = pd.DataFrame({'true': y_true, 'pred': y_pred}, index=nodes)
n_predicted_nodes = len(df[df['pred'] != 0])
n_corrects = len(df[df['pred'] == df['true']])
n_test = len(nodes)
p = n_corrects / n_predicted_nodes
r = n_corrects / n_test
f = 2 * p * r / (p + r)
return p, r, f, n_predicted_nodes, n_corrects, n_test
def main(args):
labeled_nodes, labels = util.load_labellist(args.labelfile)
if args.random_state:
random_state = args.random_state
n_splits = args.n_splits if args.n_splits else 10
cv = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)
for i, (train, test) in enumerate(cv.split(labeled_nodes)):
filepath = os.path.join(args.resultfile_or_dir, '{}_{}.{}'.
format(args.stem, i, args.ext))
test_nodes, y_pred = util.load_result(filepath)
y_test = labels[test]
assert np.all(labeled_nodes[test] == test_nodes
), '分割後のノードID集合が異なる。random_stateが違うのでは?'
print(args.resultfile_or_dir, i, *precision_recall_fscore(
test_nodes, y_test, y_pred), sep='\t')
else:
resultfile = args.resultfile_or_dir
if args.resultfile_or_dir == '-':
resultfile = sys.stdin
test_nodes, y_pred = util.load_result(resultfile)
assert np.all(labeled_nodes == test_nodes)
print(args.resultfile_or_dir, *precision_recall_fscore(test_nodes,
labels, y_pred), sep='\t')
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('labelfile')
parser.add_argument('resultfile_or_dir')
parser.add_argument('--random-state', type=int)
parser.add_argument('--n-splits', type=int, default=10)
parser.add_argument('--stem', default='result', help='結果ファイル名の文字列')
parser.add_argument('--ext', default='tsv', help='結果ファイルの拡張子')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
df = main(args)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
import os.path
import snlocest.util as util
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.metrics import precision_recall_fscore_support, classification_report
def precision_recall_fscore(nodes, y_true, y_pred):
df = pd.DataFrame({'true': y_true, 'pred': y_pred}, index=nodes)
n_predicted_nodes = len(df[df['pred'] != 0])
n_corrects = len(df[df['pred'] == df['true']])
n_test = len(nodes)
p = n_corrects / n_predicted_nodes
r = n_corrects / n_test
f = 2 * p * r / (p + r)
return p, r, f, n_predicted_nodes, n_corrects, n_test
def main(args):
labeled_nodes, labels = util.load_labellist(args.labelfile)
if args.random_state:
random_state = args.random_state
n_splits = args.n_splits if args.n_splits else 10
cv = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)
for i, (train, test) in enumerate(cv.split(labeled_nodes)):
filepath = os.path.join(args.resultfile_or_dir, '{}_{}.{}'.
format(args.stem, i, args.ext))
test_nodes, y_pred = util.load_result(filepath)
y_test = labels[test]
assert np.all(labeled_nodes[test] == test_nodes
), '分割後のノードID集合が異なる。random_stateが違うのでは?'
print(args.resultfile_or_dir, i, *precision_recall_fscore(
test_nodes, y_test, y_pred), sep='\t')
else:
resultfile = args.resultfile_or_dir
if args.resultfile_or_dir == '-':
resultfile = sys.stdin
test_nodes, y_pred = util.load_result(resultfile)
assert np.all(labeled_nodes == test_nodes)
print(args.resultfile_or_dir, *precision_recall_fscore(test_nodes,
labels, y_pred), sep='\t')
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('labelfile')
parser.add_argument('resultfile_or_dir')
parser.add_argument('--random-state', type=int)
parser.add_argument('--n-splits', type=int, default=10)
parser.add_argument('--stem', default='result', help='結果ファイル名の文字列')
parser.add_argument('--ext', default='tsv', help='結果ファイルの拡張子')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
df = main(args)
<|reserved_special_token_1|>
# coding: utf-8
'''
Evaluate with Precision, Recall, and F1
Results of leave-one-out
Results of K-fold
'''
import sys
import os.path
import snlocest.util as util
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.metrics import precision_recall_fscore_support, classification_report
def precision_recall_fscore(nodes, y_true, y_pred):
df = pd.DataFrame({'true': y_true, 'pred': y_pred}, index=nodes)
#df['pred'].replace(0, np.NaN, inplace=True)
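    # label 0 is treated as "no prediction": precision is taken over predicted nodes only,
    # recall over all test nodes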
n_predicted_nodes = len(df[df['pred'] != 0])
n_corrects = len(df[df['pred'] == df['true']])
n_test = len(nodes)
p = n_corrects / n_predicted_nodes
r = n_corrects / n_test
f = 2 * p * r / (p + r)
return (p, r, f, n_predicted_nodes, n_corrects, n_test)
def main(args):
    # list of node IDs that have labels, and the corresponding list of labels
labeled_nodes, labels = util.load_labellist(args.labelfile)
if args.random_state:
random_state = args.random_state
n_splits = args.n_splits if args.n_splits else 10
cv = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)
for i, (train, test) in enumerate(cv.split(labeled_nodes)):
filepath = os.path.join(args.resultfile_or_dir, '{}_{}.{}'.format(args.stem, i, args.ext))
test_nodes, y_pred = util.load_result(filepath)
y_test = labels[test]
assert np.all(labeled_nodes[test] == test_nodes), '分割後のノードID集合が異なる。random_stateが違うのでは?'
#assert len(y_pred) == len(y_test)
#precision, recall, f1, support = precision_recall_fscore_support(y_test, y_pred, average='micro')
#print(args.resultfile_or_dir, i, precision, recall, f1, support, sep='\t')
#report = classification_report(y_test, y_pred)
#print(report)
print(args.resultfile_or_dir, i, *precision_recall_fscore(test_nodes, y_test, y_pred), sep='\t')
else:
resultfile = args.resultfile_or_dir
        # read from standard input when '-' is specified
if args.resultfile_or_dir == '-':
resultfile = sys.stdin
test_nodes, y_pred = util.load_result(resultfile)
assert np.all(labeled_nodes == test_nodes)
print(args.resultfile_or_dir, *precision_recall_fscore(test_nodes, labels, y_pred), sep='\t')
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('labelfile')
parser.add_argument('resultfile_or_dir')
parser.add_argument('--random-state', type=int)
parser.add_argument('--n-splits', type=int, default=10)
parser.add_argument('--stem', default='result', help='結果ファイル名の文字列')
parser.add_argument('--ext', default='tsv', help='結果ファイルの拡張子')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
df = main(args)
|
flexible
|
{
"blob_id": "79c7a2f2e5f0301c15efe1b26a7839a12098f793",
"index": 6618,
"step-1": "<mask token>\n\n\ndef precision_recall_fscore(nodes, y_true, y_pred):\n df = pd.DataFrame({'true': y_true, 'pred': y_pred}, index=nodes)\n n_predicted_nodes = len(df[df['pred'] != 0])\n n_corrects = len(df[df['pred'] == df['true']])\n n_test = len(nodes)\n p = n_corrects / n_predicted_nodes\n r = n_corrects / n_test\n f = 2 * p * r / (p + r)\n return p, r, f, n_predicted_nodes, n_corrects, n_test\n\n\ndef main(args):\n labeled_nodes, labels = util.load_labellist(args.labelfile)\n if args.random_state:\n random_state = args.random_state\n n_splits = args.n_splits if args.n_splits else 10\n cv = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)\n for i, (train, test) in enumerate(cv.split(labeled_nodes)):\n filepath = os.path.join(args.resultfile_or_dir, '{}_{}.{}'.\n format(args.stem, i, args.ext))\n test_nodes, y_pred = util.load_result(filepath)\n y_test = labels[test]\n assert np.all(labeled_nodes[test] == test_nodes\n ), '分割後のノードID集合が異なる。random_stateが違うのでは?'\n print(args.resultfile_or_dir, i, *precision_recall_fscore(\n test_nodes, y_test, y_pred), sep='\\t')\n else:\n resultfile = args.resultfile_or_dir\n if args.resultfile_or_dir == '-':\n resultfile = sys.stdin\n test_nodes, y_pred = util.load_result(resultfile)\n assert np.all(labeled_nodes == test_nodes)\n print(args.resultfile_or_dir, *precision_recall_fscore(test_nodes,\n labels, y_pred), sep='\\t')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef precision_recall_fscore(nodes, y_true, y_pred):\n df = pd.DataFrame({'true': y_true, 'pred': y_pred}, index=nodes)\n n_predicted_nodes = len(df[df['pred'] != 0])\n n_corrects = len(df[df['pred'] == df['true']])\n n_test = len(nodes)\n p = n_corrects / n_predicted_nodes\n r = n_corrects / n_test\n f = 2 * p * r / (p + r)\n return p, r, f, n_predicted_nodes, n_corrects, n_test\n\n\ndef main(args):\n labeled_nodes, labels = util.load_labellist(args.labelfile)\n if args.random_state:\n random_state = args.random_state\n n_splits = args.n_splits if args.n_splits else 10\n cv = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)\n for i, (train, test) in enumerate(cv.split(labeled_nodes)):\n filepath = os.path.join(args.resultfile_or_dir, '{}_{}.{}'.\n format(args.stem, i, args.ext))\n test_nodes, y_pred = util.load_result(filepath)\n y_test = labels[test]\n assert np.all(labeled_nodes[test] == test_nodes\n ), '分割後のノードID集合が異なる。random_stateが違うのでは?'\n print(args.resultfile_or_dir, i, *precision_recall_fscore(\n test_nodes, y_test, y_pred), sep='\\t')\n else:\n resultfile = args.resultfile_or_dir\n if args.resultfile_or_dir == '-':\n resultfile = sys.stdin\n test_nodes, y_pred = util.load_result(resultfile)\n assert np.all(labeled_nodes == test_nodes)\n print(args.resultfile_or_dir, *precision_recall_fscore(test_nodes,\n labels, y_pred), sep='\\t')\n\n\ndef parse_args():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('labelfile')\n parser.add_argument('resultfile_or_dir')\n parser.add_argument('--random-state', type=int)\n parser.add_argument('--n-splits', type=int, default=10)\n parser.add_argument('--stem', default='result', help='結果ファイル名の文字列')\n parser.add_argument('--ext', default='tsv', help='結果ファイルの拡張子')\n return parser.parse_args()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef precision_recall_fscore(nodes, y_true, y_pred):\n df = pd.DataFrame({'true': y_true, 'pred': y_pred}, index=nodes)\n n_predicted_nodes = len(df[df['pred'] != 0])\n n_corrects = len(df[df['pred'] == df['true']])\n n_test = len(nodes)\n p = n_corrects / n_predicted_nodes\n r = n_corrects / n_test\n f = 2 * p * r / (p + r)\n return p, r, f, n_predicted_nodes, n_corrects, n_test\n\n\ndef main(args):\n labeled_nodes, labels = util.load_labellist(args.labelfile)\n if args.random_state:\n random_state = args.random_state\n n_splits = args.n_splits if args.n_splits else 10\n cv = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)\n for i, (train, test) in enumerate(cv.split(labeled_nodes)):\n filepath = os.path.join(args.resultfile_or_dir, '{}_{}.{}'.\n format(args.stem, i, args.ext))\n test_nodes, y_pred = util.load_result(filepath)\n y_test = labels[test]\n assert np.all(labeled_nodes[test] == test_nodes\n ), '分割後のノードID集合が異なる。random_stateが違うのでは?'\n print(args.resultfile_or_dir, i, *precision_recall_fscore(\n test_nodes, y_test, y_pred), sep='\\t')\n else:\n resultfile = args.resultfile_or_dir\n if args.resultfile_or_dir == '-':\n resultfile = sys.stdin\n test_nodes, y_pred = util.load_result(resultfile)\n assert np.all(labeled_nodes == test_nodes)\n print(args.resultfile_or_dir, *precision_recall_fscore(test_nodes,\n labels, y_pred), sep='\\t')\n\n\ndef parse_args():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('labelfile')\n parser.add_argument('resultfile_or_dir')\n parser.add_argument('--random-state', type=int)\n parser.add_argument('--n-splits', type=int, default=10)\n parser.add_argument('--stem', default='result', help='結果ファイル名の文字列')\n parser.add_argument('--ext', default='tsv', help='結果ファイルの拡張子')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n df = main(args)\n",
"step-4": "<mask token>\nimport sys\nimport os.path\nimport snlocest.util as util\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import precision_recall_fscore_support, classification_report\n\n\ndef precision_recall_fscore(nodes, y_true, y_pred):\n df = pd.DataFrame({'true': y_true, 'pred': y_pred}, index=nodes)\n n_predicted_nodes = len(df[df['pred'] != 0])\n n_corrects = len(df[df['pred'] == df['true']])\n n_test = len(nodes)\n p = n_corrects / n_predicted_nodes\n r = n_corrects / n_test\n f = 2 * p * r / (p + r)\n return p, r, f, n_predicted_nodes, n_corrects, n_test\n\n\ndef main(args):\n labeled_nodes, labels = util.load_labellist(args.labelfile)\n if args.random_state:\n random_state = args.random_state\n n_splits = args.n_splits if args.n_splits else 10\n cv = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)\n for i, (train, test) in enumerate(cv.split(labeled_nodes)):\n filepath = os.path.join(args.resultfile_or_dir, '{}_{}.{}'.\n format(args.stem, i, args.ext))\n test_nodes, y_pred = util.load_result(filepath)\n y_test = labels[test]\n assert np.all(labeled_nodes[test] == test_nodes\n ), '分割後のノードID集合が異なる。random_stateが違うのでは?'\n print(args.resultfile_or_dir, i, *precision_recall_fscore(\n test_nodes, y_test, y_pred), sep='\\t')\n else:\n resultfile = args.resultfile_or_dir\n if args.resultfile_or_dir == '-':\n resultfile = sys.stdin\n test_nodes, y_pred = util.load_result(resultfile)\n assert np.all(labeled_nodes == test_nodes)\n print(args.resultfile_or_dir, *precision_recall_fscore(test_nodes,\n labels, y_pred), sep='\\t')\n\n\ndef parse_args():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('labelfile')\n parser.add_argument('resultfile_or_dir')\n parser.add_argument('--random-state', type=int)\n parser.add_argument('--n-splits', type=int, default=10)\n parser.add_argument('--stem', default='result', help='結果ファイル名の文字列')\n parser.add_argument('--ext', default='tsv', help='結果ファイルの拡張子')\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n df = main(args)\n",
"step-5": "# coding: utf-8\n\n'''\nPrecision, Recall, F1で評価する\n\nLeave-one-outの結果\n\nK-foldの結果\n'''\n\nimport sys\nimport os.path\nimport snlocest.util as util\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import precision_recall_fscore_support, classification_report\n\n\ndef precision_recall_fscore(nodes, y_true, y_pred):\n df = pd.DataFrame({'true': y_true, 'pred': y_pred}, index=nodes)\n #df['pred'].replace(0, np.NaN, inplace=True)\n n_predicted_nodes = len(df[df['pred'] != 0])\n n_corrects = len(df[df['pred'] == df['true']])\n n_test = len(nodes)\n p = n_corrects / n_predicted_nodes\n r = n_corrects / n_test\n f = 2 * p * r / (p + r)\n return (p, r, f, n_predicted_nodes, n_corrects, n_test)\n\ndef main(args):\n # ラベルが付けられたノードIDのリスト、それに対応するラベルのリスト\n labeled_nodes, labels = util.load_labellist(args.labelfile)\n\n if args.random_state:\n random_state = args.random_state\n n_splits = args.n_splits if args.n_splits else 10\n\n cv = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)\n for i, (train, test) in enumerate(cv.split(labeled_nodes)):\n filepath = os.path.join(args.resultfile_or_dir, '{}_{}.{}'.format(args.stem, i, args.ext))\n test_nodes, y_pred = util.load_result(filepath)\n\n y_test = labels[test]\n assert np.all(labeled_nodes[test] == test_nodes), '分割後のノードID集合が異なる。random_stateが違うのでは?'\n #assert len(y_pred) == len(y_test)\n #precision, recall, f1, support = precision_recall_fscore_support(y_test, y_pred, average='micro')\n #print(args.resultfile_or_dir, i, precision, recall, f1, support, sep='\\t')\n #report = classification_report(y_test, y_pred)\n #print(report)\n print(args.resultfile_or_dir, i, *precision_recall_fscore(test_nodes, y_test, y_pred), sep='\\t')\n else:\n resultfile = args.resultfile_or_dir\n # '-'が指定されたら標準入力から読み込む\n if args.resultfile_or_dir == '-':\n resultfile = sys.stdin\n test_nodes, y_pred = util.load_result(resultfile)\n assert np.all(labeled_nodes == test_nodes)\n print(args.resultfile_or_dir, *precision_recall_fscore(test_nodes, labels, y_pred), sep='\\t')\n\ndef parse_args():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('labelfile')\n parser.add_argument('resultfile_or_dir')\n parser.add_argument('--random-state', type=int)\n parser.add_argument('--n-splits', type=int, default=10)\n parser.add_argument('--stem', default='result', help='結果ファイル名の文字列')\n parser.add_argument('--ext', default='tsv', help='結果ファイルの拡張子')\n return parser.parse_args()\n\nif __name__ == '__main__':\n args = parse_args()\n df = main(args)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os
import argparse
import torch
import model.model as module_arch
from utils.util import remove_weight_norms
from train import get_instance
from librosa import load
from librosa.output import write_wav
from time import time
def main(config, resume, infile, outfile, sigma, dur, half):
# build model architecture
model = get_instance(module_arch, 'arch', config)
model.summary()
# load state dict
checkpoint = torch.load(resume)
state_dict = checkpoint['state_dict']
if config['n_gpu'] > 1:
model = torch.nn.DataParallel(model)
model.load_state_dict(state_dict)
if config['n_gpu'] > 1:
model = model.module
model.apply(remove_weight_norms)
# prepare model for testing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
model.eval()
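    # read the target sample rate from the config and load the input audio (optionally truncated to dur seconds)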
sr = config['arch']['args']['sr']
y, _ = load(infile, sr=sr, duration=dur)
y = torch.Tensor(y).to(device)
    # compute the mel spectrogram before converting to half precision, because sparse.half is not implemented yet
mel = model.get_mel(y[None, :])
if half:
model = model.half()
mel = mel.half()
start = time()
x = model.infer(mel, sigma)
cost = time() - start
print("Time cost: {:.4f}, Speed: {:.4f} kHz".format(cost, x.numel() / cost / 1000))
# print(x.max(), x.min())
write_wav(outfile, x.cpu().float().numpy(), sr, False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='WaveGlow inference')
parser.add_argument('infile', type=str, help='wave file to generate mel-spectrogram')
parser.add_argument('outfile', type=str, help='output file name')
parser.add_argument('--duration', type=float, help='duration of audio, in seconds')
parser.add_argument('--half', action='store_true')
parser.add_argument('-s', '--sigma', type=float, default=1.0)
parser.add_argument('-r', '--resume', default=None, type=str,
help='path to latest checkpoint (default: None)')
parser.add_argument('-d', '--device', default=None, type=str,
help='indices of GPUs to enable (default: all)')
args = parser.parse_args()
if args.resume:
config = torch.load(args.resume)['config']
if args.device:
os.environ["CUDA_VISIBLE_DEVICES"] = args.device
main(config, args.resume, args.infile, args.outfile, args.sigma, args.duration, args.half)
|
normal
|
{
"blob_id": "a2421a8673a524c32539555596711a71a8e00dbf",
"index": 439,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(config, resume, infile, outfile, sigma, dur, half):\n model = get_instance(module_arch, 'arch', config)\n model.summary()\n checkpoint = torch.load(resume)\n state_dict = checkpoint['state_dict']\n if config['n_gpu'] > 1:\n model = torch.nn.DataParallel(model)\n model.load_state_dict(state_dict)\n if config['n_gpu'] > 1:\n model = model.module\n model.apply(remove_weight_norms)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = model.to(device)\n model.eval()\n sr = config['arch']['args']['sr']\n y, _ = load(infile, sr=sr, duration=dur)\n y = torch.Tensor(y).to(device)\n mel = model.get_mel(y[None, :])\n if half:\n model = model.half()\n mel = mel.half()\n start = time()\n x = model.infer(mel, sigma)\n cost = time() - start\n print('Time cost: {:.4f}, Speed: {:.4f} kHz'.format(cost, x.numel() /\n cost / 1000))\n write_wav(outfile, x.cpu().float().numpy(), sr, False)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main(config, resume, infile, outfile, sigma, dur, half):\n model = get_instance(module_arch, 'arch', config)\n model.summary()\n checkpoint = torch.load(resume)\n state_dict = checkpoint['state_dict']\n if config['n_gpu'] > 1:\n model = torch.nn.DataParallel(model)\n model.load_state_dict(state_dict)\n if config['n_gpu'] > 1:\n model = model.module\n model.apply(remove_weight_norms)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = model.to(device)\n model.eval()\n sr = config['arch']['args']['sr']\n y, _ = load(infile, sr=sr, duration=dur)\n y = torch.Tensor(y).to(device)\n mel = model.get_mel(y[None, :])\n if half:\n model = model.half()\n mel = mel.half()\n start = time()\n x = model.infer(mel, sigma)\n cost = time() - start\n print('Time cost: {:.4f}, Speed: {:.4f} kHz'.format(cost, x.numel() /\n cost / 1000))\n write_wav(outfile, x.cpu().float().numpy(), sr, False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='WaveGlow inference')\n parser.add_argument('infile', type=str, help=\n 'wave file to generate mel-spectrogram')\n parser.add_argument('outfile', type=str, help='output file name')\n parser.add_argument('--duration', type=float, help=\n 'duration of audio, in seconds')\n parser.add_argument('--half', action='store_true')\n parser.add_argument('-s', '--sigma', type=float, default=1.0)\n parser.add_argument('-r', '--resume', default=None, type=str, help=\n 'path to latest checkpoint (default: None)')\n parser.add_argument('-d', '--device', default=None, type=str, help=\n 'indices of GPUs to enable (default: all)')\n args = parser.parse_args()\n if args.resume:\n config = torch.load(args.resume)['config']\n if args.device:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.device\n main(config, args.resume, args.infile, args.outfile, args.sigma, args.\n duration, args.half)\n",
"step-4": "import os\nimport argparse\nimport torch\nimport model.model as module_arch\nfrom utils.util import remove_weight_norms\nfrom train import get_instance\nfrom librosa import load\nfrom librosa.output import write_wav\nfrom time import time\n\n\ndef main(config, resume, infile, outfile, sigma, dur, half):\n model = get_instance(module_arch, 'arch', config)\n model.summary()\n checkpoint = torch.load(resume)\n state_dict = checkpoint['state_dict']\n if config['n_gpu'] > 1:\n model = torch.nn.DataParallel(model)\n model.load_state_dict(state_dict)\n if config['n_gpu'] > 1:\n model = model.module\n model.apply(remove_weight_norms)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = model.to(device)\n model.eval()\n sr = config['arch']['args']['sr']\n y, _ = load(infile, sr=sr, duration=dur)\n y = torch.Tensor(y).to(device)\n mel = model.get_mel(y[None, :])\n if half:\n model = model.half()\n mel = mel.half()\n start = time()\n x = model.infer(mel, sigma)\n cost = time() - start\n print('Time cost: {:.4f}, Speed: {:.4f} kHz'.format(cost, x.numel() /\n cost / 1000))\n write_wav(outfile, x.cpu().float().numpy(), sr, False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='WaveGlow inference')\n parser.add_argument('infile', type=str, help=\n 'wave file to generate mel-spectrogram')\n parser.add_argument('outfile', type=str, help='output file name')\n parser.add_argument('--duration', type=float, help=\n 'duration of audio, in seconds')\n parser.add_argument('--half', action='store_true')\n parser.add_argument('-s', '--sigma', type=float, default=1.0)\n parser.add_argument('-r', '--resume', default=None, type=str, help=\n 'path to latest checkpoint (default: None)')\n parser.add_argument('-d', '--device', default=None, type=str, help=\n 'indices of GPUs to enable (default: all)')\n args = parser.parse_args()\n if args.resume:\n config = torch.load(args.resume)['config']\n if args.device:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.device\n main(config, args.resume, args.infile, args.outfile, args.sigma, args.\n duration, args.half)\n",
"step-5": "import os\nimport argparse\nimport torch\nimport model.model as module_arch\nfrom utils.util import remove_weight_norms\nfrom train import get_instance\nfrom librosa import load\nfrom librosa.output import write_wav\nfrom time import time\n\n\ndef main(config, resume, infile, outfile, sigma, dur, half):\n # build model architecture\n model = get_instance(module_arch, 'arch', config)\n model.summary()\n\n # load state dict\n checkpoint = torch.load(resume)\n state_dict = checkpoint['state_dict']\n if config['n_gpu'] > 1:\n model = torch.nn.DataParallel(model)\n model.load_state_dict(state_dict)\n\n if config['n_gpu'] > 1:\n model = model.module\n model.apply(remove_weight_norms)\n\n # prepare model for testing\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = model.to(device)\n model.eval()\n\n sr = config['arch']['args']['sr']\n y, _ = load(infile, sr=sr, duration=dur)\n y = torch.Tensor(y).to(device)\n\n # get mel before turn to half, because sparse.half is not implement yet\n mel = model.get_mel(y[None, :])\n\n if half:\n model = model.half()\n mel = mel.half()\n start = time()\n x = model.infer(mel, sigma)\n cost = time() - start\n print(\"Time cost: {:.4f}, Speed: {:.4f} kHz\".format(cost, x.numel() / cost / 1000))\n # print(x.max(), x.min())\n write_wav(outfile, x.cpu().float().numpy(), sr, False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='WaveGlow inference')\n parser.add_argument('infile', type=str, help='wave file to generate mel-spectrogram')\n parser.add_argument('outfile', type=str, help='output file name')\n parser.add_argument('--duration', type=float, help='duration of audio, in seconds')\n parser.add_argument('--half', action='store_true')\n parser.add_argument('-s', '--sigma', type=float, default=1.0)\n parser.add_argument('-r', '--resume', default=None, type=str,\n help='path to latest checkpoint (default: None)')\n parser.add_argument('-d', '--device', default=None, type=str,\n help='indices of GPUs to enable (default: all)')\n\n args = parser.parse_args()\n\n if args.resume:\n config = torch.load(args.resume)['config']\n if args.device:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device\n\n main(config, args.resume, args.infile, args.outfile, args.sigma, args.duration, args.half)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from valuate.predict import *
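# NOTE: the star import is expected to provide gl, pd, db_operate and the lookup tables
# (result_map, model_detail_map, province_city_map, ...) referenced below.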
def get_profit_rate(intent, popularity):
"""
    Get the popularity (sellability) coefficient
"""
    # fixed ratios of each trade type relative to the listed price, graded by popularity
profits = gl.PROFITS
profit = profits[popularity]
    # compute each trade type's price as a fixed ratio of the listed price
if intent == 'sell':
        # dealer purchase price as a ratio of the weighted average price
profit_rate = 1 - profit[0] - profit[1]
elif intent == 'buy':
        # dealer's actual selling price as a ratio of the weighted average price
profit_rate = 1 - profit[0]
elif intent == 'release':
        # suggested listing price as a ratio of the weighted average price
profit_rate = 1
elif intent == 'private':
        # C2C (private-party) price as a ratio of the weighted average price
profit_rate = 1 - profit[0] - profit[2]
elif intent == 'lowest':
        # lowest transaction price as a ratio of the weighted average price
profit_rate = 1 - profit[0] - profit[1] - profit[3]
elif intent == 'cpo':
        # certified pre-owned price as a ratio of the weighted average price
profit_rate = 1 - profit[0] - profit[8]
elif intent == 'replace':
        # 4S-dealership trade-in price as a ratio of the weighted average price
profit_rate = 1 - profit[0] - profit[4]
elif intent == 'auction':
        # auction price as a ratio of the weighted average price
profit_rate = 1 - profit[0] - profit[5]
elif intent == 'avg-buy':
        # average buying price as a ratio of the weighted average price
profit_rate = 1 - profit[0] - profit[7]
elif intent == 'avg-sell':
        # average selling price as a ratio of the weighted average price
profit_rate = 1 - profit[0] - profit[6]
return profit_rate
def cal_intent_condition(prices, price_bn):
"""
    Compute the four condition-level prices for every trade type
"""
if(prices[2] * 1.03) > price_bn:
rate = (prices[2] * 1.03) / price_bn
prices = prices / rate
df1 = pd.DataFrame(prices)
df2 = pd.DataFrame([gl.CAR_CONDITION_COEFFICIENT_VALUES])
all_map = df1.dot(df2)
all_map.columns = ['excellent', 'good', 'fair', 'bad']
all_map['intent'] = pd.Series(gl.INTENT_TYPE).values
all_map = all_map.loc[:, ['intent', 'excellent', 'good', 'fair', 'bad']]
all_map[['excellent', 'good', 'fair', 'bad']] = all_map[['excellent', 'good', 'fair', 'bad']].astype(int)
return all_map
def process_mile(price, use_time, mile):
"""
    Mileage adjustment
"""
    # A normally driven car is assumed to cover 25,000 km per year; below that the price floats up by at most +3.5%
    # Above that, if the average annual mileage is between 25,000 and 50,000 km the price floats by -3.5% to -7.5%
    # If the average annual mileage is 50,000 km or more, the impact on price is -7.5% to -12.5%
mile_per_month = mile / use_time
if mile_per_month < gl.MILE_THRESHOLD_2_5:
return price + 0.035 * (1 - mile_per_month/gl.MILE_THRESHOLD_2_5) * price
elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:
return price - (0.04 * (mile_per_month/gl.MILE_THRESHOLD_5)+0.035) * price
elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:
return price - (0.05 * (mile_per_month/gl.MILE_THRESHOLD_5)+0.075) * price
else:
return price - 0.125 * price
def process_profit_rate(df):
"""
    Popularity coefficient handling
"""
return get_profit_rate(df['intent'], df['popularity'])
def process_buy_profit_rate(df):
"""
    Popularity coefficient handling
"""
return get_profit_rate(df['intent_source'], df['popularity'])
def process_unreasonable_history_price(data, nums):
"""
    Smooth out unreasonable historical price trends
"""
if nums == 0:
return data
temp = data[1:]
temp.sort()
for i, value in enumerate(temp):
data[i+1] = temp[i]
for i in range(0, nums):
rate = (data[i + 1] - data[i]) / data[i + 1]
if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate):
data[i + 1] = int(data[i] * 1.0083)
return data
def process_unreasonable_future_price(data, nums):
"""
    Smooth out unreasonable future price trends
"""
temp = data[1:]
temp.sort(reverse=True)
for i, value in enumerate(temp):
data[i+1] = temp[i]
for i in range(0, nums):
rate = (data[i] - data[i + 1]) / data[i]
if (data[i] <= data[i + 1]) | (0.036 > rate) | (0.188 < rate):
data[i + 1] = int(data[i] * 0.9)
return data
def process_fill_zero(hedge):
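    # left-pad the packed hedge string with zeros to the fixed 18-character width (presumably 6 months x 3 digits per month)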
temp = hedge
if len(hedge) < 18:
for i in range(0, (18-len(hedge))):
temp = '0'+temp
return temp
def predict_from_db(model_detail_slug, city, use_time):
"""
    Query the prediction from the production database
"""
    # look up the ids for city and model_detail_slug
city_id = province_city_map.loc[city, 'city_id']
model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']
    # compute the query column number and the month number
if (use_time % 6) == 0:
column_num = str(int(use_time / 6) - 1)
month_num = 6
else:
column_num = str(int(use_time / 6))
month_num = use_time % 6
    # query
record = db_operate.query_valuate(model_detail_slug_id, city_id, column_num, use_time)
    # extract the corresponding values
dealer_hedge = str(record.loc[0, 'b2c_year_'+column_num])
dealer_hedge = process_fill_zero(dealer_hedge)
dealer_hedge = dealer_hedge[(month_num-1)*3:month_num*3]
dealer_hedge = int(dealer_hedge) / 1000
cpersonal_hedge = str(record.loc[0, 'c2c_year_'+column_num])
cpersonal_hedge = process_fill_zero(cpersonal_hedge)
cpersonal_hedge = cpersonal_hedge[(month_num-1)*3:month_num*3]
cpersonal_hedge = int(cpersonal_hedge) / 1000
return dealer_hedge, cpersonal_hedge
def predict_from_db_history(model_detail_slug, city, use_time):
"""
    Query the prediction history from the production database
"""
    # look up the ids for city and model_detail_slug
city_id = province_city_map.loc[city, 'city_id']
model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']
    # compute the query column number and the month number
if (use_time % 6) == 0:
column_num = int(use_time / 6) - 1
month_num = 6
else:
column_num = int(use_time / 6)
month_num = use_time % 6
    # query
dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(model_detail_slug_id, city_id, column_num, use_time)
    # extract the corresponding values
result = []
if len(dealer_hedge) == 1:
dealer_hedge = process_fill_zero(dealer_hedge[0])
cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])
for i in range(0, use_time):
dealer_per = dealer_hedge[i*3:(i+1)*3]
cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]
result.append([int(dealer_per)/1000, int(cpersonal_per)/1000, use_time])
result.reverse()
elif len(dealer_hedge) == 2:
dealer_hedge = process_fill_zero(dealer_hedge[0])+process_fill_zero(dealer_hedge[1])
cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])+process_fill_zero(cpersonal_hedge[1])
for i in range(month_num-1, month_num+6):
dealer_per = dealer_hedge[i*3:(i+1)*3]
cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]
result.append([int(dealer_per)/1000, int(cpersonal_per)/1000, use_time])
result.reverse()
return result
def predict_from_db_future(model_detail_slug, city, use_time, times):
"""
    Query future predictions from the production database
"""
    # look up the ids for city and model_detail_slug
city_id = province_city_map.loc[city, 'city_id']
model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']
    # compute the query column number and the month number
if (use_time % 6) == 0:
column_num = int(use_time / 6) - 1
month_num = 6
else:
column_num = int(use_time / 6)
month_num = use_time % 6
    # query
record = db_operate.query_valuate_future(model_detail_slug_id, city_id)
    # extract the corresponding values
result = []
for i in range(0, times):
dealer_hedge = str(record.loc[0, 'b2c_year_' + str(column_num+i*2)])
dealer_hedge = process_fill_zero(dealer_hedge)
dealer_hedge = dealer_hedge[(month_num - 1) * 3:month_num * 3]
dealer_hedge = int(dealer_hedge) / 1000
cpersonal_hedge = str(record.loc[0, 'c2c_year_' + str(column_num+i*2)])
cpersonal_hedge = process_fill_zero(cpersonal_hedge)
cpersonal_hedge = cpersonal_hedge[(month_num - 1) * 3:month_num * 3]
cpersonal_hedge = int(cpersonal_hedge) / 1000
result.append([dealer_hedge, cpersonal_hedge, use_time+i*12])
return result
def process_prices_relate(dealer_price, cpersonal_price):
"""
    Manually adjust the relationship between the three price types
"""
buy = dealer_price
private = cpersonal_price
    # compute the ratio between buy and private
private_buy_rate = (buy - private) / private
    # manually correct the three price types when the prediction is unreasonable
if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):
private = int(buy * (1 - 0.0875))
sell = int(private * (1 - 0.0525))
return buy, private, sell
def process_adjust_profit(model_detail_slug, popularity):
"""
    Adjustment-rate lookup
"""
index = str(model_detail_slug)+'_'+str(popularity)
if index in model_detail_slug_popularity_index:
rate = adjust_profit.loc[index, 'rate']
else:
rate = 0
return rate
def check_params_value(city, model_detail_slug, use_time, mile, category):
"""
    Validate parameters
"""
    # validate city
if city not in cities:
raise ApiParamsValueError('city', city, 'Unknown city!')
    # validate model
if model_detail_slug not in models:
raise ApiParamsValueError('model_detail_slug', model_detail_slug, 'Unknown model!')
    # validate mile
if not ((isinstance(mile, int)) | (isinstance(mile, float))):
raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')
elif mile < 0:
raise ApiParamsValueError('mile', mile, 'Mile must be greater than zero!')
    # validate use_time
if not isinstance(use_time, int):
raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')
if category == 'valuate':
if (use_time < 1) | (use_time > 240):
raise ApiParamsValueError('use_time', use_time, 'The use_time of Forecast must be in 1-240!')
elif category == 'history':
if (use_time < 1) | (use_time > 240):
raise ApiParamsValueError('use_time', use_time, 'The use_time of historical trend must be in 1-240!')
elif category == 'future':
if (use_time < 1) | (use_time > 240):
raise ApiParamsValueError('use_time', use_time, 'The use_time of future trend must be in 1-240!')
class Predict(object):
def __init__(self):
"""
        Load the various lookup tables and models
"""
self.result = []
self.valuate_model = []
def add_process_intent(self, buy, private, sell, popularity, price_bn):
"""
        Adjust the predicted values according to the trade type
"""
        # assemble the result
self.result = result_map.copy()
self.result.loc[(self.result['intent'] == 'buy'), 'predict_price'] = buy
self.result.loc[(self.result['intent'] == 'private'), 'predict_price'] = private
self.result.loc[(self.result['intent'] == 'sell'), 'predict_price'] = sell
self.result['predict_price'] = self.result['predict_price'].fillna(buy)
self.result['popularity'] = popularity
self.result['profit_rate'] = self.result.apply(process_profit_rate, axis=1)
self.result['buy_profit_rate'] = self.result.apply(process_buy_profit_rate, axis=1)
self.result['predict_price'] = self.result['predict_price'] / self.result['buy_profit_rate']
self.result['predict_price'] = self.result['profit_rate'] * self.result['predict_price']
        # compute all trade types
self.result = cal_intent_condition(self.result.predict_price.values, price_bn)
def follow_process(self, use_time, mile, price_bn, dealer_hedge, cpersonal_hedge, province, model_slug, model_detail_slug):
"""
        Follow-up processing
"""
        # get the prices
dealer_price, cpersonal_price = dealer_hedge * price_bn, cpersonal_hedge * price_bn
        # handle mileage
dealer_price = process_mile(dealer_price, use_time, mile)
cpersonal_price = process_mile(cpersonal_price, use_time, mile)
        # handle the correlation between the prices
buy, private, sell = process_prices_relate(dealer_price, cpersonal_price)
        # get the popularity
index = str(model_slug) + '_' + str(province)
if index in province_popularity_index:
popularity = province_popularity_map.loc[index, 'popularity']
else:
popularity = 'C'
        # apply the final adjustment rate
rate = process_adjust_profit(model_detail_slug, popularity)
buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (1 + rate)
return buy, private, sell, popularity
def predict(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, ret_type='records'):
"""
        Run the prediction and return the result
"""
        # validate parameters
check_params_value(city, model_detail_slug, use_time, mile, category='valuate')
        # look up the new-car guide price for the trim, and the adjusted trim
price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
price_bn = price_bn * 10000
province = province_city_map.loc[city, 'province']
model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug']
        # predict and return the hedge (value-retention) rates
dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug, city, use_time)
buy, private, sell, popularity = self.follow_process(use_time, mile, price_bn, dealer_hedge, cpersonal_hedge, province, model_slug, model_detail_slug)
        # adjust the predicted values according to the trade type
self.add_process_intent(buy, private, sell, popularity, price_bn)
if ret_type == 'records':
return self.result.to_dict('records')
else:
return self.result
def predict_for_history(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2):
"""
        Predict and return historical data
"""
        # validate parameters
check_params_value(city, model_detail_slug, use_time, mile, category='valuate')
        # look up the new-car guide price for the trim, and the adjusted trim
price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
price_bn = price_bn * 10000
province = province_city_map.loc[city, 'province']
model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug']
        # predict and return the hedge rates
data_buy = []
data_sell = []
data_private = []
result = predict_from_db_history(final_model_detail_slug, city, use_time)
for dealer_hedge, cpersonal_hedge, use_time_per in result:
buy, private, sell, popularity = self.follow_process(use_time_per, mile, price_bn, dealer_hedge,
cpersonal_hedge, province, model_slug,
model_detail_slug)
data_buy.append(int(buy))
data_private.append(int(private))
data_sell.append(int(sell))
return data_buy, data_private, data_sell
def predict_for_future(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, times=3):
"""
        Predict and return future data
"""
        # validate parameters
check_params_value(city, model_detail_slug, use_time, mile, category='valuate')
        # look up the new-car guide price for the trim, and the adjusted trim
price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']
price_bn = price_bn * 10000
province = province_city_map.loc[city, 'province']
model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']
final_model_detail_slug = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug']
        # predict and return the hedge rates
data_buy = []
data_sell = []
data_private = []
result = predict_from_db_future(final_model_detail_slug, city, use_time, times)
for dealer_hedge, cpersonal_hedge, use_time_per in result:
buy, private, sell, popularity = self.follow_process(use_time_per, mile, price_bn, dealer_hedge,
cpersonal_hedge, province, model_slug,
model_detail_slug)
data_buy.append(int(buy))
data_private.append(int(private))
data_sell.append(int(sell))
return data_buy, data_private, data_sell
def history_price_trend(self, city='深圳', model_detail_slug='model_25023_cs', use_time=12, mile=2, ret_type='records'):
"""
        Compute the historical price trend
"""
        # validate parameters
check_params_value(city, model_detail_slug, use_time, mile, category='history')
        # compute the time points
times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']
nums = 6
if use_time <= 6:
times_str = []
nums = use_time-1
for i in range(0, nums+1):
times_str.append(str(-i))
        # compute the historical trend of the dealer selling price and the dealer purchase price
data_buy, data_private, data_sell = self.predict_for_history(city, model_detail_slug, use_time, mile)
        # handle outliers
data_buy = process_unreasonable_history_price(data_buy, nums)
data_sell = process_unreasonable_history_price(data_sell, nums)
data_private = process_unreasonable_history_price(data_private, nums)
result_b_2_c = pd.DataFrame([data_buy], columns=times_str)
result_b_2_c['type'] = 'buy'
result_c_2_b = pd.DataFrame([data_sell], columns=times_str)
result_c_2_b['type'] = 'sell'
result_c_2_c = pd.DataFrame([data_private], columns=times_str)
result_c_2_c['type'] = 'private'
result = result_b_2_c.append(result_c_2_b, ignore_index=True)
result = result.append(result_c_2_c, ignore_index=True)
if ret_type == 'records':
return result.to_dict('records')
else:
return result
def future_price_trend(self, city='深圳', model_detail_slug='model_25023_cs', use_time=365, mile=2, ret_type='records'):
"""
        Compute the future price trend
"""
        # validate parameters
check_params_value(city, model_detail_slug, use_time, mile, category='future')
        # compute the time points
times_str = ['0', '12', '24', '36']
nums = 3
if use_time > 204:
times_str = []
nums = int((240-use_time) / 12)
for i in range(0, nums+1):
times_str.append(str(i*12))
        # compute the future price trend of the private-party transaction price
data_buy, data_private, data_sell = self.predict_for_future(city, model_detail_slug, use_time, mile, len(times_str))
data_buy = process_unreasonable_future_price(data_buy, nums)
data_sell = process_unreasonable_future_price(data_sell, nums)
data_private = process_unreasonable_future_price(data_private, nums)
result_b_2_c = pd.DataFrame([data_buy], columns=times_str)
result_b_2_c['type'] = 'buy'
result_c_2_b = pd.DataFrame([data_sell], columns=times_str)
result_c_2_b['type'] = 'sell'
result_c_2_c = pd.DataFrame([data_private], columns=times_str)
result_c_2_c['type'] = 'private'
result = result_b_2_c.append(result_c_2_b, ignore_index=True)
result = result.append(result_c_2_c, ignore_index=True)
if ret_type == 'records':
return result.to_dict('records')
else:
return result
|
normal
|
{
"blob_id": "1f01989f10be5404d415d4abd1ef9ab6c8695aba",
"index": 6069,
"step-1": "<mask token>\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n mile_per_month = mile / use_time\n if mile_per_month < gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month / gl.MILE_THRESHOLD_2_5\n ) * price\n elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.035\n ) * price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.075\n ) * price\n else:\n return price - 0.125 * price\n\n\n<mask token>\n\n\ndef process_prices_relate(dealer_price, cpersonal_price):\n \"\"\"\n 人工处理三类价格的相关性\n \"\"\"\n buy = dealer_price\n private = cpersonal_price\n private_buy_rate = (buy - private) / private\n if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):\n private = int(buy * (1 - 0.0875))\n sell = int(private * (1 - 0.0525))\n return buy, private, sell\n\n\n<mask token>\n\n\ndef check_params_value(city, model_detail_slug, use_time, mile, category):\n \"\"\"\n 校验参数\n \"\"\"\n if city not in cities:\n raise ApiParamsValueError('city', city, 'Unknown city!')\n if model_detail_slug not in models:\n raise ApiParamsValueError('model_detail_slug', model_detail_slug,\n 'Unknown model!')\n if not isinstance(mile, int) | isinstance(mile, float):\n raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')\n elif mile < 0:\n raise ApiParamsValueError('mile', mile,\n 'Mile must be greater than zero!')\n if not isinstance(use_time, int):\n raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')\n if category == 'valuate':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of Forecast must be in 1-240!')\n elif category == 'history':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of historical trend must be in 1-240!')\n elif category == 'future':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of future trend must be in 1-240!')\n\n\nclass Predict(object):\n\n def __init__(self):\n \"\"\"\n 加载各类匹配表和模型\n \"\"\"\n self.result = []\n self.valuate_model = []\n\n def add_process_intent(self, buy, private, sell, popularity, price_bn):\n \"\"\"\n 根据交易方式修正预测值\n \"\"\"\n self.result = result_map.copy()\n self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy\n self.result.loc[self.result['intent'] == 'private', 'predict_price'\n ] = private\n self.result.loc[self.result['intent'] == 'sell', 'predict_price'\n ] = sell\n self.result['predict_price'] = self.result['predict_price'].fillna(buy)\n self.result['popularity'] = popularity\n self.result['profit_rate'] = self.result.apply(process_profit_rate,\n axis=1)\n self.result['buy_profit_rate'] = self.result.apply(\n process_buy_profit_rate, axis=1)\n self.result['predict_price'] = self.result['predict_price'\n ] / self.result['buy_profit_rate']\n self.result['predict_price'] = self.result['profit_rate'\n ] * self.result['predict_price']\n self.result = cal_intent_condition(self.result.predict_price.values,\n price_bn)\n\n def follow_process(self, use_time, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug, model_detail_slug):\n \"\"\"\n 后续跟进处理\n \"\"\"\n dealer_price, cpersonal_price = (dealer_hedge * price_bn, \n cpersonal_hedge * price_bn)\n dealer_price = process_mile(dealer_price, use_time, mile)\n cpersonal_price = 
process_mile(cpersonal_price, use_time, mile)\n buy, private, sell = process_prices_relate(dealer_price,\n cpersonal_price)\n index = str(model_slug) + '_' + str(province)\n if index in province_popularity_index:\n popularity = province_popularity_map.loc[index, 'popularity']\n else:\n popularity = 'C'\n rate = process_adjust_profit(model_detail_slug, popularity)\n buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (\n 1 + rate)\n return buy, private, sell, popularity\n\n def predict(self, city='深圳', model_detail_slug='model_25023_cs',\n use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 预测返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug\n , city, use_time)\n buy, private, sell, popularity = self.follow_process(use_time, mile,\n price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,\n model_detail_slug)\n self.add_process_intent(buy, private, sell, popularity, price_bn)\n if ret_type == 'records':\n return self.result.to_dict('records')\n else:\n return self.result\n\n def predict_for_history(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_history(final_model_detail_slug, city,\n use_time)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def predict_for_future(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, times=3):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_future(final_model_detail_slug, city,\n use_time, times)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def history_price_trend(self, 
city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 计算历史价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='history')\n times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']\n nums = 6\n if use_time <= 6:\n times_str = []\n nums = use_time - 1\n for i in range(0, nums + 1):\n times_str.append(str(-i))\n data_buy, data_private, data_sell = self.predict_for_history(city,\n model_detail_slug, use_time, mile)\n data_buy = process_unreasonable_history_price(data_buy, nums)\n data_sell = process_unreasonable_history_price(data_sell, nums)\n data_private = process_unreasonable_history_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n def future_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=365, mile=2, ret_type='records'):\n \"\"\"\n 计算未来价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='future')\n times_str = ['0', '12', '24', '36']\n nums = 3\n if use_time > 204:\n times_str = []\n nums = int((240 - use_time) / 12)\n for i in range(0, nums + 1):\n times_str.append(str(i * 12))\n data_buy, data_private, data_sell = self.predict_for_future(city,\n model_detail_slug, use_time, mile, len(times_str))\n data_buy = process_unreasonable_future_price(data_buy, nums)\n data_sell = process_unreasonable_future_price(data_sell, nums)\n data_private = process_unreasonable_future_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n",
"step-2": "<mask token>\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n mile_per_month = mile / use_time\n if mile_per_month < gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month / gl.MILE_THRESHOLD_2_5\n ) * price\n elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.035\n ) * price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.075\n ) * price\n else:\n return price - 0.125 * price\n\n\n<mask token>\n\n\ndef process_unreasonable_history_price(data, nums):\n \"\"\"\n 处理不合理历史价格趋势\n \"\"\"\n if nums == 0:\n return data\n temp = data[1:]\n temp.sort()\n for i, value in enumerate(temp):\n data[i + 1] = temp[i]\n for i in range(0, nums):\n rate = (data[i + 1] - data[i]) / data[i + 1]\n if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate):\n data[i + 1] = int(data[i] * 1.0083)\n return data\n\n\n<mask token>\n\n\ndef process_fill_zero(hedge):\n temp = hedge\n if len(hedge) < 18:\n for i in range(0, 18 - len(hedge)):\n temp = '0' + temp\n return temp\n\n\n<mask token>\n\n\ndef predict_from_db_history(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(\n model_detail_slug_id, city_id, column_num, use_time)\n result = []\n if len(dealer_hedge) == 1:\n dealer_hedge = process_fill_zero(dealer_hedge[0])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])\n for i in range(0, use_time):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n elif len(dealer_hedge) == 2:\n dealer_hedge = process_fill_zero(dealer_hedge[0]) + process_fill_zero(\n dealer_hedge[1])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0]\n ) + process_fill_zero(cpersonal_hedge[1])\n for i in range(month_num - 1, month_num + 6):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n return result\n\n\n<mask token>\n\n\ndef process_prices_relate(dealer_price, cpersonal_price):\n \"\"\"\n 人工处理三类价格的相关性\n \"\"\"\n buy = dealer_price\n private = cpersonal_price\n private_buy_rate = (buy - private) / private\n if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):\n private = int(buy * (1 - 0.0875))\n sell = int(private * (1 - 0.0525))\n return buy, private, sell\n\n\n<mask token>\n\n\ndef check_params_value(city, model_detail_slug, use_time, mile, category):\n \"\"\"\n 校验参数\n \"\"\"\n if city not in cities:\n raise ApiParamsValueError('city', city, 'Unknown city!')\n if model_detail_slug not in models:\n raise ApiParamsValueError('model_detail_slug', model_detail_slug,\n 'Unknown model!')\n if not isinstance(mile, int) | isinstance(mile, float):\n raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')\n elif mile < 0:\n raise ApiParamsValueError('mile', mile,\n 'Mile must be greater than zero!')\n if not isinstance(use_time, 
int):\n raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')\n if category == 'valuate':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of Forecast must be in 1-240!')\n elif category == 'history':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of historical trend must be in 1-240!')\n elif category == 'future':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of future trend must be in 1-240!')\n\n\nclass Predict(object):\n\n def __init__(self):\n \"\"\"\n 加载各类匹配表和模型\n \"\"\"\n self.result = []\n self.valuate_model = []\n\n def add_process_intent(self, buy, private, sell, popularity, price_bn):\n \"\"\"\n 根据交易方式修正预测值\n \"\"\"\n self.result = result_map.copy()\n self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy\n self.result.loc[self.result['intent'] == 'private', 'predict_price'\n ] = private\n self.result.loc[self.result['intent'] == 'sell', 'predict_price'\n ] = sell\n self.result['predict_price'] = self.result['predict_price'].fillna(buy)\n self.result['popularity'] = popularity\n self.result['profit_rate'] = self.result.apply(process_profit_rate,\n axis=1)\n self.result['buy_profit_rate'] = self.result.apply(\n process_buy_profit_rate, axis=1)\n self.result['predict_price'] = self.result['predict_price'\n ] / self.result['buy_profit_rate']\n self.result['predict_price'] = self.result['profit_rate'\n ] * self.result['predict_price']\n self.result = cal_intent_condition(self.result.predict_price.values,\n price_bn)\n\n def follow_process(self, use_time, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug, model_detail_slug):\n \"\"\"\n 后续跟进处理\n \"\"\"\n dealer_price, cpersonal_price = (dealer_hedge * price_bn, \n cpersonal_hedge * price_bn)\n dealer_price = process_mile(dealer_price, use_time, mile)\n cpersonal_price = process_mile(cpersonal_price, use_time, mile)\n buy, private, sell = process_prices_relate(dealer_price,\n cpersonal_price)\n index = str(model_slug) + '_' + str(province)\n if index in province_popularity_index:\n popularity = province_popularity_map.loc[index, 'popularity']\n else:\n popularity = 'C'\n rate = process_adjust_profit(model_detail_slug, popularity)\n buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (\n 1 + rate)\n return buy, private, sell, popularity\n\n def predict(self, city='深圳', model_detail_slug='model_25023_cs',\n use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 预测返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug\n , city, use_time)\n buy, private, sell, popularity = self.follow_process(use_time, mile,\n price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,\n model_detail_slug)\n self.add_process_intent(buy, private, sell, popularity, price_bn)\n if ret_type == 'records':\n return self.result.to_dict('records')\n else:\n return self.result\n\n def predict_for_history(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2):\n \"\"\"\n 
预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_history(final_model_detail_slug, city,\n use_time)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def predict_for_future(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, times=3):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_future(final_model_detail_slug, city,\n use_time, times)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def history_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 计算历史价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='history')\n times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']\n nums = 6\n if use_time <= 6:\n times_str = []\n nums = use_time - 1\n for i in range(0, nums + 1):\n times_str.append(str(-i))\n data_buy, data_private, data_sell = self.predict_for_history(city,\n model_detail_slug, use_time, mile)\n data_buy = process_unreasonable_history_price(data_buy, nums)\n data_sell = process_unreasonable_history_price(data_sell, nums)\n data_private = process_unreasonable_history_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n def future_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=365, mile=2, ret_type='records'):\n \"\"\"\n 计算未来价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='future')\n times_str = ['0', '12', '24', '36']\n nums = 3\n if use_time > 204:\n times_str = []\n nums = int((240 - use_time) / 12)\n for i in 
range(0, nums + 1):\n times_str.append(str(i * 12))\n data_buy, data_private, data_sell = self.predict_for_future(city,\n model_detail_slug, use_time, mile, len(times_str))\n data_buy = process_unreasonable_future_price(data_buy, nums)\n data_sell = process_unreasonable_future_price(data_sell, nums)\n data_private = process_unreasonable_future_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n",
"step-3": "<mask token>\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n mile_per_month = mile / use_time\n if mile_per_month < gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month / gl.MILE_THRESHOLD_2_5\n ) * price\n elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.035\n ) * price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.075\n ) * price\n else:\n return price - 0.125 * price\n\n\n<mask token>\n\n\ndef process_unreasonable_history_price(data, nums):\n \"\"\"\n 处理不合理历史价格趋势\n \"\"\"\n if nums == 0:\n return data\n temp = data[1:]\n temp.sort()\n for i, value in enumerate(temp):\n data[i + 1] = temp[i]\n for i in range(0, nums):\n rate = (data[i + 1] - data[i]) / data[i + 1]\n if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate):\n data[i + 1] = int(data[i] * 1.0083)\n return data\n\n\n<mask token>\n\n\ndef process_fill_zero(hedge):\n temp = hedge\n if len(hedge) < 18:\n for i in range(0, 18 - len(hedge)):\n temp = '0' + temp\n return temp\n\n\ndef predict_from_db(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = str(int(use_time / 6) - 1)\n month_num = 6\n else:\n column_num = str(int(use_time / 6))\n month_num = use_time % 6\n record = db_operate.query_valuate(model_detail_slug_id, city_id,\n column_num, use_time)\n dealer_hedge = str(record.loc[0, 'b2c_year_' + column_num])\n dealer_hedge = process_fill_zero(dealer_hedge)\n dealer_hedge = dealer_hedge[(month_num - 1) * 3:month_num * 3]\n dealer_hedge = int(dealer_hedge) / 1000\n cpersonal_hedge = str(record.loc[0, 'c2c_year_' + column_num])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge)\n cpersonal_hedge = cpersonal_hedge[(month_num - 1) * 3:month_num * 3]\n cpersonal_hedge = int(cpersonal_hedge) / 1000\n return dealer_hedge, cpersonal_hedge\n\n\ndef predict_from_db_history(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(\n model_detail_slug_id, city_id, column_num, use_time)\n result = []\n if len(dealer_hedge) == 1:\n dealer_hedge = process_fill_zero(dealer_hedge[0])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])\n for i in range(0, use_time):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n elif len(dealer_hedge) == 2:\n dealer_hedge = process_fill_zero(dealer_hedge[0]) + process_fill_zero(\n dealer_hedge[1])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0]\n ) + process_fill_zero(cpersonal_hedge[1])\n for i in range(month_num - 1, month_num + 6):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n return 
result\n\n\n<mask token>\n\n\ndef process_prices_relate(dealer_price, cpersonal_price):\n \"\"\"\n 人工处理三类价格的相关性\n \"\"\"\n buy = dealer_price\n private = cpersonal_price\n private_buy_rate = (buy - private) / private\n if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):\n private = int(buy * (1 - 0.0875))\n sell = int(private * (1 - 0.0525))\n return buy, private, sell\n\n\n<mask token>\n\n\ndef check_params_value(city, model_detail_slug, use_time, mile, category):\n \"\"\"\n 校验参数\n \"\"\"\n if city not in cities:\n raise ApiParamsValueError('city', city, 'Unknown city!')\n if model_detail_slug not in models:\n raise ApiParamsValueError('model_detail_slug', model_detail_slug,\n 'Unknown model!')\n if not isinstance(mile, int) | isinstance(mile, float):\n raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')\n elif mile < 0:\n raise ApiParamsValueError('mile', mile,\n 'Mile must be greater than zero!')\n if not isinstance(use_time, int):\n raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')\n if category == 'valuate':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of Forecast must be in 1-240!')\n elif category == 'history':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of historical trend must be in 1-240!')\n elif category == 'future':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of future trend must be in 1-240!')\n\n\nclass Predict(object):\n\n def __init__(self):\n \"\"\"\n 加载各类匹配表和模型\n \"\"\"\n self.result = []\n self.valuate_model = []\n\n def add_process_intent(self, buy, private, sell, popularity, price_bn):\n \"\"\"\n 根据交易方式修正预测值\n \"\"\"\n self.result = result_map.copy()\n self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy\n self.result.loc[self.result['intent'] == 'private', 'predict_price'\n ] = private\n self.result.loc[self.result['intent'] == 'sell', 'predict_price'\n ] = sell\n self.result['predict_price'] = self.result['predict_price'].fillna(buy)\n self.result['popularity'] = popularity\n self.result['profit_rate'] = self.result.apply(process_profit_rate,\n axis=1)\n self.result['buy_profit_rate'] = self.result.apply(\n process_buy_profit_rate, axis=1)\n self.result['predict_price'] = self.result['predict_price'\n ] / self.result['buy_profit_rate']\n self.result['predict_price'] = self.result['profit_rate'\n ] * self.result['predict_price']\n self.result = cal_intent_condition(self.result.predict_price.values,\n price_bn)\n\n def follow_process(self, use_time, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug, model_detail_slug):\n \"\"\"\n 后续跟进处理\n \"\"\"\n dealer_price, cpersonal_price = (dealer_hedge * price_bn, \n cpersonal_hedge * price_bn)\n dealer_price = process_mile(dealer_price, use_time, mile)\n cpersonal_price = process_mile(cpersonal_price, use_time, mile)\n buy, private, sell = process_prices_relate(dealer_price,\n cpersonal_price)\n index = str(model_slug) + '_' + str(province)\n if index in province_popularity_index:\n popularity = province_popularity_map.loc[index, 'popularity']\n else:\n popularity = 'C'\n rate = process_adjust_profit(model_detail_slug, popularity)\n buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (\n 1 + rate)\n return buy, private, sell, popularity\n\n def predict(self, city='深圳', model_detail_slug='model_25023_cs',\n use_time=12, mile=2, 
ret_type='records'):\n \"\"\"\n 预测返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug\n , city, use_time)\n buy, private, sell, popularity = self.follow_process(use_time, mile,\n price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,\n model_detail_slug)\n self.add_process_intent(buy, private, sell, popularity, price_bn)\n if ret_type == 'records':\n return self.result.to_dict('records')\n else:\n return self.result\n\n def predict_for_history(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_history(final_model_detail_slug, city,\n use_time)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def predict_for_future(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, times=3):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_future(final_model_detail_slug, city,\n use_time, times)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def history_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 计算历史价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='history')\n times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']\n nums = 6\n if use_time <= 6:\n times_str = []\n nums = use_time - 1\n for i in range(0, nums + 1):\n times_str.append(str(-i))\n data_buy, data_private, data_sell = self.predict_for_history(city,\n model_detail_slug, use_time, mile)\n data_buy = process_unreasonable_history_price(data_buy, nums)\n data_sell = 
process_unreasonable_history_price(data_sell, nums)\n data_private = process_unreasonable_history_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n def future_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=365, mile=2, ret_type='records'):\n \"\"\"\n 计算未来价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='future')\n times_str = ['0', '12', '24', '36']\n nums = 3\n if use_time > 204:\n times_str = []\n nums = int((240 - use_time) / 12)\n for i in range(0, nums + 1):\n times_str.append(str(i * 12))\n data_buy, data_private, data_sell = self.predict_for_future(city,\n model_detail_slug, use_time, mile, len(times_str))\n data_buy = process_unreasonable_future_price(data_buy, nums)\n data_sell = process_unreasonable_future_price(data_sell, nums)\n data_private = process_unreasonable_future_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n",
"step-4": "<mask token>\n\n\ndef get_profit_rate(intent, popularity):\n \"\"\"\n 获取畅销系数\n \"\"\"\n profits = gl.PROFITS\n profit = profits[popularity]\n if intent == 'sell':\n profit_rate = 1 - profit[0] - profit[1]\n elif intent == 'buy':\n profit_rate = 1 - profit[0]\n elif intent == 'release':\n profit_rate = 1\n elif intent == 'private':\n profit_rate = 1 - profit[0] - profit[2]\n elif intent == 'lowest':\n profit_rate = 1 - profit[0] - profit[1] - profit[3]\n elif intent == 'cpo':\n profit_rate = 1 - profit[0] - profit[8]\n elif intent == 'replace':\n profit_rate = 1 - profit[0] - profit[4]\n elif intent == 'auction':\n profit_rate = 1 - profit[0] - profit[5]\n elif intent == 'avg-buy':\n profit_rate = 1 - profit[0] - profit[7]\n elif intent == 'avg-sell':\n profit_rate = 1 - profit[0] - profit[6]\n return profit_rate\n\n\ndef cal_intent_condition(prices, price_bn):\n \"\"\"\n 计算所有交易方式的4个级别车况价\n \"\"\"\n if prices[2] * 1.03 > price_bn:\n rate = prices[2] * 1.03 / price_bn\n prices = prices / rate\n df1 = pd.DataFrame(prices)\n df2 = pd.DataFrame([gl.CAR_CONDITION_COEFFICIENT_VALUES])\n all_map = df1.dot(df2)\n all_map.columns = ['excellent', 'good', 'fair', 'bad']\n all_map['intent'] = pd.Series(gl.INTENT_TYPE).values\n all_map = all_map.loc[:, ['intent', 'excellent', 'good', 'fair', 'bad']]\n all_map[['excellent', 'good', 'fair', 'bad']] = all_map[['excellent',\n 'good', 'fair', 'bad']].astype(int)\n return all_map\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n mile_per_month = mile / use_time\n if mile_per_month < gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month / gl.MILE_THRESHOLD_2_5\n ) * price\n elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.035\n ) * price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month / gl.MILE_THRESHOLD_5) + 0.075\n ) * price\n else:\n return price - 0.125 * price\n\n\n<mask token>\n\n\ndef process_unreasonable_history_price(data, nums):\n \"\"\"\n 处理不合理历史价格趋势\n \"\"\"\n if nums == 0:\n return data\n temp = data[1:]\n temp.sort()\n for i, value in enumerate(temp):\n data[i + 1] = temp[i]\n for i in range(0, nums):\n rate = (data[i + 1] - data[i]) / data[i + 1]\n if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate):\n data[i + 1] = int(data[i] * 1.0083)\n return data\n\n\n<mask token>\n\n\ndef process_fill_zero(hedge):\n temp = hedge\n if len(hedge) < 18:\n for i in range(0, 18 - len(hedge)):\n temp = '0' + temp\n return temp\n\n\ndef predict_from_db(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = str(int(use_time / 6) - 1)\n month_num = 6\n else:\n column_num = str(int(use_time / 6))\n month_num = use_time % 6\n record = db_operate.query_valuate(model_detail_slug_id, city_id,\n column_num, use_time)\n dealer_hedge = str(record.loc[0, 'b2c_year_' + column_num])\n dealer_hedge = process_fill_zero(dealer_hedge)\n dealer_hedge = dealer_hedge[(month_num - 1) * 3:month_num * 3]\n dealer_hedge = int(dealer_hedge) / 1000\n cpersonal_hedge = str(record.loc[0, 'c2c_year_' + column_num])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge)\n cpersonal_hedge = cpersonal_hedge[(month_num - 1) * 3:month_num * 3]\n cpersonal_hedge = int(cpersonal_hedge) / 1000\n 
return dealer_hedge, cpersonal_hedge\n\n\ndef predict_from_db_history(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug_id']\n if use_time % 6 == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(\n model_detail_slug_id, city_id, column_num, use_time)\n result = []\n if len(dealer_hedge) == 1:\n dealer_hedge = process_fill_zero(dealer_hedge[0])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])\n for i in range(0, use_time):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n elif len(dealer_hedge) == 2:\n dealer_hedge = process_fill_zero(dealer_hedge[0]) + process_fill_zero(\n dealer_hedge[1])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0]\n ) + process_fill_zero(cpersonal_hedge[1])\n for i in range(month_num - 1, month_num + 6):\n dealer_per = dealer_hedge[i * 3:(i + 1) * 3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per) / 1000, int(cpersonal_per) / \n 1000, use_time])\n result.reverse()\n return result\n\n\n<mask token>\n\n\ndef process_prices_relate(dealer_price, cpersonal_price):\n \"\"\"\n 人工处理三类价格的相关性\n \"\"\"\n buy = dealer_price\n private = cpersonal_price\n private_buy_rate = (buy - private) / private\n if (private_buy_rate < 0) | (abs(private_buy_rate) > 0.12):\n private = int(buy * (1 - 0.0875))\n sell = int(private * (1 - 0.0525))\n return buy, private, sell\n\n\n<mask token>\n\n\ndef check_params_value(city, model_detail_slug, use_time, mile, category):\n \"\"\"\n 校验参数\n \"\"\"\n if city not in cities:\n raise ApiParamsValueError('city', city, 'Unknown city!')\n if model_detail_slug not in models:\n raise ApiParamsValueError('model_detail_slug', model_detail_slug,\n 'Unknown model!')\n if not isinstance(mile, int) | isinstance(mile, float):\n raise ApiParamsTypeError('mile', mile, 'Mile must be int or float!')\n elif mile < 0:\n raise ApiParamsValueError('mile', mile,\n 'Mile must be greater than zero!')\n if not isinstance(use_time, int):\n raise ApiParamsTypeError('use_time', use_time, 'Use_time must be int!')\n if category == 'valuate':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of Forecast must be in 1-240!')\n elif category == 'history':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of historical trend must be in 1-240!')\n elif category == 'future':\n if (use_time < 1) | (use_time > 240):\n raise ApiParamsValueError('use_time', use_time,\n 'The use_time of future trend must be in 1-240!')\n\n\nclass Predict(object):\n\n def __init__(self):\n \"\"\"\n 加载各类匹配表和模型\n \"\"\"\n self.result = []\n self.valuate_model = []\n\n def add_process_intent(self, buy, private, sell, popularity, price_bn):\n \"\"\"\n 根据交易方式修正预测值\n \"\"\"\n self.result = result_map.copy()\n self.result.loc[self.result['intent'] == 'buy', 'predict_price'] = buy\n self.result.loc[self.result['intent'] == 'private', 'predict_price'\n ] = private\n self.result.loc[self.result['intent'] == 'sell', 'predict_price'\n ] = sell\n self.result['predict_price'] = 
self.result['predict_price'].fillna(buy)\n self.result['popularity'] = popularity\n self.result['profit_rate'] = self.result.apply(process_profit_rate,\n axis=1)\n self.result['buy_profit_rate'] = self.result.apply(\n process_buy_profit_rate, axis=1)\n self.result['predict_price'] = self.result['predict_price'\n ] / self.result['buy_profit_rate']\n self.result['predict_price'] = self.result['profit_rate'\n ] * self.result['predict_price']\n self.result = cal_intent_condition(self.result.predict_price.values,\n price_bn)\n\n def follow_process(self, use_time, mile, price_bn, dealer_hedge,\n cpersonal_hedge, province, model_slug, model_detail_slug):\n \"\"\"\n 后续跟进处理\n \"\"\"\n dealer_price, cpersonal_price = (dealer_hedge * price_bn, \n cpersonal_hedge * price_bn)\n dealer_price = process_mile(dealer_price, use_time, mile)\n cpersonal_price = process_mile(cpersonal_price, use_time, mile)\n buy, private, sell = process_prices_relate(dealer_price,\n cpersonal_price)\n index = str(model_slug) + '_' + str(province)\n if index in province_popularity_index:\n popularity = province_popularity_map.loc[index, 'popularity']\n else:\n popularity = 'C'\n rate = process_adjust_profit(model_detail_slug, popularity)\n buy, private, sell = buy * (1 + rate), private * (1 + rate), sell * (\n 1 + rate)\n return buy, private, sell, popularity\n\n def predict(self, city='深圳', model_detail_slug='model_25023_cs',\n use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 预测返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n dealer_hedge, cpersonal_hedge = predict_from_db(final_model_detail_slug\n , city, use_time)\n buy, private, sell, popularity = self.follow_process(use_time, mile,\n price_bn, dealer_hedge, cpersonal_hedge, province, model_slug,\n model_detail_slug)\n self.add_process_intent(buy, private, sell, popularity, price_bn)\n if ret_type == 'records':\n return self.result.to_dict('records')\n else:\n return self.result\n\n def predict_for_history(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_history(final_model_detail_slug, city,\n use_time)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def predict_for_future(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, times=3):\n \"\"\"\n 预测历史数据返回\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n 
category='valuate')\n price_bn = model_detail_map.loc[model_detail_slug, 'final_price_bn']\n price_bn = price_bn * 10000\n province = province_city_map.loc[city, 'province']\n model_slug = model_detail_map.loc[model_detail_slug, 'model_slug']\n final_model_detail_slug = model_detail_map.loc[model_detail_slug,\n 'final_model_detail_slug']\n data_buy = []\n data_sell = []\n data_private = []\n result = predict_from_db_future(final_model_detail_slug, city,\n use_time, times)\n for dealer_hedge, cpersonal_hedge, use_time_per in result:\n buy, private, sell, popularity = self.follow_process(use_time_per,\n mile, price_bn, dealer_hedge, cpersonal_hedge, province,\n model_slug, model_detail_slug)\n data_buy.append(int(buy))\n data_private.append(int(private))\n data_sell.append(int(sell))\n return data_buy, data_private, data_sell\n\n def history_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=12, mile=2, ret_type='records'):\n \"\"\"\n 计算历史价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='history')\n times_str = ['0', '-1', '-2', '-3', '-4', '-5', '-6']\n nums = 6\n if use_time <= 6:\n times_str = []\n nums = use_time - 1\n for i in range(0, nums + 1):\n times_str.append(str(-i))\n data_buy, data_private, data_sell = self.predict_for_history(city,\n model_detail_slug, use_time, mile)\n data_buy = process_unreasonable_history_price(data_buy, nums)\n data_sell = process_unreasonable_history_price(data_sell, nums)\n data_private = process_unreasonable_history_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n\n def future_price_trend(self, city='深圳', model_detail_slug=\n 'model_25023_cs', use_time=365, mile=2, ret_type='records'):\n \"\"\"\n 计算未来价格趋势\n \"\"\"\n check_params_value(city, model_detail_slug, use_time, mile,\n category='future')\n times_str = ['0', '12', '24', '36']\n nums = 3\n if use_time > 204:\n times_str = []\n nums = int((240 - use_time) / 12)\n for i in range(0, nums + 1):\n times_str.append(str(i * 12))\n data_buy, data_private, data_sell = self.predict_for_future(city,\n model_detail_slug, use_time, mile, len(times_str))\n data_buy = process_unreasonable_future_price(data_buy, nums)\n data_sell = process_unreasonable_future_price(data_sell, nums)\n data_private = process_unreasonable_future_price(data_private, nums)\n result_b_2_c = pd.DataFrame([data_buy], columns=times_str)\n result_b_2_c['type'] = 'buy'\n result_c_2_b = pd.DataFrame([data_sell], columns=times_str)\n result_c_2_b['type'] = 'sell'\n result_c_2_c = pd.DataFrame([data_private], columns=times_str)\n result_c_2_c['type'] = 'private'\n result = result_b_2_c.append(result_c_2_b, ignore_index=True)\n result = result.append(result_c_2_c, ignore_index=True)\n if ret_type == 'records':\n return result.to_dict('records')\n else:\n return result\n",
"step-5": "from valuate.predict import *\n\n\ndef get_profit_rate(intent, popularity):\n \"\"\"\n 获取畅销系数\n \"\"\"\n # 按畅销程度分级,各交易方式相比于标价的固定比例\n profits = gl.PROFITS\n profit = profits[popularity]\n # 计算各交易方式的价格相比于标价的固定比例\n if intent == 'sell':\n # 商家收购价相比加权平均价的比例\n profit_rate = 1 - profit[0] - profit[1]\n elif intent == 'buy':\n # 商家真实售价相比加权平均价的比例\n profit_rate = 1 - profit[0]\n elif intent == 'release':\n # 建议标价相比加权平均价的比例\n profit_rate = 1\n elif intent == 'private':\n # C2C价格相比加权平均价的比例\n profit_rate = 1 - profit[0] - profit[2]\n elif intent == 'lowest':\n # 最低成交价相比加权平均价的比例\n profit_rate = 1 - profit[0] - profit[1] - profit[3]\n elif intent == 'cpo':\n # 认证二手车价相比加权平均价的差异比例\n profit_rate = 1 - profit[0] - profit[8]\n elif intent == 'replace':\n # 4S店置换价相比加权平均价的比例\n profit_rate = 1 - profit[0] - profit[4]\n elif intent == 'auction':\n # 拍卖价相比加权平均价的差异比例\n profit_rate = 1 - profit[0] - profit[5]\n elif intent == 'avg-buy':\n # 平均买车价相比加权平均价的差异比例\n profit_rate = 1 - profit[0] - profit[7]\n elif intent == 'avg-sell':\n # 平均卖车价价相比加权平均价的差异比例\n profit_rate = 1 - profit[0] - profit[6]\n return profit_rate\n\n\ndef cal_intent_condition(prices, price_bn):\n \"\"\"\n 计算所有交易方式的4个级别车况价\n \"\"\"\n if(prices[2] * 1.03) > price_bn:\n rate = (prices[2] * 1.03) / price_bn\n prices = prices / rate\n\n df1 = pd.DataFrame(prices)\n df2 = pd.DataFrame([gl.CAR_CONDITION_COEFFICIENT_VALUES])\n all_map = df1.dot(df2)\n all_map.columns = ['excellent', 'good', 'fair', 'bad']\n all_map['intent'] = pd.Series(gl.INTENT_TYPE).values\n all_map = all_map.loc[:, ['intent', 'excellent', 'good', 'fair', 'bad']]\n all_map[['excellent', 'good', 'fair', 'bad']] = all_map[['excellent', 'good', 'fair', 'bad']].astype(int)\n return all_map\n\n\ndef process_mile(price, use_time, mile):\n \"\"\"\n mile处理\n \"\"\"\n # 正常行驶的车辆以一年2.5万公里为正常基数,低于2.5万公里的价格的浮动在+3.5%以内\n # 大于2.5万公里的若每年的平均行驶里程大于2.5万公里小于5万公里价格浮动在-3.5-7.5%\n # 若年平均形式里程大于5万公里及以上影响价格在-7.5-12.5%之间\n mile_per_month = mile / use_time\n if mile_per_month < gl.MILE_THRESHOLD_2_5:\n return price + 0.035 * (1 - mile_per_month/gl.MILE_THRESHOLD_2_5) * price\n elif gl.MILE_THRESHOLD_2_5 <= mile_per_month < gl.MILE_THRESHOLD_5:\n return price - (0.04 * (mile_per_month/gl.MILE_THRESHOLD_5)+0.035) * price\n elif gl.MILE_THRESHOLD_5 <= mile_per_month < gl.MILE_THRESHOLD_10:\n return price - (0.05 * (mile_per_month/gl.MILE_THRESHOLD_5)+0.075) * price\n else:\n return price - 0.125 * price\n\n\ndef process_profit_rate(df):\n \"\"\"\n 畅销系数处理\n \"\"\"\n return get_profit_rate(df['intent'], df['popularity'])\n\n\ndef process_buy_profit_rate(df):\n \"\"\"\n 畅销系数处理\n \"\"\"\n return get_profit_rate(df['intent_source'], df['popularity'])\n\n\ndef process_unreasonable_history_price(data, nums):\n \"\"\"\n 处理不合理历史价格趋势\n \"\"\"\n if nums == 0:\n return data\n\n temp = data[1:]\n temp.sort()\n for i, value in enumerate(temp):\n data[i+1] = temp[i]\n\n for i in range(0, nums):\n rate = (data[i + 1] - data[i]) / data[i + 1]\n if (data[i] >= data[i + 1]) | (0.003 > rate) | (0.0157 < rate):\n data[i + 1] = int(data[i] * 1.0083)\n\n return data\n\n\ndef process_unreasonable_future_price(data, nums):\n \"\"\"\n 处理不合理未来价格趋势\n \"\"\"\n temp = data[1:]\n temp.sort(reverse=True)\n for i, value in enumerate(temp):\n data[i+1] = temp[i]\n\n for i in range(0, nums):\n rate = (data[i] - data[i + 1]) / data[i]\n if (data[i] <= data[i + 1]) | (0.036 > rate) | (0.188 < rate):\n data[i + 1] = int(data[i] * 0.9)\n\n return data\n\n\ndef process_fill_zero(hedge):\n temp = hedge\n if len(hedge) < 18:\n for i in range(0, 
(18-len(hedge))):\n temp = '0'+temp\n return temp\n\n\ndef predict_from_db(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n # 查找city和model_detail_slug编号\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']\n # 计算查询字段编号和月编号\n if (use_time % 6) == 0:\n column_num = str(int(use_time / 6) - 1)\n month_num = 6\n else:\n column_num = str(int(use_time / 6))\n month_num = use_time % 6\n # 查询\n record = db_operate.query_valuate(model_detail_slug_id, city_id, column_num, use_time)\n # 查找对应值\n dealer_hedge = str(record.loc[0, 'b2c_year_'+column_num])\n dealer_hedge = process_fill_zero(dealer_hedge)\n dealer_hedge = dealer_hedge[(month_num-1)*3:month_num*3]\n dealer_hedge = int(dealer_hedge) / 1000\n cpersonal_hedge = str(record.loc[0, 'c2c_year_'+column_num])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge)\n cpersonal_hedge = cpersonal_hedge[(month_num-1)*3:month_num*3]\n cpersonal_hedge = int(cpersonal_hedge) / 1000\n return dealer_hedge, cpersonal_hedge\n\n\ndef predict_from_db_history(model_detail_slug, city, use_time):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n # 查找city和model_detail_slug编号\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']\n # 计算查询字段编号和月编号\n if (use_time % 6) == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n # 查询\n dealer_hedge, cpersonal_hedge = db_operate.query_valuate_history(model_detail_slug_id, city_id, column_num, use_time)\n # 查找对应值\n result = []\n if len(dealer_hedge) == 1:\n dealer_hedge = process_fill_zero(dealer_hedge[0])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])\n for i in range(0, use_time):\n dealer_per = dealer_hedge[i*3:(i+1)*3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per)/1000, int(cpersonal_per)/1000, use_time])\n result.reverse()\n elif len(dealer_hedge) == 2:\n dealer_hedge = process_fill_zero(dealer_hedge[0])+process_fill_zero(dealer_hedge[1])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge[0])+process_fill_zero(cpersonal_hedge[1])\n for i in range(month_num-1, month_num+6):\n dealer_per = dealer_hedge[i*3:(i+1)*3]\n cpersonal_per = cpersonal_hedge[i * 3:(i + 1) * 3]\n result.append([int(dealer_per)/1000, int(cpersonal_per)/1000, use_time])\n result.reverse()\n return result\n\n\ndef predict_from_db_future(model_detail_slug, city, use_time, times):\n \"\"\"\n 从生产库查询预测\n \"\"\"\n # 查找city和model_detail_slug编号\n city_id = province_city_map.loc[city, 'city_id']\n model_detail_slug_id = model_detail_map.loc[model_detail_slug, 'final_model_detail_slug_id']\n # 计算查询字段编号和月编号\n if (use_time % 6) == 0:\n column_num = int(use_time / 6) - 1\n month_num = 6\n else:\n column_num = int(use_time / 6)\n month_num = use_time % 6\n # 查询\n record = db_operate.query_valuate_future(model_detail_slug_id, city_id)\n # 查找对应值\n result = []\n for i in range(0, times):\n dealer_hedge = str(record.loc[0, 'b2c_year_' + str(column_num+i*2)])\n dealer_hedge = process_fill_zero(dealer_hedge)\n dealer_hedge = dealer_hedge[(month_num - 1) * 3:month_num * 3]\n dealer_hedge = int(dealer_hedge) / 1000\n cpersonal_hedge = str(record.loc[0, 'c2c_year_' + str(column_num+i*2)])\n cpersonal_hedge = process_fill_zero(cpersonal_hedge)\n cpersonal_hedge = cpersonal_hedge[(month_num - 1) * 3:month_num * 3]\n cpersonal_hedge = int(cpersonal_hedge) / 1000\n 
def html_print(text, title=''):
from IPython.core.display import display, HTML
# create title for the content
display(HTML("<h4>" + str(title) + "</h4>"))
# create content
html = display(HTML("<font size=2 face=Verdana>" + text + "</font>"))
return html
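
# Usage sketch (not part of the original source): html_print is intended for a
# Jupyter/IPython notebook cell, where display() renders the HTML inline. The
# title and body strings below are made-up example values.
html_print('Model trained on <b>1,200</b> samples.', title='Training summary')
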
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from obp.models import *
from django.utils.html import format_html
from jet.admin import CompactInline
#from django.utils.translation import ugettext_lazy as _
from jet.dashboard import modules
from jet.dashboard.dashboard import Dashboard, AppIndexDashboard
# Register your models here.
#admin.site.register(Special_offers)
#admin.site.register(Stock)
#admin.site.register(Product_order)
class ProductCompositionInline(admin.TabularInline):
model = Product_composition
class OrderInline(admin.TabularInline):
model = Product_order
class Client_OrderInline(admin.TabularInline):
model = Order
class MyAdminSite(AdminSite):
site_header = 'Pizza-Day'
index_template = "admin/index.html"
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    # Price range filter for the product list
class PriceListFilter(admin.SimpleListFilter):
title = 'Цена'
parameter_name = 'цена'
        # Human-readable labels for the filter options
def lookups(self, request, model_admin):
return (
('1', 'до 199'),
('2', '200 - 299'),
('3', '300 - 449'),
('4', 'от 450'),
)
        # Queryset for the selected price range
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(price__gte= 0,
price__lte=199)
            if self.value() == '2':
                return queryset.filter(price__gte=200,
                                       price__lte=299)
            if self.value() == '3':
                return queryset.filter(price__gte=300,
                                       price__lte=449)
            if self.value() == '4':
                return queryset.filter(price__gte=450)
    # Columns displayed in the admin list view
    list_display = ('get_image_html', 'id', 'section_id', 'title', 'getSize', 'getPrice')
    list_display_links = ()
    inlines = [
        ProductCompositionInline
    ]
    # Field that can be used for search
    search_fields = ['title']
    # Filters shown in the sidebar
    list_filter = ['section_id', PriceListFilter]
    # HTML <img> block with the product photo
def get_image_html(self, obj):
return format_html('<img src = "{}" style = "height: 30px; border-radius: 5px;"></img>', obj.image.url)
get_image_html.short_description = "Фото товара"
    # Price: show the discounted value when the product has an active stock entry
    def getPrice(self, obj):
        try:
            object = Stock.objects.get(id=obj.id, status=True)
            return format_html("<del>{} грн.</del> <span>{} грн. </span>".format(obj.price, object.value))
        except:
            pass
        return format_html("<span>" + str(obj.price) + " грн." + "</span>")
    getPrice.short_description = "Цена"
    # Weight as a string together with its unit of measure
def getSize(self, obj):
return str( obj.size ) + obj.unitSize
getSize.short_description = "Вес"
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
class PriceListFilter(admin.SimpleListFilter):
title = 'Цена'
parameter_name = 'цена'
def lookups(self, request, model_admin):
return (
('1', 'до 199'),
('2', '200 - 299'),
('3', '300 - 449'),
('4', 'от 450'),
)
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(price__gte= 0,
price__lte=199)
if self.value() == '2':
return queryset.filter(price__gte = 200,
price__lte = 299)
if self.value() == '3':
return queryset.filter(price__gte = 300,
price__lte = 449)
            if self.value() == '4':
                return queryset.filter(price__gte=450)
list_display = ('id', 'dateTimeOrder', 'price', 'status' )
list_filter = ['dateTimeOrder', PriceListFilter, "status"]
list_editable = ["status"]
inlines = [
OrderInline
]
@admin.register(Client)
class ClientAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'phone_number' )
inlines = [
Client_OrderInline
]
@admin.register(Section)
class SectionAdmin(admin.ModelAdmin):
list_display = ('id', 'title')
class StockAdmin(admin.ModelAdmin):
list_display = ("product_id", "value", "status" )
search_fields = ['product_id__title']
list_filter = ['status']
admin_site = MyAdminSite(name='myadmin')
admin_site.register(Product, ProductAdmin)
admin_site.register(Client, ClientAdmin)
admin_site.register(Order, OrderAdmin)
admin_site.register(Section, SectionAdmin)
admin_site.register(Stock, StockAdmin)
admin_site.register(Product_comment)
admin_site.register(Client_Auth)
admin_site.register(Special_offers)
admin_site.register(Product_rating)
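
# Wiring sketch (not part of the original source): a custom AdminSite instance is
# only reachable once its urls are mounted in the project's URLconf. The file name
# (urls.py), the 'admin/' prefix and the import path below are illustrative assumptions.
#
#     # urls.py
#     from django.urls import path
#     from obp.admin import admin_site  # assumed import path for this module
#
#     urlpatterns = [
#         path('admin/', admin_site.urls),
#     ]
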
#
#
# class CustomIndexDashboard(Dashboard):
# columns = 3
#
# def init_with_context(self, context):
# self.available_children.append(modules.LinkList)
# self.children.append(modules.LinkList(
# _('Support'),
# children=[
# {
# 'title': _('Django documentation'),
# 'url': 'http://docs.djangoproject.com/',
# 'external': True,
# },
# {
# 'title': _('Django "django-users" mailing list'),
# 'url': 'http://groups.google.com/group/django-users',
# 'external': True,
# },
# {
# 'title': _('Django irc channel'),
# 'url': 'irc://irc.freenode.net/django',
# 'external': True,
# },
# ],
# column=0,
# order=0
# ))
club_info = {'club_url':
'https://www.futbin.com///18/leagues/Major%20League%20Soccer?page=1&club=101112'
, 'club_logo':
'https://cdn.futbin.com/content/fifa18/img/clubs/101112.png',
'club_name': 'Vancouver Whitecaps FC'}
players = {}
players['Waston'] = {'player_url':
'https://www.futbin.com//18/player/15583/Kendall Waston', 'player_name':
'Kendall Waston', 'player_rating': '80', 'player_shortname': 'Waston',
'player_position': 'CB', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/72.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/216811.png?v=2'}
players['Montero'] = {'player_url':
'https://www.futbin.com//18/player/1645/Fredy Montero', 'player_name':
'Fredy Montero', 'player_rating': '76', 'player_shortname': 'Montero',
'player_position': 'ST', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/56.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/190919.png?v=2'}
players['Waston'] = {'player_url':
'https://www.futbin.com//18/player/2545/Kendall Waston', 'player_name':
'Kendall Waston', 'player_rating': '74', 'player_shortname': 'Waston',
'player_position': 'CB', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/72.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/216811.png?v=2'}
players['Laba'] = {'player_url':
'https://www.futbin.com//18/player/2526/Matías Laba', 'player_name':
'Matías Laba', 'player_rating': '74', 'player_shortname': 'Laba',
'player_position': 'CDM', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/52.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/214594.png?v=2'}
players['Kamara'] = {'player_url':
'https://www.futbin.com//18/player/16045/Kei Kamara', 'player_name':
'Kei Kamara', 'player_rating': '74', 'player_shortname': 'Kamara',
'player_position': 'ST', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/138.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/176048.png?v=2'}
players['Ghazal'] = {'player_url':
'https://www.futbin.com//18/player/3000/Aly Ghazal', 'player_name':
'Aly Ghazal', 'player_rating': '73', 'player_shortname': 'Ghazal',
'player_position': 'CDM', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/111.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/212469.png?v=2'}
players['Ousted'] = {'player_url':
'https://www.futbin.com//18/player/2630/David Ousted', 'player_name':
'David Ousted', 'player_rating': '73', 'player_shortname': 'Ousted',
'player_position': 'GK', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/13.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/162052.png?v=2'}
players['Bolaños'] = {'player_url':
'https://www.futbin.com//18/player/3890/Christian Bolaños',
'player_name': 'Christian Bolaños', 'player_rating': '71',
'player_shortname': 'Bolaños', 'player_position': 'RM', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/72.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/175753.png?v=2'}
players['Juárez'] = {'player_url':
'https://www.futbin.com//18/player/18393/Efraín Juárez', 'player_name':
'Efraín Juárez', 'player_rating': '71', 'player_shortname': 'Juárez',
'player_position': 'RB', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/83.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/167519.png?v=2'}
players['Mutch'] = {'player_url':
'https://www.futbin.com//18/player/18709/Jordon Mutch', 'player_name':
'Jordon Mutch', 'player_rating': '70', 'player_shortname': 'Mutch',
'player_position': 'CM', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/14.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/186117.png?v=2'}
players['Parker'] = {'player_url':
'https://www.futbin.com//18/player/5180/Tim Parker', 'player_name':
'Tim Parker', 'player_rating': '70', 'player_shortname': 'Parker',
'player_position': 'CB', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/95.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/226803.png?v=2'}
players['Felipe'] = {'player_url':
'https://www.futbin.com//18/player/18710/Felipe Martins', 'player_name':
'Felipe Martins', 'player_rating': '70', 'player_shortname': 'Felipe',
'player_position': 'CM', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/54.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/207465.png?v=2'}
players['Techera'] = {'player_url':
'https://www.futbin.com//18/player/5285/Cristian Techera',
'player_name': 'Cristian Techera', 'player_rating': '69',
'player_shortname': 'Techera', 'player_position': 'RM', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/60.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/228404.png?v=2'}
players['Maund'] = {'player_url':
'https://www.futbin.com//18/player/5824/Aaron Maund', 'player_name':
'Aaron Maund', 'player_rating': '69', 'player_shortname': 'Maund',
'player_position': 'CB', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/95.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/207860.png?v=2'}
players['Marinovic'] = {'player_url':
'https://www.futbin.com//18/player/18161/Stefan Marinovic',
'player_name': 'Stefan Marinovic', 'player_rating': '68',
'player_shortname': 'Marinovic', 'player_position': 'GK',
'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/198.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/231056.png?v=2'}
players['Edgar'] = {'player_url':
'https://www.futbin.com//18/player/6202/David Edgar', 'player_name':
'David Edgar', 'player_rating': '68', 'player_shortname': 'Edgar',
'player_position': 'CB', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/70.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/172207.png?v=2'}
players['Aja'] = {'player_url':
'https://www.futbin.com//18/player/18394/José Aja', 'player_name':
'José Aja', 'player_rating': '68', 'player_shortname': 'Aja',
'player_position': 'CB', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/60.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/235280.png?v=2'}
players['Davies'] = {'player_url':
'https://www.futbin.com//18/player/18543/Alphonso Davies',
'player_name': 'Alphonso Davies', 'player_rating': '67',
'player_shortname': 'Davies', 'player_position': 'LM', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/70.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/234396.png?v=2'}
players['Williams'] = {'player_url':
'https://www.futbin.com//18/player/7680/Sheanon Williams',
'player_name': 'Sheanon Williams', 'player_rating': '67',
'player_shortname': 'Williams', 'player_position': 'RB',
'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/95.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/190585.png?v=2'}
players['Harvey'] = {'player_url':
'https://www.futbin.com//18/player/7581/Jordan Harvey', 'player_name':
'Jordan Harvey', 'player_rating': '67', 'player_shortname': 'Harvey',
'player_position': 'LB', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/95.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/178379.png?v=2'}
players['Franklin'] = {'player_url':
'https://www.futbin.com//18/player/18395/Sean Franklin', 'player_name':
'Sean Franklin', 'player_rating': '67', 'player_shortname': 'Franklin',
'player_position': 'RB', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/95.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/186645.png?v=2'}
players['Henry'] = {'player_url':
'https://www.futbin.com//18/player/18396/Doneil Henry', 'player_name':
'Doneil Henry', 'player_rating': '66', 'player_shortname': 'Henry',
'player_position': 'CB', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/70.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/201208.png?v=2'}
players['Ibini'] = {'player_url':
'https://www.futbin.com//18/player/8275/Bernie Ibini', 'player_name':
'Bernie Ibini', 'player_rating': '66', 'player_shortname': 'Ibini',
'player_position': 'RM', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/195.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/203092.png?v=2'}
players['de Jong'] = {'player_url':
'https://www.futbin.com//18/player/7954/Marcel de Jong', 'player_name':
'Marcel de Jong', 'player_rating': '66', 'player_shortname': 'de Jong',
'player_position': 'LB', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/70.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/164581.png?v=2'}
players['Mezquida'] = {'player_url':
'https://www.futbin.com//18/player/8267/Nicolás Mezquida',
'player_name': 'Nicolás Mezquida', 'player_rating': '66',
'player_shortname': 'Mezquida', 'player_position': 'CAM',
'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/60.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/202866.png?v=2'}
players['Reyna'] = {'player_url':
'https://www.futbin.com//18/player/8529/Yordy Reyna', 'player_name':
'Yordy Reyna', 'player_rating': '66', 'player_shortname': 'Reyna',
'player_position': 'CF', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/59.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/214000.png?v=2'}
players['Hurtado'] = {'player_url':
'https://www.futbin.com//18/player/8507/Erik Hurtado', 'player_name':
'Erik Hurtado', 'player_rating': '66', 'player_shortname': 'Hurtado',
'player_position': 'ST', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/95.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/212750.png?v=2'}
players['Tchani'] = {'player_url':
'https://www.futbin.com//18/player/8175/Tony Tchani', 'player_name':
'Tony Tchani', 'player_rating': '66', 'player_shortname': 'Tchani',
'player_position': 'CM', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/103.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/197717.png?v=2'}
players['Shea'] = {'player_url':
'https://www.futbin.com//18/player/8062/Brek Shea', 'player_name':
'Brek Shea', 'player_rating': '66', 'player_shortname': 'Shea',
'player_position': 'LM', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/95.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/186536.png?v=2'}
players['Rowe'] = {'player_url':
'https://www.futbin.com//18/player/16147/Brian Rowe', 'player_name':
'Brian Rowe', 'player_rating': '66', 'player_shortname': 'Rowe',
'player_position': 'GK', 'player_nation':
'https://cdn.futbin.com/content/fifa18/img/nation/95.png',
'player_photo':
'https://cdn.futbin.com/content/fifa18/img/players/210711.png?v=2'}
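
# Illustrative sketch (not part of the original source): players maps a short name
# to that player's attribute dict, so a quick roster summary can be printed by
# iterating it. The keys used below all appear in the entries defined above.
for shortname, info in sorted(players.items()):
    print('{:<12} {:<4} {}'.format(shortname, info['player_position'], info['player_rating']))
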
import hou  # Houdini's Python API (available inside a Houdini session)


def reorderAssetsByTypes(nodePath, colorNode=True, alignNode=True):
    node = hou.pwd()
def getNaskCasting():
path = "E:/WIP/Work/casting-nask.csv"
file = open(path, "r")
fileText = file.readlines()
file.close()
fileText.pop(0)
assetDic = {}
for line in fileText:
assetType = line.split(",")
assetName = assetType[2]
assetType = assetType[1].split("/")[0]
assetDic[assetName] = assetType.lower()
return assetDic
assetList = getNaskCasting()
colorList = {"sets":(0, 0.4, 1), "chars":(0.4, 1, 0.4), "props":(0.6, 0.4, 1)}
assetTypeList = {"sets":[], "props":[], "chars":[]}
nodeChildren = hou.node(nodePath).children()
#colorize nodes by asset type
for child in list(nodeChildren):
if str(child) in assetList.keys():
type = assetList[str(child)]
if colorNode == True:
child.setColor(hou.Color(colorList[type]))
assetTypeList[type].append(child)
#reorder nodes layout by asset type
if alignNode == True:
u = 0
v = 0
for type in sorted(assetTypeList.keys()):
v = 0
for asset in sorted(assetTypeList[type]):
pos = hou.Vector2 (u,v)
asset.setPosition(pos)
v -= 1
u -= 3
reorderAssetsByTypes("/obj/geo1")
import unittest
import os
import tempfile
import numpy as np
from keras_piecewise.backend import keras
from keras_piecewise import Piecewise2D
from .util import MaxPool2D
class TestPool2D(unittest.TestCase):
@staticmethod
def _build_model(input_shape, layer, row_num, col_num, pos_type=
Piecewise2D.POS_TYPE_SEGMENTS):
data_input = keras.layers.Input(shape=input_shape)
row_input = keras.layers.Input(shape=(row_num,))
col_input = keras.layers.Input(shape=(col_num,))
pool_layer = Piecewise2D(layer=layer, pos_type=pos_type)([
data_input, row_input, col_input])
model = keras.models.Model(inputs=[data_input, row_input, col_input
], outputs=pool_layer)
model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.
mean_squared_error)
model.summary()
return model
def test_max_2d(self):
data = [[[1, 3, 5, 2], [2, 5, 6, 1], [7, 1, 5, 3], [7, 2, 2, 4]], [
[1, 3, 5, 2], [2, 5, 6, 1], [7, 1, 5, 3], [7, 2, 2, 4]]]
rows = [[2, 4], [3, 4]]
cols = [[1, 2, 4], [1, 3, 4]]
model = self._build_model(input_shape=(None, None), layer=MaxPool2D
(), row_num=len(rows[0]), col_num=len(cols[0]))
predicts = model.predict([np.asarray(data), np.asarray(rows), np.
asarray(cols)]).tolist()
expected = [[[2.0, 5.0, 6.0], [7.0, 2.0, 5.0]], [[7.0, 6.0, 3.0], [
7.0, 2.0, 4.0]]]
self.assertEqual(expected, predicts)
cols = [[1, 2, 0, 4], [1, 3, 2, 4]]
model = self._build_model(input_shape=(None, None), layer=MaxPool2D
(), row_num=len(rows[0]), col_num=len(cols[0]), pos_type=
Piecewise2D.POS_TYPE_PAIRS)
model_path = os.path.join(tempfile.gettempdir(),
'keras_piece_test_save_load_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={
'Piecewise2D': Piecewise2D, 'MaxPool2D': MaxPool2D})
predicts = model.predict([np.asarray(data), np.asarray(rows), np.
asarray(cols)]).tolist()
expected = [[[2.0, 7.0]], [[2.0, 4.0]]]
self.assertEqual(expected, predicts)
def test_pos_type_not_implemented(self):
with self.assertRaises(NotImplementedError):
self._build_model(input_shape=(None,), layer=MaxPool2D(),
row_num=13, col_num=17, pos_type='whatever')
<|reserved_special_token_1|>
import unittest
import os
import tempfile
import numpy as np
from keras_piecewise.backend import keras
from keras_piecewise import Piecewise2D
from .util import MaxPool2D
class TestPool2D(unittest.TestCase):
@staticmethod
def _build_model(input_shape, layer, row_num, col_num, pos_type=Piecewise2D.POS_TYPE_SEGMENTS):
data_input = keras.layers.Input(shape=input_shape)
row_input = keras.layers.Input(shape=(row_num,))
col_input = keras.layers.Input(shape=(col_num,))
pool_layer = Piecewise2D(
layer=layer,
pos_type=pos_type,
)([data_input, row_input, col_input])
model = keras.models.Model(inputs=[data_input, row_input, col_input], outputs=pool_layer)
model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.mean_squared_error)
model.summary()
return model
def test_max_2d(self):
data = [
[
[1, 3, 5, 2],
[2, 5, 6, 1],
[7, 1, 5, 3],
[7, 2, 2, 4],
],
[
[1, 3, 5, 2],
[2, 5, 6, 1],
[7, 1, 5, 3],
[7, 2, 2, 4],
],
]
rows = [
[2, 4],
[3, 4],
]
cols = [
[1, 2, 4],
[1, 3, 4],
]
model = self._build_model(
input_shape=(None, None),
layer=MaxPool2D(),
row_num=len(rows[0]),
col_num=len(cols[0]),
)
predicts = model.predict([np.asarray(data), np.asarray(rows), np.asarray(cols)]).tolist()
expected = [
[
[2.0, 5.0, 6.0],
[7.0, 2.0, 5.0],
],
[
[7.0, 6.0, 3.0],
[7.0, 2.0, 4.0],
],
]
self.assertEqual(expected, predicts)
cols = [
[1, 2, 0, 4],
[1, 3, 2, 4],
]
model = self._build_model(
input_shape=(None, None),
layer=MaxPool2D(),
row_num=len(rows[0]),
col_num=len(cols[0]),
pos_type=Piecewise2D.POS_TYPE_PAIRS,
)
model_path = os.path.join(tempfile.gettempdir(), 'keras_piece_test_save_load_%f.h5' % np.random.random())
model.save(model_path)
model = keras.models.load_model(model_path, custom_objects={
'Piecewise2D': Piecewise2D,
'MaxPool2D': MaxPool2D,
})
predicts = model.predict([np.asarray(data), np.asarray(rows), np.asarray(cols)]).tolist()
expected = [
[[2.0, 7.0]],
[[2.0, 4.0]],
]
self.assertEqual(expected, predicts)
def test_pos_type_not_implemented(self):
with self.assertRaises(NotImplementedError):
self._build_model(
input_shape=(None,),
layer=MaxPool2D(),
row_num=13,
col_num=17,
pos_type='whatever',
)
|
flexible
|
{
"blob_id": "1af9fb91e69ea78709c47fca6b12e4f7a6fd17a8",
"index": 7392,
"step-1": "<mask token>\n\n\nclass TestPool2D(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestPool2D(unittest.TestCase):\n\n @staticmethod\n def _build_model(input_shape, layer, row_num, col_num, pos_type=\n Piecewise2D.POS_TYPE_SEGMENTS):\n data_input = keras.layers.Input(shape=input_shape)\n row_input = keras.layers.Input(shape=(row_num,))\n col_input = keras.layers.Input(shape=(col_num,))\n pool_layer = Piecewise2D(layer=layer, pos_type=pos_type)([\n data_input, row_input, col_input])\n model = keras.models.Model(inputs=[data_input, row_input, col_input\n ], outputs=pool_layer)\n model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.\n mean_squared_error)\n model.summary()\n return model\n\n def test_max_2d(self):\n data = [[[1, 3, 5, 2], [2, 5, 6, 1], [7, 1, 5, 3], [7, 2, 2, 4]], [\n [1, 3, 5, 2], [2, 5, 6, 1], [7, 1, 5, 3], [7, 2, 2, 4]]]\n rows = [[2, 4], [3, 4]]\n cols = [[1, 2, 4], [1, 3, 4]]\n model = self._build_model(input_shape=(None, None), layer=MaxPool2D\n (), row_num=len(rows[0]), col_num=len(cols[0]))\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.\n asarray(cols)]).tolist()\n expected = [[[2.0, 5.0, 6.0], [7.0, 2.0, 5.0]], [[7.0, 6.0, 3.0], [\n 7.0, 2.0, 4.0]]]\n self.assertEqual(expected, predicts)\n cols = [[1, 2, 0, 4], [1, 3, 2, 4]]\n model = self._build_model(input_shape=(None, None), layer=MaxPool2D\n (), row_num=len(rows[0]), col_num=len(cols[0]), pos_type=\n Piecewise2D.POS_TYPE_PAIRS)\n model_path = os.path.join(tempfile.gettempdir(), \n 'keras_piece_test_save_load_%f.h5' % np.random.random())\n model.save(model_path)\n model = keras.models.load_model(model_path, custom_objects={\n 'Piecewise2D': Piecewise2D, 'MaxPool2D': MaxPool2D})\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.\n asarray(cols)]).tolist()\n expected = [[[2.0, 7.0]], [[2.0, 4.0]]]\n self.assertEqual(expected, predicts)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestPool2D(unittest.TestCase):\n\n @staticmethod\n def _build_model(input_shape, layer, row_num, col_num, pos_type=\n Piecewise2D.POS_TYPE_SEGMENTS):\n data_input = keras.layers.Input(shape=input_shape)\n row_input = keras.layers.Input(shape=(row_num,))\n col_input = keras.layers.Input(shape=(col_num,))\n pool_layer = Piecewise2D(layer=layer, pos_type=pos_type)([\n data_input, row_input, col_input])\n model = keras.models.Model(inputs=[data_input, row_input, col_input\n ], outputs=pool_layer)\n model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.\n mean_squared_error)\n model.summary()\n return model\n\n def test_max_2d(self):\n data = [[[1, 3, 5, 2], [2, 5, 6, 1], [7, 1, 5, 3], [7, 2, 2, 4]], [\n [1, 3, 5, 2], [2, 5, 6, 1], [7, 1, 5, 3], [7, 2, 2, 4]]]\n rows = [[2, 4], [3, 4]]\n cols = [[1, 2, 4], [1, 3, 4]]\n model = self._build_model(input_shape=(None, None), layer=MaxPool2D\n (), row_num=len(rows[0]), col_num=len(cols[0]))\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.\n asarray(cols)]).tolist()\n expected = [[[2.0, 5.0, 6.0], [7.0, 2.0, 5.0]], [[7.0, 6.0, 3.0], [\n 7.0, 2.0, 4.0]]]\n self.assertEqual(expected, predicts)\n cols = [[1, 2, 0, 4], [1, 3, 2, 4]]\n model = self._build_model(input_shape=(None, None), layer=MaxPool2D\n (), row_num=len(rows[0]), col_num=len(cols[0]), pos_type=\n Piecewise2D.POS_TYPE_PAIRS)\n model_path = os.path.join(tempfile.gettempdir(), \n 'keras_piece_test_save_load_%f.h5' % np.random.random())\n model.save(model_path)\n model = keras.models.load_model(model_path, custom_objects={\n 'Piecewise2D': Piecewise2D, 'MaxPool2D': MaxPool2D})\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.\n asarray(cols)]).tolist()\n expected = [[[2.0, 7.0]], [[2.0, 4.0]]]\n self.assertEqual(expected, predicts)\n\n def test_pos_type_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n self._build_model(input_shape=(None,), layer=MaxPool2D(),\n row_num=13, col_num=17, pos_type='whatever')\n",
"step-4": "import unittest\nimport os\nimport tempfile\nimport numpy as np\nfrom keras_piecewise.backend import keras\nfrom keras_piecewise import Piecewise2D\nfrom .util import MaxPool2D\n\n\nclass TestPool2D(unittest.TestCase):\n\n @staticmethod\n def _build_model(input_shape, layer, row_num, col_num, pos_type=\n Piecewise2D.POS_TYPE_SEGMENTS):\n data_input = keras.layers.Input(shape=input_shape)\n row_input = keras.layers.Input(shape=(row_num,))\n col_input = keras.layers.Input(shape=(col_num,))\n pool_layer = Piecewise2D(layer=layer, pos_type=pos_type)([\n data_input, row_input, col_input])\n model = keras.models.Model(inputs=[data_input, row_input, col_input\n ], outputs=pool_layer)\n model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.\n mean_squared_error)\n model.summary()\n return model\n\n def test_max_2d(self):\n data = [[[1, 3, 5, 2], [2, 5, 6, 1], [7, 1, 5, 3], [7, 2, 2, 4]], [\n [1, 3, 5, 2], [2, 5, 6, 1], [7, 1, 5, 3], [7, 2, 2, 4]]]\n rows = [[2, 4], [3, 4]]\n cols = [[1, 2, 4], [1, 3, 4]]\n model = self._build_model(input_shape=(None, None), layer=MaxPool2D\n (), row_num=len(rows[0]), col_num=len(cols[0]))\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.\n asarray(cols)]).tolist()\n expected = [[[2.0, 5.0, 6.0], [7.0, 2.0, 5.0]], [[7.0, 6.0, 3.0], [\n 7.0, 2.0, 4.0]]]\n self.assertEqual(expected, predicts)\n cols = [[1, 2, 0, 4], [1, 3, 2, 4]]\n model = self._build_model(input_shape=(None, None), layer=MaxPool2D\n (), row_num=len(rows[0]), col_num=len(cols[0]), pos_type=\n Piecewise2D.POS_TYPE_PAIRS)\n model_path = os.path.join(tempfile.gettempdir(), \n 'keras_piece_test_save_load_%f.h5' % np.random.random())\n model.save(model_path)\n model = keras.models.load_model(model_path, custom_objects={\n 'Piecewise2D': Piecewise2D, 'MaxPool2D': MaxPool2D})\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.\n asarray(cols)]).tolist()\n expected = [[[2.0, 7.0]], [[2.0, 4.0]]]\n self.assertEqual(expected, predicts)\n\n def test_pos_type_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n self._build_model(input_shape=(None,), layer=MaxPool2D(),\n row_num=13, col_num=17, pos_type='whatever')\n",
"step-5": "import unittest\nimport os\nimport tempfile\nimport numpy as np\nfrom keras_piecewise.backend import keras\nfrom keras_piecewise import Piecewise2D\nfrom .util import MaxPool2D\n\n\nclass TestPool2D(unittest.TestCase):\n\n @staticmethod\n def _build_model(input_shape, layer, row_num, col_num, pos_type=Piecewise2D.POS_TYPE_SEGMENTS):\n data_input = keras.layers.Input(shape=input_shape)\n row_input = keras.layers.Input(shape=(row_num,))\n col_input = keras.layers.Input(shape=(col_num,))\n pool_layer = Piecewise2D(\n layer=layer,\n pos_type=pos_type,\n )([data_input, row_input, col_input])\n model = keras.models.Model(inputs=[data_input, row_input, col_input], outputs=pool_layer)\n model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.mean_squared_error)\n model.summary()\n return model\n\n def test_max_2d(self):\n data = [\n [\n [1, 3, 5, 2],\n [2, 5, 6, 1],\n [7, 1, 5, 3],\n [7, 2, 2, 4],\n ],\n [\n [1, 3, 5, 2],\n [2, 5, 6, 1],\n [7, 1, 5, 3],\n [7, 2, 2, 4],\n ],\n ]\n rows = [\n [2, 4],\n [3, 4],\n ]\n cols = [\n [1, 2, 4],\n [1, 3, 4],\n ]\n model = self._build_model(\n input_shape=(None, None),\n layer=MaxPool2D(),\n row_num=len(rows[0]),\n col_num=len(cols[0]),\n )\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.asarray(cols)]).tolist()\n expected = [\n [\n [2.0, 5.0, 6.0],\n [7.0, 2.0, 5.0],\n ],\n [\n [7.0, 6.0, 3.0],\n [7.0, 2.0, 4.0],\n ],\n ]\n self.assertEqual(expected, predicts)\n cols = [\n [1, 2, 0, 4],\n [1, 3, 2, 4],\n ]\n model = self._build_model(\n input_shape=(None, None),\n layer=MaxPool2D(),\n row_num=len(rows[0]),\n col_num=len(cols[0]),\n pos_type=Piecewise2D.POS_TYPE_PAIRS,\n )\n model_path = os.path.join(tempfile.gettempdir(), 'keras_piece_test_save_load_%f.h5' % np.random.random())\n model.save(model_path)\n model = keras.models.load_model(model_path, custom_objects={\n 'Piecewise2D': Piecewise2D,\n 'MaxPool2D': MaxPool2D,\n })\n predicts = model.predict([np.asarray(data), np.asarray(rows), np.asarray(cols)]).tolist()\n expected = [\n [[2.0, 7.0]],\n [[2.0, 4.0]],\n ]\n self.assertEqual(expected, predicts)\n\n def test_pos_type_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n self._build_model(\n input_shape=(None,),\n layer=MaxPool2D(),\n row_num=13,\n col_num=17,\n pos_type='whatever',\n )\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import mmap;
import random;
def shuffle():
l_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];
random.shuffle(l_digits);
return "".join(l_digits);
with open("hello.txt", "r+") as f:
map = mmap.mmap(f.fileno(), 1000);
l_i = 0;
for l_digit in shuffle():
map[l_i] = l_digit;
l_i += 1;
|
normal
|
{
"blob_id": "b0468e58c4d0387a92ba96e8fb8a876ece256c78",
"index": 6507,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef shuffle():\n l_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n random.shuffle(l_digits)\n return ''.join(l_digits)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef shuffle():\n l_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n random.shuffle(l_digits)\n return ''.join(l_digits)\n\n\nwith open('hello.txt', 'r+') as f:\n map = mmap.mmap(f.fileno(), 1000)\n l_i = 0\n for l_digit in shuffle():\n map[l_i] = l_digit\n l_i += 1\n",
"step-4": "import mmap\nimport random\n\n\ndef shuffle():\n l_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n random.shuffle(l_digits)\n return ''.join(l_digits)\n\n\nwith open('hello.txt', 'r+') as f:\n map = mmap.mmap(f.fileno(), 1000)\n l_i = 0\n for l_digit in shuffle():\n map[l_i] = l_digit\n l_i += 1\n",
"step-5": "import mmap;\r\nimport random;\r\n\r\ndef shuffle():\r\n l_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];\r\n random.shuffle(l_digits);\r\n\r\n return \"\".join(l_digits);\r\n\r\n\r\nwith open(\"hello.txt\", \"r+\") as f:\r\n map = mmap.mmap(f.fileno(), 1000);\r\n l_i = 0;\r\n\r\n for l_digit in shuffle():\r\n map[l_i] = l_digit;\r\n l_i += 1;",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
8-6. City Names
Write a city_country() function that takes a city name and a country name. The function should return a string of the following form:
'Santiago, Chile'
- Call the function with at least three city-country pairs and print the returned values.
Output:
santiago, chile
ushuaia, argentina
longyearbyen, svalbard
'''
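
# A minimal sketch of one possible solution to the exercise above. The function
# body and the three city/country pairs are illustrative choices made to
# reproduce the sample output shown in the docstring; they are not part of the
# original exercise file.
def city_country(city, country):
    return f"{city}, {country}"

for city, country in [('santiago', 'chile'), ('ushuaia', 'argentina'),
                      ('longyearbyen', 'svalbard')]:
    print(city_country(city, country))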
|
normal
|
{
"blob_id": "2d5abcd75dcbeb1baa3f387035bdcc3b7adbfe3f",
"index": 7856,
"step-1": "<mask token>\n",
"step-2": "'''\n8-6. 도시 이름\n도시와 국가 이름을 받는 city_country() 함수를 만드세요. 이 함수는 다음과 같은 문자열을 반환해야 합니다.\n'Santiago, Chile'\n- 최소한 세 개의 도시-국가 쌍으로 함수를 호출하고 반환값을 출력하세요.\n\nOutput:\nsantiago, chile\nushuaia, argentina\nlongyearbyen, svalbard\n'''\n\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#!/usr/bin/env python3
import click
@click.command()
@click.option("--name", prompt = "Your name")
def hello(name):
print("hello", name)
if __name__ == '__main__':
hello()
|
normal
|
{
"blob_id": "19c1a50cf19f04a9e0d0163a9383cb900bca1d38",
"index": 9862,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]()\[email protected]('--name', prompt='Your name')\ndef hello(name):\n print('hello', name)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]()\[email protected]('--name', prompt='Your name')\ndef hello(name):\n print('hello', name)\n\n\nif __name__ == '__main__':\n hello()\n",
"step-4": "import click\n\n\[email protected]()\[email protected]('--name', prompt='Your name')\ndef hello(name):\n print('hello', name)\n\n\nif __name__ == '__main__':\n hello()\n",
"step-5": "#!/usr/bin/env python3\n\nimport click\n\[email protected]()\[email protected](\"--name\", prompt = \"Your name\")\ndef hello(name):\n print(\"hello\", name)\n\nif __name__ == '__main__':\n hello()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class UploaderThread(object):
<|reserved_special_token_0|>
def is_uploading_tourney(self, tourney):
return tourney in self.uploading_tourneys
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_last_successful_upload_time(self, tourney_name):
try:
with countdowntourney.tourney_open(tourney_name, db_dir
) as tourney:
upload_time = tourney.get_last_successful_upload_time()
if (upload_time is None or upload_time < self.
tourney_upload_start_time.get(tourney_name, 0)):
return None
else:
return upload_time
except countdowntourney.TourneyException as e:
sys.stderr.write(
'Failed to get last successful upload time: %s\n' % str(e))
return None
def get_last_failed_upload(self, tourney_name):
try:
with countdowntourney.tourney_open(tourney_name, db_dir
) as tourney:
failed_upload = tourney.get_last_failed_upload()
if failed_upload is not None and failed_upload.get('ts', None
) is not None and failed_upload['ts'
] >= self.tourney_upload_start_time.get(tourney_name, 0):
return failed_upload
else:
return None
except countdowntourney.TourneyException as e:
sys.stderr.write('Failed to get last failed upload info: %s\n' %
str(e))
return None
def get_num_viewers(self, tourney_name):
return self.tourney_num_viewers.get(tourney_name, None)
def get_tourney_auth(self, tourney):
return self.tourney_auth.get(tourney)
def set_tourney_auth(self, tourney, username, password):
self.tourney_auth[tourney] = {'username': username, 'password':
password}
def get_upload_button_pressed_time(self, tourney):
if tourney not in self.uploading_tourneys:
return None
else:
return self.tourney_upload_start_time.get(tourney, None)
def write_log(self, message):
sys.stderr.write('%s: %s\r\n' % (time.strftime('%Y-%m-%d %H:%M:%S'),
message))
def body(self):
while True:
uploading_tourneys = self.uploading_tourneys.copy()
for tourney_name in uploading_tourneys:
now = time.time()
last_upload_time = self.tourney_last_upload_attempt_time.get(
tourney_name, 0)
if now >= last_upload_time + upload_interval_sec:
try:
self.tourney_last_upload_attempt_time[tourney_name
] = now
with countdowntourney.tourney_open(tourney_name, db_dir
) as tourney:
game_state = get_game_state(tourney)
tourney_unique_id = get_tourney_unique_id(tourney)
auth = self.tourney_auth.get(tourney_name, None)
if auth:
username = auth.get('username')
password = auth.get('password')
private = auth.get('private', False)
else:
username = None
password = None
private = False
req = {'username': username, 'password':
password, 'private': private, 'unique_id':
tourney_unique_id, 'tourney': tourney_name}
if (tourney_name not in self.
tourney_last_uploaded_game_state or
game_state != self.
tourney_last_uploaded_game_state[tourney_name]
):
req['state'] = game_state
rep = make_https_json_request(http_server_host,
http_server_port, http_submit_path, req)
num_viewers = None
if rep.get('success', False):
self.tourney_last_uploaded_game_state[
tourney_name] = game_state
tourney.log_successful_upload()
if 'state' in req:
self.write_log(
'Successfully uploaded state for tourney "%s"'
% tourney_name)
else:
self.write_log(
'No change since last upload of tourney "%s"'
% tourney_name)
num_viewers = rep.get('viewers', None)
if num_viewers is not None:
self.write_log(
'Server reports %d viewer%s.' % (
num_viewers, 's' if num_viewers !=
1 else ''))
else:
if rep.get('http_failure', False):
failure_type = (countdowntourney.
UPLOAD_FAIL_TYPE_HTTP)
else:
failure_type = (countdowntourney.
UPLOAD_FAIL_TYPE_REJECTED)
tourney.log_failed_upload(failure_type, rep
.get('message', '(no message)'))
self.write_log(
'Failed to upload state for tourney "%s": %s'
% (tourney_name, rep.get('message',
                                '(no message)')))
self.tourney_num_viewers[tourney_name
] = num_viewers
except countdowntourney.TourneyException as e:
self.write_log(
"UploaderThread: couldn't open tourney %s: %s" %
(tourney_name, str(e)))
traceback.print_tb(e.__traceback__)
continue
except Exception as e:
self.write_log(
'Uploader thread threw exception: %s' % str(e))
traceback.print_tb(e.__traceback__)
continue
time.sleep(1)
class UploaderServiceHandler(BaseRequestHandler):
def get_fields_from_req(self, req, field_names):
field_values = []
for name in field_names:
value = req.get(name, None)
if value is None:
raise FieldNotFoundException()
field_values.append(value)
return tuple(field_values)
def process_request(self, req):
global uploader_thread
req_type = req.get('type', None)
if not req_type:
return make_error_response('Request has no request type')
req_body = req.get('request', None)
if req_body is None:
return make_error_response('Request has no body')
try:
if req_type == 'start_uploading':
tourney, username, password, private = (self.
get_fields_from_req(req_body, ['tourney', 'username',
'password', 'private']))
uploader_thread.add_tourney_to_upload_list(tourney,
username, password, private)
rep = make_ok_response()
elif req_type == 'stop_uploading':
tourney, = self.get_fields_from_req(req_body, ['tourney'])
uploader_thread.remove_tourney_from_upload_list(tourney)
rep = make_ok_response()
elif req_type == 'delete':
tourney, username, password = self.get_fields_from_req(req_body
, ['tourney', 'username', 'password'])
uploader_thread.remove_tourney_from_upload_list(tourney)
rep = delete_tourney_from_web(tourney, username, password)
uploader_thread.set_tourney_auth(tourney, username, password)
elif req_type == 'status':
tourney, = self.get_fields_from_req(req_body, ['tourney'])
rep = {'success': True}
auth = uploader_thread.get_tourney_auth(tourney)
rep['publishing'] = uploader_thread.is_uploading_tourney(
tourney)
rep['viewers'] = uploader_thread.get_num_viewers(tourney)
if auth:
rep['username'] = auth.get('username', None)
rep['password'] = auth.get('password', None)
rep['private'] = auth.get('private', False)
rep['last_successful_upload_time'
] = uploader_thread.get_last_successful_upload_time(tourney
)
rep['last_failed_upload'
] = uploader_thread.get_last_failed_upload(tourney)
rep['upload_button_pressed_time'
] = uploader_thread.get_upload_button_pressed_time(tourney)
rep['now'] = int(time.time())
else:
rep = make_error_response('Unrecognised request type')
except FieldNotFoundException:
return make_error_response('Request is not valid for type')
return rep
def handle(self):
line = read_line_from_socket(self.request)
if line is not None:
rep = None
try:
req = json.loads(line)
except Exception as e:
rep = make_error_response('Request is not valid JSON')
if not rep:
rep = self.process_request(req)
self.request.sendall((json.dumps(rep) + '\n').encode('utf-8'))
self.request.close()
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, addr_port, service_handler):
self.allow_reuse_address = True
super().__init__(addr_port, service_handler)
class TourneyUploaderService(object):
def __init__(self, listen_port):
global uploader_thread
self.listen_port = listen_port
self.socket_server = ThreadedTCPServer(('127.0.0.1', listen_port),
UploaderServiceHandler)
self.server_thread = threading.Thread(target=self.socket_server.
serve_forever)
if not uploader_thread:
uploader_thread = UploaderThread()
self.server_thread.daemon = True
self.server_thread.start()
def shutdown(self):
self.socket_server.shutdown()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UploaderThread(object):
<|reserved_special_token_0|>
def is_uploading_tourney(self, tourney):
return tourney in self.uploading_tourneys
def add_tourney_to_upload_list(self, tourney, username, password, private):
self.uploading_tourneys.add(tourney)
self.tourney_auth[tourney] = {'username': username, 'password':
password, 'private': private}
self.tourney_upload_start_time[tourney] = int(time.time())
if tourney in self.tourney_last_uploaded_game_state:
del self.tourney_last_uploaded_game_state[tourney]
self.tourney_last_upload_attempt_time[tourney] = 0
def remove_tourney_from_upload_list(self, tourney):
self.uploading_tourneys.discard(tourney)
def get_last_successful_upload_time(self, tourney_name):
try:
with countdowntourney.tourney_open(tourney_name, db_dir
) as tourney:
upload_time = tourney.get_last_successful_upload_time()
if (upload_time is None or upload_time < self.
tourney_upload_start_time.get(tourney_name, 0)):
return None
else:
return upload_time
except countdowntourney.TourneyException as e:
sys.stderr.write(
'Failed to get last successful upload time: %s\n' % str(e))
return None
def get_last_failed_upload(self, tourney_name):
try:
with countdowntourney.tourney_open(tourney_name, db_dir
) as tourney:
failed_upload = tourney.get_last_failed_upload()
if failed_upload is not None and failed_upload.get('ts', None
) is not None and failed_upload['ts'
] >= self.tourney_upload_start_time.get(tourney_name, 0):
return failed_upload
else:
return None
except countdowntourney.TourneyException as e:
sys.stderr.write('Failed to get last failed upload info: %s\n' %
str(e))
return None
def get_num_viewers(self, tourney_name):
return self.tourney_num_viewers.get(tourney_name, None)
def get_tourney_auth(self, tourney):
return self.tourney_auth.get(tourney)
def set_tourney_auth(self, tourney, username, password):
self.tourney_auth[tourney] = {'username': username, 'password':
password}
def get_upload_button_pressed_time(self, tourney):
if tourney not in self.uploading_tourneys:
return None
else:
return self.tourney_upload_start_time.get(tourney, None)
def write_log(self, message):
sys.stderr.write('%s: %s\r\n' % (time.strftime('%Y-%m-%d %H:%M:%S'),
message))
def body(self):
while True:
uploading_tourneys = self.uploading_tourneys.copy()
for tourney_name in uploading_tourneys:
now = time.time()
last_upload_time = self.tourney_last_upload_attempt_time.get(
tourney_name, 0)
if now >= last_upload_time + upload_interval_sec:
try:
self.tourney_last_upload_attempt_time[tourney_name
] = now
with countdowntourney.tourney_open(tourney_name, db_dir
) as tourney:
game_state = get_game_state(tourney)
tourney_unique_id = get_tourney_unique_id(tourney)
auth = self.tourney_auth.get(tourney_name, None)
if auth:
username = auth.get('username')
password = auth.get('password')
private = auth.get('private', False)
else:
username = None
password = None
private = False
req = {'username': username, 'password':
password, 'private': private, 'unique_id':
tourney_unique_id, 'tourney': tourney_name}
if (tourney_name not in self.
tourney_last_uploaded_game_state or
game_state != self.
tourney_last_uploaded_game_state[tourney_name]
):
req['state'] = game_state
rep = make_https_json_request(http_server_host,
http_server_port, http_submit_path, req)
num_viewers = None
if rep.get('success', False):
self.tourney_last_uploaded_game_state[
tourney_name] = game_state
tourney.log_successful_upload()
if 'state' in req:
self.write_log(
'Successfully uploaded state for tourney "%s"'
% tourney_name)
else:
self.write_log(
'No change since last upload of tourney "%s"'
% tourney_name)
num_viewers = rep.get('viewers', None)
if num_viewers is not None:
self.write_log(
'Server reports %d viewer%s.' % (
num_viewers, 's' if num_viewers !=
1 else ''))
else:
if rep.get('http_failure', False):
failure_type = (countdowntourney.
UPLOAD_FAIL_TYPE_HTTP)
else:
failure_type = (countdowntourney.
UPLOAD_FAIL_TYPE_REJECTED)
tourney.log_failed_upload(failure_type, rep
.get('message', '(no message)'))
self.write_log(
'Failed to upload state for tourney "%s": %s'
% (tourney_name, rep.get('message',
                                '(no message)')))
self.tourney_num_viewers[tourney_name
] = num_viewers
except countdowntourney.TourneyException as e:
self.write_log(
"UploaderThread: couldn't open tourney %s: %s" %
(tourney_name, str(e)))
traceback.print_tb(e.__traceback__)
continue
except Exception as e:
self.write_log(
'Uploader thread threw exception: %s' % str(e))
traceback.print_tb(e.__traceback__)
continue
time.sleep(1)
class UploaderServiceHandler(BaseRequestHandler):
def get_fields_from_req(self, req, field_names):
field_values = []
for name in field_names:
value = req.get(name, None)
if value is None:
raise FieldNotFoundException()
field_values.append(value)
return tuple(field_values)
def process_request(self, req):
global uploader_thread
req_type = req.get('type', None)
if not req_type:
return make_error_response('Request has no request type')
req_body = req.get('request', None)
if req_body is None:
return make_error_response('Request has no body')
try:
if req_type == 'start_uploading':
tourney, username, password, private = (self.
get_fields_from_req(req_body, ['tourney', 'username',
'password', 'private']))
uploader_thread.add_tourney_to_upload_list(tourney,
username, password, private)
rep = make_ok_response()
elif req_type == 'stop_uploading':
tourney, = self.get_fields_from_req(req_body, ['tourney'])
uploader_thread.remove_tourney_from_upload_list(tourney)
rep = make_ok_response()
elif req_type == 'delete':
tourney, username, password = self.get_fields_from_req(req_body
, ['tourney', 'username', 'password'])
uploader_thread.remove_tourney_from_upload_list(tourney)
rep = delete_tourney_from_web(tourney, username, password)
uploader_thread.set_tourney_auth(tourney, username, password)
elif req_type == 'status':
tourney, = self.get_fields_from_req(req_body, ['tourney'])
rep = {'success': True}
auth = uploader_thread.get_tourney_auth(tourney)
rep['publishing'] = uploader_thread.is_uploading_tourney(
tourney)
rep['viewers'] = uploader_thread.get_num_viewers(tourney)
if auth:
rep['username'] = auth.get('username', None)
rep['password'] = auth.get('password', None)
rep['private'] = auth.get('private', False)
rep['last_successful_upload_time'
] = uploader_thread.get_last_successful_upload_time(tourney
)
rep['last_failed_upload'
] = uploader_thread.get_last_failed_upload(tourney)
rep['upload_button_pressed_time'
] = uploader_thread.get_upload_button_pressed_time(tourney)
rep['now'] = int(time.time())
else:
rep = make_error_response('Unrecognised request type')
except FieldNotFoundException:
return make_error_response('Request is not valid for type')
return rep
def handle(self):
line = read_line_from_socket(self.request)
if line is not None:
rep = None
try:
req = json.loads(line)
except Exception as e:
rep = make_error_response('Request is not valid JSON')
if not rep:
rep = self.process_request(req)
self.request.sendall((json.dumps(rep) + '\n').encode('utf-8'))
self.request.close()
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, addr_port, service_handler):
self.allow_reuse_address = True
super().__init__(addr_port, service_handler)
class TourneyUploaderService(object):
def __init__(self, listen_port):
global uploader_thread
self.listen_port = listen_port
self.socket_server = ThreadedTCPServer(('127.0.0.1', listen_port),
UploaderServiceHandler)
self.server_thread = threading.Thread(target=self.socket_server.
serve_forever)
if not uploader_thread:
uploader_thread = UploaderThread()
self.server_thread.daemon = True
self.server_thread.start()
def shutdown(self):
self.socket_server.shutdown()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FieldNotFoundException(Exception):
pass
def make_error_response(message):
return {'success': False, 'message': message}
<|reserved_special_token_0|>
def get_tourney_unique_id(tourney):
return tourney.get_unique_id()
def delete_tourney_from_web(tourney_name, username, password):
req = {'username': username, 'password': password, 'tourney':
tourney_name, 'delete': True}
return make_https_json_request(http_server_host, http_server_port,
http_delete_path, req)
<|reserved_special_token_0|>
class UploaderThread(object):
def __init__(self):
self.uploading_tourneys = set()
self.tourney_upload_start_time = {}
self.tourney_last_upload_attempt_time = {}
self.tourney_last_uploaded_game_state = {}
self.tourney_num_viewers = {}
self.tourney_auth = {}
self.thread = threading.Thread(target=self.body)
self.thread.daemon = True
self.thread.start()
def is_uploading_tourney(self, tourney):
return tourney in self.uploading_tourneys
def add_tourney_to_upload_list(self, tourney, username, password, private):
self.uploading_tourneys.add(tourney)
self.tourney_auth[tourney] = {'username': username, 'password':
password, 'private': private}
self.tourney_upload_start_time[tourney] = int(time.time())
if tourney in self.tourney_last_uploaded_game_state:
del self.tourney_last_uploaded_game_state[tourney]
self.tourney_last_upload_attempt_time[tourney] = 0
def remove_tourney_from_upload_list(self, tourney):
self.uploading_tourneys.discard(tourney)
def get_last_successful_upload_time(self, tourney_name):
try:
with countdowntourney.tourney_open(tourney_name, db_dir
) as tourney:
upload_time = tourney.get_last_successful_upload_time()
if (upload_time is None or upload_time < self.
tourney_upload_start_time.get(tourney_name, 0)):
return None
else:
return upload_time
except countdowntourney.TourneyException as e:
sys.stderr.write(
'Failed to get last successful upload time: %s\n' % str(e))
return None
def get_last_failed_upload(self, tourney_name):
try:
with countdowntourney.tourney_open(tourney_name, db_dir
) as tourney:
failed_upload = tourney.get_last_failed_upload()
if failed_upload is not None and failed_upload.get('ts', None
) is not None and failed_upload['ts'
] >= self.tourney_upload_start_time.get(tourney_name, 0):
return failed_upload
else:
return None
except countdowntourney.TourneyException as e:
sys.stderr.write('Failed to get last failed upload info: %s\n' %
str(e))
return None
def get_num_viewers(self, tourney_name):
return self.tourney_num_viewers.get(tourney_name, None)
def get_tourney_auth(self, tourney):
return self.tourney_auth.get(tourney)
def set_tourney_auth(self, tourney, username, password):
self.tourney_auth[tourney] = {'username': username, 'password':
password}
def get_upload_button_pressed_time(self, tourney):
if tourney not in self.uploading_tourneys:
return None
else:
return self.tourney_upload_start_time.get(tourney, None)
def write_log(self, message):
sys.stderr.write('%s: %s\r\n' % (time.strftime('%Y-%m-%d %H:%M:%S'),
message))
def body(self):
while True:
uploading_tourneys = self.uploading_tourneys.copy()
for tourney_name in uploading_tourneys:
now = time.time()
last_upload_time = self.tourney_last_upload_attempt_time.get(
tourney_name, 0)
if now >= last_upload_time + upload_interval_sec:
try:
self.tourney_last_upload_attempt_time[tourney_name
] = now
with countdowntourney.tourney_open(tourney_name, db_dir
) as tourney:
game_state = get_game_state(tourney)
tourney_unique_id = get_tourney_unique_id(tourney)
auth = self.tourney_auth.get(tourney_name, None)
if auth:
username = auth.get('username')
password = auth.get('password')
private = auth.get('private', False)
else:
username = None
password = None
private = False
req = {'username': username, 'password':
password, 'private': private, 'unique_id':
tourney_unique_id, 'tourney': tourney_name}
if (tourney_name not in self.
tourney_last_uploaded_game_state or
game_state != self.
tourney_last_uploaded_game_state[tourney_name]
):
req['state'] = game_state
rep = make_https_json_request(http_server_host,
http_server_port, http_submit_path, req)
num_viewers = None
if rep.get('success', False):
self.tourney_last_uploaded_game_state[
tourney_name] = game_state
tourney.log_successful_upload()
if 'state' in req:
self.write_log(
'Successfully uploaded state for tourney "%s"'
% tourney_name)
else:
self.write_log(
'No change since last upload of tourney "%s"'
% tourney_name)
num_viewers = rep.get('viewers', None)
if num_viewers is not None:
self.write_log(
'Server reports %d viewer%s.' % (
num_viewers, 's' if num_viewers !=
1 else ''))
else:
if rep.get('http_failure', False):
failure_type = (countdowntourney.
UPLOAD_FAIL_TYPE_HTTP)
else:
failure_type = (countdowntourney.
UPLOAD_FAIL_TYPE_REJECTED)
tourney.log_failed_upload(failure_type, rep
.get('message', '(no message)'))
self.write_log(
'Failed to upload state for tourney "%s": %s'
% (tourney_name, rep.get('message',
                                '(no message)')))
self.tourney_num_viewers[tourney_name
] = num_viewers
except countdowntourney.TourneyException as e:
self.write_log(
"UploaderThread: couldn't open tourney %s: %s" %
(tourney_name, str(e)))
traceback.print_tb(e.__traceback__)
continue
except Exception as e:
self.write_log(
'Uploader thread threw exception: %s' % str(e))
traceback.print_tb(e.__traceback__)
continue
time.sleep(1)
class UploaderServiceHandler(BaseRequestHandler):
def get_fields_from_req(self, req, field_names):
field_values = []
for name in field_names:
value = req.get(name, None)
if value is None:
raise FieldNotFoundException()
field_values.append(value)
return tuple(field_values)
def process_request(self, req):
global uploader_thread
req_type = req.get('type', None)
if not req_type:
return make_error_response('Request has no request type')
req_body = req.get('request', None)
if req_body is None:
return make_error_response('Request has no body')
try:
if req_type == 'start_uploading':
tourney, username, password, private = (self.
get_fields_from_req(req_body, ['tourney', 'username',
'password', 'private']))
uploader_thread.add_tourney_to_upload_list(tourney,
username, password, private)
rep = make_ok_response()
elif req_type == 'stop_uploading':
tourney, = self.get_fields_from_req(req_body, ['tourney'])
uploader_thread.remove_tourney_from_upload_list(tourney)
rep = make_ok_response()
elif req_type == 'delete':
tourney, username, password = self.get_fields_from_req(req_body
, ['tourney', 'username', 'password'])
uploader_thread.remove_tourney_from_upload_list(tourney)
rep = delete_tourney_from_web(tourney, username, password)
uploader_thread.set_tourney_auth(tourney, username, password)
elif req_type == 'status':
tourney, = self.get_fields_from_req(req_body, ['tourney'])
rep = {'success': True}
auth = uploader_thread.get_tourney_auth(tourney)
rep['publishing'] = uploader_thread.is_uploading_tourney(
tourney)
rep['viewers'] = uploader_thread.get_num_viewers(tourney)
if auth:
rep['username'] = auth.get('username', None)
rep['password'] = auth.get('password', None)
rep['private'] = auth.get('private', False)
rep['last_successful_upload_time'
] = uploader_thread.get_last_successful_upload_time(tourney
)
rep['last_failed_upload'
] = uploader_thread.get_last_failed_upload(tourney)
rep['upload_button_pressed_time'
] = uploader_thread.get_upload_button_pressed_time(tourney)
rep['now'] = int(time.time())
else:
rep = make_error_response('Unrecognised request type')
except FieldNotFoundException:
return make_error_response('Request is not valid for type')
return rep
def handle(self):
line = read_line_from_socket(self.request)
if line is not None:
rep = None
try:
req = json.loads(line)
except Exception as e:
rep = make_error_response('Request is not valid JSON')
if not rep:
rep = self.process_request(req)
self.request.sendall((json.dumps(rep) + '\n').encode('utf-8'))
self.request.close()
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, addr_port, service_handler):
self.allow_reuse_address = True
super().__init__(addr_port, service_handler)
class TourneyUploaderService(object):
def __init__(self, listen_port):
global uploader_thread
self.listen_port = listen_port
self.socket_server = ThreadedTCPServer(('127.0.0.1', listen_port),
UploaderServiceHandler)
self.server_thread = threading.Thread(target=self.socket_server.
serve_forever)
if not uploader_thread:
uploader_thread = UploaderThread()
self.server_thread.daemon = True
self.server_thread.start()
def shutdown(self):
self.socket_server.shutdown()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if not db_dir:
db_dir = os.path.join(os.getcwd(), 'tourneys')
<|reserved_special_token_0|>
class FieldNotFoundException(Exception):
pass
def make_error_response(message):
return {'success': False, 'message': message}
def make_ok_response():
return {'success': True}
def get_game_state(tourney):
return tourney2json.get_state_for_upload(tourney)
def get_tourney_unique_id(tourney):
return tourney.get_unique_id()
def delete_tourney_from_web(tourney_name, username, password):
req = {'username': username, 'password': password, 'tourney':
tourney_name, 'delete': True}
return make_https_json_request(http_server_host, http_server_port,
http_delete_path, req)
def read_line_from_socket(sock):
byte_array = b''
b = 0
while b != b'\n':
b = sock.recv(1)
if b is None or len(b) == 0:
return None
byte_array += b
return byte_array.decode('utf-8')
def make_https_json_request(server_host, server_port, path, request):
post_data = json.dumps(request)
httpcon = None
try:
httpcon = http.client.HTTPSConnection(host=server_host, port=
server_port, timeout=30)
httpcon.connect()
except Exception as e:
if httpcon:
httpcon.close()
sys.stderr.write('Failed to connect to %s: %s\r\n' % (server_host,
str(e)))
return {'success': False, 'http_failure': True, 'message':
'Failed to connect to %s: %s. Check your internet connection.' %
(server_host, str(e))}
try:
while path and path[0] == '/':
path = path[1:]
url = 'https://%s%s/%s' % (server_host, ':' + str(server_port) if
server_port else '', path)
httpcon.request('POST', url, post_data)
except ConnectionError as e:
httpcon.close()
sys.stderr.write('Failed to send HTTP request to %s: %s\r\n' % (url,
str(e)))
return {'success': False, 'http_failure': True, 'message':
'Failed to upload game state to server %s: %s. Check your internet connection.'
% (url, str(e))}
except Exception as e:
httpcon.close()
sys.stderr.write('Failed to send HTTP request to %s: %s\r\n' % (url,
str(e)))
return {'success': False, 'http_failure': True, 'message': str(e)}
try:
response = httpcon.getresponse()
except Exception as e:
sys.stderr.write('Failed to read response from %s: %s\r\n' % (url,
str(e)))
httpcon.close()
return {'success': False, 'http_failure': True, 'message': str(e)}
if response.status != 200:
sys.stderr.write(
'Failed to post data to %s: HTTP response %d: %s\r\n' % (url,
response.status, response.reason))
rep = {'success': False, 'http_failure': True, 'message':
'Failed to post update to server: HTTP %d: %s' % (response.
status, response.reason)}
else:
response_body = None
rep = None
try:
response_body = response.read()
except Exception as e:
sys.stderr.write('Failed to read response data from HTTP: ' +
str(e) + '\r\n')
rep = {'success': False, 'http_failure': True, 'message': str(e)}
if response_body is not None:
try:
rep = json.loads(response_body.decode('utf-8'))
if not rep.get('success', False):
message = rep.get('message', '(none)')
sys.stderr.write('Update failed. Message: ' + message +
'\r\n')
except Exception as e:
sys.stderr.write('Failed to parse server response: ' + str(
e) + '\r\n')
rep = {'success': False, 'message':
'Server response was invalid JSON: ' + str(e)}
httpcon.close()
return rep
class UploaderThread(object):
def __init__(self):
self.uploading_tourneys = set()
self.tourney_upload_start_time = {}
self.tourney_last_upload_attempt_time = {}
self.tourney_last_uploaded_game_state = {}
self.tourney_num_viewers = {}
self.tourney_auth = {}
self.thread = threading.Thread(target=self.body)
self.thread.daemon = True
self.thread.start()
def is_uploading_tourney(self, tourney):
return tourney in self.uploading_tourneys
def add_tourney_to_upload_list(self, tourney, username, password, private):
self.uploading_tourneys.add(tourney)
self.tourney_auth[tourney] = {'username': username, 'password':
password, 'private': private}
self.tourney_upload_start_time[tourney] = int(time.time())
if tourney in self.tourney_last_uploaded_game_state:
del self.tourney_last_uploaded_game_state[tourney]
self.tourney_last_upload_attempt_time[tourney] = 0
def remove_tourney_from_upload_list(self, tourney):
self.uploading_tourneys.discard(tourney)
def get_last_successful_upload_time(self, tourney_name):
try:
with countdowntourney.tourney_open(tourney_name, db_dir
) as tourney:
upload_time = tourney.get_last_successful_upload_time()
if (upload_time is None or upload_time < self.
tourney_upload_start_time.get(tourney_name, 0)):
return None
else:
return upload_time
except countdowntourney.TourneyException as e:
sys.stderr.write(
'Failed to get last successful upload time: %s\n' % str(e))
return None
def get_last_failed_upload(self, tourney_name):
try:
with countdowntourney.tourney_open(tourney_name, db_dir
) as tourney:
failed_upload = tourney.get_last_failed_upload()
if failed_upload is not None and failed_upload.get('ts', None
) is not None and failed_upload['ts'
] >= self.tourney_upload_start_time.get(tourney_name, 0):
return failed_upload
else:
return None
except countdowntourney.TourneyException as e:
sys.stderr.write('Failed to get last failed upload info: %s\n' %
str(e))
return None
def get_num_viewers(self, tourney_name):
return self.tourney_num_viewers.get(tourney_name, None)
def get_tourney_auth(self, tourney):
return self.tourney_auth.get(tourney)
def set_tourney_auth(self, tourney, username, password):
self.tourney_auth[tourney] = {'username': username, 'password':
password}
def get_upload_button_pressed_time(self, tourney):
if tourney not in self.uploading_tourneys:
return None
else:
return self.tourney_upload_start_time.get(tourney, None)
def write_log(self, message):
sys.stderr.write('%s: %s\r\n' % (time.strftime('%Y-%m-%d %H:%M:%S'),
message))
def body(self):
while True:
uploading_tourneys = self.uploading_tourneys.copy()
for tourney_name in uploading_tourneys:
now = time.time()
last_upload_time = self.tourney_last_upload_attempt_time.get(
tourney_name, 0)
if now >= last_upload_time + upload_interval_sec:
try:
self.tourney_last_upload_attempt_time[tourney_name
] = now
with countdowntourney.tourney_open(tourney_name, db_dir
) as tourney:
game_state = get_game_state(tourney)
tourney_unique_id = get_tourney_unique_id(tourney)
auth = self.tourney_auth.get(tourney_name, None)
if auth:
username = auth.get('username')
password = auth.get('password')
private = auth.get('private', False)
else:
username = None
password = None
private = False
req = {'username': username, 'password':
password, 'private': private, 'unique_id':
tourney_unique_id, 'tourney': tourney_name}
if (tourney_name not in self.
tourney_last_uploaded_game_state or
game_state != self.
tourney_last_uploaded_game_state[tourney_name]
):
req['state'] = game_state
rep = make_https_json_request(http_server_host,
http_server_port, http_submit_path, req)
num_viewers = None
if rep.get('success', False):
self.tourney_last_uploaded_game_state[
tourney_name] = game_state
tourney.log_successful_upload()
if 'state' in req:
self.write_log(
'Successfully uploaded state for tourney "%s"'
% tourney_name)
else:
self.write_log(
'No change since last upload of tourney "%s"'
% tourney_name)
num_viewers = rep.get('viewers', None)
if num_viewers is not None:
self.write_log(
'Server reports %d viewer%s.' % (
num_viewers, 's' if num_viewers !=
1 else ''))
else:
if rep.get('http_failure', False):
failure_type = (countdowntourney.
UPLOAD_FAIL_TYPE_HTTP)
else:
failure_type = (countdowntourney.
UPLOAD_FAIL_TYPE_REJECTED)
tourney.log_failed_upload(failure_type, rep
.get('message', '(no message)'))
self.write_log(
'Failed to upload state for tourney "%s": %s'
% (tourney_name, rep.get('message',
                                '(no message)')))
self.tourney_num_viewers[tourney_name
] = num_viewers
except countdowntourney.TourneyException as e:
self.write_log(
"UploaderThread: couldn't open tourney %s: %s" %
(tourney_name, str(e)))
traceback.print_tb(e.__traceback__)
continue
except Exception as e:
self.write_log(
'Uploader thread threw exception: %s' % str(e))
traceback.print_tb(e.__traceback__)
continue
time.sleep(1)
class UploaderServiceHandler(BaseRequestHandler):
def get_fields_from_req(self, req, field_names):
field_values = []
for name in field_names:
value = req.get(name, None)
if value is None:
raise FieldNotFoundException()
field_values.append(value)
return tuple(field_values)
def process_request(self, req):
global uploader_thread
req_type = req.get('type', None)
if not req_type:
return make_error_response('Request has no request type')
req_body = req.get('request', None)
if req_body is None:
return make_error_response('Request has no body')
try:
if req_type == 'start_uploading':
tourney, username, password, private = (self.
get_fields_from_req(req_body, ['tourney', 'username',
'password', 'private']))
uploader_thread.add_tourney_to_upload_list(tourney,
username, password, private)
rep = make_ok_response()
elif req_type == 'stop_uploading':
tourney, = self.get_fields_from_req(req_body, ['tourney'])
uploader_thread.remove_tourney_from_upload_list(tourney)
rep = make_ok_response()
elif req_type == 'delete':
tourney, username, password = self.get_fields_from_req(req_body
, ['tourney', 'username', 'password'])
uploader_thread.remove_tourney_from_upload_list(tourney)
rep = delete_tourney_from_web(tourney, username, password)
uploader_thread.set_tourney_auth(tourney, username, password)
elif req_type == 'status':
tourney, = self.get_fields_from_req(req_body, ['tourney'])
rep = {'success': True}
auth = uploader_thread.get_tourney_auth(tourney)
rep['publishing'] = uploader_thread.is_uploading_tourney(
tourney)
rep['viewers'] = uploader_thread.get_num_viewers(tourney)
if auth:
rep['username'] = auth.get('username', None)
rep['password'] = auth.get('password', None)
rep['private'] = auth.get('private', False)
rep['last_successful_upload_time'
] = uploader_thread.get_last_successful_upload_time(tourney
)
rep['last_failed_upload'
] = uploader_thread.get_last_failed_upload(tourney)
rep['upload_button_pressed_time'
] = uploader_thread.get_upload_button_pressed_time(tourney)
rep['now'] = int(time.time())
else:
rep = make_error_response('Unrecognised request type')
except FieldNotFoundException:
return make_error_response('Request is not valid for type')
return rep
def handle(self):
line = read_line_from_socket(self.request)
if line is not None:
rep = None
try:
req = json.loads(line)
except Exception as e:
rep = make_error_response('Request is not valid JSON')
if not rep:
rep = self.process_request(req)
self.request.sendall((json.dumps(rep) + '\n').encode('utf-8'))
self.request.close()
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, addr_port, service_handler):
self.allow_reuse_address = True
super().__init__(addr_port, service_handler)
class TourneyUploaderService(object):
def __init__(self, listen_port):
global uploader_thread
self.listen_port = listen_port
self.socket_server = ThreadedTCPServer(('127.0.0.1', listen_port),
UploaderServiceHandler)
self.server_thread = threading.Thread(target=self.socket_server.
serve_forever)
if not uploader_thread:
uploader_thread = UploaderThread()
self.server_thread.daemon = True
self.server_thread.start()
def shutdown(self):
self.socket_server.shutdown()
<|reserved_special_token_1|>
#!/usr/bin/python3
# The uploader service listens for connections from localhost on port 3961.
# It expects a JSON object on a line by itself as the request. It responds
# with another JSON object on a line by itself, then closes the connection.
# Atropine CGI scripts can send requests to this service to tell it to:
# * Add a tourney to the list of tourneys we're periodically uploading to
# greem.co.uk
# * Remove a tourney from that list (i.e. stop uploading it)
# * Get the upload state of a tourney (are we uploading it, when was the
# last successful upload, was the last upload successful, and if not what
# went wrong)
#
# The service is started with atropine.py, and runs alongside the web server
# which serves the web interface used by the tournament administrator. At
# startup, no tourneys are being uploaded.
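#
# Illustrative exchange for the "status" request type (the field names follow
# UploaderServiceHandler below; the tourney name and the values shown are
# made-up examples, not captured traffic):
#   client -> {"type": "status", "request": {"tourney": "mytourney"}}
#   server <- {"success": true, "publishing": false, "viewers": null, ...}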
import sys
import os
import socketserver
from socketserver import BaseRequestHandler
import json
import threading
import time
import http.client
import traceback
http_server_host = "greem.co.uk"
http_server_port = None
http_submit_path = "/cgi-bin/colive/submit.py"
http_delete_path = "/cgi-bin/colive/submit.py"
upload_interval_sec = 10
db_dir = os.getenv("TOURNEYSPATH")
if not db_dir:
db_dir = os.path.join(os.getcwd(), "tourneys")
import tourney2json
import countdowntourney
uploader_thread = None
class FieldNotFoundException(Exception):
pass
def make_error_response(message):
return { "success" : False, "message" : message }
def make_ok_response():
return { "success" : True }
def get_game_state(tourney):
return tourney2json.get_state_for_upload(tourney)
def get_tourney_unique_id(tourney):
return tourney.get_unique_id()
def delete_tourney_from_web(tourney_name, username, password):
req = {
"username" : username,
"password" : password,
"tourney" : tourney_name,
"delete" : True
}
return make_https_json_request(http_server_host, http_server_port, http_delete_path, req)
def read_line_from_socket(sock):
byte_array = b'';
b = 0
while b != b'\n':
b = sock.recv(1)
if b is None or len(b) == 0:
return None
byte_array += b
return byte_array.decode("utf-8")
def make_https_json_request(server_host, server_port, path, request):
post_data = json.dumps(request)
httpcon = None
try:
httpcon = http.client.HTTPSConnection(host=server_host, port=server_port, timeout=30)
httpcon.connect()
except Exception as e:
if httpcon:
httpcon.close()
sys.stderr.write("Failed to connect to %s: %s\r\n" % (server_host, str(e)))
return { "success" : False, "http_failure" : True, "message" : "Failed to connect to %s: %s. Check your internet connection." % (server_host, str(e)) }
try:
while path and path[0] == '/':
path = path[1:]
url = "https://%s%s/%s" % (server_host, (":" + str(server_port)) if server_port else "", path)
httpcon.request("POST", url, post_data)
except ConnectionError as e:
httpcon.close()
sys.stderr.write("Failed to send HTTP request to %s: %s\r\n" % (url, str(e)))
return {
"success" : False,
"http_failure" : True,
"message" : "Failed to upload game state to server %s: %s. Check your internet connection." % (url, str(e))
}
except Exception as e:
httpcon.close()
sys.stderr.write("Failed to send HTTP request to %s: %s\r\n" % (url, str(e)))
return { "success" : False, "http_failure" : True, "message" : str(e) }
try:
response = httpcon.getresponse()
except Exception as e:
sys.stderr.write("Failed to read response from %s: %s\r\n" % (url, str(e)))
httpcon.close()
return { "success" : False, "http_failure" : True, "message" : str(e) }
if response.status != 200:
sys.stderr.write("Failed to post data to %s: HTTP response %d: %s\r\n" % (url, response.status, response.reason))
rep = {
"success" : False,
"http_failure" : True,
"message" : "Failed to post update to server: HTTP %d: %s" % (response.status, response.reason)
}
else:
response_body = None
rep = None
try:
response_body = response.read()
except Exception as e:
sys.stderr.write("Failed to read response data from HTTP: " + str(e) + "\r\n")
rep = {
"success" : False,
"http_failure" : True,
"message" : str(e)
}
if response_body is not None:
try:
rep = json.loads(response_body.decode("utf-8"))
if not rep.get("success", False):
message = rep.get("message", "(none)")
sys.stderr.write("Update failed. Message: " + message + "\r\n")
except Exception as e:
sys.stderr.write("Failed to parse server response: " + str(e) + "\r\n")
rep = {
"success" : False,
"message" : "Server response was invalid JSON: " + str(e)
}
httpcon.close()
return rep
class UploaderThread(object):
def __init__(self):
self.uploading_tourneys = set()
self.tourney_upload_start_time = {}
self.tourney_last_upload_attempt_time = {}
self.tourney_last_uploaded_game_state = {}
self.tourney_num_viewers = {}
self.tourney_auth = {}
self.thread = threading.Thread(target=self.body)
self.thread.daemon = True
self.thread.start()
def is_uploading_tourney(self, tourney):
return (tourney in self.uploading_tourneys)
def add_tourney_to_upload_list(self, tourney, username, password, private):
self.uploading_tourneys.add(tourney)
self.tourney_auth[tourney] = { "username" : username, "password" : password, "private" : private }
        self.tourney_upload_start_time[tourney] = int(time.time())
if tourney in self.tourney_last_uploaded_game_state:
del self.tourney_last_uploaded_game_state[tourney]
self.tourney_last_upload_attempt_time[tourney] = 0
def remove_tourney_from_upload_list(self, tourney):
self.uploading_tourneys.discard(tourney)
def get_last_successful_upload_time(self, tourney_name):
try:
with countdowntourney.tourney_open(tourney_name, db_dir) as tourney:
upload_time = tourney.get_last_successful_upload_time()
# Don't return this time if it's before the user even pressed
# the "start uploading" button"
if upload_time is None or upload_time < self.tourney_upload_start_time.get(tourney_name, 0):
return None
else:
return upload_time
except countdowntourney.TourneyException as e:
sys.stderr.write("Failed to get last successful upload time: %s\n" % (str(e)))
return None
def get_last_failed_upload(self, tourney_name):
try:
with countdowntourney.tourney_open(tourney_name, db_dir) as tourney:
failed_upload = tourney.get_last_failed_upload()
if failed_upload is not None and failed_upload.get("ts", None) is not None and failed_upload["ts"] >= self.tourney_upload_start_time.get(tourney_name, 0):
return failed_upload
else:
return None
except countdowntourney.TourneyException as e:
sys.stderr.write("Failed to get last failed upload info: %s\n" % (str(e)))
return None
def get_num_viewers(self, tourney_name):
return self.tourney_num_viewers.get(tourney_name, None)
def get_tourney_auth(self, tourney):
return self.tourney_auth.get(tourney)
def set_tourney_auth(self, tourney, username, password):
self.tourney_auth[tourney] = { "username" : username, "password" : password }
def get_upload_button_pressed_time(self, tourney):
if tourney not in self.uploading_tourneys:
return None
else:
return self.tourney_upload_start_time.get(tourney, None)
def write_log(self, message):
sys.stderr.write("%s: %s\r\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), message))
def body(self):
while True:
uploading_tourneys = self.uploading_tourneys.copy()
for tourney_name in uploading_tourneys:
now = time.time()
last_upload_time = self.tourney_last_upload_attempt_time.get(tourney_name, 0)
if now >= last_upload_time + upload_interval_sec:
# Upload this tourney to the web if it's been at least
# upload_interval_sec seconds since the previous upload
# attempt.
try:
self.tourney_last_upload_attempt_time[tourney_name] = now
with countdowntourney.tourney_open(tourney_name, db_dir) as tourney:
game_state = get_game_state(tourney)
tourney_unique_id = get_tourney_unique_id(tourney)
auth = self.tourney_auth.get(tourney_name, None)
if auth:
username = auth.get("username")
password = auth.get("password")
private = auth.get("private", False)
else:
username = None
password = None
private = False
req = {
"username" : username,
"password" : password,
"private" : private,
"unique_id" : tourney_unique_id,
"tourney" : tourney_name
}
# If the game state has changed since the last time
# we did a successful upload, include the new game
# state, otherwise we just submit a null update
# which only checks the server still works and
# reads how many current visitors there are.
if tourney_name not in self.tourney_last_uploaded_game_state or game_state != self.tourney_last_uploaded_game_state[tourney_name]:
req["state"] = game_state
# Send the submission to the server & get the reply
rep = make_https_json_request(http_server_host, http_server_port, http_submit_path, req)
num_viewers = None
if rep.get("success", False):
self.tourney_last_uploaded_game_state[tourney_name] = game_state
tourney.log_successful_upload()
if "state" in req:
self.write_log("Successfully uploaded state for tourney \"%s\"" % (tourney_name))
else:
self.write_log("No change since last upload of tourney \"%s\"" % (tourney_name))
num_viewers = rep.get("viewers", None)
if num_viewers is not None:
self.write_log("Server reports %d viewer%s." % (num_viewers, "s" if num_viewers != 1 else ""))
else:
if rep.get("http_failure", False):
failure_type = countdowntourney.UPLOAD_FAIL_TYPE_HTTP
else:
failure_type = countdowntourney.UPLOAD_FAIL_TYPE_REJECTED
tourney.log_failed_upload(failure_type, rep.get("message", "(no message)"))
self.write_log("Failed to upload state for tourney \"%s\": %s" % (tourney_name, rep.get("message", "(no message")))
self.tourney_num_viewers[tourney_name] = num_viewers
except countdowntourney.TourneyException as e:
self.write_log("UploaderThread: couldn't open tourney %s: %s" % (tourney_name, str(e)))
traceback.print_tb(e.__traceback__)
continue
except Exception as e:
self.write_log("Uploader thread threw exception: %s" % (str(e)))
traceback.print_tb(e.__traceback__)
continue
time.sleep(1)
class UploaderServiceHandler(BaseRequestHandler):
def get_fields_from_req(self, req, field_names):
field_values = []
for name in field_names:
value = req.get(name, None)
if value is None:
raise FieldNotFoundException()
field_values.append(value)
return tuple(field_values)
def process_request(self, req):
global uploader_thread
req_type = req.get("type", None)
if not req_type:
return make_error_response("Request has no request type")
req_body = req.get("request", None)
if req_body is None:
return make_error_response("Request has no body")
try:
if req_type == "start_uploading":
(tourney, username, password, private) = self.get_fields_from_req(req_body, ["tourney", "username", "password", "private"])
uploader_thread.add_tourney_to_upload_list(tourney, username, password, private)
rep = make_ok_response()
elif req_type == "stop_uploading":
(tourney,) = self.get_fields_from_req(req_body, ["tourney"])
uploader_thread.remove_tourney_from_upload_list(tourney)
rep = make_ok_response()
elif req_type == "delete":
(tourney, username, password) = self.get_fields_from_req(req_body, ["tourney", "username", "password"])
uploader_thread.remove_tourney_from_upload_list(tourney)
rep = delete_tourney_from_web(tourney, username, password)
uploader_thread.set_tourney_auth(tourney, username, password)
elif req_type == "status":
(tourney,) = self.get_fields_from_req(req_body, ["tourney"])
rep = { "success" : True }
auth = uploader_thread.get_tourney_auth(tourney)
rep["publishing"] = uploader_thread.is_uploading_tourney(tourney)
rep["viewers"] = uploader_thread.get_num_viewers(tourney)
if auth:
rep["username"] = auth.get("username", None)
rep["password"] = auth.get("password", None)
rep["private"] = auth.get("private", False)
rep["last_successful_upload_time"] = uploader_thread.get_last_successful_upload_time(tourney)
rep["last_failed_upload"] = uploader_thread.get_last_failed_upload(tourney)
rep["upload_button_pressed_time"] = uploader_thread.get_upload_button_pressed_time(tourney)
rep["now"] = int(time.time())
else:
rep = make_error_response("Unrecognised request type")
except FieldNotFoundException:
return make_error_response("Request is not valid for type")
return rep
def handle(self):
# Request is expected to be a JSON object, on a line by itself
line = read_line_from_socket(self.request)
if line is not None:
rep = None
try:
req = json.loads(line)
except Exception as e:
rep = make_error_response("Request is not valid JSON")
if not rep:
rep = self.process_request(req)
self.request.sendall((json.dumps(rep) + "\n").encode("utf-8"))
self.request.close()
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, addr_port, service_handler):
self.allow_reuse_address = True
super().__init__(addr_port, service_handler)
class TourneyUploaderService(object):
def __init__(self, listen_port):
global uploader_thread
self.listen_port = listen_port
self.socket_server = ThreadedTCPServer(("127.0.0.1", listen_port), UploaderServiceHandler)
self.server_thread = threading.Thread(target=self.socket_server.serve_forever)
if not uploader_thread:
uploader_thread = UploaderThread()
self.server_thread.daemon = True
self.server_thread.start()
def shutdown(self):
self.socket_server.shutdown()
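
# Illustrative client sketch (not part of the service): as handle() above shows,
# a request to the uploader service is a single JSON object on one line, and the
# reply is a single JSON line. This helper assumes the service was started with
# TourneyUploaderService on localhost and that you pass the same listen port;
# the function name below is made up for illustration only.
def example_send_uploader_request(request, port, host="127.0.0.1"):
    import json
    import socket
    # e.g. example_send_uploader_request({"type": "status", "request": {"tourney": "mytourney"}}, listen_port)
    with socket.create_connection((host, port)) as sock:
        # One JSON object per line, mirroring read_line_from_socket() on the server side.
        sock.sendall((json.dumps(request) + "\n").encode("utf-8"))
        reply = b""
        while not reply.endswith(b"\n"):
            chunk = sock.recv(1)
            if not chunk:
                break
            reply += chunk
    return json.loads(reply.decode("utf-8"))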
|
[
19,
21,
26,
31,
34
] |
import pandas as pd
import re
import sqlite3 as lite
import os
from pybedtools import BedTool
import django
from checkprimers import CheckPrimers
from pandas import ExcelWriter
import datetime
os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
django.setup()
class GetPrimers(object):
"""Extracts data from excel spread sheet and imports it into a sqlite database.
:param excel_file: excel file to be imported.
:param db: database the excel file should be imported into.
"""
def __init__(self, excel_file, db):
self.excel_file = excel_file
self.db = db
global con, curs
con = lite.connect(self.db) # Creates a database if it doesn't already exist.
curs = con.cursor()
def get_sheet_name(self):
"""Returns the sheetname to be used to import data from."""
xl = pd.ExcelFile(self.excel_file)
sheet_names = xl.sheet_names
for item in sheet_names:
if re.match('(.*)Current primers', item, re.IGNORECASE): # Only extracts most recent primers.
sheet_name = item
return sheet_name
def get_primers(self, sheetname):
"""Extracts primer data from sheet.
Function reads an excel sheet using pandas and stores this in the df_primers_dups data frame (contains
duplicated rows). The df_primers data frame will go on to be used in the virtual PCR, so irrelevant columns
are dropped and any duplicate rows are removed.
:param sheetname: sheet data to be extracted from
:return df_primers_dups: data frame containing extracted data which may include duplicates.
:return df_primers: data frame containing only data necessary to get genome coordinates.
"""
df_primers_dups = pd.read_excel(self.excel_file, header=0, parse_cols='A:M, O:X', skiprows=2,
names=['Gene', 'Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag',
'Batch', 'project', 'Order_date', 'Frag_size', 'anneal_temp', 'Other',
'snp_check', 'no_snps', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2',
'action_to_take', 'check_by'],
sheetname=sheetname, index_col=None)
to_drop = ['Version', 'M13_tag', 'Batch', 'project', 'Order_date', 'Frag_size', 'anneal_temp', 'Other',
'snp_check', 'no_snps', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',
'check_by']
df_primers_dups = df_primers_dups.where((pd.notnull(df_primers_dups)), None) # easier to work with than NaN
df_primers = df_primers_dups.drop(to_drop, axis=1)
df_primers = df_primers.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))
df_primers = df_primers.reset_index(drop=True)
return df_primers_dups, df_primers
def run_pcr(self, csv):
"""Runs virtual PCR on a CSV file using the isPcr and pslToBed tools installed from UCSC.
:param csv: a csv file is needed as input, with format "name, forward, reverse".
:return bedfile: with results of virtual PCR if there is a match.
"""
print "Running virtual PCR..."
chromosomes = ['chr1.2bit', 'chr11.2bit', 'chr12.2bit', 'chrX.2bit', 'chr13.2bit', 'chr14.2bit', 'chr15.2bit',
'chr16.2bit', 'chr17.2bit', 'chr18.2bit', 'chr19.2bit', 'chr20.2bit', 'chr21.2bit', 'chr22.2bit',
'chr2.2bit', 'chr3.2bit', 'chr4.2bit', 'chr5.2bit', 'chr6.2bit', 'chr7.2bit', 'chr8.2bit',
'chr9.2bit', 'chr10.2bit', 'chrY.2bit']
for chr in chromosomes:
os.system(
"/opt/kentools/isPcr -out=psl /media/genomicdata/ucsc_hg19_by_chr/2bit_chr/%s \
%s %s.tmp.psl" % (chr, csv, chr[:-5]))
pslfile = "%s.tmp.psl" % chr[:-5]
bedfile = "%s.tmp.bed" % chr[:-5]
# Only converts a non-empty psl file to a bed file, and removes all psl files in folder.
if os.path.getsize(pslfile) != 0:
os.system("/opt/kentools/pslToBed %s %s" % (pslfile, bedfile))
os.system("rm %s" % pslfile)
return bedfile
else:
os.system("rm %s" % pslfile)
def get_coords(self, df_primers):
"""Generates csv file for virtual PCR and imports results into a pandas data frame.
:param df_primers: data frame of primer data.
:return df_coords: data frame with chromosome, start and end coordinates, and a name
(format "Gene_ExonDirection") for each primer.
"""
primer_list = []
names_dup = []
names = []
exons = []
dirs = []
start_coords = []
end_coords = []
chroms = []
seq_position = 0
list_position = 0
primer_seqs = pd.DataFrame([])
csv = '%s.csv' % self.excel_file[:-5]
csv = csv.replace(" ", "")
# (1) Gets sequences, exons and directions, splits the sequences into F+R and combines into series and then csv.
for row_index, row in df_primers.iterrows():
primer_list.append(str(row['Primer_seq']))
names_dup.append(str(row['Gene']) + '_' + str(row['Exon']) + str(row['Direction']))
exons.append(str(row['Exon']))
dirs.append(str(row['Direction']))
for item in names_dup:
if item not in names:
names.append(item)
forwards = primer_list[::2]
reverses = primer_list[1::2]
while list_position < len(forwards):
ser = pd.Series([names[list_position], forwards[list_position], reverses[list_position]])
primer_seqs = primer_seqs.append(ser, ignore_index=True)
list_position += 1
primer_seqs.to_csv(csv, header=None, index=None, sep='\t')
# (2) Runs virtual PCR on generated csv.
bedfile = self.run_pcr(csv)
tool = BedTool(bedfile)
# (3) Uses results to calculate start and end position of each primer (results give PCR product). Adds to df.
for row in tool:
chroms.append(row.chrom)
start_coords.append(row.start)
end_coords.append(row.start + len(primer_list[seq_position]))
chroms.append(row.chrom)
end_coords.append(row.end)
start_coords.append(row.end - len(primer_list[seq_position + 1]))
seq_position += 1
df_coords = pd.DataFrame([])
df_coords.insert(0, 'chrom', chroms)
df_coords.insert(1, 'start', start_coords)
df_coords.insert(2, 'end', end_coords)
df_coords.insert(3, 'name', names)
# (4) Generates a bed file from df_coords (not currently used in application).
bed = os.path.splitext(bedfile)[0]
df_coords.to_csv('%s.csv' % bed, header=None, index=None, sep='\t') # cannot directly convert to bed.
csv_file = BedTool('%s.csv' % bed)
csv_file.saveas('%s.bed' % bed)
df_coords.insert(4, 'Exon', exons) # not needed in the bed file, so added after.
df_coords.insert(5, 'Direction', dirs)
# Removes unnecessary files and moves BED file into shared folder. (add /tests for unit testing)
os.system("rm /home/cuser/PycharmProjects/django_apps/mysite/%s.csv" % bed)
os.system("mv /home/cuser/PycharmProjects/django_apps/mysite/%s.bed /media/sf_sarah_share/bedfiles" %
bed)
os.system("rm /home/cuser/PycharmProjects/django_apps/mysite/%s" % csv)
return df_coords
def col_to_string(self, row):
"""Converts values in the Exon column into string values which makes merging data frames easier.
:param row: for every row in Exon column.
:return string of value.
"""
return str(row['Exon'])
def combine_coords_primers(self, df_coords, df_primers_dups):
"""Adds primer coordinates to original df_primers_dups data frame.
:param df_primers_dups: data frame with primer data from excel.
:param df_coords: data frame with chrom, start, end, name, exon, direction.
:return df_combined: data frame of merge between df_coords and df_primers_dups.
:return gene_name: this will be added to the Genes table and used to check if already in database.
"""
df_coords['Exon'] = df_coords.apply(self.col_to_string, axis=1)
df_primers_dups['Exon'] = df_primers_dups.apply(self.col_to_string, axis=1)
# Merge based on Exon and Direction columns
df_combined = pd.merge(df_primers_dups, df_coords, how='left', on=['Exon', 'Direction'])
# There is already a Chromosome column in df_primers_dups
cols_to_drop = ['chrom']
df_combined = df_combined.drop(cols_to_drop, axis=1)
gene_name = df_combined.get_value(0, 'Gene')
return df_combined, gene_name
def check_in_db(self, gene):
"""Queries the database to check if data for a particular gene is already present.
:param gene: a gene name to check against the database.
:return result: query result which will be a gene if already in database and None if not.
"""
curs.execute("SELECT Gene FROM Genes WHERE Gene LIKE '%s'" % gene)
result = curs.fetchone()
return result
def to_db(self, df_combined, gene_name):
"""Creates tables and adds data into the database.
Function modifies the given data frame to generate three tables in the database (Primers, SNPs, Genes) and
performs data checks. If data for a particular gene is already in the database, it is overwritten and the
previous data is saved to an excel document (archived_files).
The commented out section should only be used for the first file to initially set up the tables.
:param gene_name: gene to check against database.
:param df_combined: data frame to be inserted into database.
:return info: description of action performed (for audit log).
:return archived_filename: filename the previous data is saved under (for audit log).
"""
# (1) Creates database schema
curs.execute("CREATE TABLE IF NOT EXISTS Primers(PrimerId INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, "
"Gene TEXT, Exon TEXT, Direction TEXT, Version INTEGER, Primer_Seq TEXT, Chrom TEXT, M13_Tag TEXT"
", Batch TEXT, Project TEXT, Order_date TEXT, Frag_size INTEGER, Anneal_Temp TEXT, Other TEXT, "
"snp_check INTEGER, no_snps INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, ss_proj TEXT, "
"other2 TEXT, action_to_take TEXT, check_by TEXT, start TEXT, end TEXT, name TEXT)")
curs.execute("CREATE TABLE IF NOT EXISTS SNPs(SNP_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT, "
"Exon TEXT, Direction TEXT, snp_check INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, "
"ss_proj TEXT, other2 TEXT, action_to_take TEXT, check_by TEXT, name TEXT)")
curs.execute("CREATE TABLE IF NOT EXISTS Genes(Gene_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT)")
# (2) Drops unnecessary columns to make two tables and removes duplicates.
primertable_cols_to_drop = ['snp_check', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',
'check_by']
snptable_cols_to_drop = ['Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag', 'Batch', 'project',
'Order_date', 'Frag_size', 'anneal_temp', 'Other', 'no_snps', 'start', 'end']
df_primertable = df_combined.drop(primertable_cols_to_drop, axis=1)
df_primertable = df_primertable.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))
df_snptable = df_combined.drop(snptable_cols_to_drop, axis=1)
# (3) Performs data checks using CheckPrimers and CheckSNPs classes.
check = CheckPrimers(df_primertable, df_snptable)
total_errors, error_details = check.check_all()
# (4) Checks if gene data already in database.
uni_gene = '(u\'%s\',)' % gene_name
gene = self.check_in_db(gene_name) # this outputs a unicode string
# (5) Adds to database if no errors. Overrides data if already present.
archived_filename = None
if total_errors == 0:
if str(uni_gene) == str(gene):
# Add query to data frame then save to excel.
get_old_query = "SELECT p.Gene, p.Exon, p.Direction, p.Version, p.Primer_seq, p.Chrom, p.M13_Tag, " \
"p.Batch, p.Project, p.Order_date, p.Frag_size, p.Anneal_Temp, p.Other, s.snp_check, " \
"p.no_snps, s.rs, s.hgvs, s.freq, s.ss, s.ss_proj, s.other2, s.action_to_take, " \
"s.check_by FROM SNPs s LEFT JOIN Primers p ON s.name = p.name WHERE p.Gene='%s'" % \
gene_name
today_date = datetime.datetime.now().strftime("%d-%m-%Y_%H%M")
df_sql = pd.read_sql_query(get_old_query, con=con)
archived_filename = '%s_%s' % (gene_name, today_date)
writer = ExcelWriter('%s.xlsx' % archived_filename)
df_sql.to_excel(writer, '%s' % today_date, index=False)
writer.save()
os.system("mv /home/cuser/PycharmProjects/django_apps/mysite/%s.xlsx "
"/home/cuser/PycharmProjects/django_apps/mysite/primerdb/archived_files/" % archived_filename)
curs.execute("DELETE FROM Primers WHERE Gene='%s'" % gene_name)
curs.execute("DELETE FROM Genes WHERE Gene='%s'" % gene_name)
curs.execute("DELETE FROM SNPs WHERE Gene='%s'" % gene_name)
info = "Data updated."
else:
info = "New gene added."
# Insert new data into SQL tables.
curs.execute("INSERT INTO Genes (Gene) VALUES (?)", (gene_name,))
df_primertable.to_sql('Primers', con, if_exists='append', index=False)
df_snptable.to_sql('SNPs', con, if_exists='append', index=False)
print "Primers successfully added to database."
else:
info = error_details
con.commit()
return info, archived_filename
def all(self):
"""Combines all methods"""
sheetname = self.get_sheet_name()
df_primers_dups, df_primers = self.get_primers(sheetname)
df_coords = self.get_coords(df_primers)
df_combined, gene = self.combine_coords_primers(df_coords, df_primers_dups)
info, archived_filename = self.to_db(df_combined, gene)
return info, archived_filename
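# Illustrative usage sketch (not part of the original module): the file and
# database names below are assumptions, and running it also needs the UCSC
# isPcr/pslToBed tools and the hard-coded /home/cuser and /media paths above.
if __name__ == '__main__':
    importer = GetPrimers('gene_primers.xlsx', 'primers.db')
    result, archived = importer.all()
    print "Import finished: %s (archived file: %s)" % (result, archived)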
|
normal
|
{
"blob_id": "1d817ee09705301b574c421a9ff716748c146fdd",
"index": 9591,
"step-1": "import pandas as pd\nimport re\nimport sqlite3 as lite\nimport os\nfrom pybedtools import BedTool\nimport django\nfrom checkprimers import CheckPrimers\nfrom pandas import ExcelWriter\nimport datetime\nos.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'\ndjango.setup()\n\n\nclass GetPrimers(object):\n \"\"\"Extracts data from excel spread sheet and imports it into a sqlite database.\n :param excel_file: excel file to be imported.\n :param db: database the excel file should be imported into.\n \"\"\"\n\n def __init__(self, excel_file, db):\n self.excel_file = excel_file\n self.db = db\n global con, curs\n con = lite.connect(self.db) # Creates a database if it doesn't already exist.\n curs = con.cursor()\n\n def get_sheet_name(self):\n \"\"\"Returns the sheetname to be used to import data from.\"\"\"\n\n xl = pd.ExcelFile(self.excel_file)\n sheet_names = xl.sheet_names\n for item in sheet_names:\n if re.match('(.*)Current primers', item, re.IGNORECASE): # Only extracts most recent primers.\n sheet_name = item\n return sheet_name\n\n def get_primers(self, sheetname):\n \"\"\"Extracts primer data from sheet.\n Function reads an excel sheet using pandas and stores this in the df_primers_dups data frame (contains\n duplicated rows).The df_primers data frame will go on to be used in the virtual PCR so irrelevant columns\n are dropped and any duplicate rows are removed.\n :param sheetname: sheet data to be extracted from\n :return df_primers_dups: data frame containing extracted data which may include duplicates.\n :return df_primers: data frame containing only data necessary to get genome coordinates.\n \"\"\"\n df_primers_dups = pd.read_excel(self.excel_file, header=0, parse_cols='A:M, O:X', skiprows=2,\n names=['Gene', 'Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag',\n 'Batch', 'project', 'Order_date', 'Frag_size', 'anneal_temp', 'Other',\n 'snp_check', 'no_snps', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2',\n 'action_to_take', 'check_by'],\n sheetname=sheetname, index_col=None)\n\n to_drop = ['Version', 'M13_tag', 'Batch', 'project', 'Order_date', 'Frag_size', 'anneal_temp', 'Other',\n 'snp_check', 'no_snps', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',\n 'check_by']\n\n df_primers_dups = df_primers_dups.where((pd.notnull(df_primers_dups)), None) # easier to work with than NaN\n df_primers = df_primers_dups.drop(to_drop, axis=1)\n df_primers = df_primers.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))\n df_primers = df_primers.reset_index(drop=True)\n\n return df_primers_dups, df_primers\n\n def run_pcr(self, csv):\n \"\"\"Runs virtual PCR on a CSV file using the isPcr and pslToBed tools installed from UCSC.\n :param csv: a csv file is need as an input with format \"name, forward, reverse\".\n :return bedfile: with results of virtual PCR if there is a match.\n \"\"\"\n\n print \"Running virtual PCR...\"\n\n chromosomes = ['chr1.2bit', 'chr11.2bit', 'chr12.2bit', 'chrX.2bit', 'chr13.2bit', 'chr14.2bit', 'chr15.2bit',\n 'chr16.2bit', 'chr17.2bit', 'chr18.2bit', 'chr19.2bit', 'chr20.2bit', 'chr21.2bit', 'chr22.2bit',\n 'chr2.2bit', 'chr3.2bit', 'chr4.2bit', 'chr5.2bit', 'chr6.2bit', 'chr7.2bit', 'chr8.2bit',\n 'chr9.2bit', 'chr10.2bit', 'chrY.2bit']\n\n for chr in chromosomes:\n os.system(\n \"/opt/kentools/isPcr -out=psl /media/genomicdata/ucsc_hg19_by_chr/2bit_chr/%s \\\n %s %s.tmp.psl\" % (chr, csv, chr[:-5]))\n\n pslfile = \"%s.tmp.psl\" % chr[:-5]\n bedfile = \"%s.tmp.bed\" % chr[:-5]\n\n # Only converts a non-empty 
psl file to a bed file, and removes all psl files in folder.\n if os.path.getsize(pslfile) != 0:\n os.system(\"/opt/kentools/pslToBed %s %s\" % (pslfile, bedfile))\n os.system(\"rm %s\" % pslfile)\n return bedfile\n else:\n os.system(\"rm %s\" % pslfile)\n\n def get_coords(self, df_primers):\n \"\"\"Generates csv file for virtual PCR and imports results into a pandas data frame.\n :param df_primers: data frame of primer data.\n :return df_coords: data frame with chromosome, start and end coordinates, and a name\n (format \"Gene_ExonDirection\") for each primer.\n \"\"\"\n primer_list = []\n names_dup = []\n names = []\n exons = []\n dirs = []\n start_coords = []\n end_coords = []\n chroms = []\n seq_position = 0\n list_position = 0\n primer_seqs = pd.DataFrame([])\n csv = '%s.csv' % self.excel_file[:-5]\n csv = csv.replace(\" \", \"\")\n\n # (1) Gets sequences, exons and directions, splits the sequences into F+R and combines into series and then csv.\n for row_index, row in df_primers.iterrows():\n primer_list.append(str(row['Primer_seq']))\n names_dup.append(str(row['Gene']) + '_' + str(row['Exon']) + str(row['Direction']))\n exons.append(str(row['Exon']))\n dirs.append(str(row['Direction']))\n for item in names_dup:\n if item not in names:\n names.append(item)\n\n forwards = primer_list[::2]\n reverses = primer_list[1::2]\n\n while list_position < len(forwards):\n ser = pd.Series([names[list_position], forwards[list_position], reverses[list_position]])\n primer_seqs = primer_seqs.append(ser, ignore_index=True)\n list_position += 1\n\n primer_seqs.to_csv(csv, header=None, index=None, sep='\\t')\n\n # (2) Runs virtual PCR on generated csv.\n bedfile = self.run_pcr(csv)\n tool = BedTool(bedfile)\n\n # (3) Uses results to calculate start and end position of each primer (results give PCR product). Adds to df.\n for row in tool:\n chroms.append(row.chrom)\n start_coords.append(row.start)\n end_coords.append(row.start + len(primer_list[seq_position]))\n chroms.append(row.chrom)\n end_coords.append(row.end)\n start_coords.append(row.end - len(primer_list[seq_position + 1]))\n seq_position += 1\n\n df_coords = pd.DataFrame([])\n df_coords.insert(0, 'chrom', chroms)\n df_coords.insert(1, 'start', start_coords)\n df_coords.insert(2, 'end', end_coords)\n df_coords.insert(3, 'name', names)\n\n # (4) Generates a bed file from df_coords (not currently used in application).\n bed = os.path.splitext(bedfile)[0]\n df_coords.to_csv('%s.csv' % bed, header=None, index=None, sep='\\t') # cannot directly convert to bed.\n csv_file = BedTool('%s.csv' % bed)\n csv_file.saveas('%s.bed' % bed)\n\n df_coords.insert(4, 'Exon', exons) # not need in bed file so added after.\n df_coords.insert(5, 'Direction', dirs)\n\n # Removes unnecessary files and moves BED file into shared folder. 
(add /tests for unit testing)\n os.system(\"rm /home/cuser/PycharmProjects/django_apps/mysite/%s.csv\" % bed)\n os.system(\"mv /home/cuser/PycharmProjects/django_apps/mysite/%s.bed /media/sf_sarah_share/bedfiles\" %\n bed)\n os.system(\"rm /home/cuser/PycharmProjects/django_apps/mysite/%s\" % csv)\n\n return df_coords\n\n def col_to_string(self, row):\n \"\"\"Converts values in the Exon column into string values which makes merging data frames easier.\n :param row: for every row in Exon column.\n :return string of value.\n \"\"\"\n\n return str(row['Exon'])\n\n def combine_coords_primers(self, df_coords, df_primers_dups):\n \"\"\"Adds primer coordinates to original df_primers_dups data frame.\n :param df_primers_dups: data frame with primer data from excel.\n :param df_coords: data frame with chrom, start, end, name, exon, direction.\n :return df_combined: data frame of merge between df_coords and df_primers_dups.\n :return gene_name: this will be added to the Genes table and used to check if already in database.\n \"\"\"\n df_coords['Exon'] = df_coords.apply(self.col_to_string, axis=1)\n df_primers_dups['Exon'] = df_primers_dups.apply(self.col_to_string, axis=1)\n\n # Merge based on Exon and Direction columns\n df_combined = pd.merge(df_primers_dups, df_coords, how='left', on=['Exon', 'Direction'])\n\n # There is already a Chromosome column in df_primers_dups\n cols_to_drop = ['chrom']\n df_combined = df_combined.drop(cols_to_drop, axis=1)\n\n gene_name = df_combined.get_value(0, 'Gene')\n\n return df_combined, gene_name\n\n def check_in_db(self, gene):\n \"\"\"Queries the database to check if data for a particular gene is already present.\n :param gene: a gene name to check against the database.\n :return result: query result which will be a gene if already in database and None if not.\n \"\"\"\n curs.execute(\"SELECT Gene FROM Genes WHERE Gene LIKE '%s'\" % gene)\n result = curs.fetchone()\n\n return result\n\n def to_db(self, df_combined, gene_name):\n \"\"\"Creates tables and adds data into the database.\n Function modifies the given data frame to generate three tables in the database (Primers, SNPs, Genes) and\n performs data checks. 
If data for a particular gene is already in the database, this is overridden and the\n previous data is saved to an excel document (archived_files).\n The commented out section should only be used for the first file to initially set up the tables.\n :param gene_name: gene to check against database.\n :param df_combined: data frame to be inserted into database.\n :return info: description of action performed (for audit log).\n :return archived_filename: filename the previous data is saved under (for audit log).\n \"\"\"\n\n # (1) Creates database schema\n curs.execute(\"CREATE TABLE IF NOT EXISTS Primers(PrimerId INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, \"\n \"Gene TEXT, Exon TEXT, Direction TEXT, Version INTEGER, Primer_Seq TEXT, Chrom TEXT, M13_Tag TEXT\"\n \", Batch TEXT, Project TEXT, Order_date TEXT, Frag_size INTEGER, Anneal_Temp TEXT, Other TEXT, \"\n \"snp_check INTEGER, no_snps INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, ss_proj TEXT, \"\n \"other2 TEXT, action_to_take TEXT, check_by TEXT, start TEXT, end TEXT, name TEXT)\")\n\n curs.execute(\"CREATE TABLE IF NOT EXISTS SNPs(SNP_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT, \"\n \"Exon TEXT, Direction TEXT, snp_check INTEGER, rs TEXT, hgvs TEXT, freq TEXT, ss TEXT, \"\n \"ss_proj TEXT, other2 TEXT, action_to_take TEXT, check_by TEXT, name TEXT)\")\n\n curs.execute(\"CREATE TABLE IF NOT EXISTS Genes(Gene_Id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, Gene TEXT)\")\n\n # (2) Drops unnecessary columns to make two tables and removes duplicates.\n primertable_cols_to_drop = ['snp_check', 'rs', 'hgvs', 'freq', 'ss', 'ss_proj', 'other2', 'action_to_take',\n 'check_by']\n snptable_cols_to_drop = ['Exon', 'Direction', 'Version', 'Primer_seq', 'Chrom', 'M13_tag', 'Batch', 'project',\n 'Order_date', 'Frag_size', 'anneal_temp', 'Other', 'no_snps', 'start', 'end']\n\n df_primertable = df_combined.drop(primertable_cols_to_drop, axis=1)\n df_primertable = df_primertable.drop_duplicates(subset=('Gene', 'Exon', 'Direction', 'Chrom'))\n df_snptable = df_combined.drop(snptable_cols_to_drop, axis=1)\n\n # (3) Performs data checks using CheckPrimers and CheckSNPs classes.\n check = CheckPrimers(df_primertable, df_snptable)\n total_errors, error_details = check.check_all()\n\n # (4) Checks if gene data already in database.\n uni_gene = '(u\\'%s\\',)' % gene_name\n gene = self.check_in_db(gene_name) # this outputs a unicode string\n\n # (5) Adds to database if no errors. 
Overrides data if already present.\n archived_filename = None\n if total_errors == 0:\n if str(uni_gene) == str(gene):\n # Add query to data frame then save to excel.\n get_old_query = \"SELECT p.Gene, p.Exon, p.Direction, p.Version, p.Primer_seq, p.Chrom, p.M13_Tag, \" \\\n \"p.Batch, p.Project, p.Order_date, p.Frag_size, p.Anneal_Temp, p.Other, s.snp_check, \" \\\n \"p.no_snps, s.rs, s.hgvs, s.freq, s.ss, s.ss_proj, s.other2, s.action_to_take, \" \\\n \"s.check_by FROM SNPs s LEFT JOIN Primers p ON s.name = p.name WHERE p.Gene='%s'\" % \\\n gene_name\n today_date = datetime.datetime.now().strftime(\"%d-%m-%Y_%H%M\")\n df_sql = pd.read_sql_query(get_old_query, con=con)\n archived_filename = '%s_%s' % (gene_name, today_date)\n writer = ExcelWriter('%s.xlsx' % archived_filename)\n df_sql.to_excel(writer, '%s' % today_date, index=False)\n writer.save()\n os.system(\"mv /home/cuser/PycharmProjects/django_apps/mysite/%s.xlsx \"\n \"/home/cuser/PycharmProjects/django_apps/mysite/primerdb/archived_files/\" % archived_filename)\n\n curs.execute(\"DELETE FROM Primers WHERE Gene='%s'\" % gene_name)\n curs.execute(\"DELETE FROM Genes WHERE Gene='%s'\" % gene_name)\n curs.execute(\"DELETE FROM SNPs WHERE Gene='%s'\" % gene_name)\n\n info = \"Data updated.\"\n\n else:\n info = \"New gene added.\"\n\n # Insert new data into SQL tables.\n curs.execute(\"INSERT INTO Genes (Gene) VALUES (?)\", (gene_name,))\n df_primertable.to_sql('Primers', con, if_exists='append', index=False)\n df_snptable.to_sql('SNPs', con, if_exists='append', index=False)\n\n print \"Primers successfully added to database.\"\n else:\n info = error_details\n\n con.commit()\n return info, archived_filename\n\n def all(self):\n \"\"\"Combines all methods\"\"\"\n sheetname = self.get_sheet_name()\n df_primers_dups, df_primers = self.get_primers(sheetname)\n df_coords = self.get_coords(df_primers)\n df_combined, gene = self.combine_coords_primers(df_coords, df_primers_dups)\n info, archived_filename = self.to_db(df_combined, gene)\n return info, archived_filename\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""@brief the routes for Flask application
"""
import hashlib
import json
import time
import requests
from flask import render_template, url_for
from soco import SoCo
from app import app
app.config.from_pyfile("settings.py")
sonos = SoCo(app.config["SPEAKER_IP"])
def gen_sig():
"""@brief return the MD5 checksum """
return hashlib.md5(
(
app.config["ROVI_API_KEY"]
+ app.config["ROVI_SHARED_SECRET"]
+ repr(int(time.time()))
).encode("utf-8")
).hexdigest()
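# For reference, the signature above is simply the hex MD5 digest of the API
# key, the shared secret and the current UNIX timestamp concatenated together,
# e.g. with placeholder credentials:
#   hashlib.md5(("KEY" + "SECRET" + repr(int(time.time()))).encode("utf-8")).hexdigest()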
def get_track_image(artist, album):
"""@brief get the track image from Rovi """
blank_image = url_for("static", filename="img/blank.jpg")
if "ROVI_SHARED_SECRET" not in app.config:
return blank_image
if "ROVI_API_KEY" not in app.config:
return blank_image
headers = {"Accept-Encoding": "gzip"}
req = requests.get(
"http://api.rovicorp.com/recognition/v2.1/music/match/album?apikey="
+ app.config["ROVI_API_KEY"]
+ "&sig="
+ gen_sig()
+ "&name= "
+ album
+ "&performername="
+ artist
+ "&include=images&size=1",
headers=headers,
timeout=30,
)
if req.status_code != requests.codes.ok:
return blank_image
result = json.loads(req.content)
try:
return result["matchResponse"]["results"][0]["album"]["images"][0]["front"][3]["url"]
except (KeyError, IndexError):
return blank_image
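# Hypothetical call for illustration (the artist/album values are made up); the
# function returns the blank placeholder image whenever the Rovi keys are not
# configured, the request fails, or the expected JSON path is missing:
#   get_track_image("Daft Punk", "Discovery")  # -> cover URL or img/blank.jpg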
def current_track():
"""@brief get the current track information from Sonos """
track = sonos.get_current_track_info()
track["title"] = track["title"][:30]
track["artist"] = track["artist"][:30]
return track
@app.route("/play")
def play():
"""@brief the play function """
sonos.play()
return "Ok"
@app.route("/pause")
def pause():
"""@brief the pause function """
sonos.pause()
return "Ok"
@app.route("/following")
def following():
"""@brief the following function """
sonos.next()
return "Ok"
@app.route("/previous")
def previous():
"""@brief the previous function """
sonos.previous()
return "Ok"
@app.route("/volume")
def volume():
"""@brief get the actual volume """
vol = sonos.volume
return str(vol)  # Flask view functions must return a string, not an int
@app.route("/volume_up")
def volume_up():
"""@brief the volume up function """
sonos.set_relative_volume(10)
return "Ok"
@app.route("/volume_down")
def volume_down():
"""@brief the volume down function """
sonos.set_relative_volume(-10)
return "Ok"
@app.route("/volume_mute")
def volume_mute():
"""@brief the mute function """
sonos.mute = True
return "Ok"
@app.route("/volume_unmute")
def volume_unmute():
"""@brief the unmute function """
sonos.mute = False
return "Ok"
@app.route("/track_01")
def track_01():
"""@brief switch to new track """
sonos.play_uri('http://mp3stream1.apasf.apa.at:8000', title='FM4.ORF.AT', force_radio=True)
return "Ok"
@app.route("/track_02")
def track_02():
"""@brief switch to new track """
sonos.play_uri('http://streams.radiopsr.de/psr-live/mp3-192/mediaplayer', title='Radio PSR Live', force_radio=True)
return "Ok"
@app.route("/track_03")
def track_03():
"""@brief switch to new track """
sonos.play_uri('http://nrj.de/sachsen', title='Energy Sachsen', force_radio=True)
return "Ok"
@app.route("/track_04")
def track_04():
"""@brief switch to new track """
sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title='Sunshine Live', force_radio=True)
return "Ok"
@app.route("/info-light")
def info_light():
"""@brief the info-light function """
track = current_track()
return json.dumps(track)
@app.route("/info")
def info():
"""@brief the info function """
track = current_track()
track["image"] = get_track_image(track["artist"], track["album"])
transport = sonos.get_current_transport_info()
track["playing"] = transport["current_transport_state"] != "STOPPED"
track["mute"] = sonos.mute
return json.dumps(track)
@app.route("/")
@app.route('/index')
def index():
"""@brief the index function """
track = current_track()
track["image"] = get_track_image(track["artist"], track["album"])
return render_template("index.html", track=track)
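# A minimal settings.py sketch for this module (all values are placeholders):
# only SPEAKER_IP is strictly required; the two Rovi entries are optional and
# the cover art falls back to the blank image without them.
#   SPEAKER_IP = "192.168.1.50"
#   ROVI_API_KEY = "your-rovi-api-key"
#   ROVI_SHARED_SECRET = "your-rovi-shared-secret"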
|
normal
|
{
"blob_id": "86f33895e9ae0e026d7d6e40e611796b2dc2c713",
"index": 8394,
"step-1": "<mask token>\n\n\ndef gen_sig():\n \"\"\"@brief return the MD5 checksum \"\"\"\n return hashlib.md5((app.config['ROVI_API_KEY'] + app.config[\n 'ROVI_SHARED_SECRET'] + repr(int(time.time()))).encode('utf-8')\n ).hexdigest()\n\n\ndef get_track_image(artist, album):\n \"\"\"@brief get the track image from Rovi \"\"\"\n blank_image = url_for('static', filename='img/blank.jpg')\n if 'ROVI_SHARED_SECRET' not in app.config:\n return blank_image\n if 'ROVI_API_KEY' not in app.config:\n return blank_image\n headers = {'Accept-Encoding': 'gzip'}\n req = requests.get(\n 'http://api.rovicorp.com/recognition/v2.1/music/match/album?apikey=' +\n app.config['ROVI_API_KEY'] + '&sig=' + gen_sig() + '&name= ' +\n album + '&performername=' + artist + '&include=images&size=1',\n headers=headers, timeout=30)\n if req.status_code != requests.codes.ok:\n return blank_image\n result = json.loads(req.content)\n try:\n return result['matchResponse']['results'][0]['album']['images'][0][\n 'front'][3]['url']\n except (KeyError, IndexError):\n return blank_image\n\n\ndef current_track():\n \"\"\"@brief get the current track information from Sonos \"\"\"\n track = sonos.get_current_track_info()\n track['title'] = track['title'][:30]\n track['artist'] = track['artist'][:30]\n return track\n\n\[email protected]('/play')\ndef play():\n \"\"\"@brief the play function \"\"\"\n sonos.play()\n return 'Ok'\n\n\[email protected]('/pause')\ndef pause():\n \"\"\"@brief the pause function \"\"\"\n sonos.pause()\n return 'Ok'\n\n\[email protected]('/following')\ndef following():\n \"\"\"@brief the following function \"\"\"\n sonos.next()\n return 'Ok'\n\n\[email protected]('/previous')\ndef previous():\n \"\"\"@brief the previous function \"\"\"\n sonos.previous()\n return 'Ok'\n\n\[email protected]('/volume')\ndef volume():\n \"\"\"@brief get the actual volume \"\"\"\n vol = sonos.volume\n return vol\n\n\[email protected]('/volume_up')\ndef volume_up():\n \"\"\"@brief the volume up function \"\"\"\n sonos.set_relative_volume(10)\n return 'Ok'\n\n\[email protected]('/volume_down')\ndef volume_down():\n \"\"\"@brief the volume down function \"\"\"\n sonos.set_relative_volume(-10)\n return 'Ok'\n\n\[email protected]('/volume_mute')\ndef volume_mute():\n \"\"\"@brief the mute function \"\"\"\n sonos.mute = True\n return 'Ok'\n\n\[email protected]('/volume_unmute')\ndef volume_unmute():\n \"\"\"@brief the unmute function \"\"\"\n sonos.mute = False\n return 'Ok'\n\n\n<mask token>\n\n\[email protected]('/track_02')\ndef track_02():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://streams.radiopsr.de/psr-live/mp3-192/mediaplayer',\n title='Radio PSR Live', force_radio=True)\n return 'Ok'\n\n\n<mask token>\n\n\[email protected]('/track_04')\ndef track_04():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title=\n 'Sunshine Live', force_radio=True)\n return 'Ok'\n\n\[email protected]('/info-light')\ndef info_light():\n \"\"\"@brief the info-light function \"\"\"\n track = current_track()\n return json.dumps(track)\n\n\[email protected]('/info')\ndef info():\n \"\"\"@brief the info function \"\"\"\n track = current_track()\n track['image'] = get_track_image(track['artist'], track['album'])\n transport = sonos.get_current_transport_info()\n track['playing'] = transport['current_transport_state'] != 'STOPPED'\n track['mute'] = sonos.mute\n return json.dumps(track)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gen_sig():\n \"\"\"@brief return the MD5 checksum \"\"\"\n return hashlib.md5((app.config['ROVI_API_KEY'] + app.config[\n 'ROVI_SHARED_SECRET'] + repr(int(time.time()))).encode('utf-8')\n ).hexdigest()\n\n\ndef get_track_image(artist, album):\n \"\"\"@brief get the track image from Rovi \"\"\"\n blank_image = url_for('static', filename='img/blank.jpg')\n if 'ROVI_SHARED_SECRET' not in app.config:\n return blank_image\n if 'ROVI_API_KEY' not in app.config:\n return blank_image\n headers = {'Accept-Encoding': 'gzip'}\n req = requests.get(\n 'http://api.rovicorp.com/recognition/v2.1/music/match/album?apikey=' +\n app.config['ROVI_API_KEY'] + '&sig=' + gen_sig() + '&name= ' +\n album + '&performername=' + artist + '&include=images&size=1',\n headers=headers, timeout=30)\n if req.status_code != requests.codes.ok:\n return blank_image\n result = json.loads(req.content)\n try:\n return result['matchResponse']['results'][0]['album']['images'][0][\n 'front'][3]['url']\n except (KeyError, IndexError):\n return blank_image\n\n\ndef current_track():\n \"\"\"@brief get the current track information from Sonos \"\"\"\n track = sonos.get_current_track_info()\n track['title'] = track['title'][:30]\n track['artist'] = track['artist'][:30]\n return track\n\n\[email protected]('/play')\ndef play():\n \"\"\"@brief the play function \"\"\"\n sonos.play()\n return 'Ok'\n\n\[email protected]('/pause')\ndef pause():\n \"\"\"@brief the pause function \"\"\"\n sonos.pause()\n return 'Ok'\n\n\[email protected]('/following')\ndef following():\n \"\"\"@brief the following function \"\"\"\n sonos.next()\n return 'Ok'\n\n\[email protected]('/previous')\ndef previous():\n \"\"\"@brief the previous function \"\"\"\n sonos.previous()\n return 'Ok'\n\n\[email protected]('/volume')\ndef volume():\n \"\"\"@brief get the actual volume \"\"\"\n vol = sonos.volume\n return vol\n\n\[email protected]('/volume_up')\ndef volume_up():\n \"\"\"@brief the volume up function \"\"\"\n sonos.set_relative_volume(10)\n return 'Ok'\n\n\[email protected]('/volume_down')\ndef volume_down():\n \"\"\"@brief the volume down function \"\"\"\n sonos.set_relative_volume(-10)\n return 'Ok'\n\n\[email protected]('/volume_mute')\ndef volume_mute():\n \"\"\"@brief the mute function \"\"\"\n sonos.mute = True\n return 'Ok'\n\n\[email protected]('/volume_unmute')\ndef volume_unmute():\n \"\"\"@brief the unmute function \"\"\"\n sonos.mute = False\n return 'Ok'\n\n\[email protected]('/track_01')\ndef track_01():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://mp3stream1.apasf.apa.at:8000', title=\n 'FM4.ORF.AT', force_radio=True)\n return 'Ok'\n\n\[email protected]('/track_02')\ndef track_02():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://streams.radiopsr.de/psr-live/mp3-192/mediaplayer',\n title='Radio PSR Live', force_radio=True)\n return 'Ok'\n\n\n<mask token>\n\n\[email protected]('/track_04')\ndef track_04():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title=\n 'Sunshine Live', force_radio=True)\n return 'Ok'\n\n\[email protected]('/info-light')\ndef info_light():\n \"\"\"@brief the info-light function \"\"\"\n track = current_track()\n return json.dumps(track)\n\n\[email protected]('/info')\ndef info():\n \"\"\"@brief the info function \"\"\"\n track = current_track()\n track['image'] = get_track_image(track['artist'], track['album'])\n transport = sonos.get_current_transport_info()\n track['playing'] = 
transport['current_transport_state'] != 'STOPPED'\n track['mute'] = sonos.mute\n return json.dumps(track)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gen_sig():\n \"\"\"@brief return the MD5 checksum \"\"\"\n return hashlib.md5((app.config['ROVI_API_KEY'] + app.config[\n 'ROVI_SHARED_SECRET'] + repr(int(time.time()))).encode('utf-8')\n ).hexdigest()\n\n\ndef get_track_image(artist, album):\n \"\"\"@brief get the track image from Rovi \"\"\"\n blank_image = url_for('static', filename='img/blank.jpg')\n if 'ROVI_SHARED_SECRET' not in app.config:\n return blank_image\n if 'ROVI_API_KEY' not in app.config:\n return blank_image\n headers = {'Accept-Encoding': 'gzip'}\n req = requests.get(\n 'http://api.rovicorp.com/recognition/v2.1/music/match/album?apikey=' +\n app.config['ROVI_API_KEY'] + '&sig=' + gen_sig() + '&name= ' +\n album + '&performername=' + artist + '&include=images&size=1',\n headers=headers, timeout=30)\n if req.status_code != requests.codes.ok:\n return blank_image\n result = json.loads(req.content)\n try:\n return result['matchResponse']['results'][0]['album']['images'][0][\n 'front'][3]['url']\n except (KeyError, IndexError):\n return blank_image\n\n\ndef current_track():\n \"\"\"@brief get the current track information from Sonos \"\"\"\n track = sonos.get_current_track_info()\n track['title'] = track['title'][:30]\n track['artist'] = track['artist'][:30]\n return track\n\n\[email protected]('/play')\ndef play():\n \"\"\"@brief the play function \"\"\"\n sonos.play()\n return 'Ok'\n\n\[email protected]('/pause')\ndef pause():\n \"\"\"@brief the pause function \"\"\"\n sonos.pause()\n return 'Ok'\n\n\[email protected]('/following')\ndef following():\n \"\"\"@brief the following function \"\"\"\n sonos.next()\n return 'Ok'\n\n\[email protected]('/previous')\ndef previous():\n \"\"\"@brief the previous function \"\"\"\n sonos.previous()\n return 'Ok'\n\n\[email protected]('/volume')\ndef volume():\n \"\"\"@brief get the actual volume \"\"\"\n vol = sonos.volume\n return vol\n\n\[email protected]('/volume_up')\ndef volume_up():\n \"\"\"@brief the volume up function \"\"\"\n sonos.set_relative_volume(10)\n return 'Ok'\n\n\[email protected]('/volume_down')\ndef volume_down():\n \"\"\"@brief the volume down function \"\"\"\n sonos.set_relative_volume(-10)\n return 'Ok'\n\n\[email protected]('/volume_mute')\ndef volume_mute():\n \"\"\"@brief the mute function \"\"\"\n sonos.mute = True\n return 'Ok'\n\n\[email protected]('/volume_unmute')\ndef volume_unmute():\n \"\"\"@brief the unmute function \"\"\"\n sonos.mute = False\n return 'Ok'\n\n\[email protected]('/track_01')\ndef track_01():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://mp3stream1.apasf.apa.at:8000', title=\n 'FM4.ORF.AT', force_radio=True)\n return 'Ok'\n\n\[email protected]('/track_02')\ndef track_02():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://streams.radiopsr.de/psr-live/mp3-192/mediaplayer',\n title='Radio PSR Live', force_radio=True)\n return 'Ok'\n\n\[email protected]('/track_03')\ndef track_03():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://nrj.de/sachsen', title='Energy Sachsen',\n force_radio=True)\n return 'Ok'\n\n\[email protected]('/track_04')\ndef track_04():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title=\n 'Sunshine Live', force_radio=True)\n return 'Ok'\n\n\[email protected]('/info-light')\ndef info_light():\n \"\"\"@brief the info-light function \"\"\"\n track = current_track()\n return json.dumps(track)\n\n\[email protected]('/info')\ndef info():\n \"\"\"@brief the info function \"\"\"\n 
track = current_track()\n track['image'] = get_track_image(track['artist'], track['album'])\n transport = sonos.get_current_transport_info()\n track['playing'] = transport['current_transport_state'] != 'STOPPED'\n track['mute'] = sonos.mute\n return json.dumps(track)\n\n\n<mask token>\n",
"step-4": "<mask token>\napp.config.from_pyfile('settings.py')\n<mask token>\n\n\ndef gen_sig():\n \"\"\"@brief return the MD5 checksum \"\"\"\n return hashlib.md5((app.config['ROVI_API_KEY'] + app.config[\n 'ROVI_SHARED_SECRET'] + repr(int(time.time()))).encode('utf-8')\n ).hexdigest()\n\n\ndef get_track_image(artist, album):\n \"\"\"@brief get the track image from Rovi \"\"\"\n blank_image = url_for('static', filename='img/blank.jpg')\n if 'ROVI_SHARED_SECRET' not in app.config:\n return blank_image\n if 'ROVI_API_KEY' not in app.config:\n return blank_image\n headers = {'Accept-Encoding': 'gzip'}\n req = requests.get(\n 'http://api.rovicorp.com/recognition/v2.1/music/match/album?apikey=' +\n app.config['ROVI_API_KEY'] + '&sig=' + gen_sig() + '&name= ' +\n album + '&performername=' + artist + '&include=images&size=1',\n headers=headers, timeout=30)\n if req.status_code != requests.codes.ok:\n return blank_image\n result = json.loads(req.content)\n try:\n return result['matchResponse']['results'][0]['album']['images'][0][\n 'front'][3]['url']\n except (KeyError, IndexError):\n return blank_image\n\n\ndef current_track():\n \"\"\"@brief get the current track information from Sonos \"\"\"\n track = sonos.get_current_track_info()\n track['title'] = track['title'][:30]\n track['artist'] = track['artist'][:30]\n return track\n\n\[email protected]('/play')\ndef play():\n \"\"\"@brief the play function \"\"\"\n sonos.play()\n return 'Ok'\n\n\[email protected]('/pause')\ndef pause():\n \"\"\"@brief the pause function \"\"\"\n sonos.pause()\n return 'Ok'\n\n\[email protected]('/following')\ndef following():\n \"\"\"@brief the following function \"\"\"\n sonos.next()\n return 'Ok'\n\n\[email protected]('/previous')\ndef previous():\n \"\"\"@brief the previous function \"\"\"\n sonos.previous()\n return 'Ok'\n\n\[email protected]('/volume')\ndef volume():\n \"\"\"@brief get the actual volume \"\"\"\n vol = sonos.volume\n return vol\n\n\[email protected]('/volume_up')\ndef volume_up():\n \"\"\"@brief the volume up function \"\"\"\n sonos.set_relative_volume(10)\n return 'Ok'\n\n\[email protected]('/volume_down')\ndef volume_down():\n \"\"\"@brief the volume down function \"\"\"\n sonos.set_relative_volume(-10)\n return 'Ok'\n\n\[email protected]('/volume_mute')\ndef volume_mute():\n \"\"\"@brief the mute function \"\"\"\n sonos.mute = True\n return 'Ok'\n\n\[email protected]('/volume_unmute')\ndef volume_unmute():\n \"\"\"@brief the unmute function \"\"\"\n sonos.mute = False\n return 'Ok'\n\n\[email protected]('/track_01')\ndef track_01():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://mp3stream1.apasf.apa.at:8000', title=\n 'FM4.ORF.AT', force_radio=True)\n return 'Ok'\n\n\[email protected]('/track_02')\ndef track_02():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://streams.radiopsr.de/psr-live/mp3-192/mediaplayer',\n title='Radio PSR Live', force_radio=True)\n return 'Ok'\n\n\[email protected]('/track_03')\ndef track_03():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://nrj.de/sachsen', title='Energy Sachsen',\n force_radio=True)\n return 'Ok'\n\n\[email protected]('/track_04')\ndef track_04():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title=\n 'Sunshine Live', force_radio=True)\n return 'Ok'\n\n\[email protected]('/info-light')\ndef info_light():\n \"\"\"@brief the info-light function \"\"\"\n track = current_track()\n return json.dumps(track)\n\n\[email 
protected]('/info')\ndef info():\n \"\"\"@brief the info function \"\"\"\n track = current_track()\n track['image'] = get_track_image(track['artist'], track['album'])\n transport = sonos.get_current_transport_info()\n track['playing'] = transport['current_transport_state'] != 'STOPPED'\n track['mute'] = sonos.mute\n return json.dumps(track)\n\n\[email protected]('/')\[email protected]('/index')\ndef index():\n \"\"\"@brief the index function \"\"\"\n track = current_track()\n track['image'] = get_track_image(track['artist'], track['album'])\n return render_template('index.html', track=track)\n",
"step-5": "\"\"\"@brief the routes for Flask application\n\"\"\"\nimport hashlib\nimport json\nimport time\n\nimport requests\nfrom flask import render_template, url_for\nfrom soco import SoCo\nfrom app import app\n\napp.config.from_pyfile(\"settings.py\")\nsonos = SoCo(app.config[\"SPEAKER_IP\"])\n\n\ndef gen_sig():\n \"\"\"@brief return the MD5 checksum \"\"\"\n return hashlib.md5(\n (\n app.config[\"ROVI_API_KEY\"]\n + app.config[\"ROVI_SHARED_SECRET\"]\n + repr(int(time.time()))\n ).encode(\"utf-8\")\n ).hexdigest()\n\n\ndef get_track_image(artist, album):\n \"\"\"@brief get the track image from Rovi \"\"\"\n blank_image = url_for(\"static\", filename=\"img/blank.jpg\")\n if \"ROVI_SHARED_SECRET\" not in app.config:\n return blank_image\n if \"ROVI_API_KEY\" not in app.config:\n return blank_image\n\n headers = {\"Accept-Encoding\": \"gzip\"}\n req = requests.get(\n \"http://api.rovicorp.com/recognition/v2.1/music/match/album?apikey=\"\n + app.config[\"ROVI_API_KEY\"]\n + \"&sig=\"\n + gen_sig()\n + \"&name= \"\n + album\n + \"&performername=\"\n + artist\n + \"&include=images&size=1\",\n headers=headers,\n timeout=30,\n )\n\n if req.status_code != requests.codes.ok:\n return blank_image\n\n result = json.loads(req.content)\n try:\n return result[\"matchResponse\"][\"results\"][0][\"album\"][\"images\"][0][\"front\"][3][\"url\"]\n except (KeyError, IndexError):\n return blank_image\n\n\ndef current_track():\n \"\"\"@brief get the current track information from Sonos \"\"\"\n track = sonos.get_current_track_info()\n track[\"title\"] = track[\"title\"][:30]\n track[\"artist\"] = track[\"artist\"][:30]\n return track\n\n\[email protected](\"/play\")\ndef play():\n \"\"\"@brief the play function \"\"\"\n sonos.play()\n return \"Ok\"\n\n\[email protected](\"/pause\")\ndef pause():\n \"\"\"@brief the pause function \"\"\"\n sonos.pause()\n return \"Ok\"\n\n\[email protected](\"/following\")\ndef following():\n \"\"\"@brief the following function \"\"\"\n sonos.next()\n return \"Ok\"\n\n\[email protected](\"/previous\")\ndef previous():\n \"\"\"@brief the previous function \"\"\"\n sonos.previous()\n return \"Ok\"\n\n\[email protected](\"/volume\")\ndef volume():\n \"\"\"@brief get the actual volume \"\"\"\n vol = sonos.volume\n return vol\n\n\[email protected](\"/volume_up\")\ndef volume_up():\n \"\"\"@brief the volume up function \"\"\"\n sonos.set_relative_volume(10)\n return \"Ok\"\n\n\[email protected](\"/volume_down\")\ndef volume_down():\n \"\"\"@brief the volume down function \"\"\"\n sonos.set_relative_volume(-10)\n return \"Ok\"\n\n\[email protected](\"/volume_mute\")\ndef volume_mute():\n \"\"\"@brief the mute function \"\"\"\n sonos.mute = True\n return \"Ok\"\n\n\[email protected](\"/volume_unmute\")\ndef volume_unmute():\n \"\"\"@brief the unmute function \"\"\"\n sonos.mute = False\n return \"Ok\"\n\n\[email protected](\"/track_01\")\ndef track_01():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://mp3stream1.apasf.apa.at:8000', title='FM4.ORF.AT', force_radio=True)\n return \"Ok\"\n\n\[email protected](\"/track_02\")\ndef track_02():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://streams.radiopsr.de/psr-live/mp3-192/mediaplayer', title='Radio PSR Live', force_radio=True)\n return \"Ok\"\n\n\[email protected](\"/track_03\")\ndef track_03():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://nrj.de/sachsen', title='Energy Sachsen', force_radio=True)\n return \"Ok\"\n\n\[email protected](\"/track_04\")\ndef track_04():\n 
\"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title='Sunshine Live', force_radio=True)\n return \"Ok\"\n\n\[email protected](\"/info-light\")\ndef info_light():\n \"\"\"@brief the info-light function \"\"\"\n track = current_track()\n return json.dumps(track)\n\n\[email protected](\"/info\")\ndef info():\n \"\"\"@brief the info function \"\"\"\n track = current_track()\n track[\"image\"] = get_track_image(track[\"artist\"], track[\"album\"])\n transport = sonos.get_current_transport_info()\n track[\"playing\"] = transport[\"current_transport_state\"] != \"STOPPED\"\n track[\"mute\"] = sonos.mute\n return json.dumps(track)\n\n\[email protected](\"/\")\[email protected]('/index')\ndef index():\n \"\"\"@brief the index function \"\"\"\n track = current_track()\n track[\"image\"] = get_track_image(track[\"artist\"], track[\"album\"])\n return render_template(\"index.html\", track=track)\n",
"step-ids": [
16,
17,
18,
20,
23
]
}
|
[
16,
17,
18,
20,
23
] |
"""1) Написать бота-консультанта, который будет собирать информацию с
пользователя (его ФИО, номер телефона, почта, адресс, пожелания).
Записывать сформированную заявку в БД (по желанию SQl/NOSQL).)."""
import telebot
from .config import TOKEN
from telebot.types import ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardMarkup, InlineKeyboardButton
from mongoengine import *
bot = telebot.TeleBot(TOKEN)
data = {}
connect('bot_db')
class User(Document):
surname = StringField(required=True)
name = StringField(required=True)
middle_name = StringField(required=True)
phone = StringField(required=True)
email = StringField(required=True)
address = StringField(required=True)
wishes = StringField(required=True)
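# Each completed survey is saved by get_wishes() below as a single MongoDB
# document with these seven string fields, e.g. (illustrative values only):
#   {"surname": "Ivanova", "name": "Anna", "middle_name": "Petrovna",
#    "phone": "+380501234567", "email": "anna@example.com", "address": "Kyiv",
#    "wishes": "call back after 18:00"}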
@bot.message_handler(commands=['start'])
def start(message):
kb = InlineKeyboardMarkup()
button1 = InlineKeyboardButton('Да', callback_data='yes')
button2 = InlineKeyboardButton('Нет', callback_data='no')
kb.add(button1, button2)
bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?', reply_markup=kb)
@bot.callback_query_handler(func=lambda call: call.data == 'no')
def bye(call):
bot.send_message(call.from_user.id, 'До свидания')
@bot.callback_query_handler(func=lambda call: call.data == 'yes')
def start(call):
bot.send_message(call.from_user.id, 'Хорошо')
bot.send_message(call.from_user.id, 'Как Вас зовут?')
@bot.message_handler(content_types=['text'])
def response(message):
data['name'] = message.text
bot.send_message(message.chat.id, 'Какая у Вас фамилия?')
bot.register_next_step_handler(message, get_surname)
def get_surname(message):
data['surname'] = message.text
bot.send_message(message.chat.id, 'Как Ваше отчество?')
bot.register_next_step_handler(message, get_middle_name)
def get_middle_name(message):
data['middle_name'] = message.text
kb = ReplyKeyboardMarkup(resize_keyboard=True)
button = KeyboardButton(text='поделиться контактом', request_contact=True)
kb.add(button)
bot.send_message(message.chat.id, 'Поделитесь номером телефона?', reply_markup=kb)
bot.register_next_step_handler(message, get_phone)
@bot.message_handler(content_types=['contact'])
def get_phone(message):
data['phone'] = message.contact.phone_number
bot.send_message(message.chat.id, 'Какой у Вас e-mail?')
bot.register_next_step_handler(message, get_email)
def get_email(message):
data['email'] = message.text
bot.send_message(message.chat.id, 'Какой Ваш адрес?')
bot.register_next_step_handler(message, get_address)
def get_address(message):
data['address'] = message.text
bot.send_message(message.chat.id, 'Какие у Вас пожелания?')
bot.register_next_step_handler(message, get_wishes)
def get_wishes(message):
data['wishes'] = message.text
User.objects.create(**data)
bot.send_message(message.chat.id, 'Спасибо.')
bot.polling()
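# Assumed companion module, sketched here for completeness: config.py in the
# same package must expose the bot token (the value below is a placeholder),
# and a local MongoDB instance must be running for connect('bot_db') to work.
#   TOKEN = "123456789:AAExampleTelegramBotToken"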
|
normal
|
{
"blob_id": "dcb2351f9489815fbec8694b446d0a93972a6590",
"index": 6388,
"step-1": "<mask token>\n\n\nclass User(Document):\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\[email protected]_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',\n reply_markup=kb)\n\n\n<mask token>\n\n\[email protected]_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\[email protected]_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\n<mask token>\n\n\[email protected]_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n bot.register_next_step_handler(message, get_email)\n\n\ndef get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 'Спасибо.')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass User(Document):\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\[email protected]_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',\n reply_markup=kb)\n\n\[email protected]_query_handler(func=lambda call: call.data == 'no')\ndef bye(call):\n bot.send_message(call.from_user.id, 'До свидания')\n\n\[email protected]_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\[email protected]_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\ndef get_middle_name(message):\n data['middle_name'] = message.text\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n button = KeyboardButton(text='поделиться контактом', request_contact=True)\n kb.add(button)\n bot.send_message(message.chat.id, 'Поделитесь номером телефона?',\n reply_markup=kb)\n bot.register_next_step_handler(message, get_phone)\n\n\[email protected]_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n bot.register_next_step_handler(message, get_email)\n\n\ndef get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 'Спасибо.')\n\n\n<mask token>\n",
"step-3": "<mask token>\nconnect('bot_db')\n\n\nclass User(Document):\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\[email protected]_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',\n reply_markup=kb)\n\n\[email protected]_query_handler(func=lambda call: call.data == 'no')\ndef bye(call):\n bot.send_message(call.from_user.id, 'До свидания')\n\n\[email protected]_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\[email protected]_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\ndef get_middle_name(message):\n data['middle_name'] = message.text\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n button = KeyboardButton(text='поделиться контактом', request_contact=True)\n kb.add(button)\n bot.send_message(message.chat.id, 'Поделитесь номером телефона?',\n reply_markup=kb)\n bot.register_next_step_handler(message, get_phone)\n\n\[email protected]_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n bot.register_next_step_handler(message, get_email)\n\n\ndef get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 'Спасибо.')\n\n\nbot.polling()\n",
"step-4": "<mask token>\nbot = telebot.TeleBot(TOKEN)\ndata = {}\nconnect('bot_db')\n\n\nclass User(Document):\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\[email protected]_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',\n reply_markup=kb)\n\n\[email protected]_query_handler(func=lambda call: call.data == 'no')\ndef bye(call):\n bot.send_message(call.from_user.id, 'До свидания')\n\n\[email protected]_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\[email protected]_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\ndef get_middle_name(message):\n data['middle_name'] = message.text\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n button = KeyboardButton(text='поделиться контактом', request_contact=True)\n kb.add(button)\n bot.send_message(message.chat.id, 'Поделитесь номером телефона?',\n reply_markup=kb)\n bot.register_next_step_handler(message, get_phone)\n\n\[email protected]_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n bot.register_next_step_handler(message, get_email)\n\n\ndef get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 'Спасибо.')\n\n\nbot.polling()\n",
"step-5": "\"\"\"1) Написать бота-консультанта, который будет собирать информацию с\nпользователя (его ФИО, номер телефона, почта, адресс, пожелания).\nЗаписывать сформированную заявку в БД (по желанию SQl/NOSQL).).\"\"\"\n\n\nimport telebot\nfrom .config import TOKEN\nfrom telebot.types import ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardMarkup, InlineKeyboardButton\nfrom mongoengine import *\n\n\nbot = telebot.TeleBot(TOKEN)\ndata = {}\nconnect('bot_db')\n\n\nclass User(Document):\n\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\[email protected]_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n\n bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?', reply_markup=kb)\n\n\[email protected]_query_handler(func=lambda call: call.data == 'no')\ndef bye(call):\n bot.send_message(call.from_user.id, 'До свидания')\n\n\[email protected]_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\[email protected]_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\ndef get_middle_name(message):\n data['middle_name'] = message.text\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n button = KeyboardButton(text='поделиться контактом', request_contact=True)\n kb.add(button)\n bot.send_message(message.chat.id, 'Поделитесь номером телефона?', reply_markup=kb)\n bot.register_next_step_handler(message, get_phone)\n\n\[email protected]_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n bot.register_next_step_handler(message, get_email)\n\n\ndef get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 'Спасибо.')\n\n\nbot.polling()\n",
"step-ids": [
10,
12,
13,
14,
16
]
}
|
[
10,
12,
13,
14,
16
] |
# This handles the url for routing
from django.urls import path
from . import views
# Defines views to pass dynamic data to listings page
urlpatterns = [
path('', views.index, name='listings'),
path('<int:listing_id>', views.listing, name='listing'),
path('search', views.search, name='search')
]
|
normal
|
{
"blob_id": "be894830bb0dde6bacaea6be823391e0445603c3",
"index": 1192,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.index, name='listings'), path(\n '<int:listing_id>', views.listing, name='listing'), path('search',\n views.search, name='search')]\n",
"step-3": "from django.urls import path\nfrom . import views\nurlpatterns = [path('', views.index, name='listings'), path(\n '<int:listing_id>', views.listing, name='listing'), path('search',\n views.search, name='search')]\n",
"step-4": "# This handle the url for routing\n\nfrom django.urls import path\nfrom . import views\n\n# Defines views to pass dynamic data to listings page\nurlpatterns = [\n path('', views.index, name='listings'),\n path('<int:listing_id>', views.listing, name='listing'),\n path('search', views.search, name='search')\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i_angle in np.arange(0, 360, 45):
ri_filenames.append('r%di%d.csv' % (i_angle, i_angle))
ri_filenames.append('r%di%d.csv' % (i_angle + 45, i_angle))
ri_filenames.append('r360i360.csv')
<|reserved_special_token_0|>
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. yaw', 'Calculated distance vs. yaw'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp1'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp1_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. pitch', 'Calculated distance vs. pitch'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp2'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp2_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. roll', 'Calculated distance vs. roll'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp3'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp3_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. position', 'Calculated distance vs. position'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp4'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in angle_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 360, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 360, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 360, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp4_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=2, subplot_titles=['Line of sight',
'Blocked'], shared_xaxes=True)
rssi_los_hist2d = []
dist_los_hist2d = []
experiment = 'distance_los'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_los_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_los_hist2d.append(column)
rssi_los_hist2d = np.array(rssi_los_hist2d).T
dist_los_hist2d = np.array(dist_los_hist2d).T
rssi_blocked_hist2d = []
dist_blocked_hist2d = []
experiment = 'distance_blocked'
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_blocked_hist2d.append(column)
column = np.zeros(1000)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_blocked_hist2d.append(column)
rssi_blocked_hist2d = np.array(rssi_blocked_hist2d).T
dist_blocked_hist2d = np.array(dist_blocked_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
db_lim[0], db_lim[1], -1), z=rssi_los_hist2d[int(-db_lim[0]):int(-
db_lim[1]), :], zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
dist_lim[0], dist_lim[1], 0.1), z=dist_los_hist2d[int(dist_lim[0] /
0.1):int(dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
db_lim[0], db_lim[1], -1), z=rssi_blocked_hist2d[int(-db_lim[0]):
int(-db_lim[1]), :], zmin=0, zmax=maxz), row=1, col=2)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
dist_lim[0], dist_lim[1], 0.1), z=dist_blocked_hist2d[int(dist_lim[
0] / 0.1):int(dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=2)
fig.add_trace(go.Scatter(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
0.75, 3.25, 0.25), mode='lines', line=ref_line), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
0.75, 3.25, 0.25), mode='lines', line=ref_line), row=2, col=2)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Separation (m)', row=2, col=1)
fig.update_xaxes(title='Separation (m)', row=2, col=2)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'distance_%s.png' % antenna)
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
output_directory = 'C:/Users/jgamm/Desktop/rssi_measurement/2020-06-10/figures'
antennas = ['original_whip']
folder = 'C:/Users/jgamm/Desktop/rssi_measurement/2020-06-10/data'
ri_filenames = []
for i_angle in np.arange(0, 360, 45):
ri_filenames.append('r%di%d.csv' % (i_angle, i_angle))
ri_filenames.append('r%di%d.csv' % (i_angle + 45, i_angle))
ri_filenames.append('r360i360.csv')
angle_filenames = [('%d.csv' % n) for n in np.arange(0, 405, 45)]
distance_filenames = [('%1.2f.csv' % n) for n in np.arange(0.75, 3.25, 0.25)]
ref_line = dict(color='white', width=1)
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. yaw', 'Calculated distance vs. yaw'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp1'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp1_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. pitch', 'Calculated distance vs. pitch'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp2'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp2_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. roll', 'Calculated distance vs. roll'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp3'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp3_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. position', 'Calculated distance vs. position'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp4'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in angle_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 360, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 360, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 360, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp4_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=2, subplot_titles=['Line of sight',
'Blocked'], shared_xaxes=True)
rssi_los_hist2d = []
dist_los_hist2d = []
experiment = 'distance_los'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_los_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_los_hist2d.append(column)
rssi_los_hist2d = np.array(rssi_los_hist2d).T
dist_los_hist2d = np.array(dist_los_hist2d).T
rssi_blocked_hist2d = []
dist_blocked_hist2d = []
experiment = 'distance_blocked'
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_blocked_hist2d.append(column)
column = np.zeros(1000)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_blocked_hist2d.append(column)
rssi_blocked_hist2d = np.array(rssi_blocked_hist2d).T
dist_blocked_hist2d = np.array(dist_blocked_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
db_lim[0], db_lim[1], -1), z=rssi_los_hist2d[int(-db_lim[0]):int(-
db_lim[1]), :], zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
dist_lim[0], dist_lim[1], 0.1), z=dist_los_hist2d[int(dist_lim[0] /
0.1):int(dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
db_lim[0], db_lim[1], -1), z=rssi_blocked_hist2d[int(-db_lim[0]):
int(-db_lim[1]), :], zmin=0, zmax=maxz), row=1, col=2)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
dist_lim[0], dist_lim[1], 0.1), z=dist_blocked_hist2d[int(dist_lim[
0] / 0.1):int(dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=2)
fig.add_trace(go.Scatter(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
0.75, 3.25, 0.25), mode='lines', line=ref_line), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
0.75, 3.25, 0.25), mode='lines', line=ref_line), row=2, col=2)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Separation (m)', row=2, col=1)
fig.update_xaxes(title='Separation (m)', row=2, col=2)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'distance_%s.png' % antenna)
)
<|reserved_special_token_1|>
import numpy as np
import plotly
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import pandas as pd
import os
output_directory = 'C:/Users/jgamm/Desktop/rssi_measurement/2020-06-10/figures'
antennas = ['original_whip']
folder = 'C:/Users/jgamm/Desktop/rssi_measurement/2020-06-10/data'
ri_filenames = []
for i_angle in np.arange(0, 360, 45):
ri_filenames.append('r%di%d.csv' % (i_angle, i_angle))
ri_filenames.append('r%di%d.csv' % (i_angle + 45, i_angle))
ri_filenames.append('r360i360.csv')
angle_filenames = [('%d.csv' % n) for n in np.arange(0, 405, 45)]
distance_filenames = [('%1.2f.csv' % n) for n in np.arange(0.75, 3.25, 0.25)]
ref_line = dict(color='white', width=1)
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. yaw', 'Calculated distance vs. yaw'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp1'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp1_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. pitch', 'Calculated distance vs. pitch'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp2'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp2_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. roll', 'Calculated distance vs. roll'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp3'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 765, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp3_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=1, subplot_titles=[
'Initiator RSSI vs. position', 'Calculated distance vs. position'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp4'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in angle_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0, 360, 45), y=np.arange(db_lim[0],
db_lim[1], -1), z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0, 360, 45), y=np.arange(dist_lim[
0], dist_lim[1], 0.1), z=dist_hist2d[int(dist_lim[0] / 0.1):int(
dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 360, 45), y=np.array([1] * 16),
mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory,
'orientation_exp4_%s.png' % antenna))
for antenna in antennas:
fig = make_subplots(rows=2, cols=2, subplot_titles=['Line of sight',
'Blocked'], shared_xaxes=True)
rssi_los_hist2d = []
dist_los_hist2d = []
experiment = 'distance_los'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_los_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_los_hist2d.append(column)
rssi_los_hist2d = np.array(rssi_los_hist2d).T
dist_los_hist2d = np.array(dist_los_hist2d).T
rssi_blocked_hist2d = []
dist_blocked_hist2d = []
experiment = 'distance_blocked'
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi - 5 < db_lim[1]:
db_lim[1] = rssi - 5
if rssi + 5 > db_lim[0]:
db_lim[0] = rssi + 5
for dist in Dist:
if dist - 0.5 < dist_lim[0]:
dist_lim[0] = dist - 0.5
if dist + 0.5 > dist_lim[1]:
dist_lim[1] = dist + 0.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1] / len(data['i_rssi'])
rssi_blocked_hist2d.append(column)
column = np.zeros(1000)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0] / 0.1))
column[row_idx] = row[1] / len(Dist)
dist_blocked_hist2d.append(column)
rssi_blocked_hist2d = np.array(rssi_blocked_hist2d).T
dist_blocked_hist2d = np.array(dist_blocked_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
db_lim[0], db_lim[1], -1), z=rssi_los_hist2d[int(-db_lim[0]):int(-
db_lim[1]), :], zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
dist_lim[0], dist_lim[1], 0.1), z=dist_los_hist2d[int(dist_lim[0] /
0.1):int(dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
db_lim[0], db_lim[1], -1), z=rssi_blocked_hist2d[int(-db_lim[0]):
int(-db_lim[1]), :], zmin=0, zmax=maxz), row=1, col=2)
fig.add_trace(go.Heatmap(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
dist_lim[0], dist_lim[1], 0.1), z=dist_blocked_hist2d[int(dist_lim[
0] / 0.1):int(dist_lim[1] / 0.1), :], zmin=0, zmax=maxz), row=2, col=2)
fig.add_trace(go.Scatter(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
0.75, 3.25, 0.25), mode='lines', line=ref_line), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0.75, 3.25, 0.25), y=np.arange(
0.75, 3.25, 0.25), mode='lines', line=ref_line), row=2, col=2)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna' %
antenna, 'xanchor': 'center', 'yanchor': 'top', 'y': 0.95, 'x': 0.5})
fig.update_xaxes(title='Separation (m)', row=2, col=1)
fig.update_xaxes(title='Separation (m)', row=2, col=2)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'distance_%s.png' % antenna)
)
<|reserved_special_token_1|>
#%%
# -*- coding: utf-8 -*-
import numpy as np
import plotly
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import pandas as pd
import os
output_directory = r'C:/Users/jgamm/Desktop/rssi_measurement/2020-06-10/figures'
antennas = ['original_whip']
folder = r'C:/Users/jgamm/Desktop/rssi_measurement/2020-06-10/data'
ri_filenames = []
for i_angle in np.arange(0, 360, 45):
ri_filenames.append('r%di%d.csv'%(i_angle, i_angle))
ri_filenames.append('r%di%d.csv'%(i_angle+45, i_angle))
ri_filenames.append('r360i360.csv')
angle_filenames = ['%d.csv'%(n) for n in np.arange(0, 405, 45)]
distance_filenames = ['%1.2f.csv'%(n) for n in np.arange(.75, 3.25, .25)]
ref_line = dict(color='white', width=1)
# Plot yaw data
for antenna in antennas:
fig = make_subplots(rows=2, cols=1,
subplot_titles=['Initiator RSSI vs. yaw',
'Calculated distance vs. yaw'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp1'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi-5 < db_lim[1]:
db_lim[1] = rssi-5
if rssi+5 > db_lim[0]:
db_lim[0] = rssi+5
for dist in Dist:
if dist-.5 < dist_lim[0]:
dist_lim[0] = dist-.5
if dist+.5 > dist_lim[1]:
dist_lim[1] = dist+.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1]/len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0]/.1))
column[row_idx] = row[1]/len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(
x=np.arange(0, 765, 45),
y=np.arange(db_lim[0], db_lim[1], -1),
z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(
x=np.arange(0, 765, 45),
y=np.arange(dist_lim[0], dist_lim[1], .1),
z=dist_hist2d[int(dist_lim[0]/.1):int(dist_lim[1]/.1), :],
zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1]*16), mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna'%(antenna),
'xanchor': 'center', 'yanchor': 'top', 'y': .95, 'x': .5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'orientation_exp1_%s.png'%(antenna)))
# Plot pitch data
for antenna in antennas:
fig = make_subplots(rows=2, cols=1,
subplot_titles=['Initiator RSSI vs. pitch',
'Calculated distance vs. pitch'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp2'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi-5 < db_lim[1]:
db_lim[1] = rssi-5
if rssi+5 > db_lim[0]:
db_lim[0] = rssi+5
for dist in Dist:
if dist-.5 < dist_lim[0]:
dist_lim[0] = dist-.5
if dist+.5 > dist_lim[1]:
dist_lim[1] = dist+.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1]/len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0]/.1))
column[row_idx] = row[1]/len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(
x=np.arange(0, 765, 45),
y=np.arange(db_lim[0], db_lim[1], -1),
z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(
x=np.arange(0, 765, 45),
y=np.arange(dist_lim[0], dist_lim[1], .1),
z=dist_hist2d[int(dist_lim[0]/.1):int(dist_lim[1]/.1), :],
zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1]*16), mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna'%(antenna),
'xanchor': 'center', 'yanchor': 'top', 'y': .95, 'x': .5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'orientation_exp2_%s.png'%(antenna)))
# Plot roll data
for antenna in antennas:
fig = make_subplots(rows=2, cols=1,
subplot_titles=['Initiator RSSI vs. roll',
'Calculated distance vs. roll'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp3'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in ri_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi-5 < db_lim[1]:
db_lim[1] = rssi-5
if rssi+5 > db_lim[0]:
db_lim[0] = rssi+5
for dist in Dist:
if dist-.5 < dist_lim[0]:
dist_lim[0] = dist-.5
if dist+.5 > dist_lim[1]:
dist_lim[1] = dist+.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1]/len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0]/.1))
column[row_idx] = row[1]/len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(
x=np.arange(0, 765, 45),
y=np.arange(db_lim[0], db_lim[1], -1),
z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(
x=np.arange(0, 765, 45),
y=np.arange(dist_lim[0], dist_lim[1], .1),
z=dist_hist2d[int(dist_lim[0]/.1):int(dist_lim[1]/.1), :],
zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 765, 45), y=np.array([1]*16), mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna'%(antenna),
'xanchor': 'center', 'yanchor': 'top', 'y': .95, 'x': .5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'orientation_exp3_%s.png'%(antenna)))
# Plot position data
for antenna in antennas:
fig = make_subplots(rows=2, cols=1,
subplot_titles=['Initiator RSSI vs. position',
'Calculated distance vs. position'],
shared_xaxes=True)
rssi_hist2d = []
dist_hist2d = []
experiment = 'orientation_exp4'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in angle_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi-5 < db_lim[1]:
db_lim[1] = rssi-5
if rssi+5 > db_lim[0]:
db_lim[0] = rssi+5
for dist in Dist:
if dist-.5 < dist_lim[0]:
dist_lim[0] = dist-.5
if dist+.5 > dist_lim[1]:
dist_lim[1] = dist+.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1]/len(data['i_rssi'])
rssi_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0]/.1))
column[row_idx] = row[1]/len(Dist)
dist_hist2d.append(column)
rssi_hist2d = np.array(rssi_hist2d).T
dist_hist2d = np.array(dist_hist2d).T
maxz = np.max([np.max(rssi_hist2d), np.max(dist_hist2d)])
fig.add_trace(go.Heatmap(
x=np.arange(0, 360, 45),
y=np.arange(db_lim[0], db_lim[1], -1),
z=rssi_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(
x=np.arange(0, 360, 45),
y=np.arange(dist_lim[0], dist_lim[1], .1),
z=dist_hist2d[int(dist_lim[0]/.1):int(dist_lim[1]/.1), :],
zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(0, 360, 45), y=np.array([1]*16), mode='lines', line=ref_line), row=2, col=1)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna'%(antenna),
'xanchor': 'center', 'yanchor': 'top', 'y': .95, 'x': .5})
fig.update_xaxes(title='Angle (°)', row=2, col=1)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'orientation_exp4_%s.png'%(antenna)))
# Plot separation data
for antenna in antennas:
fig = make_subplots(rows=2, cols=2,
subplot_titles=['Line of sight', 'Blocked'],
shared_xaxes=True)
rssi_los_hist2d = []
dist_los_hist2d = []
experiment = 'distance_los'
dist_lim = [100, 0]
db_lim = [-100, 0]
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi-5 < db_lim[1]:
db_lim[1] = rssi-5
if rssi+5 > db_lim[0]:
db_lim[0] = rssi+5
for dist in Dist:
if dist-.5 < dist_lim[0]:
dist_lim[0] = dist-.5
if dist+.5 > dist_lim[1]:
dist_lim[1] = dist+.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1]/len(data['i_rssi'])
rssi_los_hist2d.append(column)
column = np.zeros(100)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0]/.1))
column[row_idx] = row[1]/len(Dist)
dist_los_hist2d.append(column)
rssi_los_hist2d = np.array(rssi_los_hist2d).T
dist_los_hist2d = np.array(dist_los_hist2d).T
rssi_blocked_hist2d = []
dist_blocked_hist2d = []
experiment = 'distance_blocked'
for filename in distance_filenames:
data = pd.read_csv(os.path.join(folder, antenna, experiment, filename))
Dist = np.around(data['distance'], 1)
for rssi in data['i_rssi']:
if rssi-5 < db_lim[1]:
db_lim[1] = rssi-5
if rssi+5 > db_lim[0]:
db_lim[0] = rssi+5
for dist in Dist:
if dist-.5 < dist_lim[0]:
dist_lim[0] = dist-.5
if dist+.5 > dist_lim[1]:
dist_lim[1] = dist+.5
dist_lim[0] = np.max([0, dist_lim[0]])
column = np.zeros(200)
hist = np.array(np.unique(data['i_rssi'], return_counts=True)).T
for row in hist:
row_idx = -int(row[0])
column[row_idx] = row[1]/len(data['i_rssi'])
rssi_blocked_hist2d.append(column)
        # 1000 bins of 0.1 m here (wider range than the 100-bin line-of-sight histogram)
        column = np.zeros(1000)
hist = np.array(np.unique(Dist, return_counts=True)).T
for row in hist:
row_idx = int(np.around(row[0]/.1))
column[row_idx] = row[1]/len(Dist)
dist_blocked_hist2d.append(column)
rssi_blocked_hist2d = np.array(rssi_blocked_hist2d).T
dist_blocked_hist2d = np.array(dist_blocked_hist2d).T
    maxz = np.max([np.max(rssi_los_hist2d), np.max(dist_los_hist2d), np.max(rssi_blocked_hist2d), np.max(dist_blocked_hist2d)])  # shared color scale for the four heatmaps in this figure
fig.add_trace(go.Heatmap(
x=np.arange(.75, 3.25, .25),
y=np.arange(db_lim[0], db_lim[1], -1),
z=rssi_los_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=1)
fig.add_trace(go.Heatmap(
x=np.arange(.75, 3.25, .25),
y=np.arange(dist_lim[0], dist_lim[1], .1),
z=dist_los_hist2d[int(dist_lim[0]/.1):int(dist_lim[1]/.1), :],
zmin=0, zmax=maxz), row=2, col=1)
fig.add_trace(go.Heatmap(
x=np.arange(.75, 3.25, .25),
y=np.arange(db_lim[0], db_lim[1], -1),
z=rssi_blocked_hist2d[int(-db_lim[0]):int(-db_lim[1]), :],
zmin=0, zmax=maxz), row=1, col=2)
fig.add_trace(go.Heatmap(
x=np.arange(.75, 3.25, .25),
y=np.arange(dist_lim[0], dist_lim[1], .1),
z=dist_blocked_hist2d[int(dist_lim[0]/.1):int(dist_lim[1]/.1), :],
zmin=0, zmax=maxz), row=2, col=2)
fig.add_trace(go.Scatter(x=np.arange(.75, 3.25, .25), y=np.arange(.75, 3.25, .25), mode='lines', line=ref_line), row=2, col=1)
fig.add_trace(go.Scatter(x=np.arange(.75, 3.25, .25), y=np.arange(.75, 3.25, .25), mode='lines', line=ref_line), row=2, col=2)
fig.update_layout(title={'text': 'DA14695 Evaluation Board, %s antenna'%(antenna),
'xanchor': 'center', 'yanchor': 'top', 'y': .95, 'x': .5})
fig.update_xaxes(title='Separation (m)', row=2, col=1)
fig.update_xaxes(title='Separation (m)', row=2, col=2)
fig.update_layout(showlegend=False)
fig.update_yaxes(title_text='Initiator RSSI (dBm)', row=1, col=1)
fig.update_yaxes(title_text='Calculated distance (m)', row=2, col=1)
fig.write_image(os.path.join(output_directory, 'distance_%s.png'%(antenna)))
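
# --- Hedged refactoring sketch (not part of the original measurement script) ---
# Every plotting loop above rebuilds the same normalized histogram columns
# inline. The helper below is one way that repetition could be factored out; the
# function name and parameters are illustrative assumptions, it is never called
# here, and it uses the same np alias as the rest of the script.
def hist_column(values, n_bins, to_index):
    """Return a normalized histogram column of length n_bins.

    to_index maps a measured value to its bin index, e.g. lambda r: -int(r)
    for RSSI in dBm, or lambda d: int(np.around(d / .1)) for 0.1 m distance bins.
    """
    column = np.zeros(n_bins)
    vals, counts = np.unique(values, return_counts=True)
    for v, c in zip(vals, counts):
        column[int(to_index(v))] = c / len(values)
    return column
# Usage would mirror the inline code above, for example:
#   rssi_hist2d.append(hist_column(data['i_rssi'], 200, lambda r: -int(r)))
#   dist_hist2d.append(hist_column(Dist, 100, lambda d: int(np.around(d / .1))))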
from flask import Flask
app = Flask(__name__)
@app.route('/')
def root():
return "Test!"
@app.route('/federal/geographic')
def federal_geographic():
pass
@app.route('/federal/issue')
def federal_issue():
pass
@app.route('/state/geographic')
def state_geographic():
pass
@app.route('/local/temporal')
def local_temporal():
pass
if __name__ == "__main__":
app.run(debug=True)
|
flexible
|
{
"blob_id": "cc094f8aeff3b52bd9184f7b815320529ecb4550",
"index": 9928,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef root():\n return 'Test!'\n\n\[email protected]('/federal/geographic')\ndef federal_geographic():\n pass\n\n\n<mask token>\n\n\[email protected]('/state/geographic')\ndef state_geographic():\n pass\n\n\[email protected]('/local/temporal')\ndef local_temporal():\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef root():\n return 'Test!'\n\n\[email protected]('/federal/geographic')\ndef federal_geographic():\n pass\n\n\[email protected]('/federal/issue')\ndef federal_issue():\n pass\n\n\[email protected]('/state/geographic')\ndef state_geographic():\n pass\n\n\[email protected]('/local/temporal')\ndef local_temporal():\n pass\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef root():\n return 'Test!'\n\n\[email protected]('/federal/geographic')\ndef federal_geographic():\n pass\n\n\[email protected]('/federal/issue')\ndef federal_issue():\n pass\n\n\[email protected]('/state/geographic')\ndef state_geographic():\n pass\n\n\[email protected]('/local/temporal')\ndef local_temporal():\n pass\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask\napp = Flask(__name__)\n\n\[email protected]('/')\ndef root():\n return 'Test!'\n\n\[email protected]('/federal/geographic')\ndef federal_geographic():\n pass\n\n\[email protected]('/federal/issue')\ndef federal_issue():\n pass\n\n\[email protected]('/state/geographic')\ndef state_geographic():\n pass\n\n\[email protected]('/local/temporal')\ndef local_temporal():\n pass\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask\n\napp = Flask(__name__)\n\[email protected]('/')\ndef root():\n return \"Test!\"\n\[email protected]('/federal/geographic')\ndef federal_geographic():\n pass\n\[email protected]('/federal/issue')\ndef federal_issue():\n pass\n\[email protected]('/state/geographic')\ndef state_geographic():\n pass\n\[email protected]('/local/temporal')\ndef local_temporal():\n pass\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class TestValidate(unittest.TestCase):
@db_session
def setUp(self):
db.execute('delete from Person')
registry = getattr(core, '__warningregistry__', {})
for key in list(registry):
if type(key) is not tuple:
continue
text, category, lineno = key
if category is DatabaseContainsIncorrectEmptyValue:
del registry[key]
@db_session
def test_1a(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue
)
db.insert('Person', id=1, name='', tel='111')
p = Person.get(id=1)
self.assertEqual(p.name, '')
@raises_exception(DatabaseContainsIncorrectEmptyValue,
'Database contains empty string for required attribute Person.name')
@db_session
def test_1b(self):
with warnings.catch_warnings():
warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)
db.insert('Person', id=1, name='', tel='111')
p = Person.get(id=1)
@db_session
def test_2a(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue
)
db.insert('Person', id=1, name=None, tel='111')
p = Person.get(id=1)
self.assertEqual(p.name, None)
@raises_exception(DatabaseContainsIncorrectEmptyValue,
'Database contains NULL for required attribute Person.name')
@db_session
def test_2b(self):
with warnings.catch_warnings():
warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)
db.insert('Person', id=1, name=None, tel='111')
p = Person.get(id=1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Person(db.Entity):
id = PrimaryKey(int)
name = Required(str)
tel = Optional(str)
<|reserved_special_token_0|>
class TestValidate(unittest.TestCase):
@db_session
def setUp(self):
db.execute('delete from Person')
registry = getattr(core, '__warningregistry__', {})
for key in list(registry):
if type(key) is not tuple:
continue
text, category, lineno = key
if category is DatabaseContainsIncorrectEmptyValue:
del registry[key]
@db_session
def test_1a(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue
)
db.insert('Person', id=1, name='', tel='111')
p = Person.get(id=1)
self.assertEqual(p.name, '')
@raises_exception(DatabaseContainsIncorrectEmptyValue,
'Database contains empty string for required attribute Person.name')
@db_session
def test_1b(self):
with warnings.catch_warnings():
warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)
db.insert('Person', id=1, name='', tel='111')
p = Person.get(id=1)
@db_session
def test_2a(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue
)
db.insert('Person', id=1, name=None, tel='111')
p = Person.get(id=1)
self.assertEqual(p.name, None)
@raises_exception(DatabaseContainsIncorrectEmptyValue,
'Database contains NULL for required attribute Person.name')
@db_session
def test_2b(self):
with warnings.catch_warnings():
warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)
db.insert('Person', id=1, name=None, tel='111')
p = Person.get(id=1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Person(db.Entity):
id = PrimaryKey(int)
name = Required(str)
tel = Optional(str)
db.generate_mapping(check_tables=False)
with db_session:
db.execute(
"""
create table Person(
id int primary key,
name text,
tel text
)
"""
)
warnings.simplefilter('error')
class TestValidate(unittest.TestCase):
@db_session
def setUp(self):
db.execute('delete from Person')
registry = getattr(core, '__warningregistry__', {})
for key in list(registry):
if type(key) is not tuple:
continue
text, category, lineno = key
if category is DatabaseContainsIncorrectEmptyValue:
del registry[key]
@db_session
def test_1a(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue
)
db.insert('Person', id=1, name='', tel='111')
p = Person.get(id=1)
self.assertEqual(p.name, '')
@raises_exception(DatabaseContainsIncorrectEmptyValue,
'Database contains empty string for required attribute Person.name')
@db_session
def test_1b(self):
with warnings.catch_warnings():
warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)
db.insert('Person', id=1, name='', tel='111')
p = Person.get(id=1)
@db_session
def test_2a(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue
)
db.insert('Person', id=1, name=None, tel='111')
p = Person.get(id=1)
self.assertEqual(p.name, None)
@raises_exception(DatabaseContainsIncorrectEmptyValue,
'Database contains NULL for required attribute Person.name')
@db_session
def test_2b(self):
with warnings.catch_warnings():
warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)
db.insert('Person', id=1, name=None, tel='111')
p = Person.get(id=1)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db = Database('sqlite', ':memory:')
class Person(db.Entity):
id = PrimaryKey(int)
name = Required(str)
tel = Optional(str)
db.generate_mapping(check_tables=False)
with db_session:
db.execute(
"""
create table Person(
id int primary key,
name text,
tel text
)
"""
)
warnings.simplefilter('error')
class TestValidate(unittest.TestCase):
@db_session
def setUp(self):
db.execute('delete from Person')
registry = getattr(core, '__warningregistry__', {})
for key in list(registry):
if type(key) is not tuple:
continue
text, category, lineno = key
if category is DatabaseContainsIncorrectEmptyValue:
del registry[key]
@db_session
def test_1a(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue
)
db.insert('Person', id=1, name='', tel='111')
p = Person.get(id=1)
self.assertEqual(p.name, '')
@raises_exception(DatabaseContainsIncorrectEmptyValue,
'Database contains empty string for required attribute Person.name')
@db_session
def test_1b(self):
with warnings.catch_warnings():
warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)
db.insert('Person', id=1, name='', tel='111')
p = Person.get(id=1)
@db_session
def test_2a(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue
)
db.insert('Person', id=1, name=None, tel='111')
p = Person.get(id=1)
self.assertEqual(p.name, None)
@raises_exception(DatabaseContainsIncorrectEmptyValue,
'Database contains NULL for required attribute Person.name')
@db_session
def test_2b(self):
with warnings.catch_warnings():
warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)
db.insert('Person', id=1, name=None, tel='111')
p = Person.get(id=1)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest, warnings
from pony.orm import *
from pony.orm import core
from pony.orm.tests.testutils import raises_exception
db = Database('sqlite', ':memory:')
class Person(db.Entity):
id = PrimaryKey(int)
name = Required(str)
tel = Optional(str)
db.generate_mapping(check_tables=False)
with db_session:
db.execute("""
create table Person(
id int primary key,
name text,
tel text
)
""")
warnings.simplefilter('error', )
class TestValidate(unittest.TestCase):
@db_session
def setUp(self):
db.execute('delete from Person')
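        # Drop any previously recorded DatabaseContainsIncorrectEmptyValue warnings from the
        # module-level __warningregistry__, so each test can trigger the warning afresh
        # instead of having it suppressed as a duplicate.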
registry = getattr(core, '__warningregistry__', {})
for key in list(registry):
if type(key) is not tuple: continue
text, category, lineno = key
if category is DatabaseContainsIncorrectEmptyValue:
del registry[key]
@db_session
def test_1a(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue)
db.insert('Person', id=1, name='', tel='111')
p = Person.get(id=1)
self.assertEqual(p.name, '')
@raises_exception(DatabaseContainsIncorrectEmptyValue,
'Database contains empty string for required attribute Person.name')
@db_session
def test_1b(self):
with warnings.catch_warnings():
warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)
db.insert('Person', id=1, name='', tel='111')
p = Person.get(id=1)
@db_session
def test_2a(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue)
db.insert('Person', id=1, name=None, tel='111')
p = Person.get(id=1)
self.assertEqual(p.name, None)
@raises_exception(DatabaseContainsIncorrectEmptyValue,
'Database contains NULL for required attribute Person.name')
@db_session
def test_2b(self):
with warnings.catch_warnings():
warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)
db.insert('Person', id=1, name=None, tel='111')
p = Person.get(id=1)
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "33c39b098cb9d3368b8f74a7433e0943fe252da5",
"index": 5672,
"step-1": "<mask token>\n\n\nclass TestValidate(unittest.TestCase):\n\n @db_session\n def setUp(self):\n db.execute('delete from Person')\n registry = getattr(core, '__warningregistry__', {})\n for key in list(registry):\n if type(key) is not tuple:\n continue\n text, category, lineno = key\n if category is DatabaseContainsIncorrectEmptyValue:\n del registry[key]\n\n @db_session\n def test_1a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, '')\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains empty string for required attribute Person.name')\n @db_session\n def test_1b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n\n @db_session\n def test_2a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, None)\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains NULL for required attribute Person.name')\n @db_session\n def test_2b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Person(db.Entity):\n id = PrimaryKey(int)\n name = Required(str)\n tel = Optional(str)\n\n\n<mask token>\n\n\nclass TestValidate(unittest.TestCase):\n\n @db_session\n def setUp(self):\n db.execute('delete from Person')\n registry = getattr(core, '__warningregistry__', {})\n for key in list(registry):\n if type(key) is not tuple:\n continue\n text, category, lineno = key\n if category is DatabaseContainsIncorrectEmptyValue:\n del registry[key]\n\n @db_session\n def test_1a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, '')\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains empty string for required attribute Person.name')\n @db_session\n def test_1b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n\n @db_session\n def test_2a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, None)\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains NULL for required attribute Person.name')\n @db_session\n def test_2b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Person(db.Entity):\n id = PrimaryKey(int)\n name = Required(str)\n tel = Optional(str)\n\n\ndb.generate_mapping(check_tables=False)\nwith db_session:\n db.execute(\n \"\"\"\n create table Person(\n id int primary key,\n name text,\n tel text\n )\n \"\"\"\n )\nwarnings.simplefilter('error')\n\n\nclass TestValidate(unittest.TestCase):\n\n @db_session\n def setUp(self):\n db.execute('delete from Person')\n registry = getattr(core, '__warningregistry__', {})\n for key in list(registry):\n if type(key) is not tuple:\n continue\n text, category, lineno = key\n if category is DatabaseContainsIncorrectEmptyValue:\n del registry[key]\n\n @db_session\n def test_1a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, '')\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains empty string for required attribute Person.name')\n @db_session\n def test_1b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n\n @db_session\n def test_2a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, None)\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains NULL for required attribute Person.name')\n @db_session\n def test_2b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\ndb = Database('sqlite', ':memory:')\n\n\nclass Person(db.Entity):\n id = PrimaryKey(int)\n name = Required(str)\n tel = Optional(str)\n\n\ndb.generate_mapping(check_tables=False)\nwith db_session:\n db.execute(\n \"\"\"\n create table Person(\n id int primary key,\n name text,\n tel text\n )\n \"\"\"\n )\nwarnings.simplefilter('error')\n\n\nclass TestValidate(unittest.TestCase):\n\n @db_session\n def setUp(self):\n db.execute('delete from Person')\n registry = getattr(core, '__warningregistry__', {})\n for key in list(registry):\n if type(key) is not tuple:\n continue\n text, category, lineno = key\n if category is DatabaseContainsIncorrectEmptyValue:\n del registry[key]\n\n @db_session\n def test_1a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, '')\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains empty string for required attribute Person.name')\n @db_session\n def test_1b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n\n @db_session\n def test_2a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, None)\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains NULL for required attribute Person.name')\n @db_session\n def test_2b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest, warnings\r\n\r\nfrom pony.orm import *\r\nfrom pony.orm import core\r\nfrom pony.orm.tests.testutils import raises_exception\r\n\r\ndb = Database('sqlite', ':memory:')\r\n\r\nclass Person(db.Entity):\r\n id = PrimaryKey(int)\r\n name = Required(str)\r\n tel = Optional(str)\r\n\r\ndb.generate_mapping(check_tables=False)\r\n\r\nwith db_session:\r\n db.execute(\"\"\"\r\n create table Person(\r\n id int primary key,\r\n name text,\r\n tel text\r\n )\r\n \"\"\")\r\n\r\nwarnings.simplefilter('error', )\r\n\r\n\r\nclass TestValidate(unittest.TestCase):\r\n\r\n @db_session\r\n def setUp(self):\r\n db.execute('delete from Person')\r\n registry = getattr(core, '__warningregistry__', {})\r\n for key in list(registry):\r\n if type(key) is not tuple: continue\r\n text, category, lineno = key\r\n if category is DatabaseContainsIncorrectEmptyValue:\r\n del registry[key]\r\n\r\n @db_session\r\n def test_1a(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue)\r\n db.insert('Person', id=1, name='', tel='111')\r\n p = Person.get(id=1)\r\n self.assertEqual(p.name, '')\r\n\r\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\r\n 'Database contains empty string for required attribute Person.name')\r\n @db_session\r\n def test_1b(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\r\n db.insert('Person', id=1, name='', tel='111')\r\n p = Person.get(id=1)\r\n\r\n @db_session\r\n def test_2a(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue)\r\n db.insert('Person', id=1, name=None, tel='111')\r\n p = Person.get(id=1)\r\n self.assertEqual(p.name, None)\r\n\r\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\r\n 'Database contains NULL for required attribute Person.name')\r\n @db_session\r\n def test_2b(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\r\n db.insert('Person', id=1, name=None, tel='111')\r\n p = Person.get(id=1)\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n",
"step-ids": [
6,
8,
9,
10,
12
]
}
|
[
6,
8,
9,
10,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ROOT_URL = 'https://api.twitter.com'
UPLOAD_URL = 'https://upload.twitter.com'
REQUEST_TOKEN_URL = f'{ROOT_URL}/oauth/request_token'
AUTHENTICATE_URL = f'{ROOT_URL}/oauth/authenticate'
ACCESS_TOKEN_URL = f'{ROOT_URL}/oauth/access_token'
VERSION = '1.1'
USER_SEARCH_URL = f'{ROOT_URL}/{VERSION}/users/search.json'
FRIENDSHIP_CREATE_URL = f'{ROOT_URL}/{VERSION}/friendships/create.json'
FRIENDSHIP_DESTROY_URL = f'{ROOT_URL}/{VERSION}/friendships/destroy.json'
FRIENDS_URL = f'{ROOT_URL}/{VERSION}/friends/list.json'
FOLLOWERS_URL = f'{ROOT_URL}/{VERSION}/followers/list.json'
TWEET_SEARCH_URL = f'{ROOT_URL}/{VERSION}/search/tweets.json'
TWEET_LIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/create.json'
TWEET_UNLIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/destroy.json'
RETWEET_URL = ROOT_URL + '/' + VERSION + '/retweet/create/{tweet_id}.json'
REMOVE_RETWEET_URL = (ROOT_URL + '/' + VERSION +
'/unretweet/create/{tweet_id}.json')
FAVOURITED_TWEETS_URL = ROOT_URL + '/' + VERSION + '/favorites/list.json'
STATUS_UPDATE_URL = f'{ROOT_URL}/{VERSION}/statuses/update.json'
MEDIA_UPLOAD_URL = f'{UPLOAD_URL}/{VERSION}/media/upload.json'
TRENDS_URL = f'{ROOT_URL}/{VERSION}/trends/place.json'
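# Note: RETWEET_URL and REMOVE_RETWEET_URL keep a literal '{tweet_id}' placeholder,
# presumably to be filled in later with str.format, e.g.
# RETWEET_URL.format(tweet_id='1234567890').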
<|reserved_special_token_1|>
"""
Constants used throughout this program
are stored here.
"""
ROOT_URL = "https://api.twitter.com"
UPLOAD_URL = "https://upload.twitter.com"
REQUEST_TOKEN_URL = f'{ROOT_URL}/oauth/request_token'
AUTHENTICATE_URL = f'{ROOT_URL}/oauth/authenticate'
ACCESS_TOKEN_URL = f'{ROOT_URL}/oauth/access_token'
VERSION = '1.1'
USER_SEARCH_URL = f'{ROOT_URL}/{VERSION}/users/search.json'
FRIENDSHIP_CREATE_URL = f'{ROOT_URL}/{VERSION}/friendships/create.json'
FRIENDSHIP_DESTROY_URL = f'{ROOT_URL}/{VERSION}/friendships/destroy.json'
FRIENDS_URL = f'{ROOT_URL}/{VERSION}/friends/list.json'
FOLLOWERS_URL = f'{ROOT_URL}/{VERSION}/followers/list.json'
TWEET_SEARCH_URL = f'{ROOT_URL}/{VERSION}/search/tweets.json'
TWEET_LIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/create.json'
TWEET_UNLIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/destroy.json'
RETWEET_URL = ROOT_URL + "/" + VERSION + "/retweet/create/{tweet_id}.json"
REMOVE_RETWEET_URL = ROOT_URL + "/" + \
VERSION + "/unretweet/create/{tweet_id}.json"
FAVOURITED_TWEETS_URL = ROOT_URL + "/" + VERSION + "/favorites/list.json"
STATUS_UPDATE_URL = f'{ROOT_URL}/{VERSION}/statuses/update.json'
MEDIA_UPLOAD_URL = f'{UPLOAD_URL}/{VERSION}/media/upload.json'
TRENDS_URL = f'{ROOT_URL}/{VERSION}/trends/place.json'
|
flexible
|
{
"blob_id": "c907f6b954aa3eae21a54eba9d54c116576bd40a",
"index": 5848,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nROOT_URL = 'https://api.twitter.com'\nUPLOAD_URL = 'https://upload.twitter.com'\nREQUEST_TOKEN_URL = f'{ROOT_URL}/oauth/request_token'\nAUTHENTICATE_URL = f'{ROOT_URL}/oauth/authenticate'\nACCESS_TOKEN_URL = f'{ROOT_URL}/oauth/access_token'\nVERSION = '1.1'\nUSER_SEARCH_URL = f'{ROOT_URL}/{VERSION}/users/search.json'\nFRIENDSHIP_CREATE_URL = f'{ROOT_URL}/{VERSION}/friendships/create.json'\nFRIENDSHIP_DESTROY_URL = f'{ROOT_URL}/{VERSION}/friendships/destroy.json'\nFRIENDS_URL = f'{ROOT_URL}/{VERSION}/friends/list.json'\nFOLLOWERS_URL = f'{ROOT_URL}/{VERSION}/followers/list.json'\nTWEET_SEARCH_URL = f'{ROOT_URL}/{VERSION}/search/tweets.json'\nTWEET_LIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/create.json'\nTWEET_UNLIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/destroy.json'\nRETWEET_URL = ROOT_URL + '/' + VERSION + '/retweet/create/{tweet_id}.json'\nREMOVE_RETWEET_URL = (ROOT_URL + '/' + VERSION +\n '/unretweet/create/{tweet_id}.json')\nFAVOURITED_TWEETS_URL = ROOT_URL + '/' + VERSION + '/favorites/list.json'\nSTATUS_UPDATE_URL = f'{ROOT_URL}/{VERSION}/statuses/update.json'\nMEDIA_UPLOAD_URL = f'{UPLOAD_URL}/{VERSION}/media/upload.json'\nTRENDS_URL = f'{ROOT_URL}/{VERSION}/trends/place.json'\n",
"step-3": "\"\"\"\nConstants to be used throughout this program\nstored here.\n\"\"\"\nROOT_URL = \"https://api.twitter.com\"\nUPLOAD_URL = \"https://upload.twitter.com\"\n\nREQUEST_TOKEN_URL = f'{ROOT_URL}/oauth/request_token'\nAUTHENTICATE_URL = f'{ROOT_URL}/oauth/authenticate'\nACCESS_TOKEN_URL = f'{ROOT_URL}/oauth/access_token'\n\nVERSION = '1.1'\n\nUSER_SEARCH_URL = f'{ROOT_URL}/{VERSION}/users/search.json'\nFRIENDSHIP_CREATE_URL = f'{ROOT_URL}/{VERSION}/friendships/create.json'\nFRIENDSHIP_DESTROY_URL = f'{ROOT_URL}/{VERSION}/friendships/destroy.json'\nFRIENDS_URL = f'{ROOT_URL}/{VERSION}/friends/list.json'\nFOLLOWERS_URL = f'{ROOT_URL}/{VERSION}/followers/list.json'\n\nTWEET_SEARCH_URL = f'{ROOT_URL}/{VERSION}/search/tweets.json'\nTWEET_LIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/create.json'\nTWEET_UNLIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/destroy.json'\nRETWEET_URL = ROOT_URL + \"/\" + VERSION + \"/retweet/create/{tweet_id}.json\"\nREMOVE_RETWEET_URL = ROOT_URL + \"/\" + \\\n VERSION + \"/unretweet/create/{tweet_id}.json\"\nFAVOURITED_TWEETS_URL = ROOT_URL + \"/\" + VERSION + \"/favorites/list.json\"\n\nSTATUS_UPDATE_URL = f'{ROOT_URL}/{VERSION}/statuses/update.json'\nMEDIA_UPLOAD_URL = f'{UPLOAD_URL}/{VERSION}/media/upload.json'\n\nTRENDS_URL = f'{ROOT_URL}/{VERSION}/trends/place.json'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
authors.append(text[start + length + len('<![CDATA['):end - len(']]>')])
while text.find('<wp:author_display_name>', start + 1) != -1:
start = text.find('<wp:author_display_name>', start + 1)
end = text.find('</wp:author_display_name>', end + 1)
authors.append(text[start + length + len('<![CDATA['):end - len(']]>')])
authors.sort()
for author in authors:
print(author)
for i in range(len(authors) - 1):
if authors[i] == authors[i + 1]:
print(authors[i], 'was double counted')
print(len(authors))
<|reserved_special_token_1|>
file = open('thegazelle.wordpress.2016-06-22.xml', 'r')
text = file.read()
authors = []
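# In the WordPress export, each name is wrapped as
# <wp:author_display_name><![CDATA[Name]]></wp:author_display_name>, so the slicing below
# strips the '<![CDATA[' prefix and the ']]>' suffix around the display name.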
start = text.find('<wp:author_display_name>')
length = len('<wp:author_display_name>')
end = text.find('</wp:author_display_name')
authors.append(text[start + length + len('<![CDATA['):end - len(']]>')])
while text.find('<wp:author_display_name>', start + 1) != -1:
start = text.find('<wp:author_display_name>', start + 1)
end = text.find('</wp:author_display_name>', end + 1)
authors.append(text[start + length + len('<![CDATA['):end - len(']]>')])
authors.sort()
for author in authors:
print(author)
for i in range(len(authors) - 1):
if authors[i] == authors[i + 1]:
print(authors[i], 'was double counted')
print(len(authors))
<|reserved_special_token_1|>
file = open('thegazelle.wordpress.2016-06-22.xml', 'r')
text = file.read()
authors = []
start = text.find("<wp:author_display_name>")
length = len("<wp:author_display_name>")
end = text.find("</wp:author_display_name")
authors.append(text[start+length+len("<![CDATA["):end-len("]]>")])
while text.find("<wp:author_display_name>", start+1) != -1:
start = text.find("<wp:author_display_name>", start+1)
end = text.find("</wp:author_display_name>", end+1)
authors.append(text[start+length+len("<![CDATA["):end-len("]]>")])
authors.sort()
for author in authors:
print(author)
for i in range(len(authors)-1):
if (authors[i] == authors[i+1]):
print(authors[i], "was double counted")
print(len(authors))
|
flexible
|
{
"blob_id": "cf5062c999c6c29f103428c247d8d1a4550f9d75",
"index": 8086,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nauthors.append(text[start + length + len('<![CDATA['):end - len(']]>')])\nwhile text.find('<wp:author_display_name>', start + 1) != -1:\n start = text.find('<wp:author_display_name>', start + 1)\n end = text.find('</wp:author_display_name>', end + 1)\n authors.append(text[start + length + len('<![CDATA['):end - len(']]>')])\nauthors.sort()\nfor author in authors:\n print(author)\nfor i in range(len(authors) - 1):\n if authors[i] == authors[i + 1]:\n print(authors[i], 'was double counted')\nprint(len(authors))\n",
"step-3": "file = open('thegazelle.wordpress.2016-06-22.xml', 'r')\ntext = file.read()\nauthors = []\nstart = text.find('<wp:author_display_name>')\nlength = len('<wp:author_display_name>')\nend = text.find('</wp:author_display_name')\nauthors.append(text[start + length + len('<![CDATA['):end - len(']]>')])\nwhile text.find('<wp:author_display_name>', start + 1) != -1:\n start = text.find('<wp:author_display_name>', start + 1)\n end = text.find('</wp:author_display_name>', end + 1)\n authors.append(text[start + length + len('<![CDATA['):end - len(']]>')])\nauthors.sort()\nfor author in authors:\n print(author)\nfor i in range(len(authors) - 1):\n if authors[i] == authors[i + 1]:\n print(authors[i], 'was double counted')\nprint(len(authors))\n",
"step-4": "file = open('thegazelle.wordpress.2016-06-22.xml', 'r')\ntext = file.read()\nauthors = []\nstart = text.find(\"<wp:author_display_name>\")\nlength = len(\"<wp:author_display_name>\")\nend = text.find(\"</wp:author_display_name\")\nauthors.append(text[start+length+len(\"<![CDATA[\"):end-len(\"]]>\")])\nwhile text.find(\"<wp:author_display_name>\", start+1) != -1:\n start = text.find(\"<wp:author_display_name>\", start+1)\n end = text.find(\"</wp:author_display_name>\", end+1)\n authors.append(text[start+length+len(\"<![CDATA[\"):end-len(\"]]>\")])\nauthors.sort()\nfor author in authors:\n print(author)\n\nfor i in range(len(authors)-1):\n if (authors[i] == authors[i+1]):\n print(authors[i], \"was double counted\")\n\nprint(len(authors))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import krait
from ctrl import ws
krait.mvc.set_init_ctrl(ws.WsPageController())
|
normal
|
{
"blob_id": "da2b946238b429188fe3fa50286658d4b5cdbf41",
"index": 5752,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nkrait.mvc.set_init_ctrl(ws.WsPageController())\n",
"step-3": "import krait\nfrom ctrl import ws\nkrait.mvc.set_init_ctrl(ws.WsPageController())\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class LogPlugin(Plugin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def search(self, message, query, *additional_queries):
chat_history = read_lines_from_file('chatlog.log')
chat_history.reverse()
found_line = None
for line in chat_history:
if query in line:
found_line = line
for additional_query in additional_queries:
if additional_query not in line:
found_line = None
break
if found_line:
break
if found_line:
self.send_message(message.From, line)
return
def replay(self, message, startTime, endTime=None):
start_time = None
end_time = None
try:
start_time = datetime.strptime(startTime, '%Y-%m-%d,%H:%M')
if endTime:
end_time = datetime.strptime(endTime, '%Y-%m-%d,%H:%M')
except Exception as e:
self.send_message(message.From,
                'Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyy-mm-dd,hh:mm>] ; '
+ str(e))
return
chat_history = read_lines_from_file('chatlog.log')
for line in chat_history:
line_tokens = line.split(' : ')
line_time = None
try:
line_time = datetime.strptime(line_tokens[0],
'%Y-%m-%d %H:%M:%S.%f')
except:
continue
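            # Convert the timedelta to seconds by hand; on Python >= 2.7 this is equivalent
            # to delta.total_seconds().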
delta = line_time - start_time
delta_seconds = (delta.microseconds + (delta.seconds + delta.
days * 24 * 3600) * 10 ** 6) / 10 ** 6
if line_time > start_time and (end_time and line_time < end_time
) or not end_time and abs(delta_seconds) < 10:
self.send_message(message.From, line)
time.sleep(1)
self.send_message(message.From, 'Done replay.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LogPlugin(Plugin):
def initialize(self):
self.add_trigger(on_message)
self.add_command('!chatsearch', self.search)
self.add_command('!chatreplay', self.replay)
<|reserved_special_token_0|>
def search(self, message, query, *additional_queries):
chat_history = read_lines_from_file('chatlog.log')
chat_history.reverse()
found_line = None
for line in chat_history:
if query in line:
found_line = line
for additional_query in additional_queries:
if additional_query not in line:
found_line = None
break
if found_line:
break
if found_line:
self.send_message(message.From, line)
return
def replay(self, message, startTime, endTime=None):
start_time = None
end_time = None
try:
start_time = datetime.strptime(startTime, '%Y-%m-%d,%H:%M')
if endTime:
end_time = datetime.strptime(endTime, '%Y-%m-%d,%H:%M')
except Exception as e:
self.send_message(message.From,
                'Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyy-mm-dd,hh:mm>] ; '
+ str(e))
return
chat_history = read_lines_from_file('chatlog.log')
for line in chat_history:
line_tokens = line.split(' : ')
line_time = None
try:
line_time = datetime.strptime(line_tokens[0],
'%Y-%m-%d %H:%M:%S.%f')
except:
continue
delta = line_time - start_time
delta_seconds = (delta.microseconds + (delta.seconds + delta.
days * 24 * 3600) * 10 ** 6) / 10 ** 6
if line_time > start_time and (end_time and line_time < end_time
) or not end_time and abs(delta_seconds) < 10:
self.send_message(message.From, line)
time.sleep(1)
self.send_message(message.From, 'Done replay.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LogPlugin(Plugin):
def initialize(self):
self.add_trigger(on_message)
self.add_command('!chatsearch', self.search)
self.add_command('!chatreplay', self.replay)
def run(self, message):
append_to_file(str(datetime.now()) + ' : ' + message.From + ' : ' +
message.Body + '\n', 'chatlog.log')
def search(self, message, query, *additional_queries):
chat_history = read_lines_from_file('chatlog.log')
chat_history.reverse()
found_line = None
for line in chat_history:
if query in line:
found_line = line
for additional_query in additional_queries:
if additional_query not in line:
found_line = None
break
if found_line:
break
if found_line:
self.send_message(message.From, line)
return
def replay(self, message, startTime, endTime=None):
start_time = None
end_time = None
try:
start_time = datetime.strptime(startTime, '%Y-%m-%d,%H:%M')
if endTime:
end_time = datetime.strptime(endTime, '%Y-%m-%d,%H:%M')
except Exception as e:
self.send_message(message.From,
                'Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyy-mm-dd,hh:mm>] ; '
+ str(e))
return
chat_history = read_lines_from_file('chatlog.log')
for line in chat_history:
line_tokens = line.split(' : ')
line_time = None
try:
line_time = datetime.strptime(line_tokens[0],
'%Y-%m-%d %H:%M:%S.%f')
except:
continue
delta = line_time - start_time
delta_seconds = (delta.microseconds + (delta.seconds + delta.
days * 24 * 3600) * 10 ** 6) / 10 ** 6
if line_time > start_time and (end_time and line_time < end_time
) or not end_time and abs(delta_seconds) < 10:
self.send_message(message.From, line)
time.sleep(1)
self.send_message(message.From, 'Done replay.')
<|reserved_special_token_1|>
from base_plugin import *
from plugin_utils import *
from datetime import datetime
import time
class LogPlugin(Plugin):
def initialize(self):
self.add_trigger(on_message)
self.add_command('!chatsearch', self.search)
self.add_command('!chatreplay', self.replay)
def run(self, message):
append_to_file(str(datetime.now()) + ' : ' + message.From + ' : ' +
message.Body + '\n', 'chatlog.log')
def search(self, message, query, *additional_queries):
chat_history = read_lines_from_file('chatlog.log')
chat_history.reverse()
found_line = None
for line in chat_history:
if query in line:
found_line = line
for additional_query in additional_queries:
if additional_query not in line:
found_line = None
break
if found_line:
break
if found_line:
self.send_message(message.From, line)
return
def replay(self, message, startTime, endTime=None):
start_time = None
end_time = None
try:
start_time = datetime.strptime(startTime, '%Y-%m-%d,%H:%M')
if endTime:
end_time = datetime.strptime(endTime, '%Y-%m-%d,%H:%M')
except Exception as e:
self.send_message(message.From,
                'Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyy-mm-dd,hh:mm>] ; '
+ str(e))
return
chat_history = read_lines_from_file('chatlog.log')
for line in chat_history:
line_tokens = line.split(' : ')
line_time = None
try:
line_time = datetime.strptime(line_tokens[0],
'%Y-%m-%d %H:%M:%S.%f')
except:
continue
delta = line_time - start_time
delta_seconds = (delta.microseconds + (delta.seconds + delta.
days * 24 * 3600) * 10 ** 6) / 10 ** 6
if line_time > start_time and (end_time and line_time < end_time
) or not end_time and abs(delta_seconds) < 10:
self.send_message(message.From, line)
time.sleep(1)
self.send_message(message.From, 'Done replay.')
<|reserved_special_token_1|>
from base_plugin import *
from plugin_utils import *
from datetime import datetime
import time
class LogPlugin(Plugin):
def initialize(self):
self.add_trigger(on_message)
self.add_command("!chatsearch", self.search)
self.add_command("!chatreplay", self.replay)
def run(self, message):
append_to_file(str(datetime.now()) + " : " + message.From + " : " + message.Body + '\n', "chatlog.log")
def search(self, message, query, *additional_queries):
chat_history = read_lines_from_file("chatlog.log")
chat_history.reverse()
found_line = None
for line in chat_history:
if query in line:
found_line = line
for additional_query in additional_queries:
if additional_query not in line:
found_line = None
break
if found_line:
break
if found_line:
self.send_message(message.From, line)
return
def replay(self, message, startTime, endTime = None):
start_time = None
end_time = None
try:
start_time = datetime.strptime(startTime, "%Y-%m-%d,%H:%M")
if endTime:
end_time = datetime.strptime(endTime, "%Y-%m-%d,%H:%M")
except Exception as e:
self.send_message(message.From, "Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyyy-mm-dd,hh:mm>] ; " + str(e))
return
chat_history = read_lines_from_file("chatlog.log")
for line in chat_history:
line_tokens = line.split(" : ")
line_time = None
try:
line_time = datetime.strptime(line_tokens[0], "%Y-%m-%d %H:%M:%S.%f")
except:
continue
			# Python 2.6 compatibility: timedelta.total_seconds() only exists from 2.7, so compute seconds by hand.
delta = (line_time - start_time)
delta_seconds = (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6) / 10**6
if ((line_time > start_time ) \
and ( end_time and line_time < end_time )) \
or (not end_time and abs(delta_seconds) < 10):
self.send_message(message.From, line)
time.sleep(1)
self.send_message(message.From, "Done replay.")
|
flexible
|
{
"blob_id": "d932ab84848c9a8ca8bb23a57424b8f6190b6260",
"index": 2563,
"step-1": "<mask token>\n\n\nclass LogPlugin(Plugin):\n <mask token>\n <mask token>\n\n def search(self, message, query, *additional_queries):\n chat_history = read_lines_from_file('chatlog.log')\n chat_history.reverse()\n found_line = None\n for line in chat_history:\n if query in line:\n found_line = line\n for additional_query in additional_queries:\n if additional_query not in line:\n found_line = None\n break\n if found_line:\n break\n if found_line:\n self.send_message(message.From, line)\n return\n\n def replay(self, message, startTime, endTime=None):\n start_time = None\n end_time = None\n try:\n start_time = datetime.strptime(startTime, '%Y-%m-%d,%H:%M')\n if endTime:\n end_time = datetime.strptime(endTime, '%Y-%m-%d,%H:%M')\n except Exception as e:\n self.send_message(message.From, \n 'Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyyy-mm-dd,hh:mm>] ; '\n + str(e))\n return\n chat_history = read_lines_from_file('chatlog.log')\n for line in chat_history:\n line_tokens = line.split(' : ')\n line_time = None\n try:\n line_time = datetime.strptime(line_tokens[0],\n '%Y-%m-%d %H:%M:%S.%f')\n except:\n continue\n delta = line_time - start_time\n delta_seconds = (delta.microseconds + (delta.seconds + delta.\n days * 24 * 3600) * 10 ** 6) / 10 ** 6\n if line_time > start_time and (end_time and line_time < end_time\n ) or not end_time and abs(delta_seconds) < 10:\n self.send_message(message.From, line)\n time.sleep(1)\n self.send_message(message.From, 'Done replay.')\n",
"step-2": "<mask token>\n\n\nclass LogPlugin(Plugin):\n\n def initialize(self):\n self.add_trigger(on_message)\n self.add_command('!chatsearch', self.search)\n self.add_command('!chatreplay', self.replay)\n <mask token>\n\n def search(self, message, query, *additional_queries):\n chat_history = read_lines_from_file('chatlog.log')\n chat_history.reverse()\n found_line = None\n for line in chat_history:\n if query in line:\n found_line = line\n for additional_query in additional_queries:\n if additional_query not in line:\n found_line = None\n break\n if found_line:\n break\n if found_line:\n self.send_message(message.From, line)\n return\n\n def replay(self, message, startTime, endTime=None):\n start_time = None\n end_time = None\n try:\n start_time = datetime.strptime(startTime, '%Y-%m-%d,%H:%M')\n if endTime:\n end_time = datetime.strptime(endTime, '%Y-%m-%d,%H:%M')\n except Exception as e:\n self.send_message(message.From, \n 'Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyyy-mm-dd,hh:mm>] ; '\n + str(e))\n return\n chat_history = read_lines_from_file('chatlog.log')\n for line in chat_history:\n line_tokens = line.split(' : ')\n line_time = None\n try:\n line_time = datetime.strptime(line_tokens[0],\n '%Y-%m-%d %H:%M:%S.%f')\n except:\n continue\n delta = line_time - start_time\n delta_seconds = (delta.microseconds + (delta.seconds + delta.\n days * 24 * 3600) * 10 ** 6) / 10 ** 6\n if line_time > start_time and (end_time and line_time < end_time\n ) or not end_time and abs(delta_seconds) < 10:\n self.send_message(message.From, line)\n time.sleep(1)\n self.send_message(message.From, 'Done replay.')\n",
"step-3": "<mask token>\n\n\nclass LogPlugin(Plugin):\n\n def initialize(self):\n self.add_trigger(on_message)\n self.add_command('!chatsearch', self.search)\n self.add_command('!chatreplay', self.replay)\n\n def run(self, message):\n append_to_file(str(datetime.now()) + ' : ' + message.From + ' : ' +\n message.Body + '\\n', 'chatlog.log')\n\n def search(self, message, query, *additional_queries):\n chat_history = read_lines_from_file('chatlog.log')\n chat_history.reverse()\n found_line = None\n for line in chat_history:\n if query in line:\n found_line = line\n for additional_query in additional_queries:\n if additional_query not in line:\n found_line = None\n break\n if found_line:\n break\n if found_line:\n self.send_message(message.From, line)\n return\n\n def replay(self, message, startTime, endTime=None):\n start_time = None\n end_time = None\n try:\n start_time = datetime.strptime(startTime, '%Y-%m-%d,%H:%M')\n if endTime:\n end_time = datetime.strptime(endTime, '%Y-%m-%d,%H:%M')\n except Exception as e:\n self.send_message(message.From, \n 'Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyyy-mm-dd,hh:mm>] ; '\n + str(e))\n return\n chat_history = read_lines_from_file('chatlog.log')\n for line in chat_history:\n line_tokens = line.split(' : ')\n line_time = None\n try:\n line_time = datetime.strptime(line_tokens[0],\n '%Y-%m-%d %H:%M:%S.%f')\n except:\n continue\n delta = line_time - start_time\n delta_seconds = (delta.microseconds + (delta.seconds + delta.\n days * 24 * 3600) * 10 ** 6) / 10 ** 6\n if line_time > start_time and (end_time and line_time < end_time\n ) or not end_time and abs(delta_seconds) < 10:\n self.send_message(message.From, line)\n time.sleep(1)\n self.send_message(message.From, 'Done replay.')\n",
"step-4": "from base_plugin import *\nfrom plugin_utils import *\nfrom datetime import datetime\nimport time\n\n\nclass LogPlugin(Plugin):\n\n def initialize(self):\n self.add_trigger(on_message)\n self.add_command('!chatsearch', self.search)\n self.add_command('!chatreplay', self.replay)\n\n def run(self, message):\n append_to_file(str(datetime.now()) + ' : ' + message.From + ' : ' +\n message.Body + '\\n', 'chatlog.log')\n\n def search(self, message, query, *additional_queries):\n chat_history = read_lines_from_file('chatlog.log')\n chat_history.reverse()\n found_line = None\n for line in chat_history:\n if query in line:\n found_line = line\n for additional_query in additional_queries:\n if additional_query not in line:\n found_line = None\n break\n if found_line:\n break\n if found_line:\n self.send_message(message.From, line)\n return\n\n def replay(self, message, startTime, endTime=None):\n start_time = None\n end_time = None\n try:\n start_time = datetime.strptime(startTime, '%Y-%m-%d,%H:%M')\n if endTime:\n end_time = datetime.strptime(endTime, '%Y-%m-%d,%H:%M')\n except Exception as e:\n self.send_message(message.From, \n 'Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyyy-mm-dd,hh:mm>] ; '\n + str(e))\n return\n chat_history = read_lines_from_file('chatlog.log')\n for line in chat_history:\n line_tokens = line.split(' : ')\n line_time = None\n try:\n line_time = datetime.strptime(line_tokens[0],\n '%Y-%m-%d %H:%M:%S.%f')\n except:\n continue\n delta = line_time - start_time\n delta_seconds = (delta.microseconds + (delta.seconds + delta.\n days * 24 * 3600) * 10 ** 6) / 10 ** 6\n if line_time > start_time and (end_time and line_time < end_time\n ) or not end_time and abs(delta_seconds) < 10:\n self.send_message(message.From, line)\n time.sleep(1)\n self.send_message(message.From, 'Done replay.')\n",
"step-5": "from base_plugin import *\nfrom plugin_utils import *\n\nfrom datetime import datetime\nimport time\n\n\nclass LogPlugin(Plugin):\n\tdef initialize(self):\n\t\tself.add_trigger(on_message)\n\n\t\tself.add_command(\"!chatsearch\", self.search)\n\t\tself.add_command(\"!chatreplay\", self.replay)\n\n\n\tdef run(self, message):\n\t\tappend_to_file(str(datetime.now()) + \" : \" + message.From + \" : \" + message.Body + '\\n', \"chatlog.log\")\n\n\n\tdef search(self, message, query, *additional_queries):\n\t\tchat_history = read_lines_from_file(\"chatlog.log\")\n\t\tchat_history.reverse()\n\n\t\tfound_line = None\n\t\tfor line in chat_history:\n\t\t\tif query in line:\n\t\t\t\tfound_line = line\n\t\t\t\tfor additional_query in additional_queries:\n\t\t\t\t\tif additional_query not in line:\n\t\t\t\t\t\tfound_line = None\n\t\t\t\t\t\tbreak\n\n\t\t\t\tif found_line:\n\t\t\t\t\tbreak\n\n\t\tif found_line:\n\t\t\tself.send_message(message.From, line)\n\n\t\treturn\n\n\tdef replay(self, message, startTime, endTime = None):\n\t\tstart_time = None\n\t\tend_time = None\n\t\ttry:\n\t\t\tstart_time = datetime.strptime(startTime, \"%Y-%m-%d,%H:%M\")\n\t\t\tif endTime:\n\t\t\t\tend_time = datetime.strptime(endTime, \"%Y-%m-%d,%H:%M\")\n\t\texcept Exception as e:\n\t\t\tself.send_message(message.From, \"Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyyy-mm-dd,hh:mm>] ; \" + str(e))\n\t\t\treturn\n\n\n\t\tchat_history = read_lines_from_file(\"chatlog.log\")\n\n\t\tfor line in chat_history:\n\t\t\tline_tokens = line.split(\" : \")\n\n\t\t\tline_time = None\n\t\t\ttry:\n\t\t\t\tline_time = datetime.strptime(line_tokens[0], \"%Y-%m-%d %H:%M:%S.%f\")\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t#2.6 compatibility.\n\t\t\tdelta = (line_time - start_time)\n\t\t\tdelta_seconds = (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6) / 10**6\n\n\t\t\tif ((line_time > start_time ) \\\n\t\t\t\t\tand ( end_time and line_time < end_time )) \\\n\t\t\t\tor (not end_time and abs(delta_seconds) < 10):\n\t\t\t\t\tself.send_message(message.From, line)\n\t\t\t\t\ttime.sleep(1)\n\n\t\tself.send_message(message.From, \"Done replay.\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class Methylation(object):
def __init__(self, table, data_type, name, called_sites):
self.table = table
self.data_type = data_type
self.name = name
self.called_sites = called_sites
<|reserved_special_token_0|>
def parse_nanopolish(filename, file_type, name, window, smoothen=5):
table = pd.read_csv(filename, sep='\t')
gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':
'Chromosome', 'end': 'End', 'Strand': 'strand'}))
logging.info('Read the file in a dataframe.')
if window:
gr = gr[window.chromosome, window.begin:window.end]
try:
gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))
except KeyError:
sys.stderr.write('\n\n\nProblem parsing nanopolish file {}!\n'.
format(filename))
sys.stderr.write(
'Could it be that there are no calls in your selected window?\n')
sys.stderr.write('\n\n\nDetailed error:\n')
raise
table = gr.df
if file_type in ['nanopolish_call', 'nanopolish_phased']:
table = table.drop(columns=['Start', 'End', 'log_lik_methylated',
'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',
'sequence'])
return Methylation(table=table.sort_values(['read_name', 'pos']),
data_type=file_type, name=name, called_sites=len(table))
if file_type == 'nanopolish_freq':
called_sites = table.called_sites
table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',
'called_sites', 'called_sites_methylated', 'group_sequence'])
return Methylation(table=table.sort_values('pos').groupby('pos').
mean().rolling(window=smoothen, center=True).mean(), data_type=
file_type, name=name, called_sites=called_sites.sum())
<|reserved_special_token_0|>
def get_modified_reference_positions(read):
if read.has_tag('MM'):
basemod = read.get_tag('MM').split(',', 1)[0]
if '-' in basemod:
sys.exit(
'ERROR: modifications on negative strand currently unsupported.'
)
base, mod = basemod.split('+')
deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]
probabilities = phred_to_probability(read.get_tag('MP'))
locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),
np.ones(shape=len(deltas) - 1))).astype('int')
base_index = np.array([i for i, letter in enumerate(read.
get_forward_sequence()) if letter == base])
modified_bases = base_index[locations]
refpos = np.array(read.get_reference_positions(full_length=True))
if read.is_reverse:
refpos = np.flipud(refpos)
probabilities = probabilities[::-1]
return basemod, refpos[modified_bases], probabilities
else:
return None, [None], [None]
def errs_tab(n):
"""Generate list of error rates for qualities less than equal than n."""
return [(10 ** (q / -10)) for q in range(n + 1)]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Methylation(object):
def __init__(self, table, data_type, name, called_sites):
self.table = table
self.data_type = data_type
self.name = name
self.called_sites = called_sites
<|reserved_special_token_0|>
def parse_nanopolish(filename, file_type, name, window, smoothen=5):
table = pd.read_csv(filename, sep='\t')
gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':
'Chromosome', 'end': 'End', 'Strand': 'strand'}))
logging.info('Read the file in a dataframe.')
if window:
gr = gr[window.chromosome, window.begin:window.end]
try:
gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))
except KeyError:
sys.stderr.write('\n\n\nProblem parsing nanopolish file {}!\n'.
format(filename))
sys.stderr.write(
'Could it be that there are no calls in your selected window?\n')
sys.stderr.write('\n\n\nDetailed error:\n')
raise
table = gr.df
if file_type in ['nanopolish_call', 'nanopolish_phased']:
table = table.drop(columns=['Start', 'End', 'log_lik_methylated',
'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',
'sequence'])
return Methylation(table=table.sort_values(['read_name', 'pos']),
data_type=file_type, name=name, called_sites=len(table))
if file_type == 'nanopolish_freq':
called_sites = table.called_sites
table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',
'called_sites', 'called_sites_methylated', 'group_sequence'])
return Methylation(table=table.sort_values('pos').groupby('pos').
mean().rolling(window=smoothen, center=True).mean(), data_type=
file_type, name=name, called_sites=called_sites.sum())
def parse_nanocompore(filename, name, window):
def nanocompore_columns_of_interest(column):
if column in ['pos', 'ref_id']:
return True
elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):
return True
else:
return False
table = pd.read_csv(filename, sep='\t', usecols=
nanocompore_columns_of_interest)
if window:
table = table[table['ref_id'] == window.chromosome]
return Methylation(table=table.sort_values('pos').append({'pos': window
.end}, ignore_index=True).drop(columns='ref_id').fillna(1.0),
data_type='nanocompore', name=name, called_sites=len(table))
def parse_ont_cram(filename, name, window):
cram = pysam.AlignmentFile(filename, 'rc')
data = []
for read in cram.fetch(reference=window.chromosome, start=window.begin,
end=window.end):
if not read.is_supplementary and not read.is_secondary:
mod, positions, quals = get_modified_reference_positions(read)
for pos, qual in zip(positions, quals):
if pos is not None:
data.append((read.query_name, '-' if read.is_reverse else
'+', pos, qual, mod))
return Methylation(table=pd.DataFrame(data, columns=['read_name',
'strand', 'pos', 'quality', 'mod']).astype(dtype={'mod': 'category',
'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=
'ont-cram', name=name, called_sites=len(data))
def get_modified_reference_positions(read):
if read.has_tag('MM'):
basemod = read.get_tag('MM').split(',', 1)[0]
if '-' in basemod:
sys.exit(
'ERROR: modifications on negative strand currently unsupported.'
)
base, mod = basemod.split('+')
deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]
probabilities = phred_to_probability(read.get_tag('MP'))
locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),
np.ones(shape=len(deltas) - 1))).astype('int')
base_index = np.array([i for i, letter in enumerate(read.
get_forward_sequence()) if letter == base])
modified_bases = base_index[locations]
refpos = np.array(read.get_reference_positions(full_length=True))
if read.is_reverse:
refpos = np.flipud(refpos)
probabilities = probabilities[::-1]
return basemod, refpos[modified_bases], probabilities
else:
return None, [None], [None]
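# Worked example of the MM-tag decoding above (illustrative values, not from a real read):
# for a forward sequence 'ACGCCGT' and an MM tag value of 'C+m,1,0' the deltas are [1, 0];
# np.cumsum([1, 0]) + [0, 1] == [1, 2], which picks the 2nd and 3rd C out of
# base_index == [1, 3, 4], i.e. sequence positions 3 and 4.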
def errs_tab(n):
"""Generate list of error rates for qualities less than equal than n."""
return [(10 ** (q / -10)) for q in range(n + 1)]
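# Worked example: errs_tab(40)[20] == 10 ** (20 / -10) == 0.01, i.e. Phred quality 20 maps
# to a 1% error rate. A hypothetical helper (the name and the Phred+33 offset are
# assumptions; the actual phred_to_probability called above is not shown in this excerpt)
# could index the table like:
#
#     table = errs_tab(93)
#     error_rates = [table[ord(char) - 33] for char in quality_string]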
<|reserved_special_token_0|>
def get_data(methylation_files, names, window, smoothen=5):
"""
Import methylation data from all files in the list methylation_files
Data can be either frequency or raw.
data is extracted within the window args.window
Frequencies are smoothened using a sliding window
"""
return [read_meth(f, n, window, smoothen) for f, n in zip(
methylation_files, names)]
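# Hedged usage sketch (not part of the original): the window argument is assumed to be an
# object exposing .chromosome, .begin and .end, as the parsers above expect, and the file
# names are made up.
#
#     meths = get_data(['wt_freq.tsv', 'ko_freq.tsv'], names=['wt', 'ko'],
#                      window=window, smoothen=5)
#     for m in meths:
#         print(m.name, m.data_type, m.called_sites)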
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Methylation(object):
def __init__(self, table, data_type, name, called_sites):
self.table = table
self.data_type = data_type
self.name = name
self.called_sites = called_sites
def read_meth(filename, name, window, smoothen=5):
"""
    Convert a file from nanopolish to a pandas dataframe.
    Input can be from calculate_methylation_frequency,
    which returns a dataframe with 'chromosome', 'pos', 'methylated_frequency',
    smoothed here by a rolling average.
    Input can also be raw data per read, optionally phased,
    which returns a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'.
"""
file_type = file_sniffer(filename)
logging.info('File is of type {}'.format(file_type))
try:
if file_type.startswith('nanopolish'):
return parse_nanopolish(filename, file_type, name, window,
smoothen=smoothen)
elif file_type == 'nanocompore':
return parse_nanocompore(filename, name, window)
elif file_type == 'ont-cram':
return parse_ont_cram(filename, name, window)
except Exception:
sys.stderr.write('\n\n\nInput file {} not recognized!\n'.format(
filename))
sys.stderr.write('\n\n\nDetailed error:\n')
raise
def parse_nanopolish(filename, file_type, name, window, smoothen=5):
table = pd.read_csv(filename, sep='\t')
gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':
'Chromosome', 'end': 'End', 'Strand': 'strand'}))
logging.info('Read the file in a dataframe.')
if window:
gr = gr[window.chromosome, window.begin:window.end]
try:
gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))
except KeyError:
sys.stderr.write('\n\n\nProblem parsing nanopolish file {}!\n'.
format(filename))
sys.stderr.write(
'Could it be that there are no calls in your selected window?\n')
sys.stderr.write('\n\n\nDetailed error:\n')
raise
table = gr.df
if file_type in ['nanopolish_call', 'nanopolish_phased']:
table = table.drop(columns=['Start', 'End', 'log_lik_methylated',
'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',
'sequence'])
return Methylation(table=table.sort_values(['read_name', 'pos']),
data_type=file_type, name=name, called_sites=len(table))
if file_type == 'nanopolish_freq':
called_sites = table.called_sites
table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',
'called_sites', 'called_sites_methylated', 'group_sequence'])
return Methylation(table=table.sort_values('pos').groupby('pos').
mean().rolling(window=smoothen, center=True).mean(), data_type=
file_type, name=name, called_sites=called_sites.sum())
def parse_nanocompore(filename, name, window):
def nanocompore_columns_of_interest(column):
if column in ['pos', 'ref_id']:
return True
elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):
return True
else:
return False
table = pd.read_csv(filename, sep='\t', usecols=
nanocompore_columns_of_interest)
if window:
table = table[table['ref_id'] == window.chromosome]
return Methylation(table=table.sort_values('pos').append({'pos': window
.end}, ignore_index=True).drop(columns='ref_id').fillna(1.0),
data_type='nanocompore', name=name, called_sites=len(table))
def parse_ont_cram(filename, name, window):
cram = pysam.AlignmentFile(filename, 'rc')
data = []
for read in cram.fetch(reference=window.chromosome, start=window.begin,
end=window.end):
if not read.is_supplementary and not read.is_secondary:
mod, positions, quals = get_modified_reference_positions(read)
for pos, qual in zip(positions, quals):
if pos is not None:
data.append((read.query_name, '-' if read.is_reverse else
'+', pos, qual, mod))
return Methylation(table=pd.DataFrame(data, columns=['read_name',
'strand', 'pos', 'quality', 'mod']).astype(dtype={'mod': 'category',
'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=
'ont-cram', name=name, called_sites=len(data))
def get_modified_reference_positions(read):
if read.has_tag('MM'):
basemod = read.get_tag('MM').split(',', 1)[0]
if '-' in basemod:
sys.exit(
'ERROR: modifications on negative strand currently unsupported.'
)
base, mod = basemod.split('+')
deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]
probabilities = phred_to_probability(read.get_tag('MP'))
locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),
np.ones(shape=len(deltas) - 1))).astype('int')
base_index = np.array([i for i, letter in enumerate(read.
get_forward_sequence()) if letter == base])
modified_bases = base_index[locations]
refpos = np.array(read.get_reference_positions(full_length=True))
if read.is_reverse:
refpos = np.flipud(refpos)
probabilities = probabilities[::-1]
return basemod, refpos[modified_bases], probabilities
else:
return None, [None], [None]
def errs_tab(n):
"""Generate list of error rates for qualities less than equal than n."""
return [(10 ** (q / -10)) for q in range(n + 1)]
def phred_to_probability(quals, tab=errs_tab(128)):
return [tab[ord(q) - 33] for q in quals]
def get_data(methylation_files, names, window, smoothen=5):
"""
Import methylation data from all files in the list methylation_files
Data can be either frequency or raw.
data is extracted within the window args.window
Frequencies are smoothened using a sliding window
"""
return [read_meth(f, n, window, smoothen) for f, n in zip(
methylation_files, names)]
<|reserved_special_token_1|>
import pandas as pd
import pyranges as pr
import numpy as np
import sys
import logging
from methplotlib.utils import file_sniffer
import pysam
class Methylation(object):
def __init__(self, table, data_type, name, called_sites):
self.table = table
self.data_type = data_type
self.name = name
self.called_sites = called_sites
def read_meth(filename, name, window, smoothen=5):
"""
converts a file from nanopolish to a pandas dataframe
input can be from calculate_methylation_frequency
which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'
smoothening the result by a rolling average
input can also be raw data per read, optionally phased
which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'
"""
file_type = file_sniffer(filename)
logging.info('File is of type {}'.format(file_type))
try:
if file_type.startswith('nanopolish'):
return parse_nanopolish(filename, file_type, name, window,
smoothen=smoothen)
elif file_type == 'nanocompore':
return parse_nanocompore(filename, name, window)
elif file_type == 'ont-cram':
return parse_ont_cram(filename, name, window)
except Exception:
sys.stderr.write('\n\n\nInput file {} not recognized!\n'.format(
filename))
sys.stderr.write('\n\n\nDetailed error:\n')
raise
def parse_nanopolish(filename, file_type, name, window, smoothen=5):
table = pd.read_csv(filename, sep='\t')
gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':
'Chromosome', 'end': 'End', 'Strand': 'strand'}))
logging.info('Read the file in a dataframe.')
if window:
gr = gr[window.chromosome, window.begin:window.end]
try:
gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))
except KeyError:
sys.stderr.write('\n\n\nProblem parsing nanopolish file {}!\n'.
format(filename))
sys.stderr.write(
'Could it be that there are no calls in your selected window?\n')
sys.stderr.write('\n\n\nDetailed error:\n')
raise
table = gr.df
if file_type in ['nanopolish_call', 'nanopolish_phased']:
table = table.drop(columns=['Start', 'End', 'log_lik_methylated',
'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',
'sequence'])
return Methylation(table=table.sort_values(['read_name', 'pos']),
data_type=file_type, name=name, called_sites=len(table))
if file_type == 'nanopolish_freq':
called_sites = table.called_sites
table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',
'called_sites', 'called_sites_methylated', 'group_sequence'])
return Methylation(table=table.sort_values('pos').groupby('pos').
mean().rolling(window=smoothen, center=True).mean(), data_type=
file_type, name=name, called_sites=called_sites.sum())
def parse_nanocompore(filename, name, window):
def nanocompore_columns_of_interest(column):
if column in ['pos', 'ref_id']:
return True
elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):
return True
else:
return False
table = pd.read_csv(filename, sep='\t', usecols=
nanocompore_columns_of_interest)
if window:
table = table[table['ref_id'] == window.chromosome]
return Methylation(table=table.sort_values('pos').append({'pos': window
.end}, ignore_index=True).drop(columns='ref_id').fillna(1.0),
data_type='nanocompore', name=name, called_sites=len(table))
def parse_ont_cram(filename, name, window):
cram = pysam.AlignmentFile(filename, 'rc')
data = []
for read in cram.fetch(reference=window.chromosome, start=window.begin,
end=window.end):
if not read.is_supplementary and not read.is_secondary:
mod, positions, quals = get_modified_reference_positions(read)
for pos, qual in zip(positions, quals):
if pos is not None:
data.append((read.query_name, '-' if read.is_reverse else
'+', pos, qual, mod))
return Methylation(table=pd.DataFrame(data, columns=['read_name',
'strand', 'pos', 'quality', 'mod']).astype(dtype={'mod': 'category',
'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=
'ont-cram', name=name, called_sites=len(data))
def get_modified_reference_positions(read):
if read.has_tag('MM'):
basemod = read.get_tag('MM').split(',', 1)[0]
if '-' in basemod:
sys.exit(
'ERROR: modifications on negative strand currently unsupported.'
)
base, mod = basemod.split('+')
deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]
probabilities = phred_to_probability(read.get_tag('MP'))
locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),
np.ones(shape=len(deltas) - 1))).astype('int')
base_index = np.array([i for i, letter in enumerate(read.
get_forward_sequence()) if letter == base])
modified_bases = base_index[locations]
refpos = np.array(read.get_reference_positions(full_length=True))
if read.is_reverse:
refpos = np.flipud(refpos)
probabilities = probabilities[::-1]
return basemod, refpos[modified_bases], probabilities
else:
return None, [None], [None]
def errs_tab(n):
"""Generate list of error rates for qualities less than equal than n."""
return [(10 ** (q / -10)) for q in range(n + 1)]
def phred_to_probability(quals, tab=errs_tab(128)):
return [tab[ord(q) - 33] for q in quals]
def get_data(methylation_files, names, window, smoothen=5):
"""
Import methylation data from all files in the list methylation_files
Data can be either frequency or raw.
data is extracted within the window args.window
Frequencies are smoothened using a sliding window
"""
return [read_meth(f, n, window, smoothen) for f, n in zip(
methylation_files, names)]
<|reserved_special_token_1|>
import pandas as pd
import pyranges as pr
import numpy as np
import sys
import logging
from methplotlib.utils import file_sniffer
import pysam
class Methylation(object):
def __init__(self, table, data_type, name, called_sites):
self.table = table
self.data_type = data_type
self.name = name
self.called_sites = called_sites
def read_meth(filename, name, window, smoothen=5):
"""
converts a file from nanopolish to a pandas dataframe
input can be from calculate_methylation_frequency
which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'
smoothening the result by a rolling average
input can also be raw data per read, optionally phased
which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'
"""
file_type = file_sniffer(filename)
logging.info("File is of type {}".format(file_type))
try:
if file_type.startswith("nanopolish"):
return parse_nanopolish(filename, file_type, name, window, smoothen=smoothen)
elif file_type == "nanocompore":
return parse_nanocompore(filename, name, window)
elif file_type == "ont-cram":
return parse_ont_cram(filename, name, window)
except Exception:
sys.stderr.write("\n\n\nInput file {} not recognized!\n".format(filename))
sys.stderr.write("\n\n\nDetailed error:\n")
raise
def parse_nanopolish(filename, file_type, name, window, smoothen=5):
table = pd.read_csv(filename, sep="\t")
gr = pr.PyRanges(table.rename(columns={"start": "Start", "chromosome": "Chromosome",
"end": "End", "Strand": "strand"}))
logging.info("Read the file in a dataframe.")
if window:
gr = gr[window.chromosome, window.begin:window.end]
try:
gr.pos = np.floor(gr.drop().df[["Start", "End"]].mean(axis=1))
except KeyError:
sys.stderr.write("\n\n\nProblem parsing nanopolish file {}!\n".format(filename))
sys.stderr.write("Could it be that there are no calls in your selected window?\n")
sys.stderr.write("\n\n\nDetailed error:\n")
raise
table = gr.df
if file_type in ['nanopolish_call', 'nanopolish_phased']:
table = table.drop(columns=['Start', 'End', 'log_lik_methylated',
'log_lik_unmethylated', 'num_calling_strands',
'num_motifs', 'sequence'])
return Methylation(
table=table.sort_values(['read_name', 'pos']),
data_type=file_type,
name=name,
called_sites=len(table))
if file_type == "nanopolish_freq":
called_sites = table.called_sites
table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',
'called_sites', 'called_sites_methylated',
'group_sequence'])
return Methylation(
table=table.sort_values('pos')
.groupby('pos')
.mean()
.rolling(window=smoothen, center=True)
.mean(),
data_type=file_type,
name=name,
called_sites=called_sites.sum())
def parse_nanocompore(filename, name, window):
def nanocompore_columns_of_interest(column):
if column in ['pos', 'ref_id']:
return True
elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):
return True
else:
return False
table = pd.read_csv(filename, sep="\t", usecols=nanocompore_columns_of_interest)
if window:
table = table[table["ref_id"] == window.chromosome]
return Methylation(
table=table.sort_values('pos')
.append({'pos': window.end}, ignore_index=True)
.drop(columns="ref_id")
.fillna(1.0),
data_type='nanocompore',
name=name,
called_sites=len(table))
def parse_ont_cram(filename, name, window):
cram = pysam.AlignmentFile(filename, "rc")
data = []
for read in cram.fetch(reference=window.chromosome, start=window.begin, end=window.end):
if not read.is_supplementary and not read.is_secondary:
mod, positions, quals = get_modified_reference_positions(read)
for pos, qual in zip(positions, quals):
if pos is not None:
data.append((read.query_name,
'-' if read.is_reverse else '+',
pos,
qual,
mod))
return Methylation(
table=pd.DataFrame(data, columns=['read_name', 'strand', 'pos', 'quality', 'mod'])
.astype(dtype={'mod': 'category', 'quality': 'float'})
.sort_values(['read_name', 'pos']),
data_type="ont-cram",
name=name,
called_sites=len(data))
def get_modified_reference_positions(read):
if read.has_tag('MM'):
basemod = read.get_tag('MM').split(',', 1)[0]
if '-' in basemod:
sys.exit("ERROR: modifications on negative strand currently unsupported.")
base, mod = basemod.split('+')
deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]
probabilities = phred_to_probability(read.get_tag('MP'))
locations = np.cumsum(deltas) + np.concatenate(
(np.zeros(shape=1),
np.ones(shape=len(deltas) - 1))).astype('int')
base_index = np.array(
[i for i, letter in enumerate(read.get_forward_sequence()) if letter == base]
)
modified_bases = base_index[locations]
refpos = np.array(read.get_reference_positions(full_length=True))
if read.is_reverse:
refpos = np.flipud(refpos)
probabilities = probabilities[::-1]
return (basemod, refpos[modified_bases], probabilities)
else:
return (None, [None], [None])
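# Small illustration of the index arithmetic above, using made-up deltas:
#   deltas    = [2, 1, 3]  ->  np.cumsum(deltas) == [2, 3, 6]
#   offsets   = [0, 1, 1]      (zeros(1) followed by ones(len(deltas) - 1))
#   locations = [2, 4, 7]      -> base_index[[2, 4, 7]], i.e. the occurrences
#                                 of `base` at those 0-based ranks are taken
#                                 as the modified ones.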
def errs_tab(n):
"""Generate list of error rates for qualities less than equal than n."""
return [10**(q / -10) for q in range(n + 1)]
def phred_to_probability(quals, tab=errs_tab(128)):
return [tab[ord(q) - 33] for q in quals]
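# Sanity check of the Phred conversion above (ASCII offset 33):
#   '!' -> Q0  -> error probability 1.0
#   '+' -> Q10 -> 0.1
#   'I' -> Q40 -> 0.0001
# errs_tab(128) just precomputes 10 ** (q / -10) for q = 0..128 so the
# per-character lookup avoids recomputing powers on every call.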
def get_data(methylation_files, names, window, smoothen=5):
"""
Import methylation data from all files in the list methylation_files
Data can be either frequency or raw.
data is extracted within the window args.window
Frequencies are smoothened using a sliding window
"""
return [read_meth(f, n, window, smoothen) for f, n in zip(methylation_files, names)]
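# Hedged usage sketch (not part of the original module; the file name and the
# Window namedtuple below are placeholders). The window object only needs
# .chromosome, .begin and .end attributes, as used by the parsers above.
#
#   from collections import namedtuple
#   Window = namedtuple('Window', ['chromosome', 'begin', 'end'])
#   meth = get_data(['sample1_freq.tsv'], ['sample1'],
#                   Window('chr20', 58839718, 58911192), smoothen=5)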
|
flexible
|
{
"blob_id": "d654aea3da3e36ccde8a5f4e03798a0dea5aad8a",
"index": 510,
"step-1": "<mask token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\n<mask token>\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\n<mask token>\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\n 'ERROR: modifications on negative strand currently unsupported.'\n )\n base, mod = basemod.split('+')\n deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array([i for i, letter in enumerate(read.\n get_forward_sequence()) if letter == base])\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return basemod, refpos[modified_bases], probabilities\n else:\n return None, [None], [None]\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\n<mask token>\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\ndef parse_nanocompore(filename, name, window):\n\n def nanocompore_columns_of_interest(column):\n if column in ['pos', 'ref_id']:\n return True\n elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):\n return True\n else:\n return False\n table = pd.read_csv(filename, sep='\\t', usecols=\n nanocompore_columns_of_interest)\n if window:\n table = table[table['ref_id'] == window.chromosome]\n return Methylation(table=table.sort_values('pos').append({'pos': window\n .end}, ignore_index=True).drop(columns='ref_id').fillna(1.0),\n data_type='nanocompore', name=name, called_sites=len(table))\n\n\ndef parse_ont_cram(filename, name, window):\n cram = pysam.AlignmentFile(filename, 'rc')\n data = []\n for read in cram.fetch(reference=window.chromosome, start=window.begin,\n end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, quals = get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name, '-' if read.is_reverse else\n '+', pos, qual, mod))\n return Methylation(table=pd.DataFrame(data, columns=['read_name',\n 'strand', 'pos', 'quality', 'mod']).astype(dtype={'mod': 'category',\n 'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=\n 'ont-cram', name=name, called_sites=len(data))\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\n 'ERROR: modifications on negative strand currently unsupported.'\n )\n base, mod = basemod.split('+')\n deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array([i for i, letter in 
enumerate(read.\n get_forward_sequence()) if letter == base])\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return basemod, refpos[modified_bases], probabilities\n else:\n return None, [None], [None]\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\n<mask token>\n\n\ndef get_data(methylation_files, names, window, smoothen=5):\n \"\"\"\n Import methylation data from all files in the list methylation_files\n\n Data can be either frequency or raw.\n\n data is extracted within the window args.window\n Frequencies are smoothened using a sliding window\n \"\"\"\n return [read_meth(f, n, window, smoothen) for f, n in zip(\n methylation_files, names)]\n",
"step-3": "<mask token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\ndef read_meth(filename, name, window, smoothen=5):\n \"\"\"\n converts a file from nanopolish to a pandas dataframe\n input can be from calculate_methylation_frequency\n which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'\n smoothening the result by a rolling average\n\n input can also be raw data per read, optionally phased\n which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'\n \"\"\"\n file_type = file_sniffer(filename)\n logging.info('File is of type {}'.format(file_type))\n try:\n if file_type.startswith('nanopolish'):\n return parse_nanopolish(filename, file_type, name, window,\n smoothen=smoothen)\n elif file_type == 'nanocompore':\n return parse_nanocompore(filename, name, window)\n elif file_type == 'ont-cram':\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write('\\n\\n\\nInput file {} not recognized!\\n'.format(\n filename))\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\ndef parse_nanocompore(filename, name, window):\n\n def nanocompore_columns_of_interest(column):\n if column in ['pos', 'ref_id']:\n return True\n elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):\n return True\n else:\n return False\n table = pd.read_csv(filename, sep='\\t', usecols=\n nanocompore_columns_of_interest)\n if window:\n table = table[table['ref_id'] == window.chromosome]\n return Methylation(table=table.sort_values('pos').append({'pos': window\n .end}, ignore_index=True).drop(columns='ref_id').fillna(1.0),\n data_type='nanocompore', name=name, called_sites=len(table))\n\n\ndef parse_ont_cram(filename, name, window):\n cram = pysam.AlignmentFile(filename, 'rc')\n data = []\n for read in cram.fetch(reference=window.chromosome, start=window.begin,\n end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, 
quals = get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name, '-' if read.is_reverse else\n '+', pos, qual, mod))\n return Methylation(table=pd.DataFrame(data, columns=['read_name',\n 'strand', 'pos', 'quality', 'mod']).astype(dtype={'mod': 'category',\n 'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=\n 'ont-cram', name=name, called_sites=len(data))\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\n 'ERROR: modifications on negative strand currently unsupported.'\n )\n base, mod = basemod.split('+')\n deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array([i for i, letter in enumerate(read.\n get_forward_sequence()) if letter == base])\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return basemod, refpos[modified_bases], probabilities\n else:\n return None, [None], [None]\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\ndef phred_to_probability(quals, tab=errs_tab(128)):\n return [tab[ord(q) - 33] for q in quals]\n\n\ndef get_data(methylation_files, names, window, smoothen=5):\n \"\"\"\n Import methylation data from all files in the list methylation_files\n\n Data can be either frequency or raw.\n\n data is extracted within the window args.window\n Frequencies are smoothened using a sliding window\n \"\"\"\n return [read_meth(f, n, window, smoothen) for f, n in zip(\n methylation_files, names)]\n",
"step-4": "import pandas as pd\nimport pyranges as pr\nimport numpy as np\nimport sys\nimport logging\nfrom methplotlib.utils import file_sniffer\nimport pysam\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\ndef read_meth(filename, name, window, smoothen=5):\n \"\"\"\n converts a file from nanopolish to a pandas dataframe\n input can be from calculate_methylation_frequency\n which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'\n smoothening the result by a rolling average\n\n input can also be raw data per read, optionally phased\n which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'\n \"\"\"\n file_type = file_sniffer(filename)\n logging.info('File is of type {}'.format(file_type))\n try:\n if file_type.startswith('nanopolish'):\n return parse_nanopolish(filename, file_type, name, window,\n smoothen=smoothen)\n elif file_type == 'nanocompore':\n return parse_nanocompore(filename, name, window)\n elif file_type == 'ont-cram':\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write('\\n\\n\\nInput file {} not recognized!\\n'.format(\n filename))\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\ndef parse_nanocompore(filename, name, window):\n\n def nanocompore_columns_of_interest(column):\n if column in ['pos', 'ref_id']:\n return True\n elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):\n return True\n else:\n return False\n table = pd.read_csv(filename, sep='\\t', usecols=\n nanocompore_columns_of_interest)\n if window:\n table = table[table['ref_id'] == window.chromosome]\n return Methylation(table=table.sort_values('pos').append({'pos': window\n .end}, ignore_index=True).drop(columns='ref_id').fillna(1.0),\n data_type='nanocompore', name=name, called_sites=len(table))\n\n\ndef parse_ont_cram(filename, name, window):\n cram = pysam.AlignmentFile(filename, 'rc')\n data = []\n for read in 
cram.fetch(reference=window.chromosome, start=window.begin,\n end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, quals = get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name, '-' if read.is_reverse else\n '+', pos, qual, mod))\n return Methylation(table=pd.DataFrame(data, columns=['read_name',\n 'strand', 'pos', 'quality', 'mod']).astype(dtype={'mod': 'category',\n 'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=\n 'ont-cram', name=name, called_sites=len(data))\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\n 'ERROR: modifications on negative strand currently unsupported.'\n )\n base, mod = basemod.split('+')\n deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array([i for i, letter in enumerate(read.\n get_forward_sequence()) if letter == base])\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return basemod, refpos[modified_bases], probabilities\n else:\n return None, [None], [None]\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\ndef phred_to_probability(quals, tab=errs_tab(128)):\n return [tab[ord(q) - 33] for q in quals]\n\n\ndef get_data(methylation_files, names, window, smoothen=5):\n \"\"\"\n Import methylation data from all files in the list methylation_files\n\n Data can be either frequency or raw.\n\n data is extracted within the window args.window\n Frequencies are smoothened using a sliding window\n \"\"\"\n return [read_meth(f, n, window, smoothen) for f, n in zip(\n methylation_files, names)]\n",
"step-5": "import pandas as pd\nimport pyranges as pr\nimport numpy as np\nimport sys\nimport logging\nfrom methplotlib.utils import file_sniffer\nimport pysam\n\n\nclass Methylation(object):\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\ndef read_meth(filename, name, window, smoothen=5):\n \"\"\"\n converts a file from nanopolish to a pandas dataframe\n input can be from calculate_methylation_frequency\n which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'\n smoothening the result by a rolling average\n\n input can also be raw data per read, optionally phased\n which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'\n \"\"\"\n file_type = file_sniffer(filename)\n logging.info(\"File is of type {}\".format(file_type))\n try:\n if file_type.startswith(\"nanopolish\"):\n return parse_nanopolish(filename, file_type, name, window, smoothen=smoothen)\n elif file_type == \"nanocompore\":\n return parse_nanocompore(filename, name, window)\n elif file_type == \"ont-cram\":\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write(\"\\n\\n\\nInput file {} not recognized!\\n\".format(filename))\n sys.stderr.write(\"\\n\\n\\nDetailed error:\\n\")\n raise\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep=\"\\t\")\n gr = pr.PyRanges(table.rename(columns={\"start\": \"Start\", \"chromosome\": \"Chromosome\",\n \"end\": \"End\", \"Strand\": \"strand\"}))\n logging.info(\"Read the file in a dataframe.\")\n\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[[\"Start\", \"End\"]].mean(axis=1))\n except KeyError:\n sys.stderr.write(\"\\n\\n\\nProblem parsing nanopolish file {}!\\n\".format(filename))\n sys.stderr.write(\"Could it be that there are no calls in your selected window?\\n\")\n sys.stderr.write(\"\\n\\n\\nDetailed error:\\n\")\n raise\n\n table = gr.df\n\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands',\n 'num_motifs', 'sequence'])\n return Methylation(\n table=table.sort_values(['read_name', 'pos']),\n data_type=file_type,\n name=name,\n called_sites=len(table))\n if file_type == \"nanopolish_freq\":\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated',\n 'group_sequence'])\n return Methylation(\n table=table.sort_values('pos')\n .groupby('pos')\n .mean()\n .rolling(window=smoothen, center=True)\n .mean(),\n data_type=file_type,\n name=name,\n called_sites=called_sites.sum())\n\n\ndef parse_nanocompore(filename, name, window):\n def nanocompore_columns_of_interest(column):\n if column in ['pos', 'ref_id']:\n return True\n elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):\n return True\n else:\n return False\n table = pd.read_csv(filename, sep=\"\\t\", usecols=nanocompore_columns_of_interest)\n if window:\n table = table[table[\"ref_id\"] == window.chromosome]\n return Methylation(\n table=table.sort_values('pos')\n .append({'pos': window.end}, ignore_index=True)\n .drop(columns=\"ref_id\")\n .fillna(1.0),\n data_type='nanocompore',\n name=name,\n called_sites=len(table))\n\n\ndef parse_ont_cram(filename, name, window):\n cram = 
pysam.AlignmentFile(filename, \"rc\")\n data = []\n for read in cram.fetch(reference=window.chromosome, start=window.begin, end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, quals = get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name,\n '-' if read.is_reverse else '+',\n pos,\n qual,\n mod))\n return Methylation(\n table=pd.DataFrame(data, columns=['read_name', 'strand', 'pos', 'quality', 'mod'])\n .astype(dtype={'mod': 'category', 'quality': 'float'})\n .sort_values(['read_name', 'pos']),\n data_type=\"ont-cram\",\n name=name,\n called_sites=len(data))\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\"ERROR: modifications on negative strand currently unsupported.\")\n base, mod = basemod.split('+')\n deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate(\n (np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array(\n [i for i, letter in enumerate(read.get_forward_sequence()) if letter == base]\n )\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return (basemod, refpos[modified_bases], probabilities)\n else:\n return (None, [None], [None])\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [10**(q / -10) for q in range(n + 1)]\n\n\ndef phred_to_probability(quals, tab=errs_tab(128)):\n return [tab[ord(q) - 33] for q in quals]\n\n\ndef get_data(methylation_files, names, window, smoothen=5):\n \"\"\"\n Import methylation data from all files in the list methylation_files\n\n Data can be either frequency or raw.\n\n data is extracted within the window args.window\n Frequencies are smoothened using a sliding window\n \"\"\"\n return [read_meth(f, n, window, smoothen) for f, n in zip(methylation_files, names)]\n",
"step-ids": [
5,
8,
10,
11,
12
]
}
|
[
5,
8,
10,
11,
12
] |
# Title: K번째 수 (Kth number)
# Link: https://www.acmicpc.net/problem/11004
import sys
sys.setrecursionlimit(10 ** 6)
def read_list_int():
return list(map(int, sys.stdin.readline().strip().split(' ')))
def read_single_int():
return int(sys.stdin.readline().strip())
def selection_sort(nums, k):
sorted_index = 0
while True:
minimum = 9999999999
min_index = 0
for i, n in enumerate(nums[sorted_index:], sorted_index):
if n < minimum:
minimum = n
min_index = i
k -= 1
if k == 0:
return minimum
nums[sorted_index], nums[min_index] = nums[min_index], nums[sorted_index]
sorted_index += 1
def partition(nums, left, right, pivot_index):
pivot_value = nums[pivot_index]
nums[pivot_index], nums[right] = nums[right], nums[pivot_index]
store_index = left
for i in range(left, right):
if nums[i] < pivot_value:
nums[store_index], nums[i] = nums[i], nums[store_index]
store_index += 1
nums[right], nums[store_index] = nums[store_index], nums[right]
return store_index
def quick_select(nums, left, right, k):
while True:
if left == right:
return nums[left]
pivot_index = right
pivot_index = partition(nums, left, right, pivot_index)
if k-1 == pivot_index:
return nums[k-1]
elif k-1 < pivot_index:
right = pivot_index - 1
else:
left = pivot_index + 1
def get_kth_number(nums, k):
    # selection_sort(nums, k) was too slow (TLE on the judge), so the
    # iterative quickselect below is used instead.
return quick_select(nums, 0, len(nums)-1, k)
if __name__ == '__main__':
N, K = read_list_int()
A = read_list_int()
print(get_kth_number(A, K))
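# Worked example (hypothetical input, not from the judge):
#   A = [4, 1, 2, 3, 5], K = 2
#   quick_select partitions around the last element until the pivot lands at
#   index K - 1 == 1 and returns 2, the 2nd smallest value.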
|
normal
|
{
"blob_id": "f4ab6df8efc334fa338ade7deecd36d8cd859e96",
"index": 4174,
"step-1": "<mask token>\n\n\ndef read_list_int():\n return list(map(int, sys.stdin.readline().strip().split(' ')))\n\n\n<mask token>\n\n\ndef selection_sort(nums, k):\n sorted_index = 0\n while True:\n minimum = 9999999999\n min_index = 0\n for i, n in enumerate(nums[sorted_index:], sorted_index):\n if n < minimum:\n minimum = n\n min_index = i\n k -= 1\n if k == 0:\n return minimum\n nums[sorted_index], nums[min_index] = nums[min_index], nums[\n sorted_index]\n sorted_index += 1\n\n\ndef partition(nums, left, right, pivot_index):\n pivot_value = nums[pivot_index]\n nums[pivot_index], nums[right] = nums[right], nums[pivot_index]\n store_index = left\n for i in range(left, right):\n if nums[i] < pivot_value:\n nums[store_index], nums[i] = nums[i], nums[store_index]\n store_index += 1\n nums[right], nums[store_index] = nums[store_index], nums[right]\n return store_index\n\n\ndef quick_select(nums, left, right, k):\n while True:\n if left == right:\n return nums[left]\n pivot_index = right\n pivot_index = partition(nums, left, right, pivot_index)\n if k - 1 == pivot_index:\n return nums[k - 1]\n elif k - 1 < pivot_index:\n right = pivot_index - 1\n else:\n left = pivot_index + 1\n\n\ndef get_kth_number(nums, k):\n return quick_select(nums, 0, len(nums) - 1, k)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_list_int():\n return list(map(int, sys.stdin.readline().strip().split(' ')))\n\n\ndef read_single_int():\n return int(sys.stdin.readline().strip())\n\n\ndef selection_sort(nums, k):\n sorted_index = 0\n while True:\n minimum = 9999999999\n min_index = 0\n for i, n in enumerate(nums[sorted_index:], sorted_index):\n if n < minimum:\n minimum = n\n min_index = i\n k -= 1\n if k == 0:\n return minimum\n nums[sorted_index], nums[min_index] = nums[min_index], nums[\n sorted_index]\n sorted_index += 1\n\n\ndef partition(nums, left, right, pivot_index):\n pivot_value = nums[pivot_index]\n nums[pivot_index], nums[right] = nums[right], nums[pivot_index]\n store_index = left\n for i in range(left, right):\n if nums[i] < pivot_value:\n nums[store_index], nums[i] = nums[i], nums[store_index]\n store_index += 1\n nums[right], nums[store_index] = nums[store_index], nums[right]\n return store_index\n\n\ndef quick_select(nums, left, right, k):\n while True:\n if left == right:\n return nums[left]\n pivot_index = right\n pivot_index = partition(nums, left, right, pivot_index)\n if k - 1 == pivot_index:\n return nums[k - 1]\n elif k - 1 < pivot_index:\n right = pivot_index - 1\n else:\n left = pivot_index + 1\n\n\ndef get_kth_number(nums, k):\n return quick_select(nums, 0, len(nums) - 1, k)\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.setrecursionlimit(10 ** 6)\n\n\ndef read_list_int():\n return list(map(int, sys.stdin.readline().strip().split(' ')))\n\n\ndef read_single_int():\n return int(sys.stdin.readline().strip())\n\n\ndef selection_sort(nums, k):\n sorted_index = 0\n while True:\n minimum = 9999999999\n min_index = 0\n for i, n in enumerate(nums[sorted_index:], sorted_index):\n if n < minimum:\n minimum = n\n min_index = i\n k -= 1\n if k == 0:\n return minimum\n nums[sorted_index], nums[min_index] = nums[min_index], nums[\n sorted_index]\n sorted_index += 1\n\n\ndef partition(nums, left, right, pivot_index):\n pivot_value = nums[pivot_index]\n nums[pivot_index], nums[right] = nums[right], nums[pivot_index]\n store_index = left\n for i in range(left, right):\n if nums[i] < pivot_value:\n nums[store_index], nums[i] = nums[i], nums[store_index]\n store_index += 1\n nums[right], nums[store_index] = nums[store_index], nums[right]\n return store_index\n\n\ndef quick_select(nums, left, right, k):\n while True:\n if left == right:\n return nums[left]\n pivot_index = right\n pivot_index = partition(nums, left, right, pivot_index)\n if k - 1 == pivot_index:\n return nums[k - 1]\n elif k - 1 < pivot_index:\n right = pivot_index - 1\n else:\n left = pivot_index + 1\n\n\ndef get_kth_number(nums, k):\n return quick_select(nums, 0, len(nums) - 1, k)\n\n\nif __name__ == '__main__':\n N, K = read_list_int()\n A = read_list_int()\n print(get_kth_number(A, K))\n",
"step-4": "import sys\nsys.setrecursionlimit(10 ** 6)\n\n\ndef read_list_int():\n return list(map(int, sys.stdin.readline().strip().split(' ')))\n\n\ndef read_single_int():\n return int(sys.stdin.readline().strip())\n\n\ndef selection_sort(nums, k):\n sorted_index = 0\n while True:\n minimum = 9999999999\n min_index = 0\n for i, n in enumerate(nums[sorted_index:], sorted_index):\n if n < minimum:\n minimum = n\n min_index = i\n k -= 1\n if k == 0:\n return minimum\n nums[sorted_index], nums[min_index] = nums[min_index], nums[\n sorted_index]\n sorted_index += 1\n\n\ndef partition(nums, left, right, pivot_index):\n pivot_value = nums[pivot_index]\n nums[pivot_index], nums[right] = nums[right], nums[pivot_index]\n store_index = left\n for i in range(left, right):\n if nums[i] < pivot_value:\n nums[store_index], nums[i] = nums[i], nums[store_index]\n store_index += 1\n nums[right], nums[store_index] = nums[store_index], nums[right]\n return store_index\n\n\ndef quick_select(nums, left, right, k):\n while True:\n if left == right:\n return nums[left]\n pivot_index = right\n pivot_index = partition(nums, left, right, pivot_index)\n if k - 1 == pivot_index:\n return nums[k - 1]\n elif k - 1 < pivot_index:\n right = pivot_index - 1\n else:\n left = pivot_index + 1\n\n\ndef get_kth_number(nums, k):\n return quick_select(nums, 0, len(nums) - 1, k)\n\n\nif __name__ == '__main__':\n N, K = read_list_int()\n A = read_list_int()\n print(get_kth_number(A, K))\n",
"step-5": "# Title: K번째 수\r\n# Link: https://www.acmicpc.net/problem/11004\r\n\r\nimport sys\r\n\r\nsys.setrecursionlimit(10 ** 6)\r\n\r\n\r\ndef read_list_int():\r\n return list(map(int, sys.stdin.readline().strip().split(' ')))\r\n\r\n\r\ndef read_single_int():\r\n return int(sys.stdin.readline().strip())\r\n\r\n\r\ndef selection_sort(nums, k):\r\n sorted_index = 0\r\n while True:\r\n minimum = 9999999999\r\n min_index = 0\r\n for i, n in enumerate(nums[sorted_index:], sorted_index):\r\n if n < minimum:\r\n minimum = n\r\n min_index = i\r\n k -= 1\r\n if k == 0:\r\n return minimum\r\n nums[sorted_index], nums[min_index] = nums[min_index], nums[sorted_index]\r\n sorted_index += 1\r\n\r\n\r\ndef partition(nums, left, right, pivot_index):\r\n pivot_value = nums[pivot_index]\r\n nums[pivot_index], nums[right] = nums[right], nums[pivot_index]\r\n store_index = left\r\n for i in range(left, right):\r\n if nums[i] < pivot_value:\r\n nums[store_index], nums[i] = nums[i], nums[store_index]\r\n store_index += 1\r\n nums[right], nums[store_index] = nums[store_index], nums[right]\r\n return store_index\r\n\r\n\r\ndef quick_select(nums, left, right, k):\r\n while True:\r\n if left == right:\r\n return nums[left]\r\n pivot_index = right\r\n pivot_index = partition(nums, left, right, pivot_index)\r\n if k-1 == pivot_index:\r\n return nums[k-1]\r\n elif k-1 < pivot_index:\r\n right = pivot_index - 1\r\n else:\r\n left = pivot_index + 1\r\n\r\n\r\ndef get_kth_number(nums, k):\r\n # TLE\r\n # selection_sort(nums, k)\r\n return quick_select(nums, 0, len(nums)-1, k)\r\n\r\n\r\nif __name__ == '__main__':\r\n N, K = read_list_int()\r\n A = read_list_int()\r\n print(get_kth_number(A, K))\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
"""Produce a multi-panel figure of each output lead time in a forecast
"""
import matplotlib.pyplot as plt
import iris.plot as iplt
from irise import convert
from irise.plot.util import add_map
from myscripts import plotdir
from myscripts.models.um import case_studies
columns = 3
def main(forecast, name, levels, *args, **kwargs):
nt = len(forecast)
    rows = nt // columns + 1  # integer division: subplot2grid needs an int grid shape
fig = plt.figure(figsize=(18, 10 * float(rows) / columns))
for n, cubes in enumerate(forecast):
        row = n // columns  # integer division for the grid row index
column = n - row * columns
print(row, column)
ax = plt.subplot2grid((rows, columns), (row, column))
cube = convert.calc(name, cubes, levels=levels)[0]
im = iplt.pcolormesh(cube, *args, **kwargs)
add_map()
ax = plt.subplot2grid((rows, columns), (row, column + 1))
cbar = plt.colorbar(im, cax=ax, orientation='horizontal')
plt.savefig(plotdir + name + '_' + str(levels[0]) +
'_' + str(levels[1][0]) + '.png')
return
if __name__ == '__main__':
forecast = case_studies.generate_season_forecast(2013, 11, 1)
name = 'ertel_potential_vorticity'
levels = ('air_potential_temperature', [320])
main(forecast, name, levels, vmin=0, vmax=10, cmap='cubehelix_r')
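# Layout sketch with assumed values: a forecast of nt = 13 lead times and
# columns = 3 gives rows = 13 // 3 + 1 = 5; panel n = 7 is drawn at grid
# position (row, column) = (2, 1) and its colorbar axis at (2, 2).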
|
normal
|
{
"blob_id": "310e6e693cdce6ff71d06eac86214a21bef236d4",
"index": 7425,
"step-1": "<mask token>\n\n\ndef main(forecast, name, levels, *args, **kwargs):\n nt = len(forecast)\n rows = nt / columns + 1\n fig = plt.figure(figsize=(18, 10 * float(rows) / columns))\n for n, cubes in enumerate(forecast):\n row = n / columns\n column = n - row * columns\n print(row, column)\n ax = plt.subplot2grid((rows, columns), (row, column))\n cube = convert.calc(name, cubes, levels=levels)[0]\n im = iplt.pcolormesh(cube, *args, **kwargs)\n add_map()\n ax = plt.subplot2grid((rows, columns), (row, column + 1))\n cbar = plt.colorbar(im, cax=ax, orientation='horizontal')\n plt.savefig(plotdir + name + '_' + str(levels[0]) + '_' + str(levels[1]\n [0]) + '.png')\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(forecast, name, levels, *args, **kwargs):\n nt = len(forecast)\n rows = nt / columns + 1\n fig = plt.figure(figsize=(18, 10 * float(rows) / columns))\n for n, cubes in enumerate(forecast):\n row = n / columns\n column = n - row * columns\n print(row, column)\n ax = plt.subplot2grid((rows, columns), (row, column))\n cube = convert.calc(name, cubes, levels=levels)[0]\n im = iplt.pcolormesh(cube, *args, **kwargs)\n add_map()\n ax = plt.subplot2grid((rows, columns), (row, column + 1))\n cbar = plt.colorbar(im, cax=ax, orientation='horizontal')\n plt.savefig(plotdir + name + '_' + str(levels[0]) + '_' + str(levels[1]\n [0]) + '.png')\n return\n\n\nif __name__ == '__main__':\n forecast = case_studies.generate_season_forecast(2013, 11, 1)\n name = 'ertel_potential_vorticity'\n levels = 'air_potential_temperature', [320]\n main(forecast, name, levels, vmin=0, vmax=10, cmap='cubehelix_r')\n",
"step-3": "<mask token>\ncolumns = 3\n\n\ndef main(forecast, name, levels, *args, **kwargs):\n nt = len(forecast)\n rows = nt / columns + 1\n fig = plt.figure(figsize=(18, 10 * float(rows) / columns))\n for n, cubes in enumerate(forecast):\n row = n / columns\n column = n - row * columns\n print(row, column)\n ax = plt.subplot2grid((rows, columns), (row, column))\n cube = convert.calc(name, cubes, levels=levels)[0]\n im = iplt.pcolormesh(cube, *args, **kwargs)\n add_map()\n ax = plt.subplot2grid((rows, columns), (row, column + 1))\n cbar = plt.colorbar(im, cax=ax, orientation='horizontal')\n plt.savefig(plotdir + name + '_' + str(levels[0]) + '_' + str(levels[1]\n [0]) + '.png')\n return\n\n\nif __name__ == '__main__':\n forecast = case_studies.generate_season_forecast(2013, 11, 1)\n name = 'ertel_potential_vorticity'\n levels = 'air_potential_temperature', [320]\n main(forecast, name, levels, vmin=0, vmax=10, cmap='cubehelix_r')\n",
"step-4": "<mask token>\nimport matplotlib.pyplot as plt\nimport iris.plot as iplt\nfrom irise import convert\nfrom irise.plot.util import add_map\nfrom myscripts import plotdir\nfrom myscripts.models.um import case_studies\ncolumns = 3\n\n\ndef main(forecast, name, levels, *args, **kwargs):\n nt = len(forecast)\n rows = nt / columns + 1\n fig = plt.figure(figsize=(18, 10 * float(rows) / columns))\n for n, cubes in enumerate(forecast):\n row = n / columns\n column = n - row * columns\n print(row, column)\n ax = plt.subplot2grid((rows, columns), (row, column))\n cube = convert.calc(name, cubes, levels=levels)[0]\n im = iplt.pcolormesh(cube, *args, **kwargs)\n add_map()\n ax = plt.subplot2grid((rows, columns), (row, column + 1))\n cbar = plt.colorbar(im, cax=ax, orientation='horizontal')\n plt.savefig(plotdir + name + '_' + str(levels[0]) + '_' + str(levels[1]\n [0]) + '.png')\n return\n\n\nif __name__ == '__main__':\n forecast = case_studies.generate_season_forecast(2013, 11, 1)\n name = 'ertel_potential_vorticity'\n levels = 'air_potential_temperature', [320]\n main(forecast, name, levels, vmin=0, vmax=10, cmap='cubehelix_r')\n",
"step-5": "\"\"\"Produce a multi-panel figure of each output lead time in a forecast\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport iris.plot as iplt\nfrom irise import convert\nfrom irise.plot.util import add_map\nfrom myscripts import plotdir\nfrom myscripts.models.um import case_studies\n\ncolumns = 3\n\n\ndef main(forecast, name, levels, *args, **kwargs):\n nt = len(forecast)\n rows = (nt / columns) + 1\n fig = plt.figure(figsize=(18, 10 * float(rows) / columns))\n for n, cubes in enumerate(forecast):\n row = n / columns\n column = n - row * columns\n print(row, column)\n ax = plt.subplot2grid((rows, columns), (row, column))\n\n cube = convert.calc(name, cubes, levels=levels)[0]\n im = iplt.pcolormesh(cube, *args, **kwargs)\n add_map()\n\n ax = plt.subplot2grid((rows, columns), (row, column + 1))\n cbar = plt.colorbar(im, cax=ax, orientation='horizontal')\n plt.savefig(plotdir + name + '_' + str(levels[0]) +\n '_' + str(levels[1][0]) + '.png')\n\n return\n\n\nif __name__ == '__main__':\n forecast = case_studies.generate_season_forecast(2013, 11, 1)\n name = 'ertel_potential_vorticity'\n levels = ('air_potential_temperature', [320])\n main(forecast, name, levels, vmin=0, vmax=10, cmap='cubehelix_r')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os, glob
import numpy as np
from ..algorithms.utils import get_file_manager
from ..algorithms.clustered_writes import *
from ..exp_utils import create_empty_dir
def test_get_entity_sizes():
# in C order
bytes_per_voxel = 1
R = (10,9,10)
cs = (5,3,2)
partition = (2,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
assert bs == 5*3*2
assert brs == 5*3*2*5
assert bss == 5*3*2*5*3
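# In bytes (1 byte per voxel): one block of shape (5, 3, 2) is 30 B, a block
# row spans partition[2] = 5 blocks (150 B) and a block slice spans
# partition[1] = 3 block rows (450 B), matching the three asserts above.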
def test_get_strategy():
# in C order
bytes_per_voxel = 1
R = (20,9,10)
cs = (5,3,2)
partition = (4,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
test_case = {
5*2*3: 0, # 1 block
5*2*3*4: 0, # 4 blocks
5*2*3*5: 1, # 1 row
5*2*3*5*2: 1, # 2 rows
5*2*3*5*3: 2, # 1 slice
5*2*3*5*3*3: 2, # 3 slices
5*2*3*5*3*4: 2, # whole img
5*2*3*5*3*7: 2, # whole img (more mem than necessary)
}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
assert strategy == expected
def test_compute_buffers():
# in C order
bytes_per_voxel = 1
R = (20,9,10)
cs = (5,3,2)
partition = (4,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
origarr_size = R[0]*R[1]*R[2]*bytes_per_voxel
test_case = {
5*2*3: 4*3*5, # 1 block
5*2*3*4: 4*3*2, # 4 blocks
5*2*3*5: 4*3, # 1 row
5*2*3*5*2: 4*2, # 2 rows
5*2*3*5*3: 4, # 1 slice
5*2*3*5*3*3: 2, # 3 slices
5*2*3*5*3*4: 1, # whole img
5*2*3*5*3*7: 1, # whole img (more mem than necessary)
}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
buffers = compute_buffers(buffer_mem_size, strategy, origarr_size, cs, bs, brs, bss, partition, R, bytes_per_voxel)
# test number of buffers
nb_buffers = len(buffers.values())
assert nb_buffers == expected
def test_clustered_writes():
bpv = 1
R = (20,9,10)
cs = (5,3,2)
ff = 'HDF5'
outdir_path = './outdir'
test_case = [
5*3*2, # 1 block
5*3*2*4, # 4 blocks
5*3*2*5, # 1 row
5*3*2*5*2, # 2 rows
5*3*2*5*3, # 1 slice
5*3*2*5*3*3, # 3 slices
5*3*2*5*3*4, # whole img
5*3*2*5*3*7, # whole img (more mem than necessary)
]
nb_chunks = 4*3*5
# create input array
origarr_filepath = './original_array.hdf5'
data = np.random.normal(size=R)
fm = get_file_manager(ff)
if os.path.isfile(origarr_filepath):
os.remove(origarr_filepath)
fm.write(origarr_filepath, data, R, _slices=None)
for m in test_case:
create_empty_dir(outdir_path)
clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)
workdir = os.getcwd()
os.chdir(outdir_path)
filenames = list()
for filename in glob.glob("*.hdf5"):
arr = fm.read_all(filename)
assert arr.shape == cs
filenames.append(filename)
assert len(filenames) == nb_chunks
os.chdir(workdir)
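

# --- Illustrative sketch (an addition, not part of the test suite) ---
# The asserts in test_get_entity_sizes pin down what get_entity_sizes returns for a
# C-ordered array: bs is the byte size of one block, brs of one row of blocks, bss of
# one slice of block rows. The helper below is a hypothetical re-derivation of those
# values; the real implementation is imported from ..algorithms.clustered_writes and
# may be organised differently.
def _entity_sizes_sketch(cs, bytes_per_voxel, partition):
    bs = cs[0] * cs[1] * cs[2] * bytes_per_voxel  # one block
    brs = bs * partition[2]                       # one row of blocks (last axis)
    bss = brs * partition[1]                      # one slice of block rows
    return bs, brs, bss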
|
normal
|
{
"blob_id": "6dd11f71e514a46462bf0b97ddac9ea474e86ad0",
"index": 366,
"step-1": "<mask token>\n\n\ndef test_get_strategy():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *\n 2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):\n 2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_get_entity_sizes():\n bytes_per_voxel = 1\n R = 10, 9, 10\n cs = 5, 3, 2\n partition = 2, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n assert bs == 5 * 3 * 2\n assert brs == 5 * 3 * 2 * 5\n assert bss == 5 * 3 * 2 * 5 * 3\n\n\ndef test_get_strategy():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *\n 2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):\n 2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel\n test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *\n 3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,\n (5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *\n 5 * 3 * 7): 1}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,\n cs, bs, brs, bss, partition, R, bytes_per_voxel)\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_get_entity_sizes():\n bytes_per_voxel = 1\n R = 10, 9, 10\n cs = 5, 3, 2\n partition = 2, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n assert bs == 5 * 3 * 2\n assert brs == 5 * 3 * 2 * 5\n assert bss == 5 * 3 * 2 * 5 * 3\n\n\ndef test_get_strategy():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *\n 2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):\n 2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel\n test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *\n 3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,\n (5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *\n 5 * 3 * 7): 1}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,\n cs, bs, brs, bss, partition, R, bytes_per_voxel)\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\ndef test_clustered_writes():\n bpv = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n ff = 'HDF5'\n outdir_path = './outdir'\n test_case = [5 * 3 * 2, 5 * 3 * 2 * 4, 5 * 3 * 2 * 5, 5 * 3 * 2 * 5 * 2,\n 5 * 3 * 2 * 5 * 3, 5 * 3 * 2 * 5 * 3 * 3, 5 * 3 * 2 * 5 * 3 * 4, 5 *\n 3 * 2 * 5 * 3 * 7]\n nb_chunks = 4 * 3 * 5\n origarr_filepath = './original_array.hdf5'\n data = np.random.normal(size=R)\n fm = get_file_manager(ff)\n if os.path.isfile(origarr_filepath):\n os.remove(origarr_filepath)\n fm.write(origarr_filepath, data, R, _slices=None)\n for m in test_case:\n create_empty_dir(outdir_path)\n clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)\n workdir = os.getcwd()\n os.chdir(outdir_path)\n filenames = list()\n for filename in glob.glob('*.hdf5'):\n arr = fm.read_all(filename)\n assert arr.shape == cs\n filenames.append(filename)\n assert len(filenames) == nb_chunks\n os.chdir(workdir)\n",
"step-4": "import os, glob\nimport numpy as np\nfrom ..algorithms.utils import get_file_manager\nfrom ..algorithms.clustered_writes import *\nfrom ..exp_utils import create_empty_dir\n\n\ndef test_get_entity_sizes():\n bytes_per_voxel = 1\n R = 10, 9, 10\n cs = 5, 3, 2\n partition = 2, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n assert bs == 5 * 3 * 2\n assert brs == 5 * 3 * 2 * 5\n assert bss == 5 * 3 * 2 * 5 * 3\n\n\ndef test_get_strategy():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *\n 2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):\n 2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel\n test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *\n 3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,\n (5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *\n 5 * 3 * 7): 1}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,\n cs, bs, brs, bss, partition, R, bytes_per_voxel)\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\ndef test_clustered_writes():\n bpv = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n ff = 'HDF5'\n outdir_path = './outdir'\n test_case = [5 * 3 * 2, 5 * 3 * 2 * 4, 5 * 3 * 2 * 5, 5 * 3 * 2 * 5 * 2,\n 5 * 3 * 2 * 5 * 3, 5 * 3 * 2 * 5 * 3 * 3, 5 * 3 * 2 * 5 * 3 * 4, 5 *\n 3 * 2 * 5 * 3 * 7]\n nb_chunks = 4 * 3 * 5\n origarr_filepath = './original_array.hdf5'\n data = np.random.normal(size=R)\n fm = get_file_manager(ff)\n if os.path.isfile(origarr_filepath):\n os.remove(origarr_filepath)\n fm.write(origarr_filepath, data, R, _slices=None)\n for m in test_case:\n create_empty_dir(outdir_path)\n clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)\n workdir = os.getcwd()\n os.chdir(outdir_path)\n filenames = list()\n for filename in glob.glob('*.hdf5'):\n arr = fm.read_all(filename)\n assert arr.shape == cs\n filenames.append(filename)\n assert len(filenames) == nb_chunks\n os.chdir(workdir)\n",
"step-5": "import os, glob\nimport numpy as np\n\nfrom ..algorithms.utils import get_file_manager\nfrom ..algorithms.clustered_writes import *\nfrom ..exp_utils import create_empty_dir\n\n\ndef test_get_entity_sizes():\n # in C order\n bytes_per_voxel = 1\n R = (10,9,10)\n cs = (5,3,2)\n partition = (2,3,5)\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n\n assert bs == 5*3*2\n assert brs == 5*3*2*5\n assert bss == 5*3*2*5*3\n\n\ndef test_get_strategy():\n # in C order\n bytes_per_voxel = 1\n R = (20,9,10)\n cs = (5,3,2)\n partition = (4,3,5)\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n \n test_case = {\n 5*2*3: 0, # 1 block \n 5*2*3*4: 0, # 4 blocks \n 5*2*3*5: 1, # 1 row \n 5*2*3*5*2: 1, # 2 rows\n 5*2*3*5*3: 2, # 1 slice \n 5*2*3*5*3*3: 2, # 3 slices \n 5*2*3*5*3*4: 2, # whole img\n 5*2*3*5*3*7: 2, # whole img (more mem than necessary)\n }\n\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n # in C order\n bytes_per_voxel = 1\n R = (20,9,10)\n cs = (5,3,2)\n partition = (4,3,5)\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0]*R[1]*R[2]*bytes_per_voxel\n \n test_case = {\n 5*2*3: 4*3*5, # 1 block \n 5*2*3*4: 4*3*2, # 4 blocks \n 5*2*3*5: 4*3, # 1 row \n 5*2*3*5*2: 4*2, # 2 rows\n 5*2*3*5*3: 4, # 1 slice \n 5*2*3*5*3*3: 2, # 3 slices \n 5*2*3*5*3*4: 1, # whole img\n 5*2*3*5*3*7: 1, # whole img (more mem than necessary)\n }\n\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size, cs, bs, brs, bss, partition, R, bytes_per_voxel)\n\n # test number of buffers\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\ndef test_clustered_writes():\n bpv = 1\n R = (20,9,10)\n cs = (5,3,2)\n ff = 'HDF5'\n outdir_path = './outdir'\n\n test_case = [\n 5*3*2, # 1 block \n 5*3*2*4, # 4 blocks \n 5*3*2*5, # 1 row \n 5*3*2*5*2, # 2 rows\n 5*3*2*5*3, # 1 slice \n 5*3*2*5*3*3, # 3 slices \n 5*3*2*5*3*4, # whole img\n 5*3*2*5*3*7, # whole img (more mem than necessary)\n ]\n\n nb_chunks = 4*3*5\n\n # create input array\n origarr_filepath = './original_array.hdf5'\n data = np.random.normal(size=R)\n fm = get_file_manager(ff)\n if os.path.isfile(origarr_filepath):\n os.remove(origarr_filepath)\n fm.write(origarr_filepath, data, R, _slices=None)\n \n for m in test_case:\n create_empty_dir(outdir_path)\n clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)\n\n workdir = os.getcwd()\n os.chdir(outdir_path)\n filenames = list()\n for filename in glob.glob(\"*.hdf5\"):\n arr = fm.read_all(filename)\n assert arr.shape == cs\n filenames.append(filename)\n\n assert len(filenames) == nb_chunks\n os.chdir(workdir)\n\n \n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from django.apps import AppConfig
class DojoBookAppConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'dojo_book_app'
|
normal
|
{
"blob_id": "314f6cc97f53fa5bd8bf0ec0e1e305ca6384f1a2",
"index": 1559,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass DojoBookAppConfig(AppConfig):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass DojoBookAppConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'dojo_book_app'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass DojoBookAppConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'dojo_book_app'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
'''
1. Create 0-D array, 1-D array, 2-D array, 3-D array with following value
0-D: [2]
1-D: [3, 4, 5, 6, 7]
2-D: [[8, 1, 3], [2, 3, 4], [6, 2, 5]]
3-D: [[[1, 2, 4], [3, 3, 2], [1, 9, 1]], [[6, 8, 7], [9, 1, 0], [8, 2, 3]], [[5, 4, 1], [5, 7, 2], [3, 5, 9]]]
print them
'''
D0 = np.array(2)
D1 = np.array([3, 4, 5, 6, 7])
D2 = np.array([[8, 1, 3], [2, 3, 4], [6, 2, 5]])
D3 = np.array([[[1, 2, 4], [3, 3, 2], [1, 9, 1]], [[6, 8, 7], [9, 1, 0], [8, 2, 3]], [[5, 4, 1], [5, 7, 2], [3, 5, 9]]])
print('D0')
print(D0)
print('D1')
print(D1)
print('D2')
print(D2)
print('D3')
print(D3)
'''
2. Use indexing to change every value of 8 to 100 in the 4 arrays
array[index1, index2] = newValue
for example: 2-D array should be changed as : [[100, 1, 3], [2, 3, 4], [6, 2, 5]]
print them
'''
D2[0, 0] = 100
print('D2')
print(D2)
D3[1, 0, 1] = 100
D3[1, 2, 0] = 100
print('D3')
print(D3)
'''
3. Print the sum of all following values
a. the value of 0-D array
b. the middle of 1-D array
c. the center of 2-D array
d. the center of 3-D array ( the center of middle 2-D array )
* The value should be 11
'''
print('*** the final sum result is: ')
print(D0 + D1[2] + D2[1, 1] + D3[1, 1, 1])
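
# --- Added illustration (not part of the original exercise) ---
# The index assignments above work because the positions of the 8s are known in advance.
# A boolean mask covers task 2 for any array without hard-coding coordinates; on the
# arrays above it is a no-op by now, since every 8 has already been replaced.
for arr in (D1, D2, D3):
    arr[arr == 8] = 100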
|
normal
|
{
"blob_id": "a868ecb6ea6a5c7a186ddd8fa4fb76d96efeb21d",
"index": 4140,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('D0')\nprint(D0)\nprint('D1')\nprint(D1)\nprint('D2')\nprint(D2)\nprint('D3')\nprint(D3)\n<mask token>\nprint('D2')\nprint(D2)\n<mask token>\nprint('D3')\nprint(D3)\n<mask token>\nprint('*** the final sum result is: ')\nprint(D0 + D1[2] + D2[1, 1] + D3[1, 1, 1])\n",
"step-3": "<mask token>\nD0 = np.array(2)\nD1 = np.array([3, 4, 5, 6, 7])\nD2 = np.array([[8, 1, 3], [2, 3, 4], [6, 2, 5]])\nD3 = np.array([[[1, 2, 4], [3, 3, 2], [1, 9, 1]], [[6, 8, 7], [9, 1, 0], [8,\n 2, 3]], [[5, 4, 1], [5, 7, 2], [3, 5, 9]]])\nprint('D0')\nprint(D0)\nprint('D1')\nprint(D1)\nprint('D2')\nprint(D2)\nprint('D3')\nprint(D3)\n<mask token>\nD2[0, 0] = 100\nprint('D2')\nprint(D2)\nD3[1, 0, 1] = 100\nD3[1, 2, 0] = 100\nprint('D3')\nprint(D3)\n<mask token>\nprint('*** the final sum result is: ')\nprint(D0 + D1[2] + D2[1, 1] + D3[1, 1, 1])\n",
"step-4": "import numpy as np\n<mask token>\nD0 = np.array(2)\nD1 = np.array([3, 4, 5, 6, 7])\nD2 = np.array([[8, 1, 3], [2, 3, 4], [6, 2, 5]])\nD3 = np.array([[[1, 2, 4], [3, 3, 2], [1, 9, 1]], [[6, 8, 7], [9, 1, 0], [8,\n 2, 3]], [[5, 4, 1], [5, 7, 2], [3, 5, 9]]])\nprint('D0')\nprint(D0)\nprint('D1')\nprint(D1)\nprint('D2')\nprint(D2)\nprint('D3')\nprint(D3)\n<mask token>\nD2[0, 0] = 100\nprint('D2')\nprint(D2)\nD3[1, 0, 1] = 100\nD3[1, 2, 0] = 100\nprint('D3')\nprint(D3)\n<mask token>\nprint('*** the final sum result is: ')\nprint(D0 + D1[2] + D2[1, 1] + D3[1, 1, 1])\n",
"step-5": "import numpy as np\r\n\r\n'''\r\n1. Create 0-D array, 1-D array, 2-D array, 3-D array with following value\r\n\r\n\t0-D: [2]\r\n\t1-D: [3, 4, 5, 6, 7]\r\n\t2-D: [[8, 1, 3], [2, 3, 4], [6, 2, 5]]\r\n\t3-D: [[[1, 2, 4], [3, 3, 2], [1, 9, 1]], [[6, 8, 7], [9, 1, 0], [8, 2, 3]], [[5, 4, 1], [5, 7, 2], [3, 5, 9]]]\r\n\r\n\tprint them\r\n'''\r\nD0 = np.array(2)\r\nD1 = np.array([3, 4, 5, 6, 7])\r\nD2 = np.array([[8, 1, 3], [2, 3, 4], [6, 2, 5]])\r\nD3 = np.array([[[1, 2, 4], [3, 3, 2], [1, 9, 1]], [[6, 8, 7], [9, 1, 0], [8, 2, 3]], [[5, 4, 1], [5, 7, 2], [3, 5, 9]]])\r\nprint('D0')\r\nprint(D0)\r\nprint('D1')\r\nprint(D1)\r\nprint('D2')\r\nprint(D2)\r\nprint('D3')\r\nprint(D3)\r\n\r\n'''\r\n2. Use index to change all value 8 to 100 in 4 arrays\r\n\r\n\tarray[index1, index2] = newValue\r\n for example: 2-D array should be changed as : [[100, 1, 3], [2, 3, 4], [6, 2, 5]]\r\n\r\n\tprint them\r\n'''\r\nD2[0, 0] = 100\r\nprint('D2')\r\nprint(D2)\r\nD3[1, 0, 1] = 100\r\nD3[1, 2, 0] = 100\r\nprint('D3')\r\nprint(D3)\r\n'''\r\n3. Print the sum of all following values\r\n\r\n\ta. the value of 0-D array\r\n\tb. the middle of 1-D array\r\n\tc. the center of 2-D array\r\n\td. the center of 3-D array ( the center of middle 2-D array )\r\n\r\n\t* The value should be 11\r\n'''\r\nprint('*** the final sum result is: ')\r\nprint(D0 + D1[2] + D2[1, 1] + D3[1, 1, 1])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# encoding: utf-8
from rest_client import PY2
from tornado.testing import gen_test
from tornado.web import Application, RequestHandler
from .server import AsyncRESTTestCase
class Handler(RequestHandler):
if PY2:
S = '\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82 \xd0\xbc\xd0\xb8\xd1\x80'.decode('utf-8')
else:
S = 'Привет мир'
def get(self):
self.set_header('Content-Type', 'text/plain; charset=utf-8')
self.write(self.S.encode('utf-8'))
class TestCopy(AsyncRESTTestCase):
def get_app(self):
return Application(handlers=[
('/', Handler),
])
@gen_test
def test_get(self):
response = yield self.http_client.get(self.api_url.format("/"))
assert response.body == Handler.S
|
normal
|
{
"blob_id": "4bbd97942023370e053ccf4b5c1496c7247c7bf2",
"index": 9026,
"step-1": "<mask token>\n\n\nclass TestCopy(AsyncRESTTestCase):\n <mask token>\n\n @gen_test\n def test_get(self):\n response = yield self.http_client.get(self.api_url.format('/'))\n assert response.body == Handler.S\n",
"step-2": "<mask token>\n\n\nclass TestCopy(AsyncRESTTestCase):\n\n def get_app(self):\n return Application(handlers=[('/', Handler)])\n\n @gen_test\n def test_get(self):\n response = yield self.http_client.get(self.api_url.format('/'))\n assert response.body == Handler.S\n",
"step-3": "<mask token>\n\n\nclass Handler(RequestHandler):\n if PY2:\n S = 'Ð\\x9fÑ\\x80ивеÑ\\x82 миÑ\\x80'.decode('utf-8')\n else:\n S = 'Привет мир'\n <mask token>\n\n\nclass TestCopy(AsyncRESTTestCase):\n\n def get_app(self):\n return Application(handlers=[('/', Handler)])\n\n @gen_test\n def test_get(self):\n response = yield self.http_client.get(self.api_url.format('/'))\n assert response.body == Handler.S\n",
"step-4": "from rest_client import PY2\nfrom tornado.testing import gen_test\nfrom tornado.web import Application, RequestHandler\nfrom .server import AsyncRESTTestCase\n\n\nclass Handler(RequestHandler):\n if PY2:\n S = 'Ð\\x9fÑ\\x80ивеÑ\\x82 миÑ\\x80'.decode('utf-8')\n else:\n S = 'Привет мир'\n\n def get(self):\n self.set_header('Content-Type', 'text/plain; charset=utf-8')\n self.write(self.S.encode('utf-8'))\n\n\nclass TestCopy(AsyncRESTTestCase):\n\n def get_app(self):\n return Application(handlers=[('/', Handler)])\n\n @gen_test\n def test_get(self):\n response = yield self.http_client.get(self.api_url.format('/'))\n assert response.body == Handler.S\n",
"step-5": "#!/usr/bin/env python\n# encoding: utf-8\nfrom rest_client import PY2\nfrom tornado.testing import gen_test\nfrom tornado.web import Application, RequestHandler\nfrom .server import AsyncRESTTestCase\n\n\nclass Handler(RequestHandler):\n if PY2:\n S = '\\xd0\\x9f\\xd1\\x80\\xd0\\xb8\\xd0\\xb2\\xd0\\xb5\\xd1\\x82 \\xd0\\xbc\\xd0\\xb8\\xd1\\x80'.decode('utf-8')\n else:\n S = 'Привет мир'\n\n def get(self):\n self.set_header('Content-Type', 'text/plain; charset=utf-8')\n self.write(self.S.encode('utf-8'))\n\n\nclass TestCopy(AsyncRESTTestCase):\n def get_app(self):\n return Application(handlers=[\n ('/', Handler),\n ])\n\n @gen_test\n def test_get(self):\n response = yield self.http_client.get(self.api_url.format(\"/\"))\n assert response.body == Handler.S",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
<|reserved_special_token_0|>
def loop_rec(n, m, mapCoords, dims, data, tple):
if n >= m:
for x in range(dims[m]):
loop_rec(n, m + 1, mapCoords, dims, data, tple + (x,))
else:
temp = loop_access(len(dims) - 1, 0, data, tple)
print(temp)
if temp in mapCoords:
mapCoords[temp].append(tple)
else:
mapCoords[temp] = list()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loop_access(n, m, data, tpl):
if n > m:
return loop_access(n, m + 1, data[tpl[m]], tpl)
else:
return data[tpl[m]]
def loop_rec(n, m, mapCoords, dims, data, tple):
if n >= m:
for x in range(dims[m]):
loop_rec(n, m + 1, mapCoords, dims, data, tple + (x,))
else:
temp = loop_access(len(dims) - 1, 0, data, tple)
print(temp)
if temp in mapCoords:
mapCoords[temp].append(tple)
else:
mapCoords[temp] = list()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loop_access(n, m, data, tpl):
if n > m:
return loop_access(n, m + 1, data[tpl[m]], tpl)
else:
return data[tpl[m]]
def loop_rec(n, m, mapCoords, dims, data, tple):
if n >= m:
for x in range(dims[m]):
loop_rec(n, m + 1, mapCoords, dims, data, tple + (x,))
else:
temp = loop_access(len(dims) - 1, 0, data, tple)
print(temp)
if temp in mapCoords:
mapCoords[temp].append(tple)
else:
mapCoords[temp] = list()
def transform(dataFile, save_path):
img = nib.load(dataFile)
data = img.get_fdata()
hdr = img.header
mapCoords = dict()
dims = data.shape()
loop_rec(len(dims) - 1, 0, mapCoords, dims, data, ())
for key in mapCoords:
tList = mapCoords[key]
tData = np.zeros(dims)
s = str(key) + '_label.nii.gz'
save_loc = os.path.join(save_path, s)
for coord in tList:
tData[coord[0], coord[1], coord[2]] = key
tImg = nib.Nifti1Image(tData, None, hdr)
nib.save(tImg, save_loc)
<|reserved_special_token_1|>
import os
import numpy as np
import nibabel as nib
def loop_access(n, m, data, tpl):
if n > m:
return loop_access(n, m + 1, data[tpl[m]], tpl)
else:
return data[tpl[m]]
def loop_rec(n, m, mapCoords, dims, data, tple):
if n >= m:
for x in range(dims[m]):
loop_rec(n, m + 1, mapCoords, dims, data, tple + (x,))
else:
temp = loop_access(len(dims) - 1, 0, data, tple)
print(temp)
if temp in mapCoords:
mapCoords[temp].append(tple)
else:
mapCoords[temp] = list()
def transform(dataFile, save_path):
img = nib.load(dataFile)
data = img.get_fdata()
hdr = img.header
mapCoords = dict()
dims = data.shape()
loop_rec(len(dims) - 1, 0, mapCoords, dims, data, ())
for key in mapCoords:
tList = mapCoords[key]
tData = np.zeros(dims)
s = str(key) + '_label.nii.gz'
save_loc = os.path.join(save_path, s)
for coord in tList:
tData[coord[0], coord[1], coord[2]] = key
tImg = nib.Nifti1Image(tData, None, hdr)
nib.save(tImg, save_loc)
<|reserved_special_token_1|>
import os
import numpy as np
import nibabel as nib
def loop_access(n,m,data,tpl):
if n >m:
return loop_access(n,m+1,data[tpl[m]],tpl)
else:
return data[tpl[m]]
def loop_rec(n,m,mapCoords,dims,data,tple):
if n >= m:
for x in range(dims[m]):
loop_rec(n,m+1,mapCoords,dims,data,(tple+(x,)))
else:
temp = loop_access(len(dims)-1,0,data,tple)#recurse to find array element
print(temp)
if temp in mapCoords:
mapCoords[temp].append(tple)# add coord to list
else:
            mapCoords[temp] = list() # make a new list, it doesn't exist yet
def transform(dataFile,save_path):
img = nib.load(dataFile)
data = img.get_fdata()
hdr = img.header
mapCoords = dict()# hashmap to contain lists of 3-tuples
    # Use the specially designed loop_rec function to go through the data; I'm not sure of
    # another way to set up something that can handle a variable number of dimensions.
    # The first argument is the number of elements in data.shape minus 1, start at m=0 for the second arg,
    # mapCoords is passed to store the values, dims is the shape of the data, data is passed,
    # and () is an empty tuple, added to during each recursion.
dims = data.shape()
loop_rec(len(dims)-1,0,mapCoords,dims,data,())
for key in mapCoords:
tList = mapCoords[key]
tData = np.zeros(dims)#generate zeros
s = str(key) + '_label.nii.gz'
save_loc = os.path.join(save_path,s)
for coord in tList:
tData[coord[0],coord[1],coord[2]] = key #fix the coords to the correct value for this 'label'
tImg = nib.Nifti1Image(tData,None,hdr)
nib.save(tImg,save_loc)
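

# --- Hypothetical vectorised sketch (an addition, not from the original author) ---
# loop_rec/loop_access recursively visit every index tuple and group coordinates by the
# value stored there. For a plain numpy array, the grouping loop_rec aims to build can
# also be expressed with argwhere; this helper is only an illustration and is not used
# by transform above.
def group_coords_by_value(data):
    return {val: [tuple(idx) for idx in np.argwhere(data == val)]
            for val in np.unique(data)}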
|
flexible
|
{
"blob_id": "55b4448caa73bcb50a15eb46d07328934fce72c8",
"index": 7029,
"step-1": "<mask token>\n\n\ndef loop_rec(n, m, mapCoords, dims, data, tple):\n if n >= m:\n for x in range(dims[m]):\n loop_rec(n, m + 1, mapCoords, dims, data, tple + (x,))\n else:\n temp = loop_access(len(dims) - 1, 0, data, tple)\n print(temp)\n if temp in mapCoords:\n mapCoords[temp].append(tple)\n else:\n mapCoords[temp] = list()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loop_access(n, m, data, tpl):\n if n > m:\n return loop_access(n, m + 1, data[tpl[m]], tpl)\n else:\n return data[tpl[m]]\n\n\ndef loop_rec(n, m, mapCoords, dims, data, tple):\n if n >= m:\n for x in range(dims[m]):\n loop_rec(n, m + 1, mapCoords, dims, data, tple + (x,))\n else:\n temp = loop_access(len(dims) - 1, 0, data, tple)\n print(temp)\n if temp in mapCoords:\n mapCoords[temp].append(tple)\n else:\n mapCoords[temp] = list()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef loop_access(n, m, data, tpl):\n if n > m:\n return loop_access(n, m + 1, data[tpl[m]], tpl)\n else:\n return data[tpl[m]]\n\n\ndef loop_rec(n, m, mapCoords, dims, data, tple):\n if n >= m:\n for x in range(dims[m]):\n loop_rec(n, m + 1, mapCoords, dims, data, tple + (x,))\n else:\n temp = loop_access(len(dims) - 1, 0, data, tple)\n print(temp)\n if temp in mapCoords:\n mapCoords[temp].append(tple)\n else:\n mapCoords[temp] = list()\n\n\ndef transform(dataFile, save_path):\n img = nib.load(dataFile)\n data = img.get_fdata()\n hdr = img.header\n mapCoords = dict()\n dims = data.shape()\n loop_rec(len(dims) - 1, 0, mapCoords, dims, data, ())\n for key in mapCoords:\n tList = mapCoords[key]\n tData = np.zeros(dims)\n s = str(key) + '_label.nii.gz'\n save_loc = os.path.join(save_path, s)\n for coord in tList:\n tData[coord[0], coord[1], coord[2]] = key\n tImg = nib.Nifti1Image(tData, None, hdr)\n nib.save(tImg, save_loc)\n",
"step-4": "import os\nimport numpy as np\nimport nibabel as nib\n\n\ndef loop_access(n, m, data, tpl):\n if n > m:\n return loop_access(n, m + 1, data[tpl[m]], tpl)\n else:\n return data[tpl[m]]\n\n\ndef loop_rec(n, m, mapCoords, dims, data, tple):\n if n >= m:\n for x in range(dims[m]):\n loop_rec(n, m + 1, mapCoords, dims, data, tple + (x,))\n else:\n temp = loop_access(len(dims) - 1, 0, data, tple)\n print(temp)\n if temp in mapCoords:\n mapCoords[temp].append(tple)\n else:\n mapCoords[temp] = list()\n\n\ndef transform(dataFile, save_path):\n img = nib.load(dataFile)\n data = img.get_fdata()\n hdr = img.header\n mapCoords = dict()\n dims = data.shape()\n loop_rec(len(dims) - 1, 0, mapCoords, dims, data, ())\n for key in mapCoords:\n tList = mapCoords[key]\n tData = np.zeros(dims)\n s = str(key) + '_label.nii.gz'\n save_loc = os.path.join(save_path, s)\n for coord in tList:\n tData[coord[0], coord[1], coord[2]] = key\n tImg = nib.Nifti1Image(tData, None, hdr)\n nib.save(tImg, save_loc)\n",
"step-5": "import os\nimport numpy as np\nimport nibabel as nib\n\ndef loop_access(n,m,data,tpl):\n if n >m:\n return loop_access(n,m+1,data[tpl[m]],tpl)\n else:\n return data[tpl[m]]\n\ndef loop_rec(n,m,mapCoords,dims,data,tple):\n if n >= m:\n for x in range(dims[m]):\n loop_rec(n,m+1,mapCoords,dims,data,(tple+(x,)))\n else:\n temp = loop_access(len(dims)-1,0,data,tple)#recurse to find array element\n print(temp)\n if temp in mapCoords:\n mapCoords[temp].append(tple)# add coord to list\n else:\n mapCoords[temp] = list() #make list, it doesn't exiss:\n\ndef transform(dataFile,save_path):\n img = nib.load(dataFile)\n data = img.get_fdata()\n hdr = img.header\n mapCoords = dict()# hashmap to contain lists of 3-tuples\n\n #use specially design loop_rec function to go through data, Im not sure of another way\n # to set up something that could possible variable n array\n #first arguement is the number elements in data.shape() - 1, start at m=0 for second arg,\n #mapcoords is passed to store the values to, dims in the shape of the data, data is passed\n # () is an empty tuple, this is added to during each recursion \n dims = data.shape()\n loop_rec(len(dims)-1,0,mapCoords,dims,data,()) \n\n for key in mapCoords:\n tList = mapCoords[key]\n tData = np.zeros(dims)#generate zeros\n s = str(key) + '_label.nii.gz'\n save_loc = os.path.join(save_path,s)\n for coord in tList:\n tData[coord[0],coord[1],coord[2]] = key #fix the coords to the correct value for this 'label'\n tImg = nib.Nifti1Image(tData,None,hdr)\n nib.save(tImg,save_loc)\n \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# link https://deeplizard.com/learn/video/QK_PP_2KgGE
import gym
import numpy as np
import random
import time
from IPython.display import clear_output
# setup the env
env = gym.make("FrozenLake8x8-v0", is_slippery=False)
observation = env.reset()
# setup the q-table
action_space_size = env.action_space.n
state_space_size = env.observation_space.n
q_table = np.zeros((state_space_size, action_space_size))
#print(q_table)
# instantiate hyper-parameters
num_episodes = 10000
steps_per_episodes = 100
learning_rate = 0.1
discount_rate = 0.99
exploration_rate = 1
max_exploration_rate = 1
min_exploration_rate = 0.01
exploration_decay_rate = 0.001
# empty list to hold our rewards over time
rewards_all_episodes = []
# main loops
for episode in range(num_episodes):
state = env.reset()
done = False
rewards_current_episode = 0
for step in range(steps_per_episodes):
# exploration vs exploitation
exploration_rate_threshold = random.uniform(0,1)
if exploration_rate_threshold > exploration_rate:
action = np.argmax(q_table[state,:])
else:
action = env.action_space.sample()
next_state, reward, done, info = env.step(action)
#print(next_state)
#print(q_table.shape)
# update q-table
q_table[state, action] = q_table[state, action] * (1 - learning_rate) + learning_rate * (reward + discount_rate * np.max(q_table[next_state, :]))
state = next_state
rewards_current_episode += reward
if done == True:
break
# Exploration rate decay
exploration_rate = min_exploration_rate + (max_exploration_rate - min_exploration_rate) * np.exp(-exploration_decay_rate*episode)
rewards_all_episodes.append(rewards_current_episode)
# Calculate and print the average reward per thousand episodes
rewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes),num_episodes/1000)
count = 1000
print("********Average reward per thousand episodes********\n")
for r in rewards_per_thousand_episodes:
print(count, ": ", str(sum(r/1000)))
count += 1000
# Print updated Q-table
print("\n\n********Q-table********\n")
print(q_table)
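
# --- Added note on the update rule (illustration only, not part of the original script) ---
# The update inside the training loop is standard tabular Q-learning:
#     Q(s, a) <- (1 - lr) * Q(s, a) + lr * (reward + gamma * max_a' Q(s', a'))
# with lr = learning_rate and gamma = discount_rate. A tiny worked example, assuming
# Q(s, a) = 0.5, reward = 1, max_a' Q(s', a') = 0.8, lr = 0.1 and gamma = 0.99:
q_example = 0.5 * (1 - 0.1) + 0.1 * (1 + 0.99 * 0.8)  # = 0.6292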
|
normal
|
{
"blob_id": "b791afec1c9fb214d1f3b4ec0ec67f905d96aabf",
"index": 3249,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor episode in range(num_episodes):\n state = env.reset()\n done = False\n rewards_current_episode = 0\n for step in range(steps_per_episodes):\n exploration_rate_threshold = random.uniform(0, 1)\n if exploration_rate_threshold > exploration_rate:\n action = np.argmax(q_table[state, :])\n else:\n action = env.action_space.sample()\n next_state, reward, done, info = env.step(action)\n q_table[state, action] = q_table[state, action] * (1 - learning_rate\n ) + learning_rate * (reward + discount_rate * np.max(q_table[\n next_state, :]))\n state = next_state\n rewards_current_episode += reward\n if done == True:\n break\n exploration_rate = min_exploration_rate + (max_exploration_rate -\n min_exploration_rate) * np.exp(-exploration_decay_rate * episode)\n rewards_all_episodes.append(rewards_current_episode)\n<mask token>\nprint('********Average reward per thousand episodes********\\n')\nfor r in rewards_per_thousand_episodes:\n print(count, ': ', str(sum(r / 1000)))\n count += 1000\nprint(\"\"\"\n\n********Q-table********\n\"\"\")\nprint(q_table)\n",
"step-3": "<mask token>\nenv = gym.make('FrozenLake8x8-v0', is_slippery=False)\nobservation = env.reset()\naction_space_size = env.action_space.n\nstate_space_size = env.observation_space.n\nq_table = np.zeros((state_space_size, action_space_size))\nnum_episodes = 10000\nsteps_per_episodes = 100\nlearning_rate = 0.1\ndiscount_rate = 0.99\nexploration_rate = 1\nmax_exploration_rate = 1\nmin_exploration_rate = 0.01\nexploration_decay_rate = 0.001\nrewards_all_episodes = []\nfor episode in range(num_episodes):\n state = env.reset()\n done = False\n rewards_current_episode = 0\n for step in range(steps_per_episodes):\n exploration_rate_threshold = random.uniform(0, 1)\n if exploration_rate_threshold > exploration_rate:\n action = np.argmax(q_table[state, :])\n else:\n action = env.action_space.sample()\n next_state, reward, done, info = env.step(action)\n q_table[state, action] = q_table[state, action] * (1 - learning_rate\n ) + learning_rate * (reward + discount_rate * np.max(q_table[\n next_state, :]))\n state = next_state\n rewards_current_episode += reward\n if done == True:\n break\n exploration_rate = min_exploration_rate + (max_exploration_rate -\n min_exploration_rate) * np.exp(-exploration_decay_rate * episode)\n rewards_all_episodes.append(rewards_current_episode)\nrewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes), \n num_episodes / 1000)\ncount = 1000\nprint('********Average reward per thousand episodes********\\n')\nfor r in rewards_per_thousand_episodes:\n print(count, ': ', str(sum(r / 1000)))\n count += 1000\nprint(\"\"\"\n\n********Q-table********\n\"\"\")\nprint(q_table)\n",
"step-4": "import gym\nimport numpy as np\nimport random\nimport time\nfrom IPython.display import clear_output\nenv = gym.make('FrozenLake8x8-v0', is_slippery=False)\nobservation = env.reset()\naction_space_size = env.action_space.n\nstate_space_size = env.observation_space.n\nq_table = np.zeros((state_space_size, action_space_size))\nnum_episodes = 10000\nsteps_per_episodes = 100\nlearning_rate = 0.1\ndiscount_rate = 0.99\nexploration_rate = 1\nmax_exploration_rate = 1\nmin_exploration_rate = 0.01\nexploration_decay_rate = 0.001\nrewards_all_episodes = []\nfor episode in range(num_episodes):\n state = env.reset()\n done = False\n rewards_current_episode = 0\n for step in range(steps_per_episodes):\n exploration_rate_threshold = random.uniform(0, 1)\n if exploration_rate_threshold > exploration_rate:\n action = np.argmax(q_table[state, :])\n else:\n action = env.action_space.sample()\n next_state, reward, done, info = env.step(action)\n q_table[state, action] = q_table[state, action] * (1 - learning_rate\n ) + learning_rate * (reward + discount_rate * np.max(q_table[\n next_state, :]))\n state = next_state\n rewards_current_episode += reward\n if done == True:\n break\n exploration_rate = min_exploration_rate + (max_exploration_rate -\n min_exploration_rate) * np.exp(-exploration_decay_rate * episode)\n rewards_all_episodes.append(rewards_current_episode)\nrewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes), \n num_episodes / 1000)\ncount = 1000\nprint('********Average reward per thousand episodes********\\n')\nfor r in rewards_per_thousand_episodes:\n print(count, ': ', str(sum(r / 1000)))\n count += 1000\nprint(\"\"\"\n\n********Q-table********\n\"\"\")\nprint(q_table)\n",
"step-5": "# link https://deeplizard.com/learn/video/QK_PP_2KgGE\nimport gym\nimport numpy as np\nimport random\nimport time\nfrom IPython.display import clear_output\n\n# setup the env\nenv = gym.make(\"FrozenLake8x8-v0\", is_slippery=False)\nobservation = env.reset()\n\n# setup the q-table\naction_space_size = env.action_space.n\nstate_space_size = env.observation_space.n\nq_table = np.zeros((state_space_size, action_space_size))\n#print(q_table)\n\n# instaniate hyper-parameters\nnum_episodes = 10000\nsteps_per_episodes = 100\nlearning_rate = 0.1\ndiscount_rate = 0.99\nexploration_rate = 1\nmax_exploration_rate = 1\nmin_exploration_rate = 0.01\nexploration_decay_rate = 0.001\n\n# empty list to hold our rewards over time\nrewards_all_episodes = []\n \n # main loops\nfor episode in range(num_episodes):\n state = env.reset()\n done = False\n rewards_current_episode = 0\n \n for step in range(steps_per_episodes):\n \n # exploration vs exploitation\n exploration_rate_threshold = random.uniform(0,1)\n if exploration_rate_threshold > exploration_rate:\n action = np.argmax(q_table[state,:])\n else:\n action = env.action_space.sample()\n \n next_state, reward, done, info = env.step(action)\n #print(next_state)\n #print(q_table.shape)\n\n # update q-table\n q_table[state, action] = q_table[state, action] * (1 - learning_rate) + learning_rate * (reward + discount_rate * np.max(q_table[next_state, :]))\n\n state = next_state\n rewards_current_episode += reward\n \n if done == True:\n break\n \n # Exploration rate decay\n exploration_rate = min_exploration_rate + (max_exploration_rate - min_exploration_rate) * np.exp(-exploration_decay_rate*episode)\n rewards_all_episodes.append(rewards_current_episode)\n\n# Calculate and print the average reward per thousand episodes\nrewards_per_thousand_episodes = np.split(np.array(rewards_all_episodes),num_episodes/1000)\ncount = 1000\n\nprint(\"********Average reward per thousand episodes********\\n\")\nfor r in rewards_per_thousand_episodes:\n print(count, \": \", str(sum(r/1000)))\n count += 1000\n\n# Print updated Q-table\nprint(\"\\n\\n********Q-table********\\n\")\nprint(q_table)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pandas2ri.activate()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ts = robjects.r('ts')
forecast = importr('forecast', lib_loc=
'C:/Users/sand9888/Documents/sand9888/R/win-library/3.3')
<|reserved_special_token_0|>
pandas2ri.activate()
train = os.path.join('C:/DAT203.3x/Lab01/cadairydata.csv')
traindf = pd.read_csv(train, index_col=0)
traindf.index = traindf.index.to_datetime()
rdata = ts(traindf.Price.values, frequency=4)
fit = forecast.auto_arima(rdata)
forecast_output = forecast.forecast(fit, h=16, level=95.0)
<|reserved_special_token_1|>
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
ts = robjects.r('ts')
forecast = importr('forecast', lib_loc=
'C:/Users/sand9888/Documents/sand9888/R/win-library/3.3')
import os
import pandas as pd
from rpy2.robjects import pandas2ri
pandas2ri.activate()
train = os.path.join('C:/DAT203.3x/Lab01/cadairydata.csv')
traindf = pd.read_csv(train, index_col=0)
traindf.index = traindf.index.to_datetime()
rdata = ts(traindf.Price.values, frequency=4)
fit = forecast.auto_arima(rdata)
forecast_output = forecast.forecast(fit, h=16, level=95.0)
<|reserved_special_token_1|>
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
ts=robjects.r('ts')
forecast = importr("forecast", lib_loc = "C:/Users/sand9888/Documents/sand9888/R/win-library/3.3")
import os
import pandas as pd
from rpy2.robjects import pandas2ri
pandas2ri.activate()
train = os.path.join('C:/DAT203.3x/Lab01/cadairydata.csv')
traindf=pd.read_csv(train, index_col=0)
traindf.index=traindf.index.to_datetime()
rdata=ts(traindf.Price.values,frequency=4)
fit=forecast.auto_arima(rdata)
forecast_output=forecast.forecast(fit,h=16,level=(95.0))
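
# --- Added illustration (assumes the standard components of an R `forecast` object) ---
# forecast_output is an R list; its point forecasts normally live under the name 'mean'.
# A hypothetical way to pull them back into Python for inspection:
import numpy as np
point_forecasts = np.asarray(forecast_output.rx2('mean'))  # 16 quarterly point forecasts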
|
flexible
|
{
"blob_id": "e00cbe6e177ee841c6e64de842e5b8f95463b3a8",
"index": 2169,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npandas2ri.activate()\n<mask token>\n",
"step-3": "<mask token>\nts = robjects.r('ts')\nforecast = importr('forecast', lib_loc=\n 'C:/Users/sand9888/Documents/sand9888/R/win-library/3.3')\n<mask token>\npandas2ri.activate()\ntrain = os.path.join('C:/DAT203.3x/Lab01/cadairydata.csv')\ntraindf = pd.read_csv(train, index_col=0)\ntraindf.index = traindf.index.to_datetime()\nrdata = ts(traindf.Price.values, frequency=4)\nfit = forecast.auto_arima(rdata)\nforecast_output = forecast.forecast(fit, h=16, level=95.0)\n",
"step-4": "import rpy2.robjects as robjects\nfrom rpy2.robjects.packages import importr\nts = robjects.r('ts')\nforecast = importr('forecast', lib_loc=\n 'C:/Users/sand9888/Documents/sand9888/R/win-library/3.3')\nimport os\nimport pandas as pd\nfrom rpy2.robjects import pandas2ri\npandas2ri.activate()\ntrain = os.path.join('C:/DAT203.3x/Lab01/cadairydata.csv')\ntraindf = pd.read_csv(train, index_col=0)\ntraindf.index = traindf.index.to_datetime()\nrdata = ts(traindf.Price.values, frequency=4)\nfit = forecast.auto_arima(rdata)\nforecast_output = forecast.forecast(fit, h=16, level=95.0)\n",
"step-5": "import rpy2.robjects as robjects\nfrom rpy2.robjects.packages import importr\nts=robjects.r('ts')\nforecast = importr(\"forecast\", lib_loc = \"C:/Users/sand9888/Documents/sand9888/R/win-library/3.3\")\nimport os\nimport pandas as pd\n\nfrom rpy2.robjects import pandas2ri\npandas2ri.activate()\n\n\ntrain = os.path.join('C:/DAT203.3x/Lab01/cadairydata.csv')\ntraindf=pd.read_csv(train, index_col=0)\ntraindf.index=traindf.index.to_datetime()\n\nrdata=ts(traindf.Price.values,frequency=4)\nfit=forecast.auto_arima(rdata)\nforecast_output=forecast.forecast(fit,h=16,level=(95.0))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Cell(object):
def __init__(self, cellId, top_left_cell, top_right_cell,
bottom_right_cell, bottom_left_cell):
self.cellId = cellId
self.top_left_cell = top_left_cell
self.top_right_cell = top_right_cell
self.bottom_right_cell = bottom_right_cell
self.bottom_left_cell = bottom_left_cell
def __repr__(self):
return str(self.__dict__)
def generate_cell(current_cell_id, top_left_cell, top_right_cell,
bottom_right_cell, bottom_left_cell):
c = Cell(current_cell_id, top_left_cell, top_right_cell,
bottom_right_cell, bottom_left_cell)
return c
<|reserved_special_token_0|>
def grid_to_geojson(grid, lower_elevation, upper_elevation):
features = []
for cell in grid:
rect_points = [[(cell.top_left_cell.lon, cell.top_left_cell.lat,
lower_elevation), (cell.top_right_cell.lon, cell.top_right_cell
.lat, lower_elevation), (cell.bottom_right_cell.lon, cell.
bottom_right_cell.lat, lower_elevation), (cell.bottom_left_cell
.lon, cell.bottom_left_cell.lat, lower_elevation), (cell.
top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), (
cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation
), (cell.top_right_cell.lon, cell.top_right_cell.lat,
upper_elevation), (cell.bottom_right_cell.lon, cell.
bottom_right_cell.lat, upper_elevation), (cell.bottom_left_cell
.lon, cell.bottom_left_cell.lat, upper_elevation), (cell.
top_left_cell.lon, cell.top_left_cell.lat, upper_elevation)]]
properties = {'capacity': random.randint(0, 5)}
polygon = Polygon(rect_points)
feature = Feature(geometry=polygon, id=cell.cellId, properties=
properties)
features.append(feature)
return FeatureCollection(features)
def main():
TOP_LEFT = LatLon(-37.721874, 144.966859)
EAST_EXTENT = 1000.0
SOUT_EXTENT = 1000.0
CELL_LONG_SIZE_METERS = 100.0
CELL_LAT_SIZE_METERS = 100.0
grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT,
CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)
geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)
dump(geojson_feature_collection, sys.stdout, indent=4)
json_file = open('grid-3d.geojson', 'w')
dump(geojson_feature_collection, json_file, indent=4)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Cell(object):
def __init__(self, cellId, top_left_cell, top_right_cell,
bottom_right_cell, bottom_left_cell):
self.cellId = cellId
self.top_left_cell = top_left_cell
self.top_right_cell = top_right_cell
self.bottom_right_cell = bottom_right_cell
self.bottom_left_cell = bottom_left_cell
def __repr__(self):
return str(self.__dict__)
def generate_cell(current_cell_id, top_left_cell, top_right_cell,
bottom_right_cell, bottom_left_cell):
c = Cell(current_cell_id, top_left_cell, top_right_cell,
bottom_right_cell, bottom_left_cell)
return c
def generate_cell_grid(top_left, east_extent, south_extent,
cell_lat_size_meters, cell_long_size_meters):
south_distance = 0
current_cell_id = 0
list_of_cells = []
left_edge = top_left
while south_distance < south_extent:
south_distance = south_distance + cell_lat_size_meters
point_south_of_left_edge = left_edge.destination(cell_lat_size_meters,
BEARING_SOUTH)
top_left_cell = left_edge
bottom_left_cell = point_south_of_left_edge
east_distance = 0
while east_distance < east_extent:
top_right_cell = top_left_cell.destination(cell_long_size_meters,
BEARING_EAST)
bottom_right_cell = bottom_left_cell.destination(
cell_long_size_meters, BEARING_EAST)
cell = generate_cell(current_cell_id, top_left_cell,
top_right_cell, bottom_right_cell, bottom_left_cell)
current_cell_id = current_cell_id + 1
list_of_cells.append(cell)
top_left_cell = top_right_cell
bottom_left_cell = bottom_right_cell
east_distance = east_distance + cell_long_size_meters
left_edge = point_south_of_left_edge
return list_of_cells
def grid_to_geojson(grid, lower_elevation, upper_elevation):
features = []
for cell in grid:
rect_points = [[(cell.top_left_cell.lon, cell.top_left_cell.lat,
lower_elevation), (cell.top_right_cell.lon, cell.top_right_cell
.lat, lower_elevation), (cell.bottom_right_cell.lon, cell.
bottom_right_cell.lat, lower_elevation), (cell.bottom_left_cell
.lon, cell.bottom_left_cell.lat, lower_elevation), (cell.
top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), (
cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation
), (cell.top_right_cell.lon, cell.top_right_cell.lat,
upper_elevation), (cell.bottom_right_cell.lon, cell.
bottom_right_cell.lat, upper_elevation), (cell.bottom_left_cell
.lon, cell.bottom_left_cell.lat, upper_elevation), (cell.
top_left_cell.lon, cell.top_left_cell.lat, upper_elevation)]]
properties = {'capacity': random.randint(0, 5)}
polygon = Polygon(rect_points)
feature = Feature(geometry=polygon, id=cell.cellId, properties=
properties)
features.append(feature)
return FeatureCollection(features)
def main():
TOP_LEFT = LatLon(-37.721874, 144.966859)
EAST_EXTENT = 1000.0
SOUT_EXTENT = 1000.0
CELL_LONG_SIZE_METERS = 100.0
CELL_LAT_SIZE_METERS = 100.0
grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT,
CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)
geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)
dump(geojson_feature_collection, sys.stdout, indent=4)
json_file = open('grid-3d.geojson', 'w')
dump(geojson_feature_collection, json_file, indent=4)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Cell(object):
def __init__(self, cellId, top_left_cell, top_right_cell,
bottom_right_cell, bottom_left_cell):
self.cellId = cellId
self.top_left_cell = top_left_cell
self.top_right_cell = top_right_cell
self.bottom_right_cell = bottom_right_cell
self.bottom_left_cell = bottom_left_cell
def __repr__(self):
return str(self.__dict__)
def generate_cell(current_cell_id, top_left_cell, top_right_cell,
bottom_right_cell, bottom_left_cell):
c = Cell(current_cell_id, top_left_cell, top_right_cell,
bottom_right_cell, bottom_left_cell)
return c
def generate_cell_grid(top_left, east_extent, south_extent,
cell_lat_size_meters, cell_long_size_meters):
south_distance = 0
current_cell_id = 0
list_of_cells = []
left_edge = top_left
while south_distance < south_extent:
south_distance = south_distance + cell_lat_size_meters
point_south_of_left_edge = left_edge.destination(cell_lat_size_meters,
BEARING_SOUTH)
top_left_cell = left_edge
bottom_left_cell = point_south_of_left_edge
east_distance = 0
while east_distance < east_extent:
top_right_cell = top_left_cell.destination(cell_long_size_meters,
BEARING_EAST)
bottom_right_cell = bottom_left_cell.destination(
cell_long_size_meters, BEARING_EAST)
cell = generate_cell(current_cell_id, top_left_cell,
top_right_cell, bottom_right_cell, bottom_left_cell)
current_cell_id = current_cell_id + 1
list_of_cells.append(cell)
top_left_cell = top_right_cell
bottom_left_cell = bottom_right_cell
east_distance = east_distance + cell_long_size_meters
left_edge = point_south_of_left_edge
return list_of_cells
def grid_to_geojson(grid, lower_elevation, upper_elevation):
features = []
for cell in grid:
rect_points = [[(cell.top_left_cell.lon, cell.top_left_cell.lat,
lower_elevation), (cell.top_right_cell.lon, cell.top_right_cell
.lat, lower_elevation), (cell.bottom_right_cell.lon, cell.
bottom_right_cell.lat, lower_elevation), (cell.bottom_left_cell
.lon, cell.bottom_left_cell.lat, lower_elevation), (cell.
top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), (
cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation
), (cell.top_right_cell.lon, cell.top_right_cell.lat,
upper_elevation), (cell.bottom_right_cell.lon, cell.
bottom_right_cell.lat, upper_elevation), (cell.bottom_left_cell
.lon, cell.bottom_left_cell.lat, upper_elevation), (cell.
top_left_cell.lon, cell.top_left_cell.lat, upper_elevation)]]
properties = {'capacity': random.randint(0, 5)}
polygon = Polygon(rect_points)
feature = Feature(geometry=polygon, id=cell.cellId, properties=
properties)
features.append(feature)
return FeatureCollection(features)
def main():
TOP_LEFT = LatLon(-37.721874, 144.966859)
EAST_EXTENT = 1000.0
SOUT_EXTENT = 1000.0
CELL_LONG_SIZE_METERS = 100.0
CELL_LAT_SIZE_METERS = 100.0
grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT,
CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)
geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)
dump(geojson_feature_collection, sys.stdout, indent=4)
json_file = open('grid-3d.geojson', 'w')
dump(geojson_feature_collection, json_file, indent=4)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
BEARING_SOUTH = 180.0
BEARING_EAST = 90.0
class Cell(object):
def __init__(self, cellId, top_left_cell, top_right_cell,
bottom_right_cell, bottom_left_cell):
self.cellId = cellId
self.top_left_cell = top_left_cell
self.top_right_cell = top_right_cell
self.bottom_right_cell = bottom_right_cell
self.bottom_left_cell = bottom_left_cell
def __repr__(self):
return str(self.__dict__)
def generate_cell(current_cell_id, top_left_cell, top_right_cell,
bottom_right_cell, bottom_left_cell):
c = Cell(current_cell_id, top_left_cell, top_right_cell,
bottom_right_cell, bottom_left_cell)
return c
def generate_cell_grid(top_left, east_extent, south_extent,
cell_lat_size_meters, cell_long_size_meters):
south_distance = 0
current_cell_id = 0
list_of_cells = []
left_edge = top_left
while south_distance < south_extent:
south_distance = south_distance + cell_lat_size_meters
point_south_of_left_edge = left_edge.destination(cell_lat_size_meters,
BEARING_SOUTH)
top_left_cell = left_edge
bottom_left_cell = point_south_of_left_edge
east_distance = 0
while east_distance < east_extent:
top_right_cell = top_left_cell.destination(cell_long_size_meters,
BEARING_EAST)
bottom_right_cell = bottom_left_cell.destination(
cell_long_size_meters, BEARING_EAST)
cell = generate_cell(current_cell_id, top_left_cell,
top_right_cell, bottom_right_cell, bottom_left_cell)
current_cell_id = current_cell_id + 1
list_of_cells.append(cell)
top_left_cell = top_right_cell
bottom_left_cell = bottom_right_cell
east_distance = east_distance + cell_long_size_meters
left_edge = point_south_of_left_edge
return list_of_cells
def grid_to_geojson(grid, lower_elevation, upper_elevation):
features = []
for cell in grid:
rect_points = [[(cell.top_left_cell.lon, cell.top_left_cell.lat,
lower_elevation), (cell.top_right_cell.lon, cell.top_right_cell
.lat, lower_elevation), (cell.bottom_right_cell.lon, cell.
bottom_right_cell.lat, lower_elevation), (cell.bottom_left_cell
.lon, cell.bottom_left_cell.lat, lower_elevation), (cell.
top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), (
cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation
), (cell.top_right_cell.lon, cell.top_right_cell.lat,
upper_elevation), (cell.bottom_right_cell.lon, cell.
bottom_right_cell.lat, upper_elevation), (cell.bottom_left_cell
.lon, cell.bottom_left_cell.lat, upper_elevation), (cell.
top_left_cell.lon, cell.top_left_cell.lat, upper_elevation)]]
properties = {'capacity': random.randint(0, 5)}
polygon = Polygon(rect_points)
feature = Feature(geometry=polygon, id=cell.cellId, properties=
properties)
features.append(feature)
return FeatureCollection(features)
def main():
TOP_LEFT = LatLon(-37.721874, 144.966859)
EAST_EXTENT = 1000.0
SOUT_EXTENT = 1000.0
CELL_LONG_SIZE_METERS = 100.0
CELL_LAT_SIZE_METERS = 100.0
grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT,
CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)
geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)
dump(geojson_feature_collection, sys.stdout, indent=4)
json_file = open('grid-3d.geojson', 'w')
dump(geojson_feature_collection, json_file, indent=4)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
# GeoPy can be used to interface to map box https://pypi.org/project/geopy/
from pygeodesy.ellipsoidalVincenty import LatLon
from geojson import Polygon, Feature, FeatureCollection, dump
import sys
import random
BEARING_SOUTH = 180.0
BEARING_EAST = 90.0
class Cell(object):
def __init__(self, cellId, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell):
self.cellId = cellId
self.top_left_cell = top_left_cell
self.top_right_cell = top_right_cell
self.bottom_right_cell = bottom_right_cell
self.bottom_left_cell = bottom_left_cell
def __repr__(self):
return str(self.__dict__)
def generate_cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell):
c = Cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell)
# Expect other data to be inserted into the cell here
return c
def generate_cell_grid(top_left, east_extent, south_extent, cell_lat_size_meters, cell_long_size_meters):
south_distance = 0
current_cell_id = 0
list_of_cells = []
left_edge = top_left
while south_distance < south_extent:
south_distance = south_distance + cell_lat_size_meters
point_south_of_left_edge = left_edge.destination(cell_lat_size_meters, BEARING_SOUTH)
top_left_cell = left_edge
bottom_left_cell = point_south_of_left_edge
east_distance = 0
while east_distance < east_extent:
top_right_cell = top_left_cell.destination(cell_long_size_meters, BEARING_EAST)
bottom_right_cell = bottom_left_cell.destination(cell_long_size_meters, BEARING_EAST)
cell = generate_cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell)
current_cell_id = current_cell_id + 1
list_of_cells.append(cell)
# Increments
top_left_cell = top_right_cell
bottom_left_cell = bottom_right_cell
east_distance = east_distance + cell_long_size_meters
left_edge = point_south_of_left_edge
return list_of_cells
def grid_to_geojson(grid, lower_elevation, upper_elevation):
features = []
for cell in grid:
rect_points = [
[
(cell.top_left_cell.lon, cell.top_left_cell.lat, lower_elevation),
(cell.top_right_cell.lon, cell.top_right_cell.lat, lower_elevation),
(cell.bottom_right_cell.lon, cell.bottom_right_cell.lat, lower_elevation),
(cell.bottom_left_cell.lon, cell.bottom_left_cell.lat, lower_elevation),
(cell.top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), #Because first and last points have to match
(cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation),
(cell.top_right_cell.lon, cell.top_right_cell.lat, upper_elevation),
(cell.bottom_right_cell.lon, cell.bottom_right_cell.lat, upper_elevation),
(cell.bottom_left_cell.lon, cell.bottom_left_cell.lat, upper_elevation),
(cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation) #Because first and last points have to match
]
]
properties = {
'capacity': random.randint(0, 5)
} # TODO this is just an example
polygon = Polygon(rect_points)
feature = Feature(geometry=polygon, id=cell.cellId, properties=properties)
features.append(feature)
return FeatureCollection(features)
def main():
TOP_LEFT = LatLon(-37.721874, 144.966859)
EAST_EXTENT = 1000.0
SOUT_EXTENT = 1000.0
CELL_LONG_SIZE_METERS = 100.0
CELL_LAT_SIZE_METERS = 100.0
grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT, CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)
geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)
dump(geojson_feature_collection, sys.stdout, indent=4)
json_file = open('grid-3d.geojson', 'w')
dump(geojson_feature_collection, json_file, indent=4)
if __name__ == '__main__':
main()
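

# --- Added sizing note (illustration only) ---
# With the parameters in main(), the grid spans 1000 m x 1000 m in 100 m cells, so
# generate_cell_grid yields 10 * 10 = 100 cells and grid_to_geojson emits 100 polygon
# features built from the cell corners at the 10 m and 100 m elevations. A hypothetical
# helper mirroring the while-loop counts:
def expected_cell_count(east_extent, south_extent, cell_long_size_meters, cell_lat_size_meters):
    import math
    return (math.ceil(east_extent / cell_long_size_meters)
            * math.ceil(south_extent / cell_lat_size_meters))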
|
flexible
|
{
"blob_id": "01f0ad8746ed9a9941faa699b146625ad3a0b373",
"index": 4289,
"step-1": "<mask token>\n\n\nclass Cell(object):\n\n def __init__(self, cellId, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n self.cellId = cellId\n self.top_left_cell = top_left_cell\n self.top_right_cell = top_right_cell\n self.bottom_right_cell = bottom_right_cell\n self.bottom_left_cell = bottom_left_cell\n\n def __repr__(self):\n return str(self.__dict__)\n\n\ndef generate_cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n c = Cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell)\n return c\n\n\n<mask token>\n\n\ndef grid_to_geojson(grid, lower_elevation, upper_elevation):\n features = []\n for cell in grid:\n rect_points = [[(cell.top_left_cell.lon, cell.top_left_cell.lat,\n lower_elevation), (cell.top_right_cell.lon, cell.top_right_cell\n .lat, lower_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, lower_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, lower_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), (\n cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation\n ), (cell.top_right_cell.lon, cell.top_right_cell.lat,\n upper_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, upper_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, upper_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, upper_elevation)]]\n properties = {'capacity': random.randint(0, 5)}\n polygon = Polygon(rect_points)\n feature = Feature(geometry=polygon, id=cell.cellId, properties=\n properties)\n features.append(feature)\n return FeatureCollection(features)\n\n\ndef main():\n TOP_LEFT = LatLon(-37.721874, 144.966859)\n EAST_EXTENT = 1000.0\n SOUT_EXTENT = 1000.0\n CELL_LONG_SIZE_METERS = 100.0\n CELL_LAT_SIZE_METERS = 100.0\n grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT,\n CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)\n geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)\n dump(geojson_feature_collection, sys.stdout, indent=4)\n json_file = open('grid-3d.geojson', 'w')\n dump(geojson_feature_collection, json_file, indent=4)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Cell(object):\n\n def __init__(self, cellId, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n self.cellId = cellId\n self.top_left_cell = top_left_cell\n self.top_right_cell = top_right_cell\n self.bottom_right_cell = bottom_right_cell\n self.bottom_left_cell = bottom_left_cell\n\n def __repr__(self):\n return str(self.__dict__)\n\n\ndef generate_cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n c = Cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell)\n return c\n\n\ndef generate_cell_grid(top_left, east_extent, south_extent,\n cell_lat_size_meters, cell_long_size_meters):\n south_distance = 0\n current_cell_id = 0\n list_of_cells = []\n left_edge = top_left\n while south_distance < south_extent:\n south_distance = south_distance + cell_lat_size_meters\n point_south_of_left_edge = left_edge.destination(cell_lat_size_meters,\n BEARING_SOUTH)\n top_left_cell = left_edge\n bottom_left_cell = point_south_of_left_edge\n east_distance = 0\n while east_distance < east_extent:\n top_right_cell = top_left_cell.destination(cell_long_size_meters,\n BEARING_EAST)\n bottom_right_cell = bottom_left_cell.destination(\n cell_long_size_meters, BEARING_EAST)\n cell = generate_cell(current_cell_id, top_left_cell,\n top_right_cell, bottom_right_cell, bottom_left_cell)\n current_cell_id = current_cell_id + 1\n list_of_cells.append(cell)\n top_left_cell = top_right_cell\n bottom_left_cell = bottom_right_cell\n east_distance = east_distance + cell_long_size_meters\n left_edge = point_south_of_left_edge\n return list_of_cells\n\n\ndef grid_to_geojson(grid, lower_elevation, upper_elevation):\n features = []\n for cell in grid:\n rect_points = [[(cell.top_left_cell.lon, cell.top_left_cell.lat,\n lower_elevation), (cell.top_right_cell.lon, cell.top_right_cell\n .lat, lower_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, lower_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, lower_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), (\n cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation\n ), (cell.top_right_cell.lon, cell.top_right_cell.lat,\n upper_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, upper_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, upper_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, upper_elevation)]]\n properties = {'capacity': random.randint(0, 5)}\n polygon = Polygon(rect_points)\n feature = Feature(geometry=polygon, id=cell.cellId, properties=\n properties)\n features.append(feature)\n return FeatureCollection(features)\n\n\ndef main():\n TOP_LEFT = LatLon(-37.721874, 144.966859)\n EAST_EXTENT = 1000.0\n SOUT_EXTENT = 1000.0\n CELL_LONG_SIZE_METERS = 100.0\n CELL_LAT_SIZE_METERS = 100.0\n grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT,\n CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)\n geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)\n dump(geojson_feature_collection, sys.stdout, indent=4)\n json_file = open('grid-3d.geojson', 'w')\n dump(geojson_feature_collection, json_file, indent=4)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Cell(object):\n\n def __init__(self, cellId, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n self.cellId = cellId\n self.top_left_cell = top_left_cell\n self.top_right_cell = top_right_cell\n self.bottom_right_cell = bottom_right_cell\n self.bottom_left_cell = bottom_left_cell\n\n def __repr__(self):\n return str(self.__dict__)\n\n\ndef generate_cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n c = Cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell)\n return c\n\n\ndef generate_cell_grid(top_left, east_extent, south_extent,\n cell_lat_size_meters, cell_long_size_meters):\n south_distance = 0\n current_cell_id = 0\n list_of_cells = []\n left_edge = top_left\n while south_distance < south_extent:\n south_distance = south_distance + cell_lat_size_meters\n point_south_of_left_edge = left_edge.destination(cell_lat_size_meters,\n BEARING_SOUTH)\n top_left_cell = left_edge\n bottom_left_cell = point_south_of_left_edge\n east_distance = 0\n while east_distance < east_extent:\n top_right_cell = top_left_cell.destination(cell_long_size_meters,\n BEARING_EAST)\n bottom_right_cell = bottom_left_cell.destination(\n cell_long_size_meters, BEARING_EAST)\n cell = generate_cell(current_cell_id, top_left_cell,\n top_right_cell, bottom_right_cell, bottom_left_cell)\n current_cell_id = current_cell_id + 1\n list_of_cells.append(cell)\n top_left_cell = top_right_cell\n bottom_left_cell = bottom_right_cell\n east_distance = east_distance + cell_long_size_meters\n left_edge = point_south_of_left_edge\n return list_of_cells\n\n\ndef grid_to_geojson(grid, lower_elevation, upper_elevation):\n features = []\n for cell in grid:\n rect_points = [[(cell.top_left_cell.lon, cell.top_left_cell.lat,\n lower_elevation), (cell.top_right_cell.lon, cell.top_right_cell\n .lat, lower_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, lower_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, lower_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), (\n cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation\n ), (cell.top_right_cell.lon, cell.top_right_cell.lat,\n upper_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, upper_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, upper_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, upper_elevation)]]\n properties = {'capacity': random.randint(0, 5)}\n polygon = Polygon(rect_points)\n feature = Feature(geometry=polygon, id=cell.cellId, properties=\n properties)\n features.append(feature)\n return FeatureCollection(features)\n\n\ndef main():\n TOP_LEFT = LatLon(-37.721874, 144.966859)\n EAST_EXTENT = 1000.0\n SOUT_EXTENT = 1000.0\n CELL_LONG_SIZE_METERS = 100.0\n CELL_LAT_SIZE_METERS = 100.0\n grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT,\n CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)\n geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)\n dump(geojson_feature_collection, sys.stdout, indent=4)\n json_file = open('grid-3d.geojson', 'w')\n dump(geojson_feature_collection, json_file, indent=4)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nBEARING_SOUTH = 180.0\nBEARING_EAST = 90.0\n\n\nclass Cell(object):\n\n def __init__(self, cellId, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n self.cellId = cellId\n self.top_left_cell = top_left_cell\n self.top_right_cell = top_right_cell\n self.bottom_right_cell = bottom_right_cell\n self.bottom_left_cell = bottom_left_cell\n\n def __repr__(self):\n return str(self.__dict__)\n\n\ndef generate_cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell):\n c = Cell(current_cell_id, top_left_cell, top_right_cell,\n bottom_right_cell, bottom_left_cell)\n return c\n\n\ndef generate_cell_grid(top_left, east_extent, south_extent,\n cell_lat_size_meters, cell_long_size_meters):\n south_distance = 0\n current_cell_id = 0\n list_of_cells = []\n left_edge = top_left\n while south_distance < south_extent:\n south_distance = south_distance + cell_lat_size_meters\n point_south_of_left_edge = left_edge.destination(cell_lat_size_meters,\n BEARING_SOUTH)\n top_left_cell = left_edge\n bottom_left_cell = point_south_of_left_edge\n east_distance = 0\n while east_distance < east_extent:\n top_right_cell = top_left_cell.destination(cell_long_size_meters,\n BEARING_EAST)\n bottom_right_cell = bottom_left_cell.destination(\n cell_long_size_meters, BEARING_EAST)\n cell = generate_cell(current_cell_id, top_left_cell,\n top_right_cell, bottom_right_cell, bottom_left_cell)\n current_cell_id = current_cell_id + 1\n list_of_cells.append(cell)\n top_left_cell = top_right_cell\n bottom_left_cell = bottom_right_cell\n east_distance = east_distance + cell_long_size_meters\n left_edge = point_south_of_left_edge\n return list_of_cells\n\n\ndef grid_to_geojson(grid, lower_elevation, upper_elevation):\n features = []\n for cell in grid:\n rect_points = [[(cell.top_left_cell.lon, cell.top_left_cell.lat,\n lower_elevation), (cell.top_right_cell.lon, cell.top_right_cell\n .lat, lower_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, lower_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, lower_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), (\n cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation\n ), (cell.top_right_cell.lon, cell.top_right_cell.lat,\n upper_elevation), (cell.bottom_right_cell.lon, cell.\n bottom_right_cell.lat, upper_elevation), (cell.bottom_left_cell\n .lon, cell.bottom_left_cell.lat, upper_elevation), (cell.\n top_left_cell.lon, cell.top_left_cell.lat, upper_elevation)]]\n properties = {'capacity': random.randint(0, 5)}\n polygon = Polygon(rect_points)\n feature = Feature(geometry=polygon, id=cell.cellId, properties=\n properties)\n features.append(feature)\n return FeatureCollection(features)\n\n\ndef main():\n TOP_LEFT = LatLon(-37.721874, 144.966859)\n EAST_EXTENT = 1000.0\n SOUT_EXTENT = 1000.0\n CELL_LONG_SIZE_METERS = 100.0\n CELL_LAT_SIZE_METERS = 100.0\n grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT,\n CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)\n geojson_feature_collection = grid_to_geojson(grid, 10.0, 100.0)\n dump(geojson_feature_collection, sys.stdout, indent=4)\n json_file = open('grid-3d.geojson', 'w')\n dump(geojson_feature_collection, json_file, indent=4)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# GeoPy can be used to interface to map box https://pypi.org/project/geopy/\nfrom pygeodesy.ellipsoidalVincenty import LatLon\nfrom geojson import Polygon, Feature, FeatureCollection, dump\nimport sys\nimport random\n\nBEARING_SOUTH = 180.0\nBEARING_EAST = 90.0\n\n\nclass Cell(object):\n def __init__(self, cellId, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell):\n self.cellId = cellId\n self.top_left_cell = top_left_cell\n self.top_right_cell = top_right_cell\n self.bottom_right_cell = bottom_right_cell\n self.bottom_left_cell = bottom_left_cell\n\n def __repr__(self):\n return str(self.__dict__)\n\n\ndef generate_cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell):\n c = Cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell)\n # Expect other data to be inserted into the cell here\n return c\n\n\ndef generate_cell_grid(top_left, east_extent, south_extent, cell_lat_size_meters, cell_long_size_meters):\n south_distance = 0\n current_cell_id = 0\n\n list_of_cells = []\n\n left_edge = top_left\n\n while south_distance < south_extent:\n south_distance = south_distance + cell_lat_size_meters\n point_south_of_left_edge = left_edge.destination(cell_lat_size_meters, BEARING_SOUTH)\n\n top_left_cell = left_edge\n bottom_left_cell = point_south_of_left_edge\n east_distance = 0\n\n while east_distance < east_extent:\n top_right_cell = top_left_cell.destination(cell_long_size_meters, BEARING_EAST)\n bottom_right_cell = bottom_left_cell.destination(cell_long_size_meters, BEARING_EAST)\n\n cell = generate_cell(current_cell_id, top_left_cell, top_right_cell, bottom_right_cell, bottom_left_cell)\n current_cell_id = current_cell_id + 1\n\n list_of_cells.append(cell)\n\n # Increments\n top_left_cell = top_right_cell\n bottom_left_cell = bottom_right_cell\n east_distance = east_distance + cell_long_size_meters\n\n left_edge = point_south_of_left_edge\n\n return list_of_cells\n\n\ndef grid_to_geojson(grid, lower_elevation, upper_elevation):\n features = []\n\n for cell in grid:\n rect_points = [\n [\n (cell.top_left_cell.lon, cell.top_left_cell.lat, lower_elevation),\n (cell.top_right_cell.lon, cell.top_right_cell.lat, lower_elevation),\n (cell.bottom_right_cell.lon, cell.bottom_right_cell.lat, lower_elevation),\n (cell.bottom_left_cell.lon, cell.bottom_left_cell.lat, lower_elevation),\n (cell.top_left_cell.lon, cell.top_left_cell.lat, lower_elevation), #Because first and last points have to match\n\n (cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation),\n (cell.top_right_cell.lon, cell.top_right_cell.lat, upper_elevation),\n (cell.bottom_right_cell.lon, cell.bottom_right_cell.lat, upper_elevation),\n (cell.bottom_left_cell.lon, cell.bottom_left_cell.lat, upper_elevation),\n (cell.top_left_cell.lon, cell.top_left_cell.lat, upper_elevation) #Because first and last points have to match\n ]\n ]\n properties = {\n 'capacity': random.randint(0, 5)\n } # TODO this is just an example\n\n polygon = Polygon(rect_points)\n feature = Feature(geometry=polygon, id=cell.cellId, properties=properties)\n\n features.append(feature)\n\n return FeatureCollection(features)\n\n\ndef main():\n TOP_LEFT = LatLon(-37.721874, 144.966859)\n EAST_EXTENT = 1000.0\n SOUT_EXTENT = 1000.0\n\n CELL_LONG_SIZE_METERS = 100.0\n CELL_LAT_SIZE_METERS = 100.0\n\n grid = generate_cell_grid(TOP_LEFT, EAST_EXTENT, SOUT_EXTENT, CELL_LAT_SIZE_METERS, CELL_LONG_SIZE_METERS)\n\n geojson_feature_collection = grid_to_geojson(grid, 10.0, 
100.0)\n dump(geojson_feature_collection, sys.stdout, indent=4)\n\n json_file = open('grid-3d.geojson', 'w')\n dump(geojson_feature_collection, json_file, indent=4)\n\n\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
from django.core.validators import RegexValidator
from django.db import models
from .image import Image
class AffiliatedStoreManager(models.Manager):
def get_queryset(self):
return super().get_queryset() \
.select_related('icon') \
.select_related('icon__image_type')
def find_all(self):
return self.all()
def find_by_id(self, id):
return self.get(id=id)
class AffiliatedStore(models.Model):
class Meta:
db_table = 'affiliated_store'
objects = AffiliatedStoreManager()
title = models.CharField(max_length=255)
server_url = models.CharField(max_length=2083,
validators=[RegexValidator(regex='^(https|http)://.*$',
code='invalid url',
message='server_url must be a url')])
icon = models.ForeignKey(Image, related_name='+', null=True, blank=True)
is_enabled = models.BooleanField(default=True)
def __repr__(self):
return 'AffiliatedStore(id={0!s}, title="{1!s}")'.format(self.id, self.title)
def __str__(self):
return repr(self)
|
normal
|
{
"blob_id": "e2b439974b66e45a899605bc7234850783c3dfb0",
"index": 2231,
"step-1": "<mask token>\n\n\nclass AffiliatedStoreManager(models.Manager):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass AffiliatedStore(models.Model):\n\n\n class Meta:\n db_table = 'affiliated_store'\n objects = AffiliatedStoreManager()\n title = models.CharField(max_length=255)\n server_url = models.CharField(max_length=2083, validators=[\n RegexValidator(regex='^(https|http)://.*$', code='invalid url',\n message='server_url must be a url')])\n icon = models.ForeignKey(Image, related_name='+', null=True, blank=True)\n is_enabled = models.BooleanField(default=True)\n\n def __repr__(self):\n return 'AffiliatedStore(id={0!s}, title=\"{1!s}\")'.format(self.id,\n self.title)\n\n def __str__(self):\n return repr(self)\n",
"step-2": "<mask token>\n\n\nclass AffiliatedStoreManager(models.Manager):\n\n def get_queryset(self):\n return super().get_queryset().select_related('icon').select_related(\n 'icon__image_type')\n <mask token>\n <mask token>\n\n\nclass AffiliatedStore(models.Model):\n\n\n class Meta:\n db_table = 'affiliated_store'\n objects = AffiliatedStoreManager()\n title = models.CharField(max_length=255)\n server_url = models.CharField(max_length=2083, validators=[\n RegexValidator(regex='^(https|http)://.*$', code='invalid url',\n message='server_url must be a url')])\n icon = models.ForeignKey(Image, related_name='+', null=True, blank=True)\n is_enabled = models.BooleanField(default=True)\n\n def __repr__(self):\n return 'AffiliatedStore(id={0!s}, title=\"{1!s}\")'.format(self.id,\n self.title)\n\n def __str__(self):\n return repr(self)\n",
"step-3": "<mask token>\n\n\nclass AffiliatedStoreManager(models.Manager):\n\n def get_queryset(self):\n return super().get_queryset().select_related('icon').select_related(\n 'icon__image_type')\n <mask token>\n\n def find_by_id(self, id):\n return self.get(id=id)\n\n\nclass AffiliatedStore(models.Model):\n\n\n class Meta:\n db_table = 'affiliated_store'\n objects = AffiliatedStoreManager()\n title = models.CharField(max_length=255)\n server_url = models.CharField(max_length=2083, validators=[\n RegexValidator(regex='^(https|http)://.*$', code='invalid url',\n message='server_url must be a url')])\n icon = models.ForeignKey(Image, related_name='+', null=True, blank=True)\n is_enabled = models.BooleanField(default=True)\n\n def __repr__(self):\n return 'AffiliatedStore(id={0!s}, title=\"{1!s}\")'.format(self.id,\n self.title)\n\n def __str__(self):\n return repr(self)\n",
"step-4": "from django.core.validators import RegexValidator\nfrom django.db import models\nfrom .image import Image\n\n\nclass AffiliatedStoreManager(models.Manager):\n\n def get_queryset(self):\n return super().get_queryset().select_related('icon').select_related(\n 'icon__image_type')\n\n def find_all(self):\n return self.all()\n\n def find_by_id(self, id):\n return self.get(id=id)\n\n\nclass AffiliatedStore(models.Model):\n\n\n class Meta:\n db_table = 'affiliated_store'\n objects = AffiliatedStoreManager()\n title = models.CharField(max_length=255)\n server_url = models.CharField(max_length=2083, validators=[\n RegexValidator(regex='^(https|http)://.*$', code='invalid url',\n message='server_url must be a url')])\n icon = models.ForeignKey(Image, related_name='+', null=True, blank=True)\n is_enabled = models.BooleanField(default=True)\n\n def __repr__(self):\n return 'AffiliatedStore(id={0!s}, title=\"{1!s}\")'.format(self.id,\n self.title)\n\n def __str__(self):\n return repr(self)\n",
"step-5": "from django.core.validators import RegexValidator\nfrom django.db import models\n\nfrom .image import Image\n\n\nclass AffiliatedStoreManager(models.Manager):\n\n def get_queryset(self):\n return super().get_queryset() \\\n .select_related('icon') \\\n .select_related('icon__image_type')\n\n def find_all(self):\n return self.all()\n\n def find_by_id(self, id):\n return self.get(id=id)\n\n\nclass AffiliatedStore(models.Model):\n class Meta:\n db_table = 'affiliated_store'\n\n objects = AffiliatedStoreManager()\n\n title = models.CharField(max_length=255)\n\n server_url = models.CharField(max_length=2083,\n validators=[RegexValidator(regex='^(https|http)://.*$',\n code='invalid url',\n message='server_url must be a url')])\n\n icon = models.ForeignKey(Image, related_name='+', null=True, blank=True)\n\n is_enabled = models.BooleanField(default=True)\n\n def __repr__(self):\n return 'AffiliatedStore(id={0!s}, title=\"{1!s}\")'.format(self.id, self.title)\n\n def __str__(self):\n return repr(self)\n",
"step-ids": [
5,
6,
7,
9,
10
]
}
|
[
5,
6,
7,
9,
10
] |
# Percy's playground.
from __future__ import print_function
import sympy as sp
import numpy as np
import BorderBasis as BB
np.set_printoptions(precision=3)
from IPython.display import display, Markdown, Math
sp.init_printing()
R, x, y = sp.ring('x,y', sp.RR, order=sp.grevlex)
I = [ x**2 + y**2 - 1.0, x + y ]
R, x, y, z = sp.ring('x,y,z', sp.RR, order=sp.grevlex)
I = [ x**2 - 1, y**2 - 4, z**2 - 9]
# n = 4 takes a long time
n = 4
Rvs = sp.ring(' '.join('v'+str(i) for i in range(1, n + 1)), sp.RR, order=sp.grevlex)
R, vs = Rvs[0], Rvs[1:]
I = []
I.extend([v**2 - 1 for v in vs])
#I.extend([(v-1)**2 for v in vs])
#I.extend([v-1 for v in vs])
#I.extend([vs[i] - vs[i-1] for i in range(1, len(vs))]) # Makes it fast
print('Generating')
B = BB.BorderBasisFactory(1e-5).generate(R,I)
print('Done')
print("=== Generator Basis:")
for f in B.generator_basis:
display(f.as_expr())
print("=== Quotient Basis:")
for f in B.quotient_basis():
display(f.as_expr())
# v2 is always zero
print("=== Variety:")
for v in B.zeros():
print(zip(R.symbols, v))
|
normal
|
{
"blob_id": "88af8b4eeb40ecf19622ecde1a5dea9a078bb66c",
"index": 8817,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnp.set_printoptions(precision=3)\n<mask token>\nsp.init_printing()\n<mask token>\nI.extend([(v ** 2 - 1) for v in vs])\nprint('Generating')\n<mask token>\nprint('Done')\nprint('=== Generator Basis:')\nfor f in B.generator_basis:\n display(f.as_expr())\nprint('=== Quotient Basis:')\nfor f in B.quotient_basis():\n display(f.as_expr())\nprint('=== Variety:')\nfor v in B.zeros():\n print(zip(R.symbols, v))\n",
"step-3": "<mask token>\nnp.set_printoptions(precision=3)\n<mask token>\nsp.init_printing()\nR, x, y = sp.ring('x,y', sp.RR, order=sp.grevlex)\nI = [x ** 2 + y ** 2 - 1.0, x + y]\nR, x, y, z = sp.ring('x,y,z', sp.RR, order=sp.grevlex)\nI = [x ** 2 - 1, y ** 2 - 4, z ** 2 - 9]\nn = 4\nRvs = sp.ring(' '.join('v' + str(i) for i in range(1, n + 1)), sp.RR, order\n =sp.grevlex)\nR, vs = Rvs[0], Rvs[1:]\nI = []\nI.extend([(v ** 2 - 1) for v in vs])\nprint('Generating')\nB = BB.BorderBasisFactory(1e-05).generate(R, I)\nprint('Done')\nprint('=== Generator Basis:')\nfor f in B.generator_basis:\n display(f.as_expr())\nprint('=== Quotient Basis:')\nfor f in B.quotient_basis():\n display(f.as_expr())\nprint('=== Variety:')\nfor v in B.zeros():\n print(zip(R.symbols, v))\n",
"step-4": "from __future__ import print_function\nimport sympy as sp\nimport numpy as np\nimport BorderBasis as BB\nnp.set_printoptions(precision=3)\nfrom IPython.display import display, Markdown, Math\nsp.init_printing()\nR, x, y = sp.ring('x,y', sp.RR, order=sp.grevlex)\nI = [x ** 2 + y ** 2 - 1.0, x + y]\nR, x, y, z = sp.ring('x,y,z', sp.RR, order=sp.grevlex)\nI = [x ** 2 - 1, y ** 2 - 4, z ** 2 - 9]\nn = 4\nRvs = sp.ring(' '.join('v' + str(i) for i in range(1, n + 1)), sp.RR, order\n =sp.grevlex)\nR, vs = Rvs[0], Rvs[1:]\nI = []\nI.extend([(v ** 2 - 1) for v in vs])\nprint('Generating')\nB = BB.BorderBasisFactory(1e-05).generate(R, I)\nprint('Done')\nprint('=== Generator Basis:')\nfor f in B.generator_basis:\n display(f.as_expr())\nprint('=== Quotient Basis:')\nfor f in B.quotient_basis():\n display(f.as_expr())\nprint('=== Variety:')\nfor v in B.zeros():\n print(zip(R.symbols, v))\n",
"step-5": "# Percy's playground.\n\nfrom __future__ import print_function\nimport sympy as sp\nimport numpy as np\nimport BorderBasis as BB\nnp.set_printoptions(precision=3)\nfrom IPython.display import display, Markdown, Math\nsp.init_printing()\n\nR, x, y = sp.ring('x,y', sp.RR, order=sp.grevlex)\nI = [ x**2 + y**2 - 1.0, x + y ]\n\nR, x, y, z = sp.ring('x,y,z', sp.RR, order=sp.grevlex)\nI = [ x**2 - 1, y**2 - 4, z**2 - 9]\n\n# n = 4 takes a long time\nn = 4\nRvs = sp.ring(' '.join('v'+str(i) for i in range(1, n + 1)), sp.RR, order=sp.grevlex)\nR, vs = Rvs[0], Rvs[1:]\nI = []\nI.extend([v**2 - 1 for v in vs])\n#I.extend([(v-1)**2 for v in vs])\n#I.extend([v-1 for v in vs])\n#I.extend([vs[i] - vs[i-1] for i in range(1, len(vs))]) # Makes it fast\n\nprint('Generating')\nB = BB.BorderBasisFactory(1e-5).generate(R,I)\nprint('Done')\n\nprint(\"=== Generator Basis:\")\nfor f in B.generator_basis:\n display(f.as_expr())\n\nprint(\"=== Quotient Basis:\")\nfor f in B.quotient_basis():\n display(f.as_expr())\n\n# v2 is always zero\nprint(\"=== Variety:\")\nfor v in B.zeros():\n print(zip(R.symbols, v))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import string
fhand = open("romeo-full.txt")
counts = dict()
for line in fhand:
line.tranc
|
normal
|
{
"blob_id": "5493887e32dbe7ae27eca79d28da8488183b37a3",
"index": 8792,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in fhand:\n line.tranc\n",
"step-3": "<mask token>\nfhand = open('romeo-full.txt')\ncounts = dict()\nfor line in fhand:\n line.tranc\n",
"step-4": "import string\nfhand = open('romeo-full.txt')\ncounts = dict()\nfor line in fhand:\n line.tranc\n",
"step-5": "import string\nfhand = open(\"romeo-full.txt\")\ncounts = dict()\nfor line in fhand:\n \n line.tranc",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
people = 20
cats = 30
dogs = 15
if people < cats:
print("Too many cats")
elif people > cats:
print("Not many cats")
else:
print("we cannnot decide")
|
normal
|
{
"blob_id": "0465e33d65c2ce47ebffeec38db6908826bf4934",
"index": 299,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif people < cats:\n print('Too many cats')\nelif people > cats:\n print('Not many cats')\nelse:\n print('we cannnot decide')\n",
"step-3": "people = 20\ncats = 30\ndogs = 15\nif people < cats:\n print('Too many cats')\nelif people > cats:\n print('Not many cats')\nelse:\n print('we cannnot decide')\n",
"step-4": "people = 20\ncats = 30\ndogs = 15\n\nif people < cats:\n print(\"Too many cats\")\nelif people > cats:\n print(\"Not many cats\")\nelse:\n print(\"we cannnot decide\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IssuerWalletInMemory(IssuerWallet, WalletInMemory):
def __init__(self, claimDefId, repo: PublicRepo):
WalletInMemory.__init__(self, claimDefId, repo)
self._sks = {}
self._skRs = {}
self._accumSks = {}
self._m2s = {}
self._attributes = {}
async def submitClaimDef(self, claimDef: ClaimDefinition
) ->ClaimDefinition:
claimDef = await self._repo.submitClaimDef(claimDef)
self._cacheClaimDef(claimDef)
return claimDef
async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:
RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):
pk, pkR = await self._repo.submitPublicKeys(claimDefId, pk, pkR)
await self._cacheValueForId(self._pks, claimDefId, pk)
if pkR:
await self._cacheValueForId(self._pkRs, claimDefId, pkR)
return pk, pkR
async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:
RevocationSecretKey=None):
await self._cacheValueForId(self._sks, claimDefId, sk)
if skR:
await self._cacheValueForId(self._skRs, claimDefId, skR)
async def submitAccumPublic(self, claimDefId: ID, accumPK:
AccumulatorPublicKey, accum: Accumulator, tails: TailsType
) ->AccumulatorPublicKey:
accumPK = await self._repo.submitAccumulator(claimDefId, accumPK,
accum, tails)
await self._cacheValueForId(self._accums, claimDefId, accum)
await self._cacheValueForId(self._accumPks, claimDefId, accumPK)
await self._cacheValueForId(self._tails, claimDefId, tails)
return accumPK
async def submitAccumSecret(self, claimDefId: ID, accumSK:
AccumulatorSecretKey):
await self._cacheValueForId(self._accumSks, claimDefId, accumSK)
async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,
timestampMs: TimestampType):
await self._repo.submitAccumUpdate(claimDefId, accum, timestampMs)
await self._cacheValueForId(self._accums, claimDefId, accum)
async def submitContextAttr(self, claimDefId: ID, m2):
await self._cacheValueForId(self._m2s, claimDefId, m2)
async def getSecretKey(self, claimDefId: ID) ->SecretKey:
return await self._getValueForId(self._sks, claimDefId)
async def getSecretKeyRevocation(self, claimDefId: ID
) ->RevocationSecretKey:
return await self._getValueForId(self._skRs, claimDefId)
async def getSecretKeyAccumulator(self, claimDefId: ID
) ->AccumulatorSecretKey:
return await self._getValueForId(self._accumSks, claimDefId)
async def getContextAttr(self, claimDefId: ID):
return await self._getValueForId(self._m2s, claimDefId)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IssuerWallet(Wallet):
def __init__(self, claimDefId, repo: PublicRepo):
Wallet.__init__(self, claimDefId, repo)
@abstractmethod
async def submitClaimDef(self, claimDef: ClaimDefinition
) ->ClaimDefinition:
raise NotImplementedError
@abstractmethod
async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:
RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):
raise NotImplementedError
@abstractmethod
async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:
RevocationSecretKey=None):
raise NotImplementedError
@abstractmethod
async def submitAccumPublic(self, claimDefId: ID, accumPK:
AccumulatorPublicKey, accum: Accumulator, tails: TailsType):
raise NotImplementedError
@abstractmethod
async def submitAccumSecret(self, claimDefId: ID, accumSK:
AccumulatorSecretKey):
raise NotImplementedError
@abstractmethod
async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,
timestampMs: TimestampType):
raise NotImplementedError
@abstractmethod
async def submitContextAttr(self, claimDefId: ID, m2):
raise NotImplementedError
@abstractmethod
async def getSecretKey(self, claimDefId: ID) ->SecretKey:
raise NotImplementedError
@abstractmethod
async def getSecretKeyRevocation(self, claimDefId: ID
) ->RevocationSecretKey:
raise NotImplementedError
@abstractmethod
async def getSecretKeyAccumulator(self, claimDefId: ID
) ->AccumulatorSecretKey:
raise NotImplementedError
@abstractmethod
async def getContextAttr(self, claimDefId: ID):
raise NotImplementedError
class IssuerWalletInMemory(IssuerWallet, WalletInMemory):
def __init__(self, claimDefId, repo: PublicRepo):
WalletInMemory.__init__(self, claimDefId, repo)
self._sks = {}
self._skRs = {}
self._accumSks = {}
self._m2s = {}
self._attributes = {}
async def submitClaimDef(self, claimDef: ClaimDefinition
) ->ClaimDefinition:
claimDef = await self._repo.submitClaimDef(claimDef)
self._cacheClaimDef(claimDef)
return claimDef
async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:
RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):
pk, pkR = await self._repo.submitPublicKeys(claimDefId, pk, pkR)
await self._cacheValueForId(self._pks, claimDefId, pk)
if pkR:
await self._cacheValueForId(self._pkRs, claimDefId, pkR)
return pk, pkR
async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:
RevocationSecretKey=None):
await self._cacheValueForId(self._sks, claimDefId, sk)
if skR:
await self._cacheValueForId(self._skRs, claimDefId, skR)
async def submitAccumPublic(self, claimDefId: ID, accumPK:
AccumulatorPublicKey, accum: Accumulator, tails: TailsType
) ->AccumulatorPublicKey:
accumPK = await self._repo.submitAccumulator(claimDefId, accumPK,
accum, tails)
await self._cacheValueForId(self._accums, claimDefId, accum)
await self._cacheValueForId(self._accumPks, claimDefId, accumPK)
await self._cacheValueForId(self._tails, claimDefId, tails)
return accumPK
async def submitAccumSecret(self, claimDefId: ID, accumSK:
AccumulatorSecretKey):
await self._cacheValueForId(self._accumSks, claimDefId, accumSK)
async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,
timestampMs: TimestampType):
await self._repo.submitAccumUpdate(claimDefId, accum, timestampMs)
await self._cacheValueForId(self._accums, claimDefId, accum)
async def submitContextAttr(self, claimDefId: ID, m2):
await self._cacheValueForId(self._m2s, claimDefId, m2)
async def getSecretKey(self, claimDefId: ID) ->SecretKey:
return await self._getValueForId(self._sks, claimDefId)
async def getSecretKeyRevocation(self, claimDefId: ID
) ->RevocationSecretKey:
return await self._getValueForId(self._skRs, claimDefId)
async def getSecretKeyAccumulator(self, claimDefId: ID
) ->AccumulatorSecretKey:
return await self._getValueForId(self._accumSks, claimDefId)
async def getContextAttr(self, claimDefId: ID):
return await self._getValueForId(self._m2s, claimDefId)
<|reserved_special_token_1|>
from abc import abstractmethod
from anoncreds.protocol.repo.public_repo import PublicRepo
from anoncreds.protocol.types import ClaimDefinition, PublicKey, SecretKey, ID, RevocationPublicKey, AccumulatorPublicKey, Accumulator, TailsType, RevocationSecretKey, AccumulatorSecretKey, TimestampType
from anoncreds.protocol.wallet.wallet import Wallet, WalletInMemory
class IssuerWallet(Wallet):
def __init__(self, claimDefId, repo: PublicRepo):
Wallet.__init__(self, claimDefId, repo)
@abstractmethod
async def submitClaimDef(self, claimDef: ClaimDefinition
) ->ClaimDefinition:
raise NotImplementedError
@abstractmethod
async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:
RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):
raise NotImplementedError
@abstractmethod
async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:
RevocationSecretKey=None):
raise NotImplementedError
@abstractmethod
async def submitAccumPublic(self, claimDefId: ID, accumPK:
AccumulatorPublicKey, accum: Accumulator, tails: TailsType):
raise NotImplementedError
@abstractmethod
async def submitAccumSecret(self, claimDefId: ID, accumSK:
AccumulatorSecretKey):
raise NotImplementedError
@abstractmethod
async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,
timestampMs: TimestampType):
raise NotImplementedError
@abstractmethod
async def submitContextAttr(self, claimDefId: ID, m2):
raise NotImplementedError
@abstractmethod
async def getSecretKey(self, claimDefId: ID) ->SecretKey:
raise NotImplementedError
@abstractmethod
async def getSecretKeyRevocation(self, claimDefId: ID
) ->RevocationSecretKey:
raise NotImplementedError
@abstractmethod
async def getSecretKeyAccumulator(self, claimDefId: ID
) ->AccumulatorSecretKey:
raise NotImplementedError
@abstractmethod
async def getContextAttr(self, claimDefId: ID):
raise NotImplementedError
class IssuerWalletInMemory(IssuerWallet, WalletInMemory):
def __init__(self, claimDefId, repo: PublicRepo):
WalletInMemory.__init__(self, claimDefId, repo)
self._sks = {}
self._skRs = {}
self._accumSks = {}
self._m2s = {}
self._attributes = {}
async def submitClaimDef(self, claimDef: ClaimDefinition
) ->ClaimDefinition:
claimDef = await self._repo.submitClaimDef(claimDef)
self._cacheClaimDef(claimDef)
return claimDef
async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:
RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):
pk, pkR = await self._repo.submitPublicKeys(claimDefId, pk, pkR)
await self._cacheValueForId(self._pks, claimDefId, pk)
if pkR:
await self._cacheValueForId(self._pkRs, claimDefId, pkR)
return pk, pkR
async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:
RevocationSecretKey=None):
await self._cacheValueForId(self._sks, claimDefId, sk)
if skR:
await self._cacheValueForId(self._skRs, claimDefId, skR)
async def submitAccumPublic(self, claimDefId: ID, accumPK:
AccumulatorPublicKey, accum: Accumulator, tails: TailsType
) ->AccumulatorPublicKey:
accumPK = await self._repo.submitAccumulator(claimDefId, accumPK,
accum, tails)
await self._cacheValueForId(self._accums, claimDefId, accum)
await self._cacheValueForId(self._accumPks, claimDefId, accumPK)
await self._cacheValueForId(self._tails, claimDefId, tails)
return accumPK
async def submitAccumSecret(self, claimDefId: ID, accumSK:
AccumulatorSecretKey):
await self._cacheValueForId(self._accumSks, claimDefId, accumSK)
async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,
timestampMs: TimestampType):
await self._repo.submitAccumUpdate(claimDefId, accum, timestampMs)
await self._cacheValueForId(self._accums, claimDefId, accum)
async def submitContextAttr(self, claimDefId: ID, m2):
await self._cacheValueForId(self._m2s, claimDefId, m2)
async def getSecretKey(self, claimDefId: ID) ->SecretKey:
return await self._getValueForId(self._sks, claimDefId)
async def getSecretKeyRevocation(self, claimDefId: ID
) ->RevocationSecretKey:
return await self._getValueForId(self._skRs, claimDefId)
async def getSecretKeyAccumulator(self, claimDefId: ID
) ->AccumulatorSecretKey:
return await self._getValueForId(self._accumSks, claimDefId)
async def getContextAttr(self, claimDefId: ID):
return await self._getValueForId(self._m2s, claimDefId)
<|reserved_special_token_1|>
from abc import abstractmethod
from anoncreds.protocol.repo.public_repo import PublicRepo
from anoncreds.protocol.types import ClaimDefinition, PublicKey, SecretKey, ID, \
RevocationPublicKey, AccumulatorPublicKey, Accumulator, TailsType, \
RevocationSecretKey, AccumulatorSecretKey, \
TimestampType
from anoncreds.protocol.wallet.wallet import Wallet, WalletInMemory
class IssuerWallet(Wallet):
def __init__(self, claimDefId, repo: PublicRepo):
Wallet.__init__(self, claimDefId, repo)
# SUBMIT
@abstractmethod
async def submitClaimDef(self,
claimDef: ClaimDefinition) -> ClaimDefinition:
raise NotImplementedError
@abstractmethod
async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey,
pkR: RevocationPublicKey = None) -> (
PublicKey, RevocationPublicKey):
raise NotImplementedError
@abstractmethod
async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey,
skR: RevocationSecretKey = None):
raise NotImplementedError
@abstractmethod
async def submitAccumPublic(self, claimDefId: ID,
accumPK: AccumulatorPublicKey,
accum: Accumulator, tails: TailsType):
raise NotImplementedError
@abstractmethod
async def submitAccumSecret(self, claimDefId: ID,
accumSK: AccumulatorSecretKey):
raise NotImplementedError
@abstractmethod
async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,
timestampMs: TimestampType):
raise NotImplementedError
@abstractmethod
async def submitContextAttr(self, claimDefId: ID, m2):
raise NotImplementedError
# GET
@abstractmethod
async def getSecretKey(self, claimDefId: ID) -> SecretKey:
raise NotImplementedError
@abstractmethod
async def getSecretKeyRevocation(self,
claimDefId: ID) -> RevocationSecretKey:
raise NotImplementedError
@abstractmethod
async def getSecretKeyAccumulator(self,
claimDefId: ID) -> AccumulatorSecretKey:
raise NotImplementedError
@abstractmethod
async def getContextAttr(self, claimDefId: ID):
raise NotImplementedError
class IssuerWalletInMemory(IssuerWallet, WalletInMemory):
def __init__(self, claimDefId, repo: PublicRepo):
WalletInMemory.__init__(self, claimDefId, repo)
# other dicts with key=claimDefKey
self._sks = {}
self._skRs = {}
self._accumSks = {}
self._m2s = {}
self._attributes = {}
# SUBMIT
async def submitClaimDef(self,
claimDef: ClaimDefinition) -> ClaimDefinition:
claimDef = await self._repo.submitClaimDef(claimDef)
self._cacheClaimDef(claimDef)
return claimDef
async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey,
pkR: RevocationPublicKey = None) -> (
PublicKey, RevocationPublicKey):
pk, pkR = await self._repo.submitPublicKeys(claimDefId, pk, pkR)
await self._cacheValueForId(self._pks, claimDefId, pk)
if pkR:
await self._cacheValueForId(self._pkRs, claimDefId, pkR)
return pk, pkR
async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey,
skR: RevocationSecretKey = None):
await self._cacheValueForId(self._sks, claimDefId, sk)
if skR:
await self._cacheValueForId(self._skRs, claimDefId, skR)
async def submitAccumPublic(self, claimDefId: ID,
accumPK: AccumulatorPublicKey,
accum: Accumulator,
tails: TailsType) -> AccumulatorPublicKey:
accumPK = await self._repo.submitAccumulator(claimDefId, accumPK, accum,
tails)
await self._cacheValueForId(self._accums, claimDefId, accum)
await self._cacheValueForId(self._accumPks, claimDefId, accumPK)
await self._cacheValueForId(self._tails, claimDefId, tails)
return accumPK
async def submitAccumSecret(self, claimDefId: ID,
accumSK: AccumulatorSecretKey):
await self._cacheValueForId(self._accumSks, claimDefId, accumSK)
async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,
timestampMs: TimestampType):
await self._repo.submitAccumUpdate(claimDefId, accum, timestampMs)
await self._cacheValueForId(self._accums, claimDefId, accum)
async def submitContextAttr(self, claimDefId: ID, m2):
await self._cacheValueForId(self._m2s, claimDefId, m2)
# GET
async def getSecretKey(self, claimDefId: ID) -> SecretKey:
return await self._getValueForId(self._sks, claimDefId)
async def getSecretKeyRevocation(self,
claimDefId: ID) -> RevocationSecretKey:
return await self._getValueForId(self._skRs, claimDefId)
async def getSecretKeyAccumulator(self,
claimDefId: ID) -> AccumulatorSecretKey:
return await self._getValueForId(self._accumSks, claimDefId)
async def getContextAttr(self, claimDefId: ID):
return await self._getValueForId(self._m2s, claimDefId)
|
flexible
|
{
"blob_id": "890841c8892e89375bb022f0d469fefc27414a2b",
"index": 5823,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass IssuerWalletInMemory(IssuerWallet, WalletInMemory):\n\n def __init__(self, claimDefId, repo: PublicRepo):\n WalletInMemory.__init__(self, claimDefId, repo)\n self._sks = {}\n self._skRs = {}\n self._accumSks = {}\n self._m2s = {}\n self._attributes = {}\n\n async def submitClaimDef(self, claimDef: ClaimDefinition\n ) ->ClaimDefinition:\n claimDef = await self._repo.submitClaimDef(claimDef)\n self._cacheClaimDef(claimDef)\n return claimDef\n\n async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:\n RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):\n pk, pkR = await self._repo.submitPublicKeys(claimDefId, pk, pkR)\n await self._cacheValueForId(self._pks, claimDefId, pk)\n if pkR:\n await self._cacheValueForId(self._pkRs, claimDefId, pkR)\n return pk, pkR\n\n async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:\n RevocationSecretKey=None):\n await self._cacheValueForId(self._sks, claimDefId, sk)\n if skR:\n await self._cacheValueForId(self._skRs, claimDefId, skR)\n\n async def submitAccumPublic(self, claimDefId: ID, accumPK:\n AccumulatorPublicKey, accum: Accumulator, tails: TailsType\n ) ->AccumulatorPublicKey:\n accumPK = await self._repo.submitAccumulator(claimDefId, accumPK,\n accum, tails)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n await self._cacheValueForId(self._accumPks, claimDefId, accumPK)\n await self._cacheValueForId(self._tails, claimDefId, tails)\n return accumPK\n\n async def submitAccumSecret(self, claimDefId: ID, accumSK:\n AccumulatorSecretKey):\n await self._cacheValueForId(self._accumSks, claimDefId, accumSK)\n\n async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,\n timestampMs: TimestampType):\n await self._repo.submitAccumUpdate(claimDefId, accum, timestampMs)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n\n async def submitContextAttr(self, claimDefId: ID, m2):\n await self._cacheValueForId(self._m2s, claimDefId, m2)\n\n async def getSecretKey(self, claimDefId: ID) ->SecretKey:\n return await self._getValueForId(self._sks, claimDefId)\n\n async def getSecretKeyRevocation(self, claimDefId: ID\n ) ->RevocationSecretKey:\n return await self._getValueForId(self._skRs, claimDefId)\n\n async def getSecretKeyAccumulator(self, claimDefId: ID\n ) ->AccumulatorSecretKey:\n return await self._getValueForId(self._accumSks, claimDefId)\n\n async def getContextAttr(self, claimDefId: ID):\n return await self._getValueForId(self._m2s, claimDefId)\n",
"step-3": "<mask token>\n\n\nclass IssuerWallet(Wallet):\n\n def __init__(self, claimDefId, repo: PublicRepo):\n Wallet.__init__(self, claimDefId, repo)\n\n @abstractmethod\n async def submitClaimDef(self, claimDef: ClaimDefinition\n ) ->ClaimDefinition:\n raise NotImplementedError\n\n @abstractmethod\n async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:\n RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):\n raise NotImplementedError\n\n @abstractmethod\n async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:\n RevocationSecretKey=None):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumPublic(self, claimDefId: ID, accumPK:\n AccumulatorPublicKey, accum: Accumulator, tails: TailsType):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumSecret(self, claimDefId: ID, accumSK:\n AccumulatorSecretKey):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,\n timestampMs: TimestampType):\n raise NotImplementedError\n\n @abstractmethod\n async def submitContextAttr(self, claimDefId: ID, m2):\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKey(self, claimDefId: ID) ->SecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKeyRevocation(self, claimDefId: ID\n ) ->RevocationSecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKeyAccumulator(self, claimDefId: ID\n ) ->AccumulatorSecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getContextAttr(self, claimDefId: ID):\n raise NotImplementedError\n\n\nclass IssuerWalletInMemory(IssuerWallet, WalletInMemory):\n\n def __init__(self, claimDefId, repo: PublicRepo):\n WalletInMemory.__init__(self, claimDefId, repo)\n self._sks = {}\n self._skRs = {}\n self._accumSks = {}\n self._m2s = {}\n self._attributes = {}\n\n async def submitClaimDef(self, claimDef: ClaimDefinition\n ) ->ClaimDefinition:\n claimDef = await self._repo.submitClaimDef(claimDef)\n self._cacheClaimDef(claimDef)\n return claimDef\n\n async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:\n RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):\n pk, pkR = await self._repo.submitPublicKeys(claimDefId, pk, pkR)\n await self._cacheValueForId(self._pks, claimDefId, pk)\n if pkR:\n await self._cacheValueForId(self._pkRs, claimDefId, pkR)\n return pk, pkR\n\n async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:\n RevocationSecretKey=None):\n await self._cacheValueForId(self._sks, claimDefId, sk)\n if skR:\n await self._cacheValueForId(self._skRs, claimDefId, skR)\n\n async def submitAccumPublic(self, claimDefId: ID, accumPK:\n AccumulatorPublicKey, accum: Accumulator, tails: TailsType\n ) ->AccumulatorPublicKey:\n accumPK = await self._repo.submitAccumulator(claimDefId, accumPK,\n accum, tails)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n await self._cacheValueForId(self._accumPks, claimDefId, accumPK)\n await self._cacheValueForId(self._tails, claimDefId, tails)\n return accumPK\n\n async def submitAccumSecret(self, claimDefId: ID, accumSK:\n AccumulatorSecretKey):\n await self._cacheValueForId(self._accumSks, claimDefId, accumSK)\n\n async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,\n timestampMs: TimestampType):\n await self._repo.submitAccumUpdate(claimDefId, accum, timestampMs)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n\n async def submitContextAttr(self, 
claimDefId: ID, m2):\n await self._cacheValueForId(self._m2s, claimDefId, m2)\n\n async def getSecretKey(self, claimDefId: ID) ->SecretKey:\n return await self._getValueForId(self._sks, claimDefId)\n\n async def getSecretKeyRevocation(self, claimDefId: ID\n ) ->RevocationSecretKey:\n return await self._getValueForId(self._skRs, claimDefId)\n\n async def getSecretKeyAccumulator(self, claimDefId: ID\n ) ->AccumulatorSecretKey:\n return await self._getValueForId(self._accumSks, claimDefId)\n\n async def getContextAttr(self, claimDefId: ID):\n return await self._getValueForId(self._m2s, claimDefId)\n",
"step-4": "from abc import abstractmethod\nfrom anoncreds.protocol.repo.public_repo import PublicRepo\nfrom anoncreds.protocol.types import ClaimDefinition, PublicKey, SecretKey, ID, RevocationPublicKey, AccumulatorPublicKey, Accumulator, TailsType, RevocationSecretKey, AccumulatorSecretKey, TimestampType\nfrom anoncreds.protocol.wallet.wallet import Wallet, WalletInMemory\n\n\nclass IssuerWallet(Wallet):\n\n def __init__(self, claimDefId, repo: PublicRepo):\n Wallet.__init__(self, claimDefId, repo)\n\n @abstractmethod\n async def submitClaimDef(self, claimDef: ClaimDefinition\n ) ->ClaimDefinition:\n raise NotImplementedError\n\n @abstractmethod\n async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:\n RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):\n raise NotImplementedError\n\n @abstractmethod\n async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:\n RevocationSecretKey=None):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumPublic(self, claimDefId: ID, accumPK:\n AccumulatorPublicKey, accum: Accumulator, tails: TailsType):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumSecret(self, claimDefId: ID, accumSK:\n AccumulatorSecretKey):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,\n timestampMs: TimestampType):\n raise NotImplementedError\n\n @abstractmethod\n async def submitContextAttr(self, claimDefId: ID, m2):\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKey(self, claimDefId: ID) ->SecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKeyRevocation(self, claimDefId: ID\n ) ->RevocationSecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKeyAccumulator(self, claimDefId: ID\n ) ->AccumulatorSecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getContextAttr(self, claimDefId: ID):\n raise NotImplementedError\n\n\nclass IssuerWalletInMemory(IssuerWallet, WalletInMemory):\n\n def __init__(self, claimDefId, repo: PublicRepo):\n WalletInMemory.__init__(self, claimDefId, repo)\n self._sks = {}\n self._skRs = {}\n self._accumSks = {}\n self._m2s = {}\n self._attributes = {}\n\n async def submitClaimDef(self, claimDef: ClaimDefinition\n ) ->ClaimDefinition:\n claimDef = await self._repo.submitClaimDef(claimDef)\n self._cacheClaimDef(claimDef)\n return claimDef\n\n async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:\n RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):\n pk, pkR = await self._repo.submitPublicKeys(claimDefId, pk, pkR)\n await self._cacheValueForId(self._pks, claimDefId, pk)\n if pkR:\n await self._cacheValueForId(self._pkRs, claimDefId, pkR)\n return pk, pkR\n\n async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:\n RevocationSecretKey=None):\n await self._cacheValueForId(self._sks, claimDefId, sk)\n if skR:\n await self._cacheValueForId(self._skRs, claimDefId, skR)\n\n async def submitAccumPublic(self, claimDefId: ID, accumPK:\n AccumulatorPublicKey, accum: Accumulator, tails: TailsType\n ) ->AccumulatorPublicKey:\n accumPK = await self._repo.submitAccumulator(claimDefId, accumPK,\n accum, tails)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n await self._cacheValueForId(self._accumPks, claimDefId, accumPK)\n await self._cacheValueForId(self._tails, claimDefId, tails)\n return accumPK\n\n async def submitAccumSecret(self, claimDefId: ID, accumSK:\n 
AccumulatorSecretKey):\n await self._cacheValueForId(self._accumSks, claimDefId, accumSK)\n\n async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,\n timestampMs: TimestampType):\n await self._repo.submitAccumUpdate(claimDefId, accum, timestampMs)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n\n async def submitContextAttr(self, claimDefId: ID, m2):\n await self._cacheValueForId(self._m2s, claimDefId, m2)\n\n async def getSecretKey(self, claimDefId: ID) ->SecretKey:\n return await self._getValueForId(self._sks, claimDefId)\n\n async def getSecretKeyRevocation(self, claimDefId: ID\n ) ->RevocationSecretKey:\n return await self._getValueForId(self._skRs, claimDefId)\n\n async def getSecretKeyAccumulator(self, claimDefId: ID\n ) ->AccumulatorSecretKey:\n return await self._getValueForId(self._accumSks, claimDefId)\n\n async def getContextAttr(self, claimDefId: ID):\n return await self._getValueForId(self._m2s, claimDefId)\n",
"step-5": "from abc import abstractmethod\n\nfrom anoncreds.protocol.repo.public_repo import PublicRepo\nfrom anoncreds.protocol.types import ClaimDefinition, PublicKey, SecretKey, ID, \\\n RevocationPublicKey, AccumulatorPublicKey, Accumulator, TailsType, \\\n RevocationSecretKey, AccumulatorSecretKey, \\\n TimestampType\nfrom anoncreds.protocol.wallet.wallet import Wallet, WalletInMemory\n\n\nclass IssuerWallet(Wallet):\n def __init__(self, claimDefId, repo: PublicRepo):\n Wallet.__init__(self, claimDefId, repo)\n\n # SUBMIT\n\n @abstractmethod\n async def submitClaimDef(self,\n claimDef: ClaimDefinition) -> ClaimDefinition:\n raise NotImplementedError\n\n @abstractmethod\n async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey,\n pkR: RevocationPublicKey = None) -> (\n PublicKey, RevocationPublicKey):\n raise NotImplementedError\n\n @abstractmethod\n async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey,\n skR: RevocationSecretKey = None):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumPublic(self, claimDefId: ID,\n accumPK: AccumulatorPublicKey,\n accum: Accumulator, tails: TailsType):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumSecret(self, claimDefId: ID,\n accumSK: AccumulatorSecretKey):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,\n timestampMs: TimestampType):\n raise NotImplementedError\n\n @abstractmethod\n async def submitContextAttr(self, claimDefId: ID, m2):\n raise NotImplementedError\n\n # GET\n\n @abstractmethod\n async def getSecretKey(self, claimDefId: ID) -> SecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKeyRevocation(self,\n claimDefId: ID) -> RevocationSecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKeyAccumulator(self,\n claimDefId: ID) -> AccumulatorSecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getContextAttr(self, claimDefId: ID):\n raise NotImplementedError\n\n\nclass IssuerWalletInMemory(IssuerWallet, WalletInMemory):\n def __init__(self, claimDefId, repo: PublicRepo):\n WalletInMemory.__init__(self, claimDefId, repo)\n\n # other dicts with key=claimDefKey\n self._sks = {}\n self._skRs = {}\n self._accumSks = {}\n self._m2s = {}\n self._attributes = {}\n\n # SUBMIT\n\n async def submitClaimDef(self,\n claimDef: ClaimDefinition) -> ClaimDefinition:\n claimDef = await self._repo.submitClaimDef(claimDef)\n self._cacheClaimDef(claimDef)\n return claimDef\n\n async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey,\n pkR: RevocationPublicKey = None) -> (\n PublicKey, RevocationPublicKey):\n pk, pkR = await self._repo.submitPublicKeys(claimDefId, pk, pkR)\n await self._cacheValueForId(self._pks, claimDefId, pk)\n if pkR:\n await self._cacheValueForId(self._pkRs, claimDefId, pkR)\n return pk, pkR\n\n async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey,\n skR: RevocationSecretKey = None):\n await self._cacheValueForId(self._sks, claimDefId, sk)\n if skR:\n await self._cacheValueForId(self._skRs, claimDefId, skR)\n\n async def submitAccumPublic(self, claimDefId: ID,\n accumPK: AccumulatorPublicKey,\n accum: Accumulator,\n tails: TailsType) -> AccumulatorPublicKey:\n accumPK = await self._repo.submitAccumulator(claimDefId, accumPK, accum,\n tails)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n await self._cacheValueForId(self._accumPks, claimDefId, accumPK)\n await self._cacheValueForId(self._tails, 
claimDefId, tails)\n return accumPK\n\n async def submitAccumSecret(self, claimDefId: ID,\n accumSK: AccumulatorSecretKey):\n await self._cacheValueForId(self._accumSks, claimDefId, accumSK)\n\n async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,\n timestampMs: TimestampType):\n await self._repo.submitAccumUpdate(claimDefId, accum, timestampMs)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n\n async def submitContextAttr(self, claimDefId: ID, m2):\n await self._cacheValueForId(self._m2s, claimDefId, m2)\n\n # GET\n\n async def getSecretKey(self, claimDefId: ID) -> SecretKey:\n return await self._getValueForId(self._sks, claimDefId)\n\n async def getSecretKeyRevocation(self,\n claimDefId: ID) -> RevocationSecretKey:\n return await self._getValueForId(self._skRs, claimDefId)\n\n async def getSecretKeyAccumulator(self,\n claimDefId: ID) -> AccumulatorSecretKey:\n return await self._getValueForId(self._accumSks, claimDefId)\n\n async def getContextAttr(self, claimDefId: ID):\n return await self._getValueForId(self._m2s, claimDefId)\n",
"step-ids": [
0,
2,
4,
5,
6
]
}
|
[
0,
2,
4,
5,
6
] |
# ----------------------------------------------------------------------------
# Written by Khanh Nguyen Le
# May 4th 2019
# Discord: https://discord.io/skyrst
# ----------------------------------------------------------------------------
import operator
def validInput(x):
if x=="a": return True
elif x=="b": return True
elif x=="c": return True
elif x=="d": return True
else: return False
def takeInput():
x=input()
while not validInput(x):
print("Invalid input. Try another one:")
x=input()
return x
def main():
stats = {'Council':0, 'United':0, 'Faceless': 0, 'Warband':0}
print("Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz.")
print("Answer the the following questions by typing a, b, c or d.")
print("1/8")
print("I'd die without my...\na. Freedom\nb. Knowledge\nc. Talent\nd. Hope")
x = takeInput()
if x=="a":
stats['Faceless'] += 1
elif x=="b":
stats['Council']+= 1
elif x=="c":
stats['Warband']+= 1
else:
stats['United']+= 1
print("2/8")
print("Pick an animal:\na. Owl\nb. Leopard\nc. Elepant\nd. Octopus")
x = takeInput()
if x=="a":
stats['Warband'] += 1
elif x=="b":
stats['Faceless']+= 1
elif x=="c":
stats['United']+= 1
else:
stats['Council']+= 1
print("3/8")
print("Wars are won...\na. In the heat of battle\nb. In the planning room\nc. With unbreaking resolve\nd. By the unpredictable")
x = takeInput()
if x=="a":
stats['Warband'] += 1
elif x=="b":
stats['Council']+= 1
elif x=="c":
stats['United']+= 1
else:
stats['Faceless']+= 1
print("4/8")
print("The perfect team would never...\na. Give up\nb. Lose focus\nc. Tell me what to do\nd. Feed my opponent")
x = takeInput()
if x=="a":
stats['United'] += 1
elif x=="b":
stats['Council']+= 1
elif x=="c":
stats['Warband']+= 1
else:
stats['Faceless']+= 1
print("5/8")
print("The enemy team is winning on all fronts. What do you do?\na. Outmaneuver them to steal some objectives\nb. Rally my team for a final stand\nc. Go pentakill them, like I always do\nd. This is right where I want them--I'll explain later")
x = takeInput()
if x=="a":
stats['Faceless'] += 1
elif x=="b":
stats['United']+= 1
elif x=="c":
stats['Warband']+= 1
else:
stats['Council']+= 1
print("6/8")
print("What's your favorite time of the day\na. Dawn\nb. Day\nc. Dusk\nd. Night")
x = takeInput()
if x=="a":
stats['United'] += 1
elif x=="b":
stats['Council']+= 1
elif x=="c":
stats['Faceless']+= 1
else:
stats['Warband']+= 1
print("7/8")
print("Which of these sounds like you\na. \"Can we please group\"\nb. \"Trust me. I'm not trolling\"\nc. \"ez\"\nd. \"WINNABLE\"")
x = takeInput()
if x=="a":
stats['Council'] += 1
elif x=="b":
stats['Faceless']+= 1
elif x=="c":
stats['Warband']+= 1
else:
stats['United']+= 1
print("8/8")
print("I want to be seen as a(n)...\na. Selfless leader\nb. Brilliant tactician\nc. Crafty wildcard\nd. Elite fighter")
x = takeInput()
if x=="a":
stats['United'] += 1
elif x=="b":
stats['Council']+= 1
elif x=="c":
stats['Faceless']+= 1
else:
stats['Warband']+= 1
print("\n")
result = max(stats.items(), key=operator.itemgetter(1))[0]
print("Congratulations! You are a " +result)
main()
|
normal
|
{
"blob_id": "5209638ec97a666783c102bec7a2b00991c41a08",
"index": 5438,
"step-1": "<mask token>\n\n\ndef takeInput():\n x = input()\n while not validInput(x):\n print('Invalid input. Try another one:')\n x = input()\n return x\n\n\ndef main():\n stats = {'Council': 0, 'United': 0, 'Faceless': 0, 'Warband': 0}\n print(\n \"Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz.\"\n )\n print('Answer the the following questions by typing a, b, c or d.')\n print('1/8')\n print(\"I'd die without my...\\na. Freedom\\nb. Knowledge\\nc. Talent\\nd. Hope\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('2/8')\n print('Pick an animal:\\na. Owl\\nb. Leopard\\nc. Elepant\\nd. Octopus')\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Council'] += 1\n print('3/8')\n print(\n \"\"\"Wars are won...\na. In the heat of battle\nb. In the planning room\nc. With unbreaking resolve\nd. By the unpredictable\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Faceless'] += 1\n print('4/8')\n print(\n \"\"\"The perfect team would never...\na. Give up\nb. Lose focus\nc. Tell me what to do\nd. Feed my opponent\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Faceless'] += 1\n print('5/8')\n print(\n \"\"\"The enemy team is winning on all fronts. What do you do?\na. Outmaneuver them to steal some objectives\nb. Rally my team for a final stand\nc. Go pentakill them, like I always do\nd. This is right where I want them--I'll explain later\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['United'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Council'] += 1\n print('6/8')\n print(\n \"What's your favorite time of the day\\na. Dawn\\nb. Day\\nc. Dusk\\nd. Night\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('7/8')\n print(\n \"\"\"Which of these sounds like you\na. \"Can we please group\"\nb. \"Trust me. I'm not trolling\"\nc. \"ez\"\nd. \"WINNABLE\\\"\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Council'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('8/8')\n print(\n \"\"\"I want to be seen as a(n)...\na. Selfless leader\nb. Brilliant tactician\nc. Crafty wildcard\nd. Elite fighter\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('\\n')\n result = max(stats.items(), key=operator.itemgetter(1))[0]\n print('Congratulations! You are a ' + result)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef validInput(x):\n if x == 'a':\n return True\n elif x == 'b':\n return True\n elif x == 'c':\n return True\n elif x == 'd':\n return True\n else:\n return False\n\n\ndef takeInput():\n x = input()\n while not validInput(x):\n print('Invalid input. Try another one:')\n x = input()\n return x\n\n\ndef main():\n stats = {'Council': 0, 'United': 0, 'Faceless': 0, 'Warband': 0}\n print(\n \"Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz.\"\n )\n print('Answer the the following questions by typing a, b, c or d.')\n print('1/8')\n print(\"I'd die without my...\\na. Freedom\\nb. Knowledge\\nc. Talent\\nd. Hope\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('2/8')\n print('Pick an animal:\\na. Owl\\nb. Leopard\\nc. Elepant\\nd. Octopus')\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Council'] += 1\n print('3/8')\n print(\n \"\"\"Wars are won...\na. In the heat of battle\nb. In the planning room\nc. With unbreaking resolve\nd. By the unpredictable\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Faceless'] += 1\n print('4/8')\n print(\n \"\"\"The perfect team would never...\na. Give up\nb. Lose focus\nc. Tell me what to do\nd. Feed my opponent\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Faceless'] += 1\n print('5/8')\n print(\n \"\"\"The enemy team is winning on all fronts. What do you do?\na. Outmaneuver them to steal some objectives\nb. Rally my team for a final stand\nc. Go pentakill them, like I always do\nd. This is right where I want them--I'll explain later\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['United'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Council'] += 1\n print('6/8')\n print(\n \"What's your favorite time of the day\\na. Dawn\\nb. Day\\nc. Dusk\\nd. Night\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('7/8')\n print(\n \"\"\"Which of these sounds like you\na. \"Can we please group\"\nb. \"Trust me. I'm not trolling\"\nc. \"ez\"\nd. \"WINNABLE\\\"\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Council'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('8/8')\n print(\n \"\"\"I want to be seen as a(n)...\na. Selfless leader\nb. Brilliant tactician\nc. Crafty wildcard\nd. Elite fighter\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('\\n')\n result = max(stats.items(), key=operator.itemgetter(1))[0]\n print('Congratulations! You are a ' + result)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef validInput(x):\n if x == 'a':\n return True\n elif x == 'b':\n return True\n elif x == 'c':\n return True\n elif x == 'd':\n return True\n else:\n return False\n\n\ndef takeInput():\n x = input()\n while not validInput(x):\n print('Invalid input. Try another one:')\n x = input()\n return x\n\n\ndef main():\n stats = {'Council': 0, 'United': 0, 'Faceless': 0, 'Warband': 0}\n print(\n \"Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz.\"\n )\n print('Answer the the following questions by typing a, b, c or d.')\n print('1/8')\n print(\"I'd die without my...\\na. Freedom\\nb. Knowledge\\nc. Talent\\nd. Hope\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('2/8')\n print('Pick an animal:\\na. Owl\\nb. Leopard\\nc. Elepant\\nd. Octopus')\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Council'] += 1\n print('3/8')\n print(\n \"\"\"Wars are won...\na. In the heat of battle\nb. In the planning room\nc. With unbreaking resolve\nd. By the unpredictable\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Faceless'] += 1\n print('4/8')\n print(\n \"\"\"The perfect team would never...\na. Give up\nb. Lose focus\nc. Tell me what to do\nd. Feed my opponent\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Faceless'] += 1\n print('5/8')\n print(\n \"\"\"The enemy team is winning on all fronts. What do you do?\na. Outmaneuver them to steal some objectives\nb. Rally my team for a final stand\nc. Go pentakill them, like I always do\nd. This is right where I want them--I'll explain later\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['United'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Council'] += 1\n print('6/8')\n print(\n \"What's your favorite time of the day\\na. Dawn\\nb. Day\\nc. Dusk\\nd. Night\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('7/8')\n print(\n \"\"\"Which of these sounds like you\na. \"Can we please group\"\nb. \"Trust me. I'm not trolling\"\nc. \"ez\"\nd. \"WINNABLE\\\"\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Council'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('8/8')\n print(\n \"\"\"I want to be seen as a(n)...\na. Selfless leader\nb. Brilliant tactician\nc. Crafty wildcard\nd. Elite fighter\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('\\n')\n result = max(stats.items(), key=operator.itemgetter(1))[0]\n print('Congratulations! You are a ' + result)\n\n\nmain()\n",
"step-4": "import operator\n\n\ndef validInput(x):\n if x == 'a':\n return True\n elif x == 'b':\n return True\n elif x == 'c':\n return True\n elif x == 'd':\n return True\n else:\n return False\n\n\ndef takeInput():\n x = input()\n while not validInput(x):\n print('Invalid input. Try another one:')\n x = input()\n return x\n\n\ndef main():\n stats = {'Council': 0, 'United': 0, 'Faceless': 0, 'Warband': 0}\n print(\n \"Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz.\"\n )\n print('Answer the the following questions by typing a, b, c or d.')\n print('1/8')\n print(\"I'd die without my...\\na. Freedom\\nb. Knowledge\\nc. Talent\\nd. Hope\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('2/8')\n print('Pick an animal:\\na. Owl\\nb. Leopard\\nc. Elepant\\nd. Octopus')\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Council'] += 1\n print('3/8')\n print(\n \"\"\"Wars are won...\na. In the heat of battle\nb. In the planning room\nc. With unbreaking resolve\nd. By the unpredictable\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Warband'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['United'] += 1\n else:\n stats['Faceless'] += 1\n print('4/8')\n print(\n \"\"\"The perfect team would never...\na. Give up\nb. Lose focus\nc. Tell me what to do\nd. Feed my opponent\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Faceless'] += 1\n print('5/8')\n print(\n \"\"\"The enemy team is winning on all fronts. What do you do?\na. Outmaneuver them to steal some objectives\nb. Rally my team for a final stand\nc. Go pentakill them, like I always do\nd. This is right where I want them--I'll explain later\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Faceless'] += 1\n elif x == 'b':\n stats['United'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['Council'] += 1\n print('6/8')\n print(\n \"What's your favorite time of the day\\na. Dawn\\nb. Day\\nc. Dusk\\nd. Night\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('7/8')\n print(\n \"\"\"Which of these sounds like you\na. \"Can we please group\"\nb. \"Trust me. I'm not trolling\"\nc. \"ez\"\nd. \"WINNABLE\\\"\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['Council'] += 1\n elif x == 'b':\n stats['Faceless'] += 1\n elif x == 'c':\n stats['Warband'] += 1\n else:\n stats['United'] += 1\n print('8/8')\n print(\n \"\"\"I want to be seen as a(n)...\na. Selfless leader\nb. Brilliant tactician\nc. Crafty wildcard\nd. Elite fighter\"\"\"\n )\n x = takeInput()\n if x == 'a':\n stats['United'] += 1\n elif x == 'b':\n stats['Council'] += 1\n elif x == 'c':\n stats['Faceless'] += 1\n else:\n stats['Warband'] += 1\n print('\\n')\n result = max(stats.items(), key=operator.itemgetter(1))[0]\n print('Congratulations! You are a ' + result)\n\n\nmain()\n",
"step-5": "# ----------------------------------------------------------------------------\r\n# Written by Khanh Nguyen Le\r\n# May 4th 2019\r\n# Discord: https://discord.io/skyrst\r\n# ----------------------------------------------------------------------------\r\nimport operator\r\ndef validInput(x):\r\n if x==\"a\": return True\r\n elif x==\"b\": return True\r\n elif x==\"c\": return True\r\n elif x==\"d\": return True\r\n else: return False\r\n \r\ndef takeInput():\r\n x=input()\r\n while not validInput(x):\r\n print(\"Invalid input. Try another one:\")\r\n x=input()\r\n return x\r\n \r\ndef main():\r\n stats = {'Council':0, 'United':0, 'Faceless': 0, 'Warband':0}\r\n print(\"Welcome to Skyrst's open-source recreation of League of Legends house entrance quiz.\")\r\n print(\"Answer the the following questions by typing a, b, c or d.\")\r\n print(\"1/8\")\r\n print(\"I'd die without my...\\na. Freedom\\nb. Knowledge\\nc. Talent\\nd. Hope\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['Faceless'] += 1\r\n elif x==\"b\":\r\n stats['Council']+= 1\r\n elif x==\"c\":\r\n stats['Warband']+= 1\r\n else:\r\n stats['United']+= 1\r\n print(\"2/8\")\r\n print(\"Pick an animal:\\na. Owl\\nb. Leopard\\nc. Elepant\\nd. Octopus\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['Warband'] += 1\r\n elif x==\"b\":\r\n stats['Faceless']+= 1\r\n elif x==\"c\":\r\n stats['United']+= 1\r\n else:\r\n stats['Council']+= 1\r\n print(\"3/8\")\r\n print(\"Wars are won...\\na. In the heat of battle\\nb. In the planning room\\nc. With unbreaking resolve\\nd. By the unpredictable\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['Warband'] += 1\r\n elif x==\"b\":\r\n stats['Council']+= 1\r\n elif x==\"c\":\r\n stats['United']+= 1\r\n else:\r\n stats['Faceless']+= 1\r\n print(\"4/8\")\r\n print(\"The perfect team would never...\\na. Give up\\nb. Lose focus\\nc. Tell me what to do\\nd. Feed my opponent\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['United'] += 1\r\n elif x==\"b\":\r\n stats['Council']+= 1\r\n elif x==\"c\":\r\n stats['Warband']+= 1\r\n else:\r\n stats['Faceless']+= 1\r\n print(\"5/8\")\r\n print(\"The enemy team is winning on all fronts. What do you do?\\na. Outmaneuver them to steal some objectives\\nb. Rally my team for a final stand\\nc. Go pentakill them, like I always do\\nd. This is right where I want them--I'll explain later\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['Faceless'] += 1\r\n elif x==\"b\":\r\n stats['United']+= 1\r\n elif x==\"c\":\r\n stats['Warband']+= 1\r\n else:\r\n stats['Council']+= 1\r\n print(\"6/8\")\r\n print(\"What's your favorite time of the day\\na. Dawn\\nb. Day\\nc. Dusk\\nd. Night\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['United'] += 1\r\n elif x==\"b\":\r\n stats['Council']+= 1\r\n elif x==\"c\":\r\n stats['Faceless']+= 1\r\n else:\r\n stats['Warband']+= 1\r\n print(\"7/8\")\r\n print(\"Which of these sounds like you\\na. \\\"Can we please group\\\"\\nb. \\\"Trust me. I'm not trolling\\\"\\nc. \\\"ez\\\"\\nd. \\\"WINNABLE\\\"\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['Council'] += 1\r\n elif x==\"b\":\r\n stats['Faceless']+= 1\r\n elif x==\"c\":\r\n stats['Warband']+= 1\r\n else:\r\n stats['United']+= 1\r\n print(\"8/8\")\r\n print(\"I want to be seen as a(n)...\\na. Selfless leader\\nb. Brilliant tactician\\nc. Crafty wildcard\\nd. 
Elite fighter\")\r\n x = takeInput()\r\n if x==\"a\":\r\n stats['United'] += 1\r\n elif x==\"b\":\r\n stats['Council']+= 1\r\n elif x==\"c\":\r\n stats['Faceless']+= 1\r\n else:\r\n stats['Warband']+= 1\r\n print(\"\\n\")\r\n \r\n result = max(stats.items(), key=operator.itemgetter(1))[0]\r\n print(\"Congratulations! You are a \" +result)\r\n\r\n\r\nmain()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Locale(MainHandler):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class MainPage(MainHandler):
def get(self):
self.render('home.html')
def post(self):
pw = self.request.get('pw')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Locale(MainHandler):
<|reserved_special_token_0|>
def get(self):
locale = self.request.get('locale')
if not locale:
locale = LOCALE
locale = locale[:2].lower() + '_' + locale[-2:].upper()
if self.switch_locale(locale):
if self.local_user and self.local_user.locale != locale:
u = LocalUser.by_id(self.local_user.key.id())
u.locale = locale
u.put()
self.write_json({'done': True})
else:
self.write_json({'done': False})
class MainPage(MainHandler):
def get(self):
self.render('home.html')
def post(self):
pw = self.request.get('pw')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Locale(MainHandler):
"""
handles requests to change LOCALE or language for internationalization.
"""
def get(self):
locale = self.request.get('locale')
if not locale:
locale = LOCALE
locale = locale[:2].lower() + '_' + locale[-2:].upper()
if self.switch_locale(locale):
if self.local_user and self.local_user.locale != locale:
u = LocalUser.by_id(self.local_user.key.id())
u.locale = locale
u.put()
self.write_json({'done': True})
else:
self.write_json({'done': False})
class MainPage(MainHandler):
def get(self):
self.render('home.html')
def post(self):
pw = self.request.get('pw')
<|reserved_special_token_1|>
from mx.handlers import MainHandler
class Locale(MainHandler):
"""
handles requests to change LOCALE or language for internationalization.
"""
def get(self):
locale = self.request.get('locale')
if not locale:
locale = LOCALE
locale = locale[:2].lower() + '_' + locale[-2:].upper()
if self.switch_locale(locale):
if self.local_user and self.local_user.locale != locale:
u = LocalUser.by_id(self.local_user.key.id())
u.locale = locale
u.put()
self.write_json({'done': True})
else:
self.write_json({'done': False})
class MainPage(MainHandler):
def get(self):
self.render('home.html')
def post(self):
pw = self.request.get('pw')
<|reserved_special_token_1|>
from mx.handlers import MainHandler
# handler for changing app language
class Locale(MainHandler):
"""
handles requests to change LOCALE or language for internationalization.
"""
def get(self):
locale = self.request.get('locale')
if not locale :
locale = LOCALE
locale = locale[:2].lower()+'_'+locale[-2:].upper()
if self.switch_locale(locale):
if self.local_user and self.local_user.locale != locale:
u = LocalUser.by_id(self.local_user.key.id())
u.locale = locale
u.put()
self.write_json({'done':True})
else:
self.write_json({'done':False})
# home page handler
class MainPage(MainHandler):
def get(self):
self.render('home.html')
def post(self):
pw = self.request.get('pw')
|
flexible
|
{
"blob_id": "bdcbb946dadf168149342c651ad03eaf4b748401",
"index": 6803,
"step-1": "<mask token>\n\n\nclass Locale(MainHandler):\n <mask token>\n <mask token>\n\n\nclass MainPage(MainHandler):\n\n def get(self):\n self.render('home.html')\n\n def post(self):\n pw = self.request.get('pw')\n",
"step-2": "<mask token>\n\n\nclass Locale(MainHandler):\n <mask token>\n\n def get(self):\n locale = self.request.get('locale')\n if not locale:\n locale = LOCALE\n locale = locale[:2].lower() + '_' + locale[-2:].upper()\n if self.switch_locale(locale):\n if self.local_user and self.local_user.locale != locale:\n u = LocalUser.by_id(self.local_user.key.id())\n u.locale = locale\n u.put()\n self.write_json({'done': True})\n else:\n self.write_json({'done': False})\n\n\nclass MainPage(MainHandler):\n\n def get(self):\n self.render('home.html')\n\n def post(self):\n pw = self.request.get('pw')\n",
"step-3": "<mask token>\n\n\nclass Locale(MainHandler):\n \"\"\"\n handles requests to change LOCALE or language for internationalization.\n \"\"\"\n\n def get(self):\n locale = self.request.get('locale')\n if not locale:\n locale = LOCALE\n locale = locale[:2].lower() + '_' + locale[-2:].upper()\n if self.switch_locale(locale):\n if self.local_user and self.local_user.locale != locale:\n u = LocalUser.by_id(self.local_user.key.id())\n u.locale = locale\n u.put()\n self.write_json({'done': True})\n else:\n self.write_json({'done': False})\n\n\nclass MainPage(MainHandler):\n\n def get(self):\n self.render('home.html')\n\n def post(self):\n pw = self.request.get('pw')\n",
"step-4": "from mx.handlers import MainHandler\n\n\nclass Locale(MainHandler):\n \"\"\"\n handles requests to change LOCALE or language for internationalization.\n \"\"\"\n\n def get(self):\n locale = self.request.get('locale')\n if not locale:\n locale = LOCALE\n locale = locale[:2].lower() + '_' + locale[-2:].upper()\n if self.switch_locale(locale):\n if self.local_user and self.local_user.locale != locale:\n u = LocalUser.by_id(self.local_user.key.id())\n u.locale = locale\n u.put()\n self.write_json({'done': True})\n else:\n self.write_json({'done': False})\n\n\nclass MainPage(MainHandler):\n\n def get(self):\n self.render('home.html')\n\n def post(self):\n pw = self.request.get('pw')\n",
"step-5": "\nfrom mx.handlers import MainHandler\n\n# handler for changing app language\nclass Locale(MainHandler):\n \"\"\"\n handles requests to change LOCALE or language for internationalization.\n \"\"\"\n def get(self):\n locale = self.request.get('locale')\n if not locale :\n locale = LOCALE\n locale = locale[:2].lower()+'_'+locale[-2:].upper()\n if self.switch_locale(locale):\n if self.local_user and self.local_user.locale != locale:\n u = LocalUser.by_id(self.local_user.key.id())\n u.locale = locale\n u.put()\n self.write_json({'done':True})\n else:\n self.write_json({'done':False})\n\n\n# home page handler\nclass MainPage(MainHandler):\n def get(self):\n self.render('home.html')\n\n def post(self):\n pw = self.request.get('pw')\n \n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def findNonPrefixes(prefix, array):
result = []
prefixLength = len(prefix)
for string in array:
if string[0:prefixLength] != prefix:
result.append(string)
return result
def run():
r = requests.post('http://challenge.code2040.org/api/prefix', data={
'token': '747bece10e7785955b91c15de7435216'})
result = r.json()
prefix = result['prefix']
stringArray = result['array']
resultArray = findNonPrefixes(prefix, stringArray)
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'
}
payload = {'token': '747bece10e7785955b91c15de7435216', 'array':
resultArray}
r2 = requests.post('http://challenge.code2040.org/api/prefix/validate',
data=json.dumps(payload), headers=headers)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(os.path.join(os.path.dirname(__file__), 'requests'))
<|reserved_special_token_0|>
def findNonPrefixes(prefix, array):
result = []
prefixLength = len(prefix)
for string in array:
if string[0:prefixLength] != prefix:
result.append(string)
return result
def run():
r = requests.post('http://challenge.code2040.org/api/prefix', data={
'token': '747bece10e7785955b91c15de7435216'})
result = r.json()
prefix = result['prefix']
stringArray = result['array']
resultArray = findNonPrefixes(prefix, stringArray)
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'
}
payload = {'token': '747bece10e7785955b91c15de7435216', 'array':
resultArray}
r2 = requests.post('http://challenge.code2040.org/api/prefix/validate',
data=json.dumps(payload), headers=headers)
run()
<|reserved_special_token_1|>
import sys, os, json
sys.path.append(os.path.join(os.path.dirname(__file__), 'requests'))
import requests
def findNonPrefixes(prefix, array):
result = []
prefixLength = len(prefix)
for string in array:
if string[0:prefixLength] != prefix:
result.append(string)
return result
def run():
r = requests.post('http://challenge.code2040.org/api/prefix', data={
'token': '747bece10e7785955b91c15de7435216'})
result = r.json()
prefix = result['prefix']
stringArray = result['array']
resultArray = findNonPrefixes(prefix, stringArray)
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'
}
payload = {'token': '747bece10e7785955b91c15de7435216', 'array':
resultArray}
r2 = requests.post('http://challenge.code2040.org/api/prefix/validate',
data=json.dumps(payload), headers=headers)
run()
<|reserved_special_token_1|>
import sys, os, json
sys.path.append(os.path.join(os.path.dirname(__file__), "requests"))
import requests
def findNonPrefixes(prefix, array):
result = []
prefixLength = len(prefix)
for string in array:
if string[0:prefixLength] != prefix:
result.append(string)
return result
def run ():
r = requests.post("http://challenge.code2040.org/api/prefix", data={'token': '747bece10e7785955b91c15de7435216'})
result = r.json()
prefix = result["prefix"]
stringArray = result["array"]
resultArray = findNonPrefixes(prefix, stringArray)
headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
payload = {'token': '747bece10e7785955b91c15de7435216', 'array': resultArray}
r2 = requests.post("http://challenge.code2040.org/api/prefix/validate", data=json.dumps(payload), headers = headers)
run()
|
flexible
|
{
"blob_id": "8419aee5dbc64b51f3c0f364716aad1630f00fe9",
"index": 7173,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef findNonPrefixes(prefix, array):\n result = []\n prefixLength = len(prefix)\n for string in array:\n if string[0:prefixLength] != prefix:\n result.append(string)\n return result\n\n\ndef run():\n r = requests.post('http://challenge.code2040.org/api/prefix', data={\n 'token': '747bece10e7785955b91c15de7435216'})\n result = r.json()\n prefix = result['prefix']\n stringArray = result['array']\n resultArray = findNonPrefixes(prefix, stringArray)\n headers = {'Content-Type': 'application/json', 'Accept': 'application/json'\n }\n payload = {'token': '747bece10e7785955b91c15de7435216', 'array':\n resultArray}\n r2 = requests.post('http://challenge.code2040.org/api/prefix/validate',\n data=json.dumps(payload), headers=headers)\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append(os.path.join(os.path.dirname(__file__), 'requests'))\n<mask token>\n\n\ndef findNonPrefixes(prefix, array):\n result = []\n prefixLength = len(prefix)\n for string in array:\n if string[0:prefixLength] != prefix:\n result.append(string)\n return result\n\n\ndef run():\n r = requests.post('http://challenge.code2040.org/api/prefix', data={\n 'token': '747bece10e7785955b91c15de7435216'})\n result = r.json()\n prefix = result['prefix']\n stringArray = result['array']\n resultArray = findNonPrefixes(prefix, stringArray)\n headers = {'Content-Type': 'application/json', 'Accept': 'application/json'\n }\n payload = {'token': '747bece10e7785955b91c15de7435216', 'array':\n resultArray}\n r2 = requests.post('http://challenge.code2040.org/api/prefix/validate',\n data=json.dumps(payload), headers=headers)\n\n\nrun()\n",
"step-4": "import sys, os, json\nsys.path.append(os.path.join(os.path.dirname(__file__), 'requests'))\nimport requests\n\n\ndef findNonPrefixes(prefix, array):\n result = []\n prefixLength = len(prefix)\n for string in array:\n if string[0:prefixLength] != prefix:\n result.append(string)\n return result\n\n\ndef run():\n r = requests.post('http://challenge.code2040.org/api/prefix', data={\n 'token': '747bece10e7785955b91c15de7435216'})\n result = r.json()\n prefix = result['prefix']\n stringArray = result['array']\n resultArray = findNonPrefixes(prefix, stringArray)\n headers = {'Content-Type': 'application/json', 'Accept': 'application/json'\n }\n payload = {'token': '747bece10e7785955b91c15de7435216', 'array':\n resultArray}\n r2 = requests.post('http://challenge.code2040.org/api/prefix/validate',\n data=json.dumps(payload), headers=headers)\n\n\nrun()\n",
"step-5": "import sys, os, json\nsys.path.append(os.path.join(os.path.dirname(__file__), \"requests\"))\nimport requests \n\ndef findNonPrefixes(prefix, array):\n\tresult = []\n\tprefixLength = len(prefix)\n\tfor string in array:\n\t\tif string[0:prefixLength] != prefix: \n\t\t\tresult.append(string)\n\treturn result\n\ndef run ():\n\tr = requests.post(\"http://challenge.code2040.org/api/prefix\", data={'token': '747bece10e7785955b91c15de7435216'})\n\tresult = r.json()\n\tprefix = result[\"prefix\"]\n\tstringArray = result[\"array\"]\n\tresultArray = findNonPrefixes(prefix, stringArray)\n\theaders = {'Content-Type': 'application/json', 'Accept':'application/json'}\n\tpayload = {'token': '747bece10e7785955b91c15de7435216', 'array': resultArray}\n\tr2 = requests.post(\"http://challenge.code2040.org/api/prefix/validate\", data=json.dumps(payload), headers = headers)\nrun()",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class MovieRankings(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Movie(models.Model):
"""
电影的数据库表格
"""
movie_name = models.CharField(max_length=64, blank=True)
douban_link = models.CharField(max_length=256, null=True, blank=True)
douban_score = models.CharField(max_length=64, null=True, blank=True)
douban_counter = models.PositiveIntegerField(default=0, blank=True)
imdb_link = models.CharField(max_length=256, null=True, blank=True)
imdb_score = models.CharField(max_length=64, null=True, blank=True)
imdb_counter = models.PositiveIntegerField(default=0, blank=True)
nomovie_link = models.CharField(max_length=256, null=True, blank=True)
nomovie_score = models.CharField(max_length=64, null=True, blank=True)
nomovie_counter = models.PositiveIntegerField(default=0, blank=True)
country = models.CharField(max_length=64, null=True, blank=True)
dateyear = models.CharField(max_length=64, null=True, blank=True)
actor = models.CharField(max_length=256, null=True, blank=True)
director = models.CharField(max_length=256, null=True, blank=True)
style = models.CharField(max_length=64, null=True, blank=True)
movie_address = models.CharField(max_length=256, null=True, blank=True)
download_link = models.CharField(max_length=256, null=True, blank=True)
counter = models.PositiveIntegerField(default=0, blank=True)
original = models.CharField(max_length=256, null=True, blank=True)
status = models.IntegerField(null=True, blank=True)
image = models.CharField(max_length=256, null=True, blank=True)
spidertime = models.DateTimeField(auto_now_add=True, null=True)
aboutmovie = models.CharField(max_length=256, null=True, blank=True)
language = models.CharField(max_length=64, null=True, blank=True)
dyttsearch = models.CharField(max_length=256, null=True, blank=True)
dyttdetail = models.CharField(max_length=256, null=True, blank=True)
movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)
def __unicode__(self):
return self.movie_name
class MovieHistory(models.Model):
user = models.ForeignKey(User)
movie = models.ForeignKey(Movie)
date = models.DateTimeField(auto_now_add=True)
marked = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MovieRankings(models.Model):
<|reserved_special_token_0|>
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Movie(models.Model):
"""
电影的数据库表格
"""
movie_name = models.CharField(max_length=64, blank=True)
douban_link = models.CharField(max_length=256, null=True, blank=True)
douban_score = models.CharField(max_length=64, null=True, blank=True)
douban_counter = models.PositiveIntegerField(default=0, blank=True)
imdb_link = models.CharField(max_length=256, null=True, blank=True)
imdb_score = models.CharField(max_length=64, null=True, blank=True)
imdb_counter = models.PositiveIntegerField(default=0, blank=True)
nomovie_link = models.CharField(max_length=256, null=True, blank=True)
nomovie_score = models.CharField(max_length=64, null=True, blank=True)
nomovie_counter = models.PositiveIntegerField(default=0, blank=True)
country = models.CharField(max_length=64, null=True, blank=True)
dateyear = models.CharField(max_length=64, null=True, blank=True)
actor = models.CharField(max_length=256, null=True, blank=True)
director = models.CharField(max_length=256, null=True, blank=True)
style = models.CharField(max_length=64, null=True, blank=True)
movie_address = models.CharField(max_length=256, null=True, blank=True)
download_link = models.CharField(max_length=256, null=True, blank=True)
counter = models.PositiveIntegerField(default=0, blank=True)
original = models.CharField(max_length=256, null=True, blank=True)
status = models.IntegerField(null=True, blank=True)
image = models.CharField(max_length=256, null=True, blank=True)
spidertime = models.DateTimeField(auto_now_add=True, null=True)
aboutmovie = models.CharField(max_length=256, null=True, blank=True)
language = models.CharField(max_length=64, null=True, blank=True)
dyttsearch = models.CharField(max_length=256, null=True, blank=True)
dyttdetail = models.CharField(max_length=256, null=True, blank=True)
movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)
def __unicode__(self):
return self.movie_name
class MovieHistory(models.Model):
user = models.ForeignKey(User)
movie = models.ForeignKey(Movie)
date = models.DateTimeField(auto_now_add=True)
marked = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MovieRankings(models.Model):
"""
各种电影排行榜.
"""
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Movie(models.Model):
"""
电影的数据库表格
"""
movie_name = models.CharField(max_length=64, blank=True)
douban_link = models.CharField(max_length=256, null=True, blank=True)
douban_score = models.CharField(max_length=64, null=True, blank=True)
douban_counter = models.PositiveIntegerField(default=0, blank=True)
imdb_link = models.CharField(max_length=256, null=True, blank=True)
imdb_score = models.CharField(max_length=64, null=True, blank=True)
imdb_counter = models.PositiveIntegerField(default=0, blank=True)
nomovie_link = models.CharField(max_length=256, null=True, blank=True)
nomovie_score = models.CharField(max_length=64, null=True, blank=True)
nomovie_counter = models.PositiveIntegerField(default=0, blank=True)
country = models.CharField(max_length=64, null=True, blank=True)
dateyear = models.CharField(max_length=64, null=True, blank=True)
actor = models.CharField(max_length=256, null=True, blank=True)
director = models.CharField(max_length=256, null=True, blank=True)
style = models.CharField(max_length=64, null=True, blank=True)
movie_address = models.CharField(max_length=256, null=True, blank=True)
download_link = models.CharField(max_length=256, null=True, blank=True)
counter = models.PositiveIntegerField(default=0, blank=True)
original = models.CharField(max_length=256, null=True, blank=True)
status = models.IntegerField(null=True, blank=True)
image = models.CharField(max_length=256, null=True, blank=True)
spidertime = models.DateTimeField(auto_now_add=True, null=True)
aboutmovie = models.CharField(max_length=256, null=True, blank=True)
language = models.CharField(max_length=64, null=True, blank=True)
dyttsearch = models.CharField(max_length=256, null=True, blank=True)
dyttdetail = models.CharField(max_length=256, null=True, blank=True)
movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)
def __unicode__(self):
return self.movie_name
class MovieHistory(models.Model):
user = models.ForeignKey(User)
movie = models.ForeignKey(Movie)
date = models.DateTimeField(auto_now_add=True)
marked = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)
<|reserved_special_token_1|>
from __future__ import unicode_literals
import markdown
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
class MovieRankings(models.Model):
"""
各种电影排行榜.
"""
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Movie(models.Model):
"""
电影的数据库表格
"""
movie_name = models.CharField(max_length=64, blank=True)
douban_link = models.CharField(max_length=256, null=True, blank=True)
douban_score = models.CharField(max_length=64, null=True, blank=True)
douban_counter = models.PositiveIntegerField(default=0, blank=True)
imdb_link = models.CharField(max_length=256, null=True, blank=True)
imdb_score = models.CharField(max_length=64, null=True, blank=True)
imdb_counter = models.PositiveIntegerField(default=0, blank=True)
nomovie_link = models.CharField(max_length=256, null=True, blank=True)
nomovie_score = models.CharField(max_length=64, null=True, blank=True)
nomovie_counter = models.PositiveIntegerField(default=0, blank=True)
country = models.CharField(max_length=64, null=True, blank=True)
dateyear = models.CharField(max_length=64, null=True, blank=True)
actor = models.CharField(max_length=256, null=True, blank=True)
director = models.CharField(max_length=256, null=True, blank=True)
style = models.CharField(max_length=64, null=True, blank=True)
movie_address = models.CharField(max_length=256, null=True, blank=True)
download_link = models.CharField(max_length=256, null=True, blank=True)
counter = models.PositiveIntegerField(default=0, blank=True)
original = models.CharField(max_length=256, null=True, blank=True)
status = models.IntegerField(null=True, blank=True)
image = models.CharField(max_length=256, null=True, blank=True)
spidertime = models.DateTimeField(auto_now_add=True, null=True)
aboutmovie = models.CharField(max_length=256, null=True, blank=True)
language = models.CharField(max_length=64, null=True, blank=True)
dyttsearch = models.CharField(max_length=256, null=True, blank=True)
dyttdetail = models.CharField(max_length=256, null=True, blank=True)
movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)
def __unicode__(self):
return self.movie_name
class MovieHistory(models.Model):
user = models.ForeignKey(User)
movie = models.ForeignKey(Movie)
date = models.DateTimeField(auto_now_add=True)
marked = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import markdown
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
class MovieRankings(models.Model):
"""
各种电影排行榜.
"""
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Movie(models.Model):
"""
电影的数据库表格
"""
movie_name = models.CharField(max_length=64, blank=True)
# 豆瓣链接,值可以是null,也可以不填这个字段.
douban_link = models.CharField(max_length=256, null=True, blank=True)
# 豆瓣评分.
douban_score = models.CharField(max_length=64, null=True, blank=True)
# 豆瓣评分人数.
douban_counter = models.PositiveIntegerField(default=0, blank=True)
# Imdb链接.
imdb_link = models.CharField(max_length=256, null=True, blank=True)
# Imdb评分.
imdb_score = models.CharField(max_length=64, null=True, blank=True)
# Imdb评分人数.
imdb_counter = models.PositiveIntegerField(default=0, blank=True)
# 网站中的链接.
nomovie_link = models.CharField(max_length=256, null=True, blank=True)
# 网站中评分.
nomovie_score = models.CharField(max_length=64, null=True, blank=True)
# 网站中评分人数.
nomovie_counter = models.PositiveIntegerField(default=0, blank=True)
# 上映国家.
country = models.CharField(max_length=64, null=True, blank=True)
# 上映日期.
dateyear = models.CharField(max_length=64, null=True, blank=True)
# 主演.
actor = models.CharField(max_length=256, null=True, blank=True)
# 导演.
director = models.CharField(max_length=256, null=True, blank=True)
# 电影类型.
style = models.CharField(max_length=64, null=True, blank=True)
# 电影播放地址.
movie_address = models.CharField(max_length=256, null=True, blank=True)
# 电影下载链接.
download_link = models.CharField(max_length=256, null=True, blank=True)
# 电影在本网站的播放次数.
counter = models.PositiveIntegerField(default=0, blank=True)
# 电影来源,
# 0:表示豆瓣top250 1:表示imdbtop250 2:表示普通豆瓣 3:表示普通imdb
# 4:表示在豆瓣和imdb中都存在 5表示:用户自添加
original = models.CharField(max_length=256, null=True, blank=True)
# 1:表示通过 0:表示未通过 2:表示审核中
status = models.IntegerField(null=True, blank=True)
# 图片保存地址
image = models.CharField(max_length=256, null=True, blank=True)
# 爬取电影入库时间
spidertime = models.DateTimeField(auto_now_add=True, null=True)
# 关于电影
aboutmovie = models.CharField(max_length=256, null=True, blank=True)
# 电影语言
language = models.CharField(max_length=64, null=True, blank=True)
# 电影天堂搜索地址
dyttsearch = models.CharField(max_length=256, null=True, blank=True)
# 电影天堂搜索电影详情页面
dyttdetail = models.CharField(max_length=256, null=True, blank=True)
movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)
def __unicode__(self):
return self.movie_name
# def get_comments(self):
class MovieHistory(models.Model):
# 观看的用户.
# 用户一对多MovieHistory,可以看多个电影.
user = models.ForeignKey(User)
# 观看的电影.
movie = models.ForeignKey(Movie)
# 观看的时间.
date = models.DateTimeField(auto_now_add=True)
# 0表示用户观看了该电影,1表示收藏,2表示推荐.
marked = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return "{%s}--{%s}" % (self.user.username, self.movie.movie_name)
|
flexible
|
{
"blob_id": "449ae193f8817d4ee2fe67eadf72d9c19b2c5e53",
"index": 1319,
"step-1": "<mask token>\n\n\nclass MovieRankings(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Movie(models.Model):\n \"\"\"\n 电影的数据库表格\n \"\"\"\n movie_name = models.CharField(max_length=64, blank=True)\n douban_link = models.CharField(max_length=256, null=True, blank=True)\n douban_score = models.CharField(max_length=64, null=True, blank=True)\n douban_counter = models.PositiveIntegerField(default=0, blank=True)\n imdb_link = models.CharField(max_length=256, null=True, blank=True)\n imdb_score = models.CharField(max_length=64, null=True, blank=True)\n imdb_counter = models.PositiveIntegerField(default=0, blank=True)\n nomovie_link = models.CharField(max_length=256, null=True, blank=True)\n nomovie_score = models.CharField(max_length=64, null=True, blank=True)\n nomovie_counter = models.PositiveIntegerField(default=0, blank=True)\n country = models.CharField(max_length=64, null=True, blank=True)\n dateyear = models.CharField(max_length=64, null=True, blank=True)\n actor = models.CharField(max_length=256, null=True, blank=True)\n director = models.CharField(max_length=256, null=True, blank=True)\n style = models.CharField(max_length=64, null=True, blank=True)\n movie_address = models.CharField(max_length=256, null=True, blank=True)\n download_link = models.CharField(max_length=256, null=True, blank=True)\n counter = models.PositiveIntegerField(default=0, blank=True)\n original = models.CharField(max_length=256, null=True, blank=True)\n status = models.IntegerField(null=True, blank=True)\n image = models.CharField(max_length=256, null=True, blank=True)\n spidertime = models.DateTimeField(auto_now_add=True, null=True)\n aboutmovie = models.CharField(max_length=256, null=True, blank=True)\n language = models.CharField(max_length=64, null=True, blank=True)\n dyttsearch = models.CharField(max_length=256, null=True, blank=True)\n dyttdetail = models.CharField(max_length=256, null=True, blank=True)\n movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)\n\n def __unicode__(self):\n return self.movie_name\n\n\nclass MovieHistory(models.Model):\n user = models.ForeignKey(User)\n movie = models.ForeignKey(Movie)\n date = models.DateTimeField(auto_now_add=True)\n marked = models.IntegerField(blank=True, null=True)\n\n def __unicode__(self):\n return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)\n",
"step-2": "<mask token>\n\n\nclass MovieRankings(models.Model):\n <mask token>\n name = models.CharField(max_length=100)\n\n def __unicode__(self):\n return self.name\n\n\nclass Movie(models.Model):\n \"\"\"\n 电影的数据库表格\n \"\"\"\n movie_name = models.CharField(max_length=64, blank=True)\n douban_link = models.CharField(max_length=256, null=True, blank=True)\n douban_score = models.CharField(max_length=64, null=True, blank=True)\n douban_counter = models.PositiveIntegerField(default=0, blank=True)\n imdb_link = models.CharField(max_length=256, null=True, blank=True)\n imdb_score = models.CharField(max_length=64, null=True, blank=True)\n imdb_counter = models.PositiveIntegerField(default=0, blank=True)\n nomovie_link = models.CharField(max_length=256, null=True, blank=True)\n nomovie_score = models.CharField(max_length=64, null=True, blank=True)\n nomovie_counter = models.PositiveIntegerField(default=0, blank=True)\n country = models.CharField(max_length=64, null=True, blank=True)\n dateyear = models.CharField(max_length=64, null=True, blank=True)\n actor = models.CharField(max_length=256, null=True, blank=True)\n director = models.CharField(max_length=256, null=True, blank=True)\n style = models.CharField(max_length=64, null=True, blank=True)\n movie_address = models.CharField(max_length=256, null=True, blank=True)\n download_link = models.CharField(max_length=256, null=True, blank=True)\n counter = models.PositiveIntegerField(default=0, blank=True)\n original = models.CharField(max_length=256, null=True, blank=True)\n status = models.IntegerField(null=True, blank=True)\n image = models.CharField(max_length=256, null=True, blank=True)\n spidertime = models.DateTimeField(auto_now_add=True, null=True)\n aboutmovie = models.CharField(max_length=256, null=True, blank=True)\n language = models.CharField(max_length=64, null=True, blank=True)\n dyttsearch = models.CharField(max_length=256, null=True, blank=True)\n dyttdetail = models.CharField(max_length=256, null=True, blank=True)\n movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)\n\n def __unicode__(self):\n return self.movie_name\n\n\nclass MovieHistory(models.Model):\n user = models.ForeignKey(User)\n movie = models.ForeignKey(Movie)\n date = models.DateTimeField(auto_now_add=True)\n marked = models.IntegerField(blank=True, null=True)\n\n def __unicode__(self):\n return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)\n",
"step-3": "<mask token>\n\n\nclass MovieRankings(models.Model):\n \"\"\"\n 各种电影排行榜.\n \"\"\"\n name = models.CharField(max_length=100)\n\n def __unicode__(self):\n return self.name\n\n\nclass Movie(models.Model):\n \"\"\"\n 电影的数据库表格\n \"\"\"\n movie_name = models.CharField(max_length=64, blank=True)\n douban_link = models.CharField(max_length=256, null=True, blank=True)\n douban_score = models.CharField(max_length=64, null=True, blank=True)\n douban_counter = models.PositiveIntegerField(default=0, blank=True)\n imdb_link = models.CharField(max_length=256, null=True, blank=True)\n imdb_score = models.CharField(max_length=64, null=True, blank=True)\n imdb_counter = models.PositiveIntegerField(default=0, blank=True)\n nomovie_link = models.CharField(max_length=256, null=True, blank=True)\n nomovie_score = models.CharField(max_length=64, null=True, blank=True)\n nomovie_counter = models.PositiveIntegerField(default=0, blank=True)\n country = models.CharField(max_length=64, null=True, blank=True)\n dateyear = models.CharField(max_length=64, null=True, blank=True)\n actor = models.CharField(max_length=256, null=True, blank=True)\n director = models.CharField(max_length=256, null=True, blank=True)\n style = models.CharField(max_length=64, null=True, blank=True)\n movie_address = models.CharField(max_length=256, null=True, blank=True)\n download_link = models.CharField(max_length=256, null=True, blank=True)\n counter = models.PositiveIntegerField(default=0, blank=True)\n original = models.CharField(max_length=256, null=True, blank=True)\n status = models.IntegerField(null=True, blank=True)\n image = models.CharField(max_length=256, null=True, blank=True)\n spidertime = models.DateTimeField(auto_now_add=True, null=True)\n aboutmovie = models.CharField(max_length=256, null=True, blank=True)\n language = models.CharField(max_length=64, null=True, blank=True)\n dyttsearch = models.CharField(max_length=256, null=True, blank=True)\n dyttdetail = models.CharField(max_length=256, null=True, blank=True)\n movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)\n\n def __unicode__(self):\n return self.movie_name\n\n\nclass MovieHistory(models.Model):\n user = models.ForeignKey(User)\n movie = models.ForeignKey(Movie)\n date = models.DateTimeField(auto_now_add=True)\n marked = models.IntegerField(blank=True, null=True)\n\n def __unicode__(self):\n return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)\n",
"step-4": "from __future__ import unicode_literals\nimport markdown\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom datetime import datetime\n\n\nclass MovieRankings(models.Model):\n \"\"\"\n 各种电影排行榜.\n \"\"\"\n name = models.CharField(max_length=100)\n\n def __unicode__(self):\n return self.name\n\n\nclass Movie(models.Model):\n \"\"\"\n 电影的数据库表格\n \"\"\"\n movie_name = models.CharField(max_length=64, blank=True)\n douban_link = models.CharField(max_length=256, null=True, blank=True)\n douban_score = models.CharField(max_length=64, null=True, blank=True)\n douban_counter = models.PositiveIntegerField(default=0, blank=True)\n imdb_link = models.CharField(max_length=256, null=True, blank=True)\n imdb_score = models.CharField(max_length=64, null=True, blank=True)\n imdb_counter = models.PositiveIntegerField(default=0, blank=True)\n nomovie_link = models.CharField(max_length=256, null=True, blank=True)\n nomovie_score = models.CharField(max_length=64, null=True, blank=True)\n nomovie_counter = models.PositiveIntegerField(default=0, blank=True)\n country = models.CharField(max_length=64, null=True, blank=True)\n dateyear = models.CharField(max_length=64, null=True, blank=True)\n actor = models.CharField(max_length=256, null=True, blank=True)\n director = models.CharField(max_length=256, null=True, blank=True)\n style = models.CharField(max_length=64, null=True, blank=True)\n movie_address = models.CharField(max_length=256, null=True, blank=True)\n download_link = models.CharField(max_length=256, null=True, blank=True)\n counter = models.PositiveIntegerField(default=0, blank=True)\n original = models.CharField(max_length=256, null=True, blank=True)\n status = models.IntegerField(null=True, blank=True)\n image = models.CharField(max_length=256, null=True, blank=True)\n spidertime = models.DateTimeField(auto_now_add=True, null=True)\n aboutmovie = models.CharField(max_length=256, null=True, blank=True)\n language = models.CharField(max_length=64, null=True, blank=True)\n dyttsearch = models.CharField(max_length=256, null=True, blank=True)\n dyttdetail = models.CharField(max_length=256, null=True, blank=True)\n movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)\n\n def __unicode__(self):\n return self.movie_name\n\n\nclass MovieHistory(models.Model):\n user = models.ForeignKey(User)\n movie = models.ForeignKey(Movie)\n date = models.DateTimeField(auto_now_add=True)\n marked = models.IntegerField(blank=True, null=True)\n\n def __unicode__(self):\n return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport markdown\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\nfrom datetime import datetime\n\nclass MovieRankings(models.Model):\n \"\"\"\n 各种电影排行榜.\n \"\"\"\n name = models.CharField(max_length=100)\n def __unicode__(self):\n return self.name\n\nclass Movie(models.Model):\n \"\"\"\n 电影的数据库表格\n \"\"\"\n movie_name = models.CharField(max_length=64, blank=True)\n # 豆瓣链接,值可以是null,也可以不填这个字段.\n douban_link = models.CharField(max_length=256, null=True, blank=True)\n # 豆瓣评分.\n douban_score = models.CharField(max_length=64, null=True, blank=True)\n # 豆瓣评分人数.\n douban_counter = models.PositiveIntegerField(default=0, blank=True)\n # Imdb链接.\n imdb_link = models.CharField(max_length=256, null=True, blank=True)\n # Imdb评分.\n imdb_score = models.CharField(max_length=64, null=True, blank=True)\n # Imdb评分人数.\n imdb_counter = models.PositiveIntegerField(default=0, blank=True)\n # 网站中的链接.\n nomovie_link = models.CharField(max_length=256, null=True, blank=True)\n # 网站中评分.\n nomovie_score = models.CharField(max_length=64, null=True, blank=True)\n # 网站中评分人数.\n nomovie_counter = models.PositiveIntegerField(default=0, blank=True)\n # 上映国家.\n country = models.CharField(max_length=64, null=True, blank=True)\n # 上映日期.\n dateyear = models.CharField(max_length=64, null=True, blank=True)\n # 主演.\n actor = models.CharField(max_length=256, null=True, blank=True)\n # 导演.\n director = models.CharField(max_length=256, null=True, blank=True)\n # 电影类型.\n style = models.CharField(max_length=64, null=True, blank=True)\n # 电影播放地址.\n movie_address = models.CharField(max_length=256, null=True, blank=True)\n # 电影下载链接.\n download_link = models.CharField(max_length=256, null=True, blank=True)\n # 电影在本网站的播放次数.\n counter = models.PositiveIntegerField(default=0, blank=True)\n # 电影来源,\n # 0:表示豆瓣top250 1:表示imdbtop250 2:表示普通豆瓣 3:表示普通imdb \n # 4:表示在豆瓣和imdb中都存在 5表示:用户自添加\n original = models.CharField(max_length=256, null=True, blank=True)\n # 1:表示通过 0:表示未通过 2:表示审核中\n status = models.IntegerField(null=True, blank=True)\n # 图片保存地址\n image = models.CharField(max_length=256, null=True, blank=True)\n # 爬取电影入库时间\n spidertime = models.DateTimeField(auto_now_add=True, null=True)\n # 关于电影\n aboutmovie = models.CharField(max_length=256, null=True, blank=True)\n # 电影语言\n language = models.CharField(max_length=64, null=True, blank=True)\n # 电影天堂搜索地址\n dyttsearch = models.CharField(max_length=256, null=True, blank=True)\n # 电影天堂搜索电影详情页面\n dyttdetail = models.CharField(max_length=256, null=True, blank=True)\n movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)\n\n def __unicode__(self):\n return self.movie_name\n\n # def get_comments(self):\n\nclass MovieHistory(models.Model):\n # 观看的用户.\n # 用户一对多MovieHistory,可以看多个电影.\n user = models.ForeignKey(User)\n # 观看的电影.\n movie = models.ForeignKey(Movie)\n # 观看的时间.\n date = models.DateTimeField(auto_now_add=True)\n # 0表示用户观看了该电影,1表示收藏,2表示推荐.\n marked = models.IntegerField(blank=True, null=True)\n \n def __unicode__(self):\n return \"{%s}--{%s}\" % (self.user.username, self.movie.movie_name)\n\n\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
# -*- coding: utf-8 -*-
"""
Created on Thu May 3 09:12:11 2018
@author: shen1994
"""
import codecs
import numpy as np
def create_documents():
""" 按标点符号或是空格存储文件 """
documents_length = 0
chars,labels = [],[]
chars_file = codecs.open("data/data.data", 'w', 'utf-8')
labels_file = codecs.open("data/label.data", 'w', 'utf-8')
with codecs.open("data/train.data", 'r', 'utf-8') as f:
for line in f:
line=line.strip()
if len(line)==0:
if len(chars)!=0:
for char in chars:
chars_file.write(char + "\t")
chars_file.write("\n")
for label in labels:
labels_file.write(label + "\t")
labels_file.write("\n")
documents_length += 1
chars, labels=[], []
else:
pieces=line.strip().split()
chars.append(pieces[0])
labels.append(pieces[1])
if pieces[0] in ['。',',',';','!','?']:
for char in chars:
chars_file.write(char + "\t")
chars_file.write("\n")
for label in labels:
labels_file.write(label + "\t")
labels_file.write("\n")
documents_length += 1
chars, labels=[], []
if len(chars)!=0:
for char in chars:
chars_file.write(char + "\t")
chars_file.write("\n")
for label in labels:
labels_file.write(label + "\t")
labels_file.write("\n")
documents_length += 1
chars, labels=[], []
chars_file.close()
labels_file.close()
return documents_length
def create_useful_words(embedding_model):
return list(embedding_model.wv.vocab.keys())
def create_lexicon(word_dict):
""" 生成词典 """
chars = {}
    # Count how many times each character appears.
with codecs.open("data/data.data", 'r', 'utf-8') as f:
line = f.readline()
while(line):
book_chars = line.strip().split()
for sequence in book_chars:
for char in sequence:
chars[char] = chars.get(char,0) + 1
line = f.readline()
sorted_chars = sorted(chars.items(), key=lambda x:x[1], reverse=True)
    # Indices start at 1; index 0 is reserved for padding.
lexicon = dict([(item[0],index+1) for index, item in enumerate(sorted_chars)])
del sorted_chars
    # Mark characters missing from the embedding vocabulary with -1.
for v in lexicon:
if v not in word_dict:
lexicon[v] = -1
lexicon_reverse = dict(zip(lexicon.values(), lexicon.keys()))
return lexicon, lexicon_reverse
def create_label_index():
return {'P':0, 'B':1, 'M':2, 'E':3, 'S':4, 'U':5}
def create_index_label():
return {0:'Pad',1:'B',2:'M',3:'E',4:'S',5:'Unk'}
def create_embedding(embedding_model, embedding_size, lexicon_reverse):
word_dict = create_useful_words(embedding_model)
useful_word = []
useful_word_length = 0
for word in list(lexicon_reverse.values()):
if word in word_dict:
useful_word_length += 1
useful_word.append(word)
del word_dict
    # Two extra rows: one for padding and one for unknown characters.
embedding_weights = np.zeros((useful_word_length + 2, embedding_size))
for i in range(useful_word_length):
embedding_weights[i + 1] = embedding_model.wv[useful_word[i]]
    # Random embedding vector for unknown characters.
embedding_weights[-1] = np.random.uniform(-1, 1, embedding_size)
return useful_word_length, embedding_weights
def create_matrix(lexicon, label_2_index):
data_index = codecs.open("data/data_index.data", 'w', 'utf-8')
label_index = codecs.open("data/label_index.data", 'w', 'utf-8')
file_chars = codecs.open("data/data.data", 'r', 'utf-8')
file_labels = codecs.open("data/label.data", 'r', 'utf-8')
chars_line = file_chars.readline()
labels_line = file_labels.readline()
while (chars_line and labels_line):
book_chars = chars_line.strip().split()
book_labels = labels_line.strip().split()
for char, label in zip(book_chars, book_labels):
data_index.write(str(lexicon[char]) + "\t")
label_index.write(str(label_2_index[label]) + "\t")
data_index.write("\n")
label_index.write("\n")
chars_line = file_chars.readline()
labels_line = file_labels.readline()
file_chars.close()
file_labels.close()
data_index.close()
label_index.close()
def padding_sentences(max_len):
data_index = codecs.open("data/data_index.data", 'r', 'utf-8')
label_index = codecs.open("data/label_index.data", 'r', 'utf-8')
data_index_padding = codecs.open("data/data_index_padding.data", 'w', 'utf-8')
label_index_padding = codecs.open("data/label_index_padding.data", 'w', 'utf-8')
data_line = data_index.readline()
while data_line:
book_data = data_line.strip().split()
book_data_len = len(book_data)
new_book_data = []
if book_data_len < max_len:
new_book_data = ([str(0)] * (max_len - book_data_len) + book_data)
else:
new_book_data = book_data
for data_word in new_book_data:
data_index_padding.write(data_word + "\t")
data_index_padding.write("\n")
data_line = data_index.readline()
label_line = label_index.readline()
while label_line:
book_label = label_line.strip().split()
book_label_len = len(book_label)
new_book_label = []
if book_label_len < max_len:
new_book_label = ([str(0)] * (max_len - book_label_len) + book_label)
else:
new_book_label = book_label
for label_word in new_book_label:
label_index_padding.write(label_word + "\t")
label_index_padding.write("\n")
label_line = label_index.readline()
data_index.close()
label_index.close()
data_index_padding.close()
label_index_padding.close()
def maxlen_2d_list():
max_len = 0
data_index = codecs.open("data/data_index.data", 'r', 'utf-8')
data_line = data_index.readline()
while data_line:
book_data = data_line.strip().split()
book_data_len = len(book_data)
if book_data_len > max_len:
max_len = book_data_len
data_line = data_index.readline()
data_index.close()
return max_len
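# --- Editor's addition: a hedged usage sketch, not part of the original file.
# It only illustrates how the helpers above are typically chained together;
# `embedding_model` is assumed to be a trained gensim word2vec model (the code
# above relies on its `.wv` attribute) and `embedding_size` must match it.
def example_pipeline(embedding_model, embedding_size):
    create_documents()
    word_dict = create_useful_words(embedding_model)
    lexicon, lexicon_reverse = create_lexicon(word_dict)
    _, embedding_weights = create_embedding(embedding_model, embedding_size,
                                            lexicon_reverse)
    create_matrix(lexicon, create_label_index())
    max_len = maxlen_2d_list()
    padding_sentences(max_len)
    return lexicon, embedding_weights, max_len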
|
normal
|
{
"blob_id": "f22836fc4fed22d833755db0ff34502170260766",
"index": 9260,
"step-1": "<mask token>\n\n\ndef create_documents():\n \"\"\" 按标点符号或是空格存储文件 \"\"\"\n documents_length = 0\n chars, labels = [], []\n chars_file = codecs.open('data/data.data', 'w', 'utf-8')\n labels_file = codecs.open('data/label.data', 'w', 'utf-8')\n with codecs.open('data/train.data', 'r', 'utf-8') as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n else:\n pieces = line.strip().split()\n chars.append(pieces[0])\n labels.append(pieces[1])\n if pieces[0] in ['。', ',', ';', '!', '?']:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n chars_file.close()\n labels_file.close()\n return documents_length\n\n\ndef create_useful_words(embedding_model):\n return list(embedding_model.wv.vocab.keys())\n\n\n<mask token>\n\n\ndef create_label_index():\n return {'P': 0, 'B': 1, 'M': 2, 'E': 3, 'S': 4, 'U': 5}\n\n\n<mask token>\n\n\ndef create_embedding(embedding_model, embedding_size, lexicon_reverse):\n word_dict = create_useful_words(embedding_model)\n useful_word = []\n useful_word_length = 0\n for word in list(lexicon_reverse.values()):\n if word in word_dict:\n useful_word_length += 1\n useful_word.append(word)\n del word_dict\n embedding_weights = np.zeros((useful_word_length + 2, embedding_size))\n for i in range(useful_word_length):\n embedding_weights[i + 1] = embedding_model.wv[useful_word[i]]\n embedding_weights[-1] = np.random.uniform(-1, 1, embedding_size)\n return useful_word_length, embedding_weights\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_documents():\n \"\"\" 按标点符号或是空格存储文件 \"\"\"\n documents_length = 0\n chars, labels = [], []\n chars_file = codecs.open('data/data.data', 'w', 'utf-8')\n labels_file = codecs.open('data/label.data', 'w', 'utf-8')\n with codecs.open('data/train.data', 'r', 'utf-8') as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n else:\n pieces = line.strip().split()\n chars.append(pieces[0])\n labels.append(pieces[1])\n if pieces[0] in ['。', ',', ';', '!', '?']:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n chars_file.close()\n labels_file.close()\n return documents_length\n\n\ndef create_useful_words(embedding_model):\n return list(embedding_model.wv.vocab.keys())\n\n\n<mask token>\n\n\ndef create_label_index():\n return {'P': 0, 'B': 1, 'M': 2, 'E': 3, 'S': 4, 'U': 5}\n\n\ndef create_index_label():\n return {(0): 'Pad', (1): 'B', (2): 'M', (3): 'E', (4): 'S', (5): 'Unk'}\n\n\ndef create_embedding(embedding_model, embedding_size, lexicon_reverse):\n word_dict = create_useful_words(embedding_model)\n useful_word = []\n useful_word_length = 0\n for word in list(lexicon_reverse.values()):\n if word in word_dict:\n useful_word_length += 1\n useful_word.append(word)\n del word_dict\n embedding_weights = np.zeros((useful_word_length + 2, embedding_size))\n for i in range(useful_word_length):\n embedding_weights[i + 1] = embedding_model.wv[useful_word[i]]\n embedding_weights[-1] = np.random.uniform(-1, 1, embedding_size)\n return useful_word_length, embedding_weights\n\n\ndef create_matrix(lexicon, label_2_index):\n data_index = codecs.open('data/data_index.data', 'w', 'utf-8')\n label_index = codecs.open('data/label_index.data', 'w', 'utf-8')\n file_chars = codecs.open('data/data.data', 'r', 'utf-8')\n file_labels = codecs.open('data/label.data', 'r', 'utf-8')\n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n while chars_line and labels_line:\n book_chars = chars_line.strip().split()\n book_labels = labels_line.strip().split()\n for char, label in zip(book_chars, book_labels):\n data_index.write(str(lexicon[char]) + '\\t')\n label_index.write(str(label_2_index[label]) + '\\t')\n data_index.write('\\n')\n label_index.write('\\n')\n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n file_chars.close()\n file_labels.close()\n data_index.close()\n label_index.close()\n\n\n<mask token>\n\n\ndef maxlen_2d_list():\n max_len = 0\n data_index = codecs.open('data/data_index.data', 'r', 'utf-8')\n data_line = data_index.readline()\n while data_line:\n book_data = data_line.strip().split()\n book_data_len = len(book_data)\n if book_data_len > max_len:\n max_len = book_data_len\n data_line = data_index.readline()\n data_index.close()\n return max_len\n",
"step-3": "<mask token>\n\n\ndef create_documents():\n \"\"\" 按标点符号或是空格存储文件 \"\"\"\n documents_length = 0\n chars, labels = [], []\n chars_file = codecs.open('data/data.data', 'w', 'utf-8')\n labels_file = codecs.open('data/label.data', 'w', 'utf-8')\n with codecs.open('data/train.data', 'r', 'utf-8') as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n else:\n pieces = line.strip().split()\n chars.append(pieces[0])\n labels.append(pieces[1])\n if pieces[0] in ['。', ',', ';', '!', '?']:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n chars_file.close()\n labels_file.close()\n return documents_length\n\n\ndef create_useful_words(embedding_model):\n return list(embedding_model.wv.vocab.keys())\n\n\n<mask token>\n\n\ndef create_label_index():\n return {'P': 0, 'B': 1, 'M': 2, 'E': 3, 'S': 4, 'U': 5}\n\n\ndef create_index_label():\n return {(0): 'Pad', (1): 'B', (2): 'M', (3): 'E', (4): 'S', (5): 'Unk'}\n\n\ndef create_embedding(embedding_model, embedding_size, lexicon_reverse):\n word_dict = create_useful_words(embedding_model)\n useful_word = []\n useful_word_length = 0\n for word in list(lexicon_reverse.values()):\n if word in word_dict:\n useful_word_length += 1\n useful_word.append(word)\n del word_dict\n embedding_weights = np.zeros((useful_word_length + 2, embedding_size))\n for i in range(useful_word_length):\n embedding_weights[i + 1] = embedding_model.wv[useful_word[i]]\n embedding_weights[-1] = np.random.uniform(-1, 1, embedding_size)\n return useful_word_length, embedding_weights\n\n\ndef create_matrix(lexicon, label_2_index):\n data_index = codecs.open('data/data_index.data', 'w', 'utf-8')\n label_index = codecs.open('data/label_index.data', 'w', 'utf-8')\n file_chars = codecs.open('data/data.data', 'r', 'utf-8')\n file_labels = codecs.open('data/label.data', 'r', 'utf-8')\n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n while chars_line and labels_line:\n book_chars = chars_line.strip().split()\n book_labels = labels_line.strip().split()\n for char, label in zip(book_chars, book_labels):\n data_index.write(str(lexicon[char]) + '\\t')\n label_index.write(str(label_2_index[label]) + '\\t')\n data_index.write('\\n')\n label_index.write('\\n')\n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n file_chars.close()\n file_labels.close()\n data_index.close()\n label_index.close()\n\n\ndef padding_sentences(max_len):\n data_index = codecs.open('data/data_index.data', 'r', 'utf-8')\n label_index = codecs.open('data/label_index.data', 'r', 'utf-8')\n data_index_padding = codecs.open('data/data_index_padding.data', 'w',\n 'utf-8')\n label_index_padding = codecs.open('data/label_index_padding.data', 'w',\n 'utf-8')\n data_line = data_index.readline()\n while data_line:\n book_data = data_line.strip().split()\n book_data_len = len(book_data)\n new_book_data = []\n if book_data_len < max_len:\n 
new_book_data = [str(0)] * (max_len - book_data_len) + book_data\n else:\n new_book_data = book_data\n for data_word in new_book_data:\n data_index_padding.write(data_word + '\\t')\n data_index_padding.write('\\n')\n data_line = data_index.readline()\n label_line = label_index.readline()\n while label_line:\n book_label = label_line.strip().split()\n book_label_len = len(book_label)\n new_book_label = []\n if book_label_len < max_len:\n new_book_label = [str(0)] * (max_len - book_label_len) + book_label\n else:\n new_book_label = book_label\n for label_word in new_book_label:\n label_index_padding.write(label_word + '\\t')\n label_index_padding.write('\\n')\n label_line = label_index.readline()\n data_index.close()\n label_index.close()\n data_index_padding.close()\n label_index_padding.close()\n\n\ndef maxlen_2d_list():\n max_len = 0\n data_index = codecs.open('data/data_index.data', 'r', 'utf-8')\n data_line = data_index.readline()\n while data_line:\n book_data = data_line.strip().split()\n book_data_len = len(book_data)\n if book_data_len > max_len:\n max_len = book_data_len\n data_line = data_index.readline()\n data_index.close()\n return max_len\n",
"step-4": "<mask token>\nimport codecs\nimport numpy as np\n\n\ndef create_documents():\n \"\"\" 按标点符号或是空格存储文件 \"\"\"\n documents_length = 0\n chars, labels = [], []\n chars_file = codecs.open('data/data.data', 'w', 'utf-8')\n labels_file = codecs.open('data/label.data', 'w', 'utf-8')\n with codecs.open('data/train.data', 'r', 'utf-8') as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n else:\n pieces = line.strip().split()\n chars.append(pieces[0])\n labels.append(pieces[1])\n if pieces[0] in ['。', ',', ';', '!', '?']:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n if len(chars) != 0:\n for char in chars:\n chars_file.write(char + '\\t')\n chars_file.write('\\n')\n for label in labels:\n labels_file.write(label + '\\t')\n labels_file.write('\\n')\n documents_length += 1\n chars, labels = [], []\n chars_file.close()\n labels_file.close()\n return documents_length\n\n\ndef create_useful_words(embedding_model):\n return list(embedding_model.wv.vocab.keys())\n\n\ndef create_lexicon(word_dict):\n \"\"\" 生成词典 \"\"\"\n chars = {}\n with codecs.open('data/data.data', 'r', 'utf-8') as f:\n line = f.readline()\n while line:\n book_chars = line.strip().split()\n for sequence in book_chars:\n for char in sequence:\n chars[char] = chars.get(char, 0) + 1\n line = f.readline()\n sorted_chars = sorted(chars.items(), key=lambda x: x[1], reverse=True)\n lexicon = dict([(item[0], index + 1) for index, item in enumerate(\n sorted_chars)])\n del sorted_chars\n for v in lexicon:\n if v not in word_dict:\n lexicon[v] = -1\n lexicon_reverse = dict(zip(lexicon.values(), lexicon.keys()))\n return lexicon, lexicon_reverse\n\n\ndef create_label_index():\n return {'P': 0, 'B': 1, 'M': 2, 'E': 3, 'S': 4, 'U': 5}\n\n\ndef create_index_label():\n return {(0): 'Pad', (1): 'B', (2): 'M', (3): 'E', (4): 'S', (5): 'Unk'}\n\n\ndef create_embedding(embedding_model, embedding_size, lexicon_reverse):\n word_dict = create_useful_words(embedding_model)\n useful_word = []\n useful_word_length = 0\n for word in list(lexicon_reverse.values()):\n if word in word_dict:\n useful_word_length += 1\n useful_word.append(word)\n del word_dict\n embedding_weights = np.zeros((useful_word_length + 2, embedding_size))\n for i in range(useful_word_length):\n embedding_weights[i + 1] = embedding_model.wv[useful_word[i]]\n embedding_weights[-1] = np.random.uniform(-1, 1, embedding_size)\n return useful_word_length, embedding_weights\n\n\ndef create_matrix(lexicon, label_2_index):\n data_index = codecs.open('data/data_index.data', 'w', 'utf-8')\n label_index = codecs.open('data/label_index.data', 'w', 'utf-8')\n file_chars = codecs.open('data/data.data', 'r', 'utf-8')\n file_labels = codecs.open('data/label.data', 'r', 'utf-8')\n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n while chars_line and labels_line:\n book_chars = chars_line.strip().split()\n book_labels = labels_line.strip().split()\n for char, label in zip(book_chars, book_labels):\n data_index.write(str(lexicon[char]) + '\\t')\n label_index.write(str(label_2_index[label]) + '\\t')\n data_index.write('\\n')\n label_index.write('\\n')\n 
chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n file_chars.close()\n file_labels.close()\n data_index.close()\n label_index.close()\n\n\ndef padding_sentences(max_len):\n data_index = codecs.open('data/data_index.data', 'r', 'utf-8')\n label_index = codecs.open('data/label_index.data', 'r', 'utf-8')\n data_index_padding = codecs.open('data/data_index_padding.data', 'w',\n 'utf-8')\n label_index_padding = codecs.open('data/label_index_padding.data', 'w',\n 'utf-8')\n data_line = data_index.readline()\n while data_line:\n book_data = data_line.strip().split()\n book_data_len = len(book_data)\n new_book_data = []\n if book_data_len < max_len:\n new_book_data = [str(0)] * (max_len - book_data_len) + book_data\n else:\n new_book_data = book_data\n for data_word in new_book_data:\n data_index_padding.write(data_word + '\\t')\n data_index_padding.write('\\n')\n data_line = data_index.readline()\n label_line = label_index.readline()\n while label_line:\n book_label = label_line.strip().split()\n book_label_len = len(book_label)\n new_book_label = []\n if book_label_len < max_len:\n new_book_label = [str(0)] * (max_len - book_label_len) + book_label\n else:\n new_book_label = book_label\n for label_word in new_book_label:\n label_index_padding.write(label_word + '\\t')\n label_index_padding.write('\\n')\n label_line = label_index.readline()\n data_index.close()\n label_index.close()\n data_index_padding.close()\n label_index_padding.close()\n\n\ndef maxlen_2d_list():\n max_len = 0\n data_index = codecs.open('data/data_index.data', 'r', 'utf-8')\n data_line = data_index.readline()\n while data_line:\n book_data = data_line.strip().split()\n book_data_len = len(book_data)\n if book_data_len > max_len:\n max_len = book_data_len\n data_line = data_index.readline()\n data_index.close()\n return max_len\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 3 09:12:11 2018\n\n@author: shen1994\n\"\"\"\n\nimport codecs\nimport numpy as np\n\ndef create_documents():\n \"\"\" 按标点符号或是空格存储文件 \"\"\"\n documents_length = 0\n chars,labels = [],[]\n\n chars_file = codecs.open(\"data/data.data\", 'w', 'utf-8')\n labels_file = codecs.open(\"data/label.data\", 'w', 'utf-8')\n \n\n with codecs.open(\"data/train.data\", 'r', 'utf-8') as f:\n for line in f:\n\n line=line.strip()\n\t\t\t\n if len(line)==0:\n if len(chars)!=0:\n for char in chars:\n chars_file.write(char + \"\\t\")\n chars_file.write(\"\\n\")\n for label in labels:\n labels_file.write(label + \"\\t\")\n labels_file.write(\"\\n\")\n documents_length += 1\n chars, labels=[], []\n\n else:\n pieces=line.strip().split()\n chars.append(pieces[0])\n labels.append(pieces[1])\n\n if pieces[0] in ['。',',',';','!','?']:\n \n for char in chars:\n chars_file.write(char + \"\\t\")\n chars_file.write(\"\\n\")\n for label in labels:\n labels_file.write(label + \"\\t\")\n labels_file.write(\"\\n\")\n \n documents_length += 1\n chars, labels=[], []\n\n if len(chars)!=0:\n \n for char in chars:\n chars_file.write(char + \"\\t\")\n chars_file.write(\"\\n\")\n for label in labels:\n labels_file.write(label + \"\\t\")\n labels_file.write(\"\\n\")\n \n documents_length += 1\n chars, labels=[], []\n\n chars_file.close()\n labels_file.close()\n \n return documents_length\n \ndef create_useful_words(embedding_model):\n \n return list(embedding_model.wv.vocab.keys())\n \ndef create_lexicon(word_dict):\n \"\"\" 生成词典 \"\"\"\n chars = {}\n # 统计词出现的次数\n with codecs.open(\"data/data.data\", 'r', 'utf-8') as f:\n line = f.readline()\n while(line):\n \n book_chars = line.strip().split()\n for sequence in book_chars:\n for char in sequence:\n chars[char] = chars.get(char,0) + 1\n\n line = f.readline()\n\n sorted_chars = sorted(chars.items(), key=lambda x:x[1], reverse=True)\n\n # 下标从1开始 0用来补长\n lexicon = dict([(item[0],index+1) for index, item in enumerate(sorted_chars)])\n \n del sorted_chars\n \n # 替换无用词的标记,标记为-1\n for v in lexicon:\n if v not in word_dict:\n lexicon[v] = -1\n\n lexicon_reverse = dict(zip(lexicon.values(), lexicon.keys()))\n \n return lexicon, lexicon_reverse\n \ndef create_label_index(): \n\n return {'P':0, 'B':1, 'M':2, 'E':3, 'S':4, 'U':5}\n\ndef create_index_label(): \n\n return {0:'Pad',1:'B',2:'M',3:'E',4:'S',5:'Unk'}\n \ndef create_embedding(embedding_model, embedding_size, lexicon_reverse):\n \n word_dict = create_useful_words(embedding_model)\n \n useful_word = []\n useful_word_length = 0\n for word in list(lexicon_reverse.values()):\n if word in word_dict:\n useful_word_length += 1\n useful_word.append(word)\n \n del word_dict\n \n # 增加 padding 和 unknown\n embedding_weights = np.zeros((useful_word_length + 2, embedding_size))\n \n for i in range(useful_word_length):\n embedding_weights[i + 1] = embedding_model.wv[useful_word[i]]\n\n # 无效词嵌入向量\n embedding_weights[-1] = np.random.uniform(-1, 1, embedding_size)\n \n return useful_word_length, embedding_weights\n \ndef create_matrix(lexicon, label_2_index):\n\n data_index = codecs.open(\"data/data_index.data\", 'w', 'utf-8')\n label_index = codecs.open(\"data/label_index.data\", 'w', 'utf-8')\n\n file_chars = codecs.open(\"data/data.data\", 'r', 'utf-8')\n file_labels = codecs.open(\"data/label.data\", 'r', 'utf-8')\n \n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n \n while (chars_line and labels_line):\n \n book_chars = chars_line.strip().split()\n 
book_labels = labels_line.strip().split()\n \n for char, label in zip(book_chars, book_labels):\n data_index.write(str(lexicon[char]) + \"\\t\")\n label_index.write(str(label_2_index[label]) + \"\\t\")\n \n data_index.write(\"\\n\")\n label_index.write(\"\\n\")\n \n chars_line = file_chars.readline()\n labels_line = file_labels.readline()\n \n file_chars.close()\n file_labels.close()\n \n data_index.close()\n label_index.close()\n \ndef padding_sentences(max_len):\n \n data_index = codecs.open(\"data/data_index.data\", 'r', 'utf-8')\n label_index = codecs.open(\"data/label_index.data\", 'r', 'utf-8')\n \n data_index_padding = codecs.open(\"data/data_index_padding.data\", 'w', 'utf-8')\n label_index_padding = codecs.open(\"data/label_index_padding.data\", 'w', 'utf-8')\n \n data_line = data_index.readline()\n \n while data_line:\n \n book_data = data_line.strip().split()\n \n book_data_len = len(book_data)\n \n new_book_data = []\n \n if book_data_len < max_len:\n new_book_data = ([str(0)] * (max_len - book_data_len) + book_data)\n else:\n new_book_data = book_data\n \n for data_word in new_book_data:\n \n data_index_padding.write(data_word + \"\\t\")\n \n data_index_padding.write(\"\\n\")\n \n data_line = data_index.readline()\n\n label_line = label_index.readline()\n \n while label_line:\n \n book_label = label_line.strip().split()\n \n book_label_len = len(book_label)\n \n new_book_label = []\n\n if book_label_len < max_len:\n new_book_label = ([str(0)] * (max_len - book_label_len) + book_label)\n else:\n new_book_label = book_label\n \n for label_word in new_book_label:\n \n label_index_padding.write(label_word + \"\\t\")\n \n label_index_padding.write(\"\\n\")\n \n label_line = label_index.readline()\n \n data_index.close()\n label_index.close()\n data_index_padding.close()\n label_index_padding.close()\n \ndef maxlen_2d_list():\n \n max_len = 0\n \n data_index = codecs.open(\"data/data_index.data\", 'r', 'utf-8')\n \n data_line = data_index.readline()\n \n while data_line:\n \n book_data = data_line.strip().split()\n \n book_data_len = len(book_data)\n \n if book_data_len > max_len:\n \n max_len = book_data_len\n \n data_line = data_index.readline()\n \n data_index.close()\n \n return max_len \n ",
"step-ids": [
4,
7,
8,
10,
11
]
}
|
[
4,
7,
8,
10,
11
] |
from config import Config
from flask import Flask
from flask_cors import CORS
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
CORS(app)
app.config.from_object(Config)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///ws.db'
# app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://api:uyLmQ5M1AjCvm1R2@localhost/ws'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db)
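# `routes` is imported at the bottom (a common Flask pattern) so that ws.routes
# can import `app` and `db` from this module without a circular import.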
from ws import routes
|
normal
|
{
"blob_id": "f494d8aeee8c72cce8fc14e44ca896bcf30c100a",
"index": 5627,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nCORS(app)\napp.config.from_object(Config)\n<mask token>\n",
"step-3": "<mask token>\napp = Flask(__name__)\nCORS(app)\napp.config.from_object(Config)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///ws.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n<mask token>\n",
"step-4": "from config import Config\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\napp = Flask(__name__)\nCORS(app)\napp.config.from_object(Config)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///ws.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\nfrom ws import routes\n",
"step-5": "from config import Config\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\nCORS(app)\napp.config.from_object(Config)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///ws.db'\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://api:uyLmQ5M1AjCvm1R2@localhost/ws'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\nfrom ws import routes\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from ...routes import Route
from .providers import SQSProvider
from .message_translators import SQSMessageTranslator, SNSMessageTranslator
class SQSRoute(Route):
def __init__(self, provider_queue, provider_options=None, *args, **kwargs):
provider_options = provider_options or {}
provider = SQSProvider(provider_queue, **provider_options)
kwargs['provider'] = provider
if 'message_translator' not in kwargs:
kwargs['message_translator'] = SQSMessageTranslator()
if 'name' not in kwargs:
kwargs['name'] = provider_queue
super().__init__(*args, **kwargs)
class SNSQueueRoute(Route):
def __init__(self, provider_queue, provider_options=None, *args, **kwargs):
provider_options = provider_options or {}
provider = SQSProvider(provider_queue, **provider_options)
kwargs['provider'] = provider
if 'message_translator' not in kwargs:
kwargs['message_translator'] = SNSMessageTranslator()
if 'name' not in kwargs:
kwargs['name'] = provider_queue
super().__init__(*args, **kwargs)
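# Editor's note: SNSQueueRoute mirrors SQSRoute; the only difference is the
# translator, which is expected to unwrap messages that arrive wrapped in an
# SNS notification envelope instead of raw SQS payloads.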
|
normal
|
{
"blob_id": "041f1d7c482fe4f65e8cc5a508da62ee6ccf59ff",
"index": 6686,
"step-1": "<mask token>\n\n\nclass SNSQueueRoute(Route):\n\n def __init__(self, provider_queue, provider_options=None, *args, **kwargs):\n provider_options = provider_options or {}\n provider = SQSProvider(provider_queue, **provider_options)\n kwargs['provider'] = provider\n if 'message_translator' not in kwargs:\n kwargs['message_translator'] = SNSMessageTranslator()\n if 'name' not in kwargs:\n kwargs['name'] = provider_queue\n super().__init__(*args, **kwargs)\n",
"step-2": "<mask token>\n\n\nclass SQSRoute(Route):\n <mask token>\n\n\nclass SNSQueueRoute(Route):\n\n def __init__(self, provider_queue, provider_options=None, *args, **kwargs):\n provider_options = provider_options or {}\n provider = SQSProvider(provider_queue, **provider_options)\n kwargs['provider'] = provider\n if 'message_translator' not in kwargs:\n kwargs['message_translator'] = SNSMessageTranslator()\n if 'name' not in kwargs:\n kwargs['name'] = provider_queue\n super().__init__(*args, **kwargs)\n",
"step-3": "<mask token>\n\n\nclass SQSRoute(Route):\n\n def __init__(self, provider_queue, provider_options=None, *args, **kwargs):\n provider_options = provider_options or {}\n provider = SQSProvider(provider_queue, **provider_options)\n kwargs['provider'] = provider\n if 'message_translator' not in kwargs:\n kwargs['message_translator'] = SQSMessageTranslator()\n if 'name' not in kwargs:\n kwargs['name'] = provider_queue\n super().__init__(*args, **kwargs)\n\n\nclass SNSQueueRoute(Route):\n\n def __init__(self, provider_queue, provider_options=None, *args, **kwargs):\n provider_options = provider_options or {}\n provider = SQSProvider(provider_queue, **provider_options)\n kwargs['provider'] = provider\n if 'message_translator' not in kwargs:\n kwargs['message_translator'] = SNSMessageTranslator()\n if 'name' not in kwargs:\n kwargs['name'] = provider_queue\n super().__init__(*args, **kwargs)\n",
"step-4": "from ...routes import Route\nfrom .providers import SQSProvider\nfrom .message_translators import SQSMessageTranslator, SNSMessageTranslator\n\n\nclass SQSRoute(Route):\n\n def __init__(self, provider_queue, provider_options=None, *args, **kwargs):\n provider_options = provider_options or {}\n provider = SQSProvider(provider_queue, **provider_options)\n kwargs['provider'] = provider\n if 'message_translator' not in kwargs:\n kwargs['message_translator'] = SQSMessageTranslator()\n if 'name' not in kwargs:\n kwargs['name'] = provider_queue\n super().__init__(*args, **kwargs)\n\n\nclass SNSQueueRoute(Route):\n\n def __init__(self, provider_queue, provider_options=None, *args, **kwargs):\n provider_options = provider_options or {}\n provider = SQSProvider(provider_queue, **provider_options)\n kwargs['provider'] = provider\n if 'message_translator' not in kwargs:\n kwargs['message_translator'] = SNSMessageTranslator()\n if 'name' not in kwargs:\n kwargs['name'] = provider_queue\n super().__init__(*args, **kwargs)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
'''
sin(x) = x^1/1! - x^3/3! + x^5/5! - x^7/7! + …..
Input : x, n ( No. of terms I want in series )
Input : 3.14, 10
Output : sin(3.14) = sin(180) = 0
Radians vs Degrees
( 0, 30, 60, 90 ….)
2pi = 360
Pi = 180
Pseudo code :
1.Take input variables radians,num
2. sin = 0
3. Indices = 1
4. odd = 1
5. Iterate index from 1 to num with condition index <= num
       If index%2 == 1
           sin = sin + exponent(radians,odd)/factorial(odd)
       If index%2 == 0
           sin = sin - exponent(radians,odd)/factorial(odd)
       index += 1
       odd += 2
6. Print the value of sin
'''
def exponent(base,index):
if(index == 0 and base == 0):
return -1
elif(index == 0):
return 1
elif(base == 0):
return 0
else:
product = 1
for indices in range(index):
product *= base
return product
def factorial(num):
if(num == 0):
return 1
else:
fact = 1
index =1
while(index <= num):
fact *= index
index = index+1
return fact
radians = 3*3.14159/2
num = 15
sin = 0
index = 1
odd = 1
while(index <= num):
if(index%2 == 1):
sin = sin + (exponent(radians,odd)/factorial(odd))
if(index%2 == 0):
sin = sin - (exponent(radians,odd)/factorial(odd))
index += 1
odd += 2
print("The value of sin for the given radians is :",sin)
|
normal
|
{
"blob_id": "a99426c0751885f17078e709fd523cf3a26f5286",
"index": 5533,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef exponent(base, index):\n if index == 0 and base == 0:\n return -1\n elif index == 0:\n return 1\n elif base == 0:\n return 0\n else:\n product = 1\n for indices in range(index):\n product *= base\n return product\n\n\ndef factorial(num):\n if num == 0:\n return 1\n else:\n fact = 1\n index = 1\n while index <= num:\n fact *= index\n index = index + 1\n return fact\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef exponent(base, index):\n if index == 0 and base == 0:\n return -1\n elif index == 0:\n return 1\n elif base == 0:\n return 0\n else:\n product = 1\n for indices in range(index):\n product *= base\n return product\n\n\ndef factorial(num):\n if num == 0:\n return 1\n else:\n fact = 1\n index = 1\n while index <= num:\n fact *= index\n index = index + 1\n return fact\n\n\n<mask token>\nwhile index <= num:\n if index % 2 == 1:\n sin = sin + exponent(radians, odd) / factorial(odd)\n if index % 2 == 0:\n sin = sin - exponent(radians, odd) / factorial(odd)\n index += 1\n odd += 2\nprint('The value of sin for the given radians is :', sin)\n",
"step-4": "<mask token>\n\n\ndef exponent(base, index):\n if index == 0 and base == 0:\n return -1\n elif index == 0:\n return 1\n elif base == 0:\n return 0\n else:\n product = 1\n for indices in range(index):\n product *= base\n return product\n\n\ndef factorial(num):\n if num == 0:\n return 1\n else:\n fact = 1\n index = 1\n while index <= num:\n fact *= index\n index = index + 1\n return fact\n\n\nradians = 3 * 3.14159 / 2\nnum = 15\nsin = 0\nindex = 1\nodd = 1\nwhile index <= num:\n if index % 2 == 1:\n sin = sin + exponent(radians, odd) / factorial(odd)\n if index % 2 == 0:\n sin = sin - exponent(radians, odd) / factorial(odd)\n index += 1\n odd += 2\nprint('The value of sin for the given radians is :', sin)\n",
"step-5": "'''\nsin(x) = x^1/1! - x^3/3! + x^5/5! - x^7/7! + …..\n\nInput : x, n ( No. of terms I want in series )\n\nInput : 3.14, 10\n\nOutput : sin(3.14) = sin(180) = 0\n\nRadians vs Degrees\n\n\n( 0, 30, 60, 90 ….)\n2pi = 360\nPi = 180\n\n\nPseudo code :\n1.Take input variables radians,num\n2. sin = 0\n3. Indices = 1\n4. odd = 1\n4. Iterate indices from 1 to num with condition index <= num\n\tIf index%2 == 1\n sin = sin + exponent(radians,odd)/factorial(odd)\n If index%2 == 0\n\tsin = sin - exponent(radians,odd)/factorial(odd)\n Index += 1\n odd += 2\n5 . print the value of th sin\n\n'''\ndef exponent(base,index):\n if(index == 0 and base == 0):\n return -1\n elif(index == 0):\n return 1\n elif(base == 0):\n return 0\n else:\n product = 1\n for indices in range(index):\n product *= base\n return product\n\ndef factorial(num):\n if(num == 0):\n return 1\n else:\n fact = 1\n index =1\n while(index <= num):\n fact *= index\n index = index+1\n return fact\n\n\nradians = 3*3.14159/2\nnum = 15\nsin = 0\nindex = 1\nodd = 1\nwhile(index <= num):\n if(index%2 == 1):\n sin = sin + (exponent(radians,odd)/factorial(odd))\n if(index%2 == 0):\n sin = sin - (exponent(radians,odd)/factorial(odd))\n index += 1\n odd += 2\nprint(\"The value of sin for the given radians is :\",sin)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CnnArticleItem(scrapy.Item):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class GoogleArticleItem(scrapy.Item):
title = scrapy.Field()
date = scrapy.Field()
snippet = scrapy.Field()
source = scrapy.Field()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CnnArticleItem(scrapy.Item):
title = scrapy.Field()
developments = scrapy.Field()
body = scrapy.Field()
date = scrapy.Field()
class GoogleArticleItem(scrapy.Item):
title = scrapy.Field()
date = scrapy.Field()
snippet = scrapy.Field()
source = scrapy.Field()
<|reserved_special_token_1|>
import scrapy
class CnnArticleItem(scrapy.Item):
title = scrapy.Field()
developments = scrapy.Field()
body = scrapy.Field()
date = scrapy.Field()
class GoogleArticleItem(scrapy.Item):
title = scrapy.Field()
date = scrapy.Field()
snippet = scrapy.Field()
source = scrapy.Field()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class CnnArticleItem(scrapy.Item):
title = scrapy.Field()
developments = scrapy.Field()
body = scrapy.Field()
date = scrapy.Field()
class GoogleArticleItem(scrapy.Item):
title = scrapy.Field()
date = scrapy.Field()
snippet = scrapy.Field()
source = scrapy.Field()
|
flexible
|
{
"blob_id": "cf0eb9685cdfc412871d3b36270ddab3e520bb8f",
"index": 104,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CnnArticleItem(scrapy.Item):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass GoogleArticleItem(scrapy.Item):\n title = scrapy.Field()\n date = scrapy.Field()\n snippet = scrapy.Field()\n source = scrapy.Field()\n",
"step-3": "<mask token>\n\n\nclass CnnArticleItem(scrapy.Item):\n title = scrapy.Field()\n developments = scrapy.Field()\n body = scrapy.Field()\n date = scrapy.Field()\n\n\nclass GoogleArticleItem(scrapy.Item):\n title = scrapy.Field()\n date = scrapy.Field()\n snippet = scrapy.Field()\n source = scrapy.Field()\n",
"step-4": "import scrapy\n\n\nclass CnnArticleItem(scrapy.Item):\n title = scrapy.Field()\n developments = scrapy.Field()\n body = scrapy.Field()\n date = scrapy.Field()\n\n\nclass GoogleArticleItem(scrapy.Item):\n title = scrapy.Field()\n date = scrapy.Field()\n snippet = scrapy.Field()\n source = scrapy.Field()\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\nclass CnnArticleItem(scrapy.Item):\n title = scrapy.Field()\n developments = scrapy.Field()\n body = scrapy.Field()\n date = scrapy.Field()\n\nclass GoogleArticleItem(scrapy.Item):\n title = scrapy.Field()\n date = scrapy.Field()\n snippet = scrapy.Field()\n source = scrapy.Field()",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class BatchGenerator(object):
def __init__(self, data, batch_size=1):
self.inputs, self.labels = data
self.batch_size = batch_size
self.data_length = len(self.inputs)
self.sequence_length = np.array([x.shape[0] for x in self.inputs])
def next_batch(self):
self._suffle()
start = 0
end = 0
batch_size, data_length = self.batch_size, self.data_length
while end != data_length:
end += batch_size
end = data_length if end >= data_length else end
yield self._get(start, end)
start = end
def _suffle(self):
permutation = np.random.permutation(self.data_length)
self.inputs = self.inputs[permutation]
self.labels = self.labels[permutation]
self.sequence_length = self.sequence_length[permutation]
def _get(self, start, end):
sequence_length = self.sequence_length[start:end]
batch_sequence_length = np.max(sequence_length)
inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -
len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]
)
labels = list_to_sparse(self.labels[start:end])
return inputs, labels, sequence_length
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def list_to_sparse(inputs):
"""Convert list of lists into scipy coo matrix.
"""
data = list(itertools.chain(*inputs))
row = list(itertools.chain(*[itertools.repeat(i, len(x)) for i, x in
enumerate(inputs)]))
col = list(itertools.chain(*[range(len(x)) for x in inputs]))
s = coo_matrix((data, (row, col)), shape=(len(inputs), np.max([len(x) for
x in inputs])))
return s
class BatchGenerator(object):
def __init__(self, data, batch_size=1):
self.inputs, self.labels = data
self.batch_size = batch_size
self.data_length = len(self.inputs)
self.sequence_length = np.array([x.shape[0] for x in self.inputs])
def next_batch(self):
self._suffle()
start = 0
end = 0
batch_size, data_length = self.batch_size, self.data_length
while end != data_length:
end += batch_size
end = data_length if end >= data_length else end
yield self._get(start, end)
start = end
def _suffle(self):
permutation = np.random.permutation(self.data_length)
self.inputs = self.inputs[permutation]
self.labels = self.labels[permutation]
self.sequence_length = self.sequence_length[permutation]
def _get(self, start, end):
sequence_length = self.sequence_length[start:end]
batch_sequence_length = np.max(sequence_length)
inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -
len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]
)
labels = list_to_sparse(self.labels[start:end])
return inputs, labels, sequence_length
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def merge_and_split(inputs, labels):
df = inputs.reset_index().merge(labels.reset_index(), on='utterance',
how='inner').set_index('utterance')
return df.feat, df.label
def list_to_sparse(inputs):
"""Convert list of lists into scipy coo matrix.
"""
data = list(itertools.chain(*inputs))
row = list(itertools.chain(*[itertools.repeat(i, len(x)) for i, x in
enumerate(inputs)]))
col = list(itertools.chain(*[range(len(x)) for x in inputs]))
s = coo_matrix((data, (row, col)), shape=(len(inputs), np.max([len(x) for
x in inputs])))
return s
class BatchGenerator(object):
def __init__(self, data, batch_size=1):
self.inputs, self.labels = data
self.batch_size = batch_size
self.data_length = len(self.inputs)
self.sequence_length = np.array([x.shape[0] for x in self.inputs])
def next_batch(self):
self._suffle()
start = 0
end = 0
batch_size, data_length = self.batch_size, self.data_length
while end != data_length:
end += batch_size
end = data_length if end >= data_length else end
yield self._get(start, end)
start = end
def _suffle(self):
permutation = np.random.permutation(self.data_length)
self.inputs = self.inputs[permutation]
self.labels = self.labels[permutation]
self.sequence_length = self.sequence_length[permutation]
def _get(self, start, end):
sequence_length = self.sequence_length[start:end]
batch_sequence_length = np.max(sequence_length)
inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -
len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]
)
labels = list_to_sparse(self.labels[start:end])
return inputs, labels, sequence_length
<|reserved_special_token_1|>
import itertools
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix
def merge_and_split(inputs, labels):
df = inputs.reset_index().merge(labels.reset_index(), on='utterance',
how='inner').set_index('utterance')
return df.feat, df.label
def list_to_sparse(inputs):
"""Convert list of lists into scipy coo matrix.
"""
data = list(itertools.chain(*inputs))
row = list(itertools.chain(*[itertools.repeat(i, len(x)) for i, x in
enumerate(inputs)]))
col = list(itertools.chain(*[range(len(x)) for x in inputs]))
s = coo_matrix((data, (row, col)), shape=(len(inputs), np.max([len(x) for
x in inputs])))
return s
class BatchGenerator(object):
def __init__(self, data, batch_size=1):
self.inputs, self.labels = data
self.batch_size = batch_size
self.data_length = len(self.inputs)
self.sequence_length = np.array([x.shape[0] for x in self.inputs])
def next_batch(self):
self._suffle()
start = 0
end = 0
batch_size, data_length = self.batch_size, self.data_length
while end != data_length:
end += batch_size
end = data_length if end >= data_length else end
yield self._get(start, end)
start = end
def _suffle(self):
permutation = np.random.permutation(self.data_length)
self.inputs = self.inputs[permutation]
self.labels = self.labels[permutation]
self.sequence_length = self.sequence_length[permutation]
def _get(self, start, end):
sequence_length = self.sequence_length[start:end]
batch_sequence_length = np.max(sequence_length)
inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -
len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]
)
labels = list_to_sparse(self.labels[start:end])
return inputs, labels, sequence_length
|
flexible
|
{
"blob_id": "912928cea0f96e601eecfcb6dba695ef26a3c6e2",
"index": 9618,
"step-1": "<mask token>\n\n\nclass BatchGenerator(object):\n\n def __init__(self, data, batch_size=1):\n self.inputs, self.labels = data\n self.batch_size = batch_size\n self.data_length = len(self.inputs)\n self.sequence_length = np.array([x.shape[0] for x in self.inputs])\n\n def next_batch(self):\n self._suffle()\n start = 0\n end = 0\n batch_size, data_length = self.batch_size, self.data_length\n while end != data_length:\n end += batch_size\n end = data_length if end >= data_length else end\n yield self._get(start, end)\n start = end\n\n def _suffle(self):\n permutation = np.random.permutation(self.data_length)\n self.inputs = self.inputs[permutation]\n self.labels = self.labels[permutation]\n self.sequence_length = self.sequence_length[permutation]\n\n def _get(self, start, end):\n sequence_length = self.sequence_length[start:end]\n batch_sequence_length = np.max(sequence_length)\n inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -\n len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]\n )\n labels = list_to_sparse(self.labels[start:end])\n return inputs, labels, sequence_length\n",
"step-2": "<mask token>\n\n\ndef list_to_sparse(inputs):\n \"\"\"Convert list of lists into scipy coo matrix.\n \"\"\"\n data = list(itertools.chain(*inputs))\n row = list(itertools.chain(*[itertools.repeat(i, len(x)) for i, x in\n enumerate(inputs)]))\n col = list(itertools.chain(*[range(len(x)) for x in inputs]))\n s = coo_matrix((data, (row, col)), shape=(len(inputs), np.max([len(x) for\n x in inputs])))\n return s\n\n\nclass BatchGenerator(object):\n\n def __init__(self, data, batch_size=1):\n self.inputs, self.labels = data\n self.batch_size = batch_size\n self.data_length = len(self.inputs)\n self.sequence_length = np.array([x.shape[0] for x in self.inputs])\n\n def next_batch(self):\n self._suffle()\n start = 0\n end = 0\n batch_size, data_length = self.batch_size, self.data_length\n while end != data_length:\n end += batch_size\n end = data_length if end >= data_length else end\n yield self._get(start, end)\n start = end\n\n def _suffle(self):\n permutation = np.random.permutation(self.data_length)\n self.inputs = self.inputs[permutation]\n self.labels = self.labels[permutation]\n self.sequence_length = self.sequence_length[permutation]\n\n def _get(self, start, end):\n sequence_length = self.sequence_length[start:end]\n batch_sequence_length = np.max(sequence_length)\n inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -\n len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]\n )\n labels = list_to_sparse(self.labels[start:end])\n return inputs, labels, sequence_length\n",
"step-3": "<mask token>\n\n\ndef merge_and_split(inputs, labels):\n df = inputs.reset_index().merge(labels.reset_index(), on='utterance',\n how='inner').set_index('utterance')\n return df.feat, df.label\n\n\ndef list_to_sparse(inputs):\n \"\"\"Convert list of lists into scipy coo matrix.\n \"\"\"\n data = list(itertools.chain(*inputs))\n row = list(itertools.chain(*[itertools.repeat(i, len(x)) for i, x in\n enumerate(inputs)]))\n col = list(itertools.chain(*[range(len(x)) for x in inputs]))\n s = coo_matrix((data, (row, col)), shape=(len(inputs), np.max([len(x) for\n x in inputs])))\n return s\n\n\nclass BatchGenerator(object):\n\n def __init__(self, data, batch_size=1):\n self.inputs, self.labels = data\n self.batch_size = batch_size\n self.data_length = len(self.inputs)\n self.sequence_length = np.array([x.shape[0] for x in self.inputs])\n\n def next_batch(self):\n self._suffle()\n start = 0\n end = 0\n batch_size, data_length = self.batch_size, self.data_length\n while end != data_length:\n end += batch_size\n end = data_length if end >= data_length else end\n yield self._get(start, end)\n start = end\n\n def _suffle(self):\n permutation = np.random.permutation(self.data_length)\n self.inputs = self.inputs[permutation]\n self.labels = self.labels[permutation]\n self.sequence_length = self.sequence_length[permutation]\n\n def _get(self, start, end):\n sequence_length = self.sequence_length[start:end]\n batch_sequence_length = np.max(sequence_length)\n inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -\n len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]\n )\n labels = list_to_sparse(self.labels[start:end])\n return inputs, labels, sequence_length\n",
"step-4": "import itertools\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import coo_matrix\n\n\ndef merge_and_split(inputs, labels):\n df = inputs.reset_index().merge(labels.reset_index(), on='utterance',\n how='inner').set_index('utterance')\n return df.feat, df.label\n\n\ndef list_to_sparse(inputs):\n \"\"\"Convert list of lists into scipy coo matrix.\n \"\"\"\n data = list(itertools.chain(*inputs))\n row = list(itertools.chain(*[itertools.repeat(i, len(x)) for i, x in\n enumerate(inputs)]))\n col = list(itertools.chain(*[range(len(x)) for x in inputs]))\n s = coo_matrix((data, (row, col)), shape=(len(inputs), np.max([len(x) for\n x in inputs])))\n return s\n\n\nclass BatchGenerator(object):\n\n def __init__(self, data, batch_size=1):\n self.inputs, self.labels = data\n self.batch_size = batch_size\n self.data_length = len(self.inputs)\n self.sequence_length = np.array([x.shape[0] for x in self.inputs])\n\n def next_batch(self):\n self._suffle()\n start = 0\n end = 0\n batch_size, data_length = self.batch_size, self.data_length\n while end != data_length:\n end += batch_size\n end = data_length if end >= data_length else end\n yield self._get(start, end)\n start = end\n\n def _suffle(self):\n permutation = np.random.permutation(self.data_length)\n self.inputs = self.inputs[permutation]\n self.labels = self.labels[permutation]\n self.sequence_length = self.sequence_length[permutation]\n\n def _get(self, start, end):\n sequence_length = self.sequence_length[start:end]\n batch_sequence_length = np.max(sequence_length)\n inputs = np.array([np.pad(x, pad_width=((0, batch_sequence_length -\n len(x)), (0, 0)), mode='constant') for x in self.inputs[start:end]]\n )\n labels = list_to_sparse(self.labels[start:end])\n return inputs, labels, sequence_length\n",
"step-5": null,
"step-ids": [
5,
6,
7,
8
]
}
|
[
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
fig.gca().plot(x, y, 'k.')
<|reserved_special_token_0|>
canvas.draw()
<|reserved_special_token_0|>
fig.gca().plot([bbox.x0, bbox.x1, bbox.x1, bbox.x0, bbox.x0], [bbox.y0,
bbox.y0, bbox.y1, bbox.y1, bbox.y0], 'k:', transform=None)
canvas.print_figure('img.png')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
fig = mpl_Figure()
x, y, text = 5, 7, 'My label text'
fig.gca().plot(x, y, 'k.')
canvas = mpl_Canvas(fig)
t = fig.gca().text(x, y, text, color='red')
canvas.draw()
bbox = t.get_window_extent(renderer=canvas.get_renderer())
fig.gca().plot([bbox.x0, bbox.x1, bbox.x1, bbox.x0, bbox.x0], [bbox.y0,
bbox.y0, bbox.y1, bbox.y1, bbox.y0], 'k:', transform=None)
canvas.print_figure('img.png')
<|reserved_special_token_1|>
from matplotlib.figure import Figure as mpl_Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as mpl_Canvas
fig = mpl_Figure()
x, y, text = 5, 7, 'My label text'
fig.gca().plot(x, y, 'k.')
canvas = mpl_Canvas(fig)
t = fig.gca().text(x, y, text, color='red')
canvas.draw()
bbox = t.get_window_extent(renderer=canvas.get_renderer())
fig.gca().plot([bbox.x0, bbox.x1, bbox.x1, bbox.x0, bbox.x0], [bbox.y0,
bbox.y0, bbox.y1, bbox.y1, bbox.y0], 'k:', transform=None)
canvas.print_figure('img.png')
<|reserved_special_token_1|>
# https://stackoverflow.com/questions/69473844/can-you-calculate-the-size-of-a-text-annotation-in-matplotlib
from matplotlib.figure import Figure as mpl_Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as mpl_Canvas
fig = mpl_Figure()
x, y, text = 5, 7, 'My label text'
fig.gca().plot(x, y, 'k.')
canvas = mpl_Canvas(fig)
t = fig.gca().text(x, y, text, color='red')
canvas.draw()
bbox = t.get_window_extent(renderer = canvas.get_renderer())
fig.gca().plot(
[bbox.x0, bbox.x1, bbox.x1, bbox.x0, bbox.x0],
[bbox.y0, bbox.y0, bbox.y1, bbox.y1, bbox.y0],
'k:',
transform=None)
canvas.print_figure("img.png")
|
flexible
|
{
"blob_id": "c87f9885e96abdd32df68f9fe1942b2782bd5b96",
"index": 8149,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfig.gca().plot(x, y, 'k.')\n<mask token>\ncanvas.draw()\n<mask token>\nfig.gca().plot([bbox.x0, bbox.x1, bbox.x1, bbox.x0, bbox.x0], [bbox.y0,\n bbox.y0, bbox.y1, bbox.y1, bbox.y0], 'k:', transform=None)\ncanvas.print_figure('img.png')\n",
"step-3": "<mask token>\nfig = mpl_Figure()\nx, y, text = 5, 7, 'My label text'\nfig.gca().plot(x, y, 'k.')\ncanvas = mpl_Canvas(fig)\nt = fig.gca().text(x, y, text, color='red')\ncanvas.draw()\nbbox = t.get_window_extent(renderer=canvas.get_renderer())\nfig.gca().plot([bbox.x0, bbox.x1, bbox.x1, bbox.x0, bbox.x0], [bbox.y0,\n bbox.y0, bbox.y1, bbox.y1, bbox.y0], 'k:', transform=None)\ncanvas.print_figure('img.png')\n",
"step-4": "from matplotlib.figure import Figure as mpl_Figure\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as mpl_Canvas\nfig = mpl_Figure()\nx, y, text = 5, 7, 'My label text'\nfig.gca().plot(x, y, 'k.')\ncanvas = mpl_Canvas(fig)\nt = fig.gca().text(x, y, text, color='red')\ncanvas.draw()\nbbox = t.get_window_extent(renderer=canvas.get_renderer())\nfig.gca().plot([bbox.x0, bbox.x1, bbox.x1, bbox.x0, bbox.x0], [bbox.y0,\n bbox.y0, bbox.y1, bbox.y1, bbox.y0], 'k:', transform=None)\ncanvas.print_figure('img.png')\n",
"step-5": "# https://stackoverflow.com/questions/69473844/can-you-calculate-the-size-of-a-text-annotation-in-matplotlib\n\nfrom matplotlib.figure import Figure as mpl_Figure\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as mpl_Canvas\n\nfig = mpl_Figure()\nx, y, text = 5, 7, 'My label text'\n\nfig.gca().plot(x, y, 'k.')\ncanvas = mpl_Canvas(fig)\nt = fig.gca().text(x, y, text, color='red')\ncanvas.draw()\n\nbbox = t.get_window_extent(renderer = canvas.get_renderer())\nfig.gca().plot(\n [bbox.x0, bbox.x1, bbox.x1, bbox.x0, bbox.x0],\n [bbox.y0, bbox.y0, bbox.y1, bbox.y1, bbox.y0],\n 'k:',\n transform=None)\ncanvas.print_figure(\"img.png\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db import models
# Create your models here.
STATUS_CHOICES=(
('Pending','Pending'),
('Completed','Completed'))
class Appointment(models.Model):
first_name=models.CharField(max_length=100)
last_name=models.CharField(max_length=100)
phone_number=models.CharField(max_length=12,null=False)
date=models.DateField(null=True)
time=models.TimeField(default="10:00")
presciption = models.TextField(max_length=100,default="Write here")
status = models.CharField(max_length=10,choices=STATUS_CHOICES,default="Pending")
def __str__(self):
        return self.first_name + " " + self.last_name
|
normal
|
{
"blob_id": "3343844bf49cb3f4d655613475e44a140ac3106d",
"index": 4505,
"step-1": "<mask token>\n\n\nclass Appointment(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.first_name + self.last_name\n",
"step-2": "<mask token>\n\n\nclass Appointment(models.Model):\n first_name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100)\n phone_number = models.CharField(max_length=12, null=False)\n date = models.DateField(null=True)\n time = models.TimeField(default='10:00')\n presciption = models.TextField(max_length=100, default='Write here')\n status = models.CharField(max_length=10, choices=STATUS_CHOICES,\n default='Pending')\n\n def __str__(self):\n return self.first_name + self.last_name\n",
"step-3": "<mask token>\nSTATUS_CHOICES = ('Pending', 'Pending'), ('Completed', 'Completed')\n\n\nclass Appointment(models.Model):\n first_name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100)\n phone_number = models.CharField(max_length=12, null=False)\n date = models.DateField(null=True)\n time = models.TimeField(default='10:00')\n presciption = models.TextField(max_length=100, default='Write here')\n status = models.CharField(max_length=10, choices=STATUS_CHOICES,\n default='Pending')\n\n def __str__(self):\n return self.first_name + self.last_name\n",
"step-4": "from django.db import models\nSTATUS_CHOICES = ('Pending', 'Pending'), ('Completed', 'Completed')\n\n\nclass Appointment(models.Model):\n first_name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100)\n phone_number = models.CharField(max_length=12, null=False)\n date = models.DateField(null=True)\n time = models.TimeField(default='10:00')\n presciption = models.TextField(max_length=100, default='Write here')\n status = models.CharField(max_length=10, choices=STATUS_CHOICES,\n default='Pending')\n\n def __str__(self):\n return self.first_name + self.last_name\n",
"step-5": "from django.db import models\n\n# Create your models here.\n\nSTATUS_CHOICES=(\n ('Pending','Pending'),\n ('Completed','Completed'))\n\nclass Appointment(models.Model):\n first_name=models.CharField(max_length=100)\n last_name=models.CharField(max_length=100)\n phone_number=models.CharField(max_length=12,null=False)\n date=models.DateField(null=True)\n time=models.TimeField(default=\"10:00\")\n presciption = models.TextField(max_length=100,default=\"Write here\")\n status = models.CharField(max_length=10,choices=STATUS_CHOICES,default=\"Pending\")\n\n def __str__(self):\n return self.first_name + self.last_name ",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import drawSvg
import noise
import random
import math
import numpy as np
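# canvas size, Perlin-noise field parameters and walker (actor) settings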
sizex = 950
sizey = 500
noisescale = 400
persistence = 0.5
lacunarity = 2
seed = random.randint(0, 100)
actorsnum = 1000
stepsnum = 50
steplenght = 2
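# sample 2D Perlin noise at every pixel to build the flow field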
noisemap = np.zeros((sizex, sizey))
for i in range(sizex):
for j in range(sizey):
noisemap[i][j] = noise.pnoise2(i / noisescale, j / noisescale,
octaves=2, persistence=persistence, lacunarity=lacunarity,
repeatx=1024, repeaty=1024, base=seed)
map_max = np.max(noisemap)
map_min = np.min(noisemap)
map_range = map_max - map_min
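# rescale the noise values into the [0, 1] range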
for i in range(sizex):
for j in range(sizey):
k = noisemap[i][j]
k = (k - map_min) / map_range
noisemap[i][j] = k
map_max = np.max(noisemap)
map_min = np.min(noisemap)
def getnoise(x, y):
return noisemap[math.floor(x)][math.floor(y)]
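# an actor starts at a random point and repeatedly steps in the direction given by the local noise value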
class Actor:
def __init__(self):
self.x = random.random() * sizex
self.y = random.random() * sizey
self.xn = self.x
self.yn = self.y
def step(self):
t = getnoise(self.x, self.y) * 5 * math.pi
self.x = self.xn
self.y = self.yn
self.xn += steplenght * math.cos(t)
self.yn += steplenght * math.sin(t)
if self.xn < 0 or self.xn > sizex or self.yn < 0 or self.yn > sizey:
return None
return self.xn, self.yn, self.x, self.y
canvas = drawSvg.Drawing(sizex, sizey, displayInline=False)
actors = []
for a in range(actorsnum):
n = Actor()
actors.append(n)
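# advance every actor one step per iteration and draw the segment it traversed; actors that leave the canvas are removed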
for s in range(stepsnum):
    for a in list(actors):  # iterate over a copy so removing finished actors does not skip elements
p = a.step()
if p:
canvas.append(drawSvg.Line(p[2], p[3], p[0], p[1], stroke=
'black', stroke_width=1))
else:
actors.remove(a)
canvas.saveSvg('test.svg')
|
normal
|
{
"blob_id": "68c9944c788b9976660384e5d1cd0a736c4cd0e6",
"index": 3826,
"step-1": "<mask token>\n\n\nclass Actor:\n\n def __init__(self):\n self.x = random.random() * sizex\n self.y = random.random() * sizey\n self.xn = self.x\n self.yn = self.y\n\n def step(self):\n t = getnoise(self.x, self.y) * 5 * math.pi\n self.x = self.xn\n self.y = self.yn\n self.xn += steplenght * math.cos(t)\n self.yn += steplenght * math.sin(t)\n if self.xn < 0 or self.xn > sizex or self.yn < 0 or self.yn > sizey:\n return None\n return self.xn, self.yn, self.x, self.y\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getnoise(x, y):\n return noisemap[math.floor(x)][math.floor(y)]\n\n\nclass Actor:\n\n def __init__(self):\n self.x = random.random() * sizex\n self.y = random.random() * sizey\n self.xn = self.x\n self.yn = self.y\n\n def step(self):\n t = getnoise(self.x, self.y) * 5 * math.pi\n self.x = self.xn\n self.y = self.yn\n self.xn += steplenght * math.cos(t)\n self.yn += steplenght * math.sin(t)\n if self.xn < 0 or self.xn > sizex or self.yn < 0 or self.yn > sizey:\n return None\n return self.xn, self.yn, self.x, self.y\n\n\n<mask token>\n",
"step-3": "<mask token>\nsizex = 950\nsizey = 500\nnoisescale = 400\npersistence = 0.5\nlacunarity = 2\nseed = random.randint(0, 100)\nactorsnum = 1000\nstepsnum = 50\nsteplenght = 2\nnoisemap = np.zeros((sizex, sizey))\nfor i in range(sizex):\n for j in range(sizey):\n noisemap[i][j] = noise.pnoise2(i / noisescale, j / noisescale,\n octaves=2, persistence=persistence, lacunarity=lacunarity,\n repeatx=1024, repeaty=1024, base=seed)\nmap_max = np.max(noisemap)\nmap_min = np.min(noisemap)\nmap_range = map_max - map_min\nfor i in range(sizex):\n for j in range(sizey):\n k = noisemap[i][j]\n k = (k - map_min) / map_range\n noisemap[i][j] = k\nmap_max = np.max(noisemap)\nmap_min = np.min(noisemap)\n\n\ndef getnoise(x, y):\n return noisemap[math.floor(x)][math.floor(y)]\n\n\nclass Actor:\n\n def __init__(self):\n self.x = random.random() * sizex\n self.y = random.random() * sizey\n self.xn = self.x\n self.yn = self.y\n\n def step(self):\n t = getnoise(self.x, self.y) * 5 * math.pi\n self.x = self.xn\n self.y = self.yn\n self.xn += steplenght * math.cos(t)\n self.yn += steplenght * math.sin(t)\n if self.xn < 0 or self.xn > sizex or self.yn < 0 or self.yn > sizey:\n return None\n return self.xn, self.yn, self.x, self.y\n\n\ncanvas = drawSvg.Drawing(sizex, sizey, displayInline='False')\nactors = []\nfor a in range(actorsnum):\n n = Actor()\n actors.append(n)\nfor s in range(stepsnum):\n for a in actors:\n p = a.step()\n if p:\n canvas.append(drawSvg.Line(p[2], p[3], p[0], p[1], stroke=\n 'black', stroke_width=1))\n else:\n actors.remove(a)\ncanvas.saveSvg('test.svg')\n",
"step-4": "import drawSvg\nimport noise\nimport random\nimport math\nimport numpy as np\nsizex = 950\nsizey = 500\nnoisescale = 400\npersistence = 0.5\nlacunarity = 2\nseed = random.randint(0, 100)\nactorsnum = 1000\nstepsnum = 50\nsteplenght = 2\nnoisemap = np.zeros((sizex, sizey))\nfor i in range(sizex):\n for j in range(sizey):\n noisemap[i][j] = noise.pnoise2(i / noisescale, j / noisescale,\n octaves=2, persistence=persistence, lacunarity=lacunarity,\n repeatx=1024, repeaty=1024, base=seed)\nmap_max = np.max(noisemap)\nmap_min = np.min(noisemap)\nmap_range = map_max - map_min\nfor i in range(sizex):\n for j in range(sizey):\n k = noisemap[i][j]\n k = (k - map_min) / map_range\n noisemap[i][j] = k\nmap_max = np.max(noisemap)\nmap_min = np.min(noisemap)\n\n\ndef getnoise(x, y):\n return noisemap[math.floor(x)][math.floor(y)]\n\n\nclass Actor:\n\n def __init__(self):\n self.x = random.random() * sizex\n self.y = random.random() * sizey\n self.xn = self.x\n self.yn = self.y\n\n def step(self):\n t = getnoise(self.x, self.y) * 5 * math.pi\n self.x = self.xn\n self.y = self.yn\n self.xn += steplenght * math.cos(t)\n self.yn += steplenght * math.sin(t)\n if self.xn < 0 or self.xn > sizex or self.yn < 0 or self.yn > sizey:\n return None\n return self.xn, self.yn, self.x, self.y\n\n\ncanvas = drawSvg.Drawing(sizex, sizey, displayInline='False')\nactors = []\nfor a in range(actorsnum):\n n = Actor()\n actors.append(n)\nfor s in range(stepsnum):\n for a in actors:\n p = a.step()\n if p:\n canvas.append(drawSvg.Line(p[2], p[3], p[0], p[1], stroke=\n 'black', stroke_width=1))\n else:\n actors.remove(a)\ncanvas.saveSvg('test.svg')\n",
"step-5": null,
"step-ids": [
3,
4,
6,
7
]
}
|
[
3,
4,
6,
7
] |
<|reserved_special_token_0|>
def cos_dist(a, b):
if len(a) != len(b):
return None
part_up = 0.0
a_sq = 0.0
b_sq = 0.0
for a1, b1 in zip(a, b):
part_up += a1 * b1
a_sq += a1 ** 2
b_sq += b1 ** 2
part_down = math.sqrt(a_sq * b_sq)
if part_down == 0.0:
return None
else:
return part_up / part_down
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
reload(sys)
sys.setdefaultencoding('utf-8')
<|reserved_special_token_0|>
def cos_dist(a, b):
if len(a) != len(b):
return None
part_up = 0.0
a_sq = 0.0
b_sq = 0.0
for a1, b1 in zip(a, b):
part_up += a1 * b1
a_sq += a1 ** 2
b_sq += b1 ** 2
part_down = math.sqrt(a_sq * b_sq)
if part_down == 0.0:
return None
else:
return part_up / part_down
<|reserved_special_token_0|>
for iii in range(len(vex1)):
sumdot += vex1[iii] * vex2[iii]
<|reserved_special_token_0|>
for index in range(len(vex1)):
sum12 += vex1[index] * vex2[index]
sum1 += vex1[index] ** 2
sum2 += vex2[index] ** 2
<|reserved_special_token_0|>
res.write('余弦: ' + str(Similarity_Cos) + '\n内积: ' + str(sumdot) +
'\nJaccard系数: ' + str(Similarity_Jaccard))
res.close()
print('余弦: ' + str(Similarity_Cos) + ' 内积: ' + str(sumdot) + ' Jaccard系数: ' +
str(Similarity_Jaccard))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
reload(sys)
sys.setdefaultencoding('utf-8')
<|reserved_special_token_0|>
sentence1 = sys.argv[1]
sentence2 = sys.argv[2]
Divlist1 = jieba.lcut(sentence1, cut_all=True)
Divlist2 = jieba.lcut(sentence2, cut_all=True)
Sen = [' '.join(Divlist1), ' '.join(Divlist2)]
vectorizer = CountVectorizer()
transformer = TfidfTransformer()
TFIDF_value = vectorizer.fit_transform(Sen)
word = vectorizer.get_feature_names()
matrix_value = TFIDF_value.toarray()
vex1 = list(matrix_value[0])
vex2 = list(matrix_value[1])
def cos_dist(a, b):
if len(a) != len(b):
return None
part_up = 0.0
a_sq = 0.0
b_sq = 0.0
for a1, b1 in zip(a, b):
part_up += a1 * b1
a_sq += a1 ** 2
b_sq += b1 ** 2
part_down = math.sqrt(a_sq * b_sq)
if part_down == 0.0:
return None
else:
return part_up / part_down
Similarity_Cos = cos_dist(vex1, vex2)
sumdot = 0.0
for iii in range(len(vex1)):
sumdot += vex1[iii] * vex2[iii]
Similarity_dot = sumdot
sum12 = 0.0
sum1 = 0.0
sum2 = 0.0
for index in range(len(vex1)):
sum12 += vex1[index] * vex2[index]
sum1 += vex1[index] ** 2
sum2 += vex2[index] ** 2
Similarity_Jaccard = sum12 / (sum1 + sum2 - sum12)
res = open('SIMresult.txt', 'w')
res.write('余弦: ' + str(Similarity_Cos) + '\n内积: ' + str(sumdot) +
'\nJaccard系数: ' + str(Similarity_Jaccard))
res.close()
print('余弦: ' + str(Similarity_Cos) + ' 内积: ' + str(sumdot) + ' Jaccard系数: ' +
str(Similarity_Jaccard))
<|reserved_special_token_1|>
import jieba
import os
import sys
import math
reload(sys)
sys.setdefaultencoding('utf-8')
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
sentence1 = sys.argv[1]
sentence2 = sys.argv[2]
Divlist1 = jieba.lcut(sentence1, cut_all=True)
Divlist2 = jieba.lcut(sentence2, cut_all=True)
Sen = [' '.join(Divlist1), ' '.join(Divlist2)]
vectorizer = CountVectorizer()
transformer = TfidfTransformer()
TFIDF_value = vectorizer.fit_transform(Sen)
word = vectorizer.get_feature_names()
matrix_value = TFIDF_value.toarray()
vex1 = list(matrix_value[0])
vex2 = list(matrix_value[1])
def cos_dist(a, b):
if len(a) != len(b):
return None
part_up = 0.0
a_sq = 0.0
b_sq = 0.0
for a1, b1 in zip(a, b):
part_up += a1 * b1
a_sq += a1 ** 2
b_sq += b1 ** 2
part_down = math.sqrt(a_sq * b_sq)
if part_down == 0.0:
return None
else:
return part_up / part_down
Similarity_Cos = cos_dist(vex1, vex2)
sumdot = 0.0
for iii in range(len(vex1)):
sumdot += vex1[iii] * vex2[iii]
Similarity_dot = sumdot
sum12 = 0.0
sum1 = 0.0
sum2 = 0.0
for index in range(len(vex1)):
sum12 += vex1[index] * vex2[index]
sum1 += vex1[index] ** 2
sum2 += vex2[index] ** 2
Similarity_Jaccard = sum12 / (sum1 + sum2 - sum12)
res = open('SIMresult.txt', 'w')
res.write('余弦: ' + str(Similarity_Cos) + '\n内积: ' + str(sumdot) +
'\nJaccard系数: ' + str(Similarity_Jaccard))
res.close()
print('余弦: ' + str(Similarity_Cos) + ' 内积: ' + str(sumdot) + ' Jaccard系数: ' +
str(Similarity_Jaccard))
<|reserved_special_token_1|>
# coding:utf-8
import jieba
import os
import sys
import math
reload(sys)
sys.setdefaultencoding('utf-8')
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
#import csv
#import pandas
#import numpy
sentence1 = sys.argv[1]
sentence2 = sys.argv[2]
#sentence1 = '他很喜欢玩游戏,也喜欢看小说'
#sentence2 = '他喜欢玩游戏,最喜欢看小说'
Divlist1 = jieba.lcut(sentence1, cut_all=True)
Divlist2 = jieba.lcut(sentence2, cut_all=True)
Sen = [" ".join(Divlist1), " ".join(Divlist2)]
vectorizer=CountVectorizer()#该类会将文本中的词语转换为词频矩阵
transformer=TfidfTransformer()#该类会统计每个词语的tf-idf权值
TFIDF_value=vectorizer.fit_transform(Sen)
word=vectorizer.get_feature_names()#获取词袋模型中的所有词语
matrix_value = TFIDF_value.toarray()
vex1 = list(matrix_value[0])
vex2 = list(matrix_value[1])
def cos_dist(a, b):
if len(a) != len(b):
return None
part_up = 0.0
a_sq = 0.0
b_sq = 0.0
for a1, b1 in zip(a,b):
part_up += a1*b1
a_sq += a1**2
b_sq += b1**2
part_down = math.sqrt(a_sq*b_sq)
if part_down == 0.0:
return None
else:
return part_up / part_down
Similarity_Cos = cos_dist(vex1, vex2) #余弦
sumdot = 0.0
for iii in range(len(vex1)):
sumdot += vex1[iii] * vex2[iii]
Similarity_dot = sumdot #内积
sum12 = 0.0
sum1 = 0.0
sum2 = 0.0
for index in range(len(vex1)):
sum12 += vex1[index] *vex2[index]
sum1 += vex1[index] ** 2
sum2 += vex2[index] ** 2
Similarity_Jaccard = sum12/(sum1 + sum2 - sum12) #jaccard相似度
res=open("SIMresult.txt", 'w')
res.write('余弦: '+str(Similarity_Cos)+'\n内积: '+str(sumdot)+'\nJaccard系数: '+str(Similarity_Jaccard))
res.close()
print('余弦: '+str(Similarity_Cos)+' 内积: '+str(sumdot)+' Jaccard系数: '+str(Similarity_Jaccard))
#print(' ')
#print(Similarity_dot)
#print(' ')
#print(Similarity_Jaccard)
|
flexible
|
{
"blob_id": "1a7e83fe9528b177246d6374ddaf2a76a0046e83",
"index": 200,
"step-1": "<mask token>\n\n\ndef cos_dist(a, b):\n if len(a) != len(b):\n return None\n part_up = 0.0\n a_sq = 0.0\n b_sq = 0.0\n for a1, b1 in zip(a, b):\n part_up += a1 * b1\n a_sq += a1 ** 2\n b_sq += b1 ** 2\n part_down = math.sqrt(a_sq * b_sq)\n if part_down == 0.0:\n return None\n else:\n return part_up / part_down\n\n\n<mask token>\n",
"step-2": "<mask token>\nreload(sys)\nsys.setdefaultencoding('utf-8')\n<mask token>\n\n\ndef cos_dist(a, b):\n if len(a) != len(b):\n return None\n part_up = 0.0\n a_sq = 0.0\n b_sq = 0.0\n for a1, b1 in zip(a, b):\n part_up += a1 * b1\n a_sq += a1 ** 2\n b_sq += b1 ** 2\n part_down = math.sqrt(a_sq * b_sq)\n if part_down == 0.0:\n return None\n else:\n return part_up / part_down\n\n\n<mask token>\nfor iii in range(len(vex1)):\n sumdot += vex1[iii] * vex2[iii]\n<mask token>\nfor index in range(len(vex1)):\n sum12 += vex1[index] * vex2[index]\n sum1 += vex1[index] ** 2\n sum2 += vex2[index] ** 2\n<mask token>\nres.write('余弦: ' + str(Similarity_Cos) + '\\n内积: ' + str(sumdot) +\n '\\nJaccard系数: ' + str(Similarity_Jaccard))\nres.close()\nprint('余弦: ' + str(Similarity_Cos) + ' 内积: ' + str(sumdot) + ' Jaccard系数: ' +\n str(Similarity_Jaccard))\n",
"step-3": "<mask token>\nreload(sys)\nsys.setdefaultencoding('utf-8')\n<mask token>\nsentence1 = sys.argv[1]\nsentence2 = sys.argv[2]\nDivlist1 = jieba.lcut(sentence1, cut_all=True)\nDivlist2 = jieba.lcut(sentence2, cut_all=True)\nSen = [' '.join(Divlist1), ' '.join(Divlist2)]\nvectorizer = CountVectorizer()\ntransformer = TfidfTransformer()\nTFIDF_value = vectorizer.fit_transform(Sen)\nword = vectorizer.get_feature_names()\nmatrix_value = TFIDF_value.toarray()\nvex1 = list(matrix_value[0])\nvex2 = list(matrix_value[1])\n\n\ndef cos_dist(a, b):\n if len(a) != len(b):\n return None\n part_up = 0.0\n a_sq = 0.0\n b_sq = 0.0\n for a1, b1 in zip(a, b):\n part_up += a1 * b1\n a_sq += a1 ** 2\n b_sq += b1 ** 2\n part_down = math.sqrt(a_sq * b_sq)\n if part_down == 0.0:\n return None\n else:\n return part_up / part_down\n\n\nSimilarity_Cos = cos_dist(vex1, vex2)\nsumdot = 0.0\nfor iii in range(len(vex1)):\n sumdot += vex1[iii] * vex2[iii]\nSimilarity_dot = sumdot\nsum12 = 0.0\nsum1 = 0.0\nsum2 = 0.0\nfor index in range(len(vex1)):\n sum12 += vex1[index] * vex2[index]\n sum1 += vex1[index] ** 2\n sum2 += vex2[index] ** 2\nSimilarity_Jaccard = sum12 / (sum1 + sum2 - sum12)\nres = open('SIMresult.txt', 'w')\nres.write('余弦: ' + str(Similarity_Cos) + '\\n内积: ' + str(sumdot) +\n '\\nJaccard系数: ' + str(Similarity_Jaccard))\nres.close()\nprint('余弦: ' + str(Similarity_Cos) + ' 内积: ' + str(sumdot) + ' Jaccard系数: ' +\n str(Similarity_Jaccard))\n",
"step-4": "import jieba\nimport os\nimport sys\nimport math\nreload(sys)\nsys.setdefaultencoding('utf-8')\nfrom sklearn import feature_extraction\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nsentence1 = sys.argv[1]\nsentence2 = sys.argv[2]\nDivlist1 = jieba.lcut(sentence1, cut_all=True)\nDivlist2 = jieba.lcut(sentence2, cut_all=True)\nSen = [' '.join(Divlist1), ' '.join(Divlist2)]\nvectorizer = CountVectorizer()\ntransformer = TfidfTransformer()\nTFIDF_value = vectorizer.fit_transform(Sen)\nword = vectorizer.get_feature_names()\nmatrix_value = TFIDF_value.toarray()\nvex1 = list(matrix_value[0])\nvex2 = list(matrix_value[1])\n\n\ndef cos_dist(a, b):\n if len(a) != len(b):\n return None\n part_up = 0.0\n a_sq = 0.0\n b_sq = 0.0\n for a1, b1 in zip(a, b):\n part_up += a1 * b1\n a_sq += a1 ** 2\n b_sq += b1 ** 2\n part_down = math.sqrt(a_sq * b_sq)\n if part_down == 0.0:\n return None\n else:\n return part_up / part_down\n\n\nSimilarity_Cos = cos_dist(vex1, vex2)\nsumdot = 0.0\nfor iii in range(len(vex1)):\n sumdot += vex1[iii] * vex2[iii]\nSimilarity_dot = sumdot\nsum12 = 0.0\nsum1 = 0.0\nsum2 = 0.0\nfor index in range(len(vex1)):\n sum12 += vex1[index] * vex2[index]\n sum1 += vex1[index] ** 2\n sum2 += vex2[index] ** 2\nSimilarity_Jaccard = sum12 / (sum1 + sum2 - sum12)\nres = open('SIMresult.txt', 'w')\nres.write('余弦: ' + str(Similarity_Cos) + '\\n内积: ' + str(sumdot) +\n '\\nJaccard系数: ' + str(Similarity_Jaccard))\nres.close()\nprint('余弦: ' + str(Similarity_Cos) + ' 内积: ' + str(sumdot) + ' Jaccard系数: ' +\n str(Similarity_Jaccard))\n",
"step-5": "# coding:utf-8 \nimport jieba\nimport os \nimport sys\nimport math\nreload(sys)\nsys.setdefaultencoding('utf-8')\nfrom sklearn import feature_extraction \nfrom sklearn.feature_extraction.text import TfidfTransformer \nfrom sklearn.feature_extraction.text import CountVectorizer\n#import csv\n#import pandas\n#import numpy\n\nsentence1 = sys.argv[1]\nsentence2 = sys.argv[2]\n#sentence1 = '他很喜欢玩游戏,也喜欢看小说'\n#sentence2 = '他喜欢玩游戏,最喜欢看小说'\n\nDivlist1 = jieba.lcut(sentence1, cut_all=True)\nDivlist2 = jieba.lcut(sentence2, cut_all=True)\n\nSen = [\" \".join(Divlist1), \" \".join(Divlist2)]\n\nvectorizer=CountVectorizer()#该类会将文本中的词语转换为词频矩阵\ntransformer=TfidfTransformer()#该类会统计每个词语的tf-idf权值 \n\nTFIDF_value=vectorizer.fit_transform(Sen)\nword=vectorizer.get_feature_names()#获取词袋模型中的所有词语 \nmatrix_value = TFIDF_value.toarray()\nvex1 = list(matrix_value[0])\nvex2 = list(matrix_value[1])\n\ndef cos_dist(a, b):\n if len(a) != len(b):\n return None\n part_up = 0.0\n a_sq = 0.0\n b_sq = 0.0\n for a1, b1 in zip(a,b):\n part_up += a1*b1\n a_sq += a1**2\n b_sq += b1**2\n part_down = math.sqrt(a_sq*b_sq)\n if part_down == 0.0:\n return None\n else:\n return part_up / part_down\n\nSimilarity_Cos = cos_dist(vex1, vex2) #余弦\n\nsumdot = 0.0\nfor iii in range(len(vex1)):\n sumdot += vex1[iii] * vex2[iii]\nSimilarity_dot = sumdot #内积\n\nsum12 = 0.0\nsum1 = 0.0\nsum2 = 0.0\nfor index in range(len(vex1)):\n sum12 += vex1[index] *vex2[index]\n sum1 += vex1[index] ** 2\n sum2 += vex2[index] ** 2\n\nSimilarity_Jaccard = sum12/(sum1 + sum2 - sum12) #jaccard相似度\n\nres=open(\"SIMresult.txt\", 'w')\nres.write('余弦: '+str(Similarity_Cos)+'\\n内积: '+str(sumdot)+'\\nJaccard系数: '+str(Similarity_Jaccard))\nres.close()\nprint('余弦: '+str(Similarity_Cos)+' 内积: '+str(sumdot)+' Jaccard系数: '+str(Similarity_Jaccard))\n#print(' ')\n#print(Similarity_dot)\n#print(' ')\n#print(Similarity_Jaccard)\n\n \n\n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Implements the webservice calls of the command
like rest apis or other network related methods
"""
|
flexible
|
{
"blob_id": "48369e1ed826a9a50c0fd9f63b7cc10b8225ce2b",
"index": 8760,
"step-1": "<mask token>\n",
"step-2": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nImplements the webservice calls of the command\nlike rest apis or other network related methods\n\"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while len(s) != 1:
count += 1
a = 0
for i in range(len(s)):
a += int(s[i])
s = str(a)
print(count)
<|reserved_special_token_1|>
s = input()
count = 0
while len(s) != 1:
count += 1
a = 0
for i in range(len(s)):
a += int(s[i])
s = str(a)
print(count)
<|reserved_special_token_1|>
s=input()
count=0
while(len(s)!=1):
count+=1
a=0
for i in range(len(s)):
a+=int(s[i])
s=str(a)
print(count)
|
flexible
|
{
"blob_id": "638e21e1eb1e2e14244628260d9c7ac179983721",
"index": 2541,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile len(s) != 1:\n count += 1\n a = 0\n for i in range(len(s)):\n a += int(s[i])\n s = str(a)\nprint(count)\n",
"step-3": "s = input()\ncount = 0\nwhile len(s) != 1:\n count += 1\n a = 0\n for i in range(len(s)):\n a += int(s[i])\n s = str(a)\nprint(count)\n",
"step-4": "s=input()\r\ncount=0\r\nwhile(len(s)!=1):\r\n count+=1\r\n a=0\r\n for i in range(len(s)):\r\n a+=int(s[i])\r\n s=str(a)\r\nprint(count)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
__version__='3.3.0'
__doc__="""
The Canvas object is the primary interface for creating PDF files. See
doc/reportlab-userguide.pdf for copious examples.
"""
__all__ = ['Canvas']
ENABLE_TRACKING = 1 # turn this off to do profile testing w/o tracking
import os
import sys
import re
import hashlib
from string import digits
import tempfile
from math import sin, cos, tan, pi, ceil
from reportlab import rl_config, ascii, xrange
from reportlab.pdfbase import pdfutils
from reportlab.pdfbase import pdfdoc
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfgen import pdfgeom, pathobject
from reportlab.pdfgen.textobject import PDFTextObject, _PDFColorSetter
from reportlab.lib.colors import black, _chooseEnforceColorSpace, Color, CMYKColor, toColor
from reportlab.lib.utils import import_zlib, ImageReader, isSeq, isStr, isUnicode, _digester
from reportlab.lib.rl_accel import fp_str, escapePDF
from reportlab.lib.boxstuff import aspectRatioFix
from reportlab.pdfgen import canvas
c = canvas.Canvas("essai.pdf")
from reportlab.lib.units import inch
# move the origin up and to the left
c.translate(inch, inch)
# define a large font
c.setFont("Helvetica", 80)
# choose some colors
c.setStrokeColorRGB(0.2, 0.5, 0.3)
c.setFillColorRGB(1, 0, 1)
# draw a rectangle
c.rect(inch, inch, 6 * inch, 9 * inch, fill=1)
# make text go straight up
c.rotate(90)
# change color
c.setFillColorRGB(0, 0, 0.77)
# say hello (note after rotate the y coord needs to be negative!)
c.drawString(6 * inch, -6 * inch, "welcome my project pharmacie")
c.showPage()
c.save()
|
normal
|
{
"blob_id": "7d6e8e6142184a1540daa29dac802fe75bd93d8e",
"index": 4428,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nc.translate(inch, inch)\nc.setFont('Helvetica', 80)\nc.setStrokeColorRGB(0.2, 0.5, 0.3)\nc.setFillColorRGB(1, 0, 1)\nc.rect(inch, inch, 6 * inch, 9 * inch, fill=1)\nc.rotate(90)\nc.setFillColorRGB(0, 0, 0.77)\nc.drawString(6 * inch, -6 * inch, 'welcome my project pharmacie')\nc.showPage()\nc.save()\n",
"step-3": "__version__ = '3.3.0'\n__doc__ = \"\"\"\nThe Canvas object is the primary interface for creating PDF files. See\ndoc/reportlab-userguide.pdf for copious examples.\n\"\"\"\n__all__ = ['Canvas']\nENABLE_TRACKING = 1\n<mask token>\nc = canvas.Canvas('essai.pdf')\n<mask token>\nc.translate(inch, inch)\nc.setFont('Helvetica', 80)\nc.setStrokeColorRGB(0.2, 0.5, 0.3)\nc.setFillColorRGB(1, 0, 1)\nc.rect(inch, inch, 6 * inch, 9 * inch, fill=1)\nc.rotate(90)\nc.setFillColorRGB(0, 0, 0.77)\nc.drawString(6 * inch, -6 * inch, 'welcome my project pharmacie')\nc.showPage()\nc.save()\n",
"step-4": "__version__ = '3.3.0'\n__doc__ = \"\"\"\nThe Canvas object is the primary interface for creating PDF files. See\ndoc/reportlab-userguide.pdf for copious examples.\n\"\"\"\n__all__ = ['Canvas']\nENABLE_TRACKING = 1\nimport os\nimport sys\nimport re\nimport hashlib\nfrom string import digits\nimport tempfile\nfrom math import sin, cos, tan, pi, ceil\nfrom reportlab import rl_config, ascii, xrange\nfrom reportlab.pdfbase import pdfutils\nfrom reportlab.pdfbase import pdfdoc\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfgen import pdfgeom, pathobject\nfrom reportlab.pdfgen.textobject import PDFTextObject, _PDFColorSetter\nfrom reportlab.lib.colors import black, _chooseEnforceColorSpace, Color, CMYKColor, toColor\nfrom reportlab.lib.utils import import_zlib, ImageReader, isSeq, isStr, isUnicode, _digester\nfrom reportlab.lib.rl_accel import fp_str, escapePDF\nfrom reportlab.lib.boxstuff import aspectRatioFix\nfrom reportlab.pdfgen import canvas\nc = canvas.Canvas('essai.pdf')\nfrom reportlab.lib.units import inch\nc.translate(inch, inch)\nc.setFont('Helvetica', 80)\nc.setStrokeColorRGB(0.2, 0.5, 0.3)\nc.setFillColorRGB(1, 0, 1)\nc.rect(inch, inch, 6 * inch, 9 * inch, fill=1)\nc.rotate(90)\nc.setFillColorRGB(0, 0, 0.77)\nc.drawString(6 * inch, -6 * inch, 'welcome my project pharmacie')\nc.showPage()\nc.save()\n",
"step-5": "\n#Copyright ReportLab Europe Ltd. 2000-2017\n#see license.txt for license details\n__version__='3.3.0'\n__doc__=\"\"\"\nThe Canvas object is the primary interface for creating PDF files. See\ndoc/reportlab-userguide.pdf for copious examples.\n\"\"\"\n\n__all__ = ['Canvas']\nENABLE_TRACKING = 1 # turn this off to do profile testing w/o tracking\n\nimport os\nimport sys\nimport re\nimport hashlib\nfrom string import digits\nimport tempfile\nfrom math import sin, cos, tan, pi, ceil\nfrom reportlab import rl_config, ascii, xrange\nfrom reportlab.pdfbase import pdfutils\nfrom reportlab.pdfbase import pdfdoc\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfgen import pdfgeom, pathobject\nfrom reportlab.pdfgen.textobject import PDFTextObject, _PDFColorSetter\nfrom reportlab.lib.colors import black, _chooseEnforceColorSpace, Color, CMYKColor, toColor\nfrom reportlab.lib.utils import import_zlib, ImageReader, isSeq, isStr, isUnicode, _digester\nfrom reportlab.lib.rl_accel import fp_str, escapePDF\nfrom reportlab.lib.boxstuff import aspectRatioFix\n\nfrom reportlab.pdfgen import canvas\n\nc = canvas.Canvas(\"essai.pdf\")\nfrom reportlab.lib.units import inch\n\n# move the origin up and to the left\nc.translate(inch, inch)\n# define a large font\nc.setFont(\"Helvetica\", 80)\n# choose some colors\nc.setStrokeColorRGB(0.2, 0.5, 0.3)\nc.setFillColorRGB(1, 0, 1)\n# draw a rectangle\nc.rect(inch, inch, 6 * inch, 9 * inch, fill=1)\n# make text go straight up\nc.rotate(90)\n# change color\nc.setFillColorRGB(0, 0, 0.77)\n# say hello (note after rotate the y coord needs to be negative!)\nc.drawString(6 * inch, -6 * inch, \"welcome my project pharmacie\")\nc.showPage()\nc.save()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Test Spotify module"""
from spoetify.spotify import Spotify
from nose.tools import assert_equal
def test_search_track():
sp = Spotify()
t = sp.search_track("avocado")
assert_equal(t.id, "1UyzA43l3OIcJ6jd3hh3ac")
|
normal
|
{
"blob_id": "337309da79ce9d90010fef5c171b6b344e6dc63f",
"index": 5937,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_search_track():\n sp = Spotify()\n t = sp.search_track('avocado')\n assert_equal(t.id, '1UyzA43l3OIcJ6jd3hh3ac')\n",
"step-3": "<mask token>\nfrom spoetify.spotify import Spotify\nfrom nose.tools import assert_equal\n\n\ndef test_search_track():\n sp = Spotify()\n t = sp.search_track('avocado')\n assert_equal(t.id, '1UyzA43l3OIcJ6jd3hh3ac')\n",
"step-4": "\"\"\"Test Spotify module\"\"\"\nfrom spoetify.spotify import Spotify\nfrom nose.tools import assert_equal\n\n\ndef test_search_track():\n sp = Spotify()\n t = sp.search_track(\"avocado\")\n assert_equal(t.id, \"1UyzA43l3OIcJ6jd3hh3ac\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
log.fit(X_train, y_train)
log.predict(sc.transform([[45, 87000]]))
<|reserved_special_token_0|>
np.set_printoptions(precision=2)
np.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test),
1)), 1)
<|reserved_special_token_0|>
confusion_matrix(y_test, y_pred)
<|reserved_special_token_0|>
accuracy_score(y_test, y_pred)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, 0:2].values
y = dataset.iloc[:, 2].values
<|reserved_special_token_0|>
X_train, X_test, y_train, y_test = train_test_split(X, y)
<|reserved_special_token_0|>
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
<|reserved_special_token_0|>
log = LogisticRegression()
log.fit(X_train, y_train)
log.predict(sc.transform([[45, 87000]]))
y_pred = log.predict(X_test)
np.set_printoptions(precision=2)
np.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test),
1)), 1)
<|reserved_special_token_0|>
confusion_matrix(y_test, y_pred)
<|reserved_special_token_0|>
accuracy_score(y_test, y_pred)
<|reserved_special_token_1|>
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, 0:2].values
y = dataset.iloc[:, 2].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from sklearn.linear_model import LogisticRegression
log = LogisticRegression()
log.fit(X_train, y_train)
log.predict(sc.transform([[45, 87000]]))
y_pred = log.predict(X_test)
np.set_printoptions(precision=2)
np.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test),
1)), 1)
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_pred)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_pred)
<|reserved_special_token_1|>
#Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#importing the data
dataset=pd.read_csv('Social_Network_Ads.csv')
X=dataset.iloc[:,0:2].values
y=dataset.iloc[:,2].values
#spiliting the data into training data and testing data
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y)
#feature Scaling to improve the predictions
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(X_train)
X_test=sc.transform(X_test)
#training the logistic regression on the model
from sklearn.linear_model import LogisticRegression
log=LogisticRegression()
log.fit(X_train,y_train)
#predicting the new result
log.predict(sc.transform([[45,87000]]))
#predicting the test set results
y_pred=log.predict(X_test)
np.set_printoptions(precision=2)
np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1)
#confusion matrix
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test,y_pred)
#accuracy score
from sklearn.metrics import accuracy_score
accuracy_score(y_test,y_pred)
|
flexible
|
{
"blob_id": "149f8b453786ec54668a55ec349ac157d2b93b5d",
"index": 2397,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlog.fit(X_train, y_train)\nlog.predict(sc.transform([[45, 87000]]))\n<mask token>\nnp.set_printoptions(precision=2)\nnp.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test),\n 1)), 1)\n<mask token>\nconfusion_matrix(y_test, y_pred)\n<mask token>\naccuracy_score(y_test, y_pred)\n",
"step-3": "<mask token>\ndataset = pd.read_csv('Social_Network_Ads.csv')\nX = dataset.iloc[:, 0:2].values\ny = dataset.iloc[:, 2].values\n<mask token>\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n<mask token>\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n<mask token>\nlog = LogisticRegression()\nlog.fit(X_train, y_train)\nlog.predict(sc.transform([[45, 87000]]))\ny_pred = log.predict(X_test)\nnp.set_printoptions(precision=2)\nnp.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test),\n 1)), 1)\n<mask token>\nconfusion_matrix(y_test, y_pred)\n<mask token>\naccuracy_score(y_test, y_pred)\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\ndataset = pd.read_csv('Social_Network_Ads.csv')\nX = dataset.iloc[:, 0:2].values\ny = dataset.iloc[:, 2].values\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y)\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\nfrom sklearn.linear_model import LogisticRegression\nlog = LogisticRegression()\nlog.fit(X_train, y_train)\nlog.predict(sc.transform([[45, 87000]]))\ny_pred = log.predict(X_test)\nnp.set_printoptions(precision=2)\nnp.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test),\n 1)), 1)\nfrom sklearn.metrics import confusion_matrix\nconfusion_matrix(y_test, y_pred)\nfrom sklearn.metrics import accuracy_score\naccuracy_score(y_test, y_pred)\n",
"step-5": "#Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n#importing the data\r\ndataset=pd.read_csv('Social_Network_Ads.csv')\r\nX=dataset.iloc[:,0:2].values\r\ny=dataset.iloc[:,2].values\r\n\r\n#spiliting the data into training data and testing data\r\nfrom sklearn.model_selection import train_test_split\r\nX_train,X_test,y_train,y_test=train_test_split(X,y)\r\n\r\n#feature Scaling to improve the predictions \r\nfrom sklearn.preprocessing import StandardScaler\r\nsc=StandardScaler()\r\nX_train=sc.fit_transform(X_train)\r\nX_test=sc.transform(X_test)\r\n\r\n#training the logistic regression on the model\r\nfrom sklearn.linear_model import LogisticRegression\r\nlog=LogisticRegression()\r\nlog.fit(X_train,y_train)\r\n\r\n#predicting the new result\r\nlog.predict(sc.transform([[45,87000]]))\r\n\r\n#predicting the test set results\r\ny_pred=log.predict(X_test)\r\nnp.set_printoptions(precision=2)\r\nnp.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1)\r\n\r\n#confusion matrix\r\nfrom sklearn.metrics import confusion_matrix\r\nconfusion_matrix(y_test,y_pred)\r\n\r\n#accuracy score\r\nfrom sklearn.metrics import accuracy_score\r\naccuracy_score(y_test,y_pred)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def rows(**ro):
print(ro)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(id(a))
<|reserved_special_token_0|>
print('hello.....')
print(type(a))
print(id(a))
<|reserved_special_token_0|>
print(id(b))
b.append(10)
print(id(b))
<|reserved_special_token_0|>
print(name)
print(len(name))
print(name[2])
print(name[0:3])
print(name[-2:])
<|reserved_special_token_0|>
print(message)
<|reserved_special_token_0|>
print(message)
<|reserved_special_token_0|>
print(lastname)
print(name + ' ' + lastname)
<|reserved_special_token_0|>
print('Another way of writing... \n' + full)
print(full.upper())
print(full.find('ip'))
print('Dipesh' in full)
print('Patel' in full)
print(full.replace('Rafaliya', 'Patel'))
print(bin(a))
print(hex(a))
<|reserved_special_token_0|>
print(x)
print(bin(x))
<|reserved_special_token_0|>
print(complex)
<|reserved_special_token_0|>
print(q)
<|reserved_special_token_0|>
print(w)
<|reserved_special_token_0|>
print(e)
<|reserved_special_token_0|>
print(r)
<|reserved_special_token_0|>
print(t)
<|reserved_special_token_0|>
print(g)
<|reserved_special_token_0|>
print(m)
<|reserved_special_token_0|>
print(abs(PI))
print(round(PI))
<|reserved_special_token_0|>
print(math.floor(no))
print(math.ceil(no))
<|reserved_special_token_0|>
if age >= 21:
print('Adult')
elif age >= 13:
print('Teenager')
else:
print('Child')
print('Adult' if age >= 21 else 'Teenager')
for p in 'Dipesh':
print(p)
for l in range(0, 10, 2):
print(l)
<|reserved_special_token_0|>
while answer != guess:
guess = int(input('Enter your Guess:: '))
else:
pass
def evenodd(numb):
if numb % 2 == 0:
return 'even'
else:
return 'odd'
print('The Number is ' + evenodd(20))
def rows(**ro):
print(ro)
rows(name='Dipesh', id=1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
a = 5.0
print(id(a))
a = 10
print('hello.....')
print(type(a))
print(id(a))
b = [5, 6, 7]
print(id(b))
b.append(10)
print(id(b))
name = input('Enter Your Name:: ')
print(name)
print(len(name))
print(name[2])
print(name[0:3])
print(name[-2:])
message = 'Python "Programming"'
print(message)
message = """Python
New Line..
Programmin"""
print(message)
lastname = input('Enter Your Last Name:: ')
print(lastname)
print(name + ' ' + lastname)
full = f'{name} {lastname}'
print('Another way of writing... \n' + full)
print(full.upper())
print(full.find('ip'))
print('Dipesh' in full)
print('Patel' in full)
print(full.replace('Rafaliya', 'Patel'))
print(bin(a))
print(hex(a))
x = 5
print(x)
print(bin(x))
complex = a + 5.0j
print(complex)
y = 3
q = a + y
print(q)
w = a - y
print(w)
e = a * y
print(e)
r = a / y
print(r)
t = a // y
print(t)
g = a ** y
print(g)
m = a % y
print(m)
PI = 3.14
print(abs(PI))
print(round(PI))
no = -8.56
print(math.floor(no))
print(math.ceil(no))
age = 10
if age >= 21:
print('Adult')
elif age >= 13:
print('Teenager')
else:
print('Child')
print('Adult' if age >= 21 else 'Teenager')
for p in 'Dipesh':
print(p)
for l in range(0, 10, 2):
print(l)
answer = 10
guess = 1
while answer != guess:
guess = int(input('Enter your Guess:: '))
else:
pass
def evenodd(numb):
if numb % 2 == 0:
return 'even'
else:
return 'odd'
print('The Number is ' + evenodd(20))
def rows(**ro):
print(ro)
rows(name='Dipesh', id=1)
<|reserved_special_token_1|>
import math
a = 5.0
print(id(a))
a = 10
print('hello.....')
print(type(a))
print(id(a))
b = [5, 6, 7]
print(id(b))
b.append(10)
print(id(b))
name = input('Enter Your Name:: ')
print(name)
print(len(name))
print(name[2])
print(name[0:3])
print(name[-2:])
message = 'Python "Programming"'
print(message)
message = """Python
New Line..
Programmin"""
print(message)
lastname = input('Enter Your Last Name:: ')
print(lastname)
print(name + ' ' + lastname)
full = f'{name} {lastname}'
print('Another way of writing... \n' + full)
print(full.upper())
print(full.find('ip'))
print('Dipesh' in full)
print('Patel' in full)
print(full.replace('Rafaliya', 'Patel'))
print(bin(a))
print(hex(a))
x = 5
print(x)
print(bin(x))
complex = a + 5.0j
print(complex)
y = 3
q = a + y
print(q)
w = a - y
print(w)
e = a * y
print(e)
r = a / y
print(r)
t = a // y
print(t)
g = a ** y
print(g)
m = a % y
print(m)
PI = 3.14
print(abs(PI))
print(round(PI))
no = -8.56
print(math.floor(no))
print(math.ceil(no))
age = 10
if age >= 21:
print('Adult')
elif age >= 13:
print('Teenager')
else:
print('Child')
print('Adult' if age >= 21 else 'Teenager')
for p in 'Dipesh':
print(p)
for l in range(0, 10, 2):
print(l)
answer = 10
guess = 1
while answer != guess:
guess = int(input('Enter your Guess:: '))
else:
pass
def evenodd(numb):
if numb % 2 == 0:
return 'even'
else:
return 'odd'
print('The Number is ' + evenodd(20))
def rows(**ro):
print(ro)
rows(name='Dipesh', id=1)
<|reserved_special_token_1|>
import math
# type defining of the variable and playing with variables.
a = 5.0
print(id(a))
a = 10
print("hello.....")
print(type(a))
print(id(a))
# locating addresses...
b = [5, 6, 7]
print(id(b))
b.append(10)
print(id(b))
# Strings...
name = input("Enter Your Name:: ") # iNPUTTING AS NAME
print(name)
print(len(name))
print(name[2])
print(name[0:3])
print(name[-2:])
# Escape Sequence
# \'
# \"
# \\
# \n
message = 'Python "Programming"'
print(message)
message = """Python
New Line..
Programmin"""
print(message)
# string Concatenation
lastname = input("Enter Your Last Name:: ") # iNPUTTING AS NAME
print(lastname)
print(name + " " + lastname)
full = f"{name} {lastname}"
print("Another way of writing... \n" + full)
print(full.upper()) # converts into upper case.
print(full.find("ip")) # finding location of specific char. Returns index number.
print("Dipesh" in full) # returns Boolean value either true or false..
print("Patel" in full)
print(full.replace("Rafaliya", "Patel"))
# Binary representation of any number...
print(bin(a)) # binary of a = 10
print(hex(a)) # Hexadecimal of a..
x = 0b0101
print((x)) # binary num a
print(bin(x)) # binary printing of a
# complex Number...
complex = a + 5j
print(complex) # printing complex number
y = 3
# operations
q = a + y # addition
print(q)
w = a - y # substraction
print(w)
e = a * y # multiplication
print(e)
r = a / y # division
print(r)
t = a // y # division but only print integer value
print(t)
g = a ** y # to the power of
print(g)
m = a % y # remainder
print(m)
# constants variables..
PI = 3.14 # this is a var with a constant value
print(abs(PI)) # absolute value of PI
print(round(PI)) # round up value of PI
no = -8.56
print(math.floor(no)) # floor value of no
print(math.ceil(no)) # ceiling value of no
# if-elif-else loop
age = 10
if age >= 21:
print("Adult")
elif age >= 13:
print("Teenager")
else:
print("Child")
# ternary operator
print("Adult" if age >= 21 else "Teenager")
# for loops
for p in "Dipesh":
print(p)
for l in range(0, 10, 2): # range is a kind of list...
print(l)
answer = 10
guess = 1
while answer != guess: # while loop for guessing
guess = int(input("Enter your Guess:: "))
else:
pass # this is used to break the loop...
# defining a function ... Number is even or odd..
def evenodd(numb):
if numb % 2 == 0:
return "even"
else:
return "odd"
print("The Number is " + evenodd(20))
# printing the row at a time...
def rows(**ro):
print(ro)
rows(name="Dipesh", id=1)
|
flexible
|
{
"blob_id": "95b75395cafc6ba9f75ecf48157421e37ced2518",
"index": 815,
"step-1": "<mask token>\n\n\ndef rows(**ro):\n print(ro)\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint(id(a))\n<mask token>\nprint('hello.....')\nprint(type(a))\nprint(id(a))\n<mask token>\nprint(id(b))\nb.append(10)\nprint(id(b))\n<mask token>\nprint(name)\nprint(len(name))\nprint(name[2])\nprint(name[0:3])\nprint(name[-2:])\n<mask token>\nprint(message)\n<mask token>\nprint(message)\n<mask token>\nprint(lastname)\nprint(name + ' ' + lastname)\n<mask token>\nprint('Another way of writing... \\n' + full)\nprint(full.upper())\nprint(full.find('ip'))\nprint('Dipesh' in full)\nprint('Patel' in full)\nprint(full.replace('Rafaliya', 'Patel'))\nprint(bin(a))\nprint(hex(a))\n<mask token>\nprint(x)\nprint(bin(x))\n<mask token>\nprint(complex)\n<mask token>\nprint(q)\n<mask token>\nprint(w)\n<mask token>\nprint(e)\n<mask token>\nprint(r)\n<mask token>\nprint(t)\n<mask token>\nprint(g)\n<mask token>\nprint(m)\n<mask token>\nprint(abs(PI))\nprint(round(PI))\n<mask token>\nprint(math.floor(no))\nprint(math.ceil(no))\n<mask token>\nif age >= 21:\n print('Adult')\nelif age >= 13:\n print('Teenager')\nelse:\n print('Child')\nprint('Adult' if age >= 21 else 'Teenager')\nfor p in 'Dipesh':\n print(p)\nfor l in range(0, 10, 2):\n print(l)\n<mask token>\nwhile answer != guess:\n guess = int(input('Enter your Guess:: '))\nelse:\n pass\n\n\ndef evenodd(numb):\n if numb % 2 == 0:\n return 'even'\n else:\n return 'odd'\n\n\nprint('The Number is ' + evenodd(20))\n\n\ndef rows(**ro):\n print(ro)\n\n\nrows(name='Dipesh', id=1)\n",
"step-3": "<mask token>\na = 5.0\nprint(id(a))\na = 10\nprint('hello.....')\nprint(type(a))\nprint(id(a))\nb = [5, 6, 7]\nprint(id(b))\nb.append(10)\nprint(id(b))\nname = input('Enter Your Name:: ')\nprint(name)\nprint(len(name))\nprint(name[2])\nprint(name[0:3])\nprint(name[-2:])\nmessage = 'Python \"Programming\"'\nprint(message)\nmessage = \"\"\"Python \nNew Line..\nProgrammin\"\"\"\nprint(message)\nlastname = input('Enter Your Last Name:: ')\nprint(lastname)\nprint(name + ' ' + lastname)\nfull = f'{name} {lastname}'\nprint('Another way of writing... \\n' + full)\nprint(full.upper())\nprint(full.find('ip'))\nprint('Dipesh' in full)\nprint('Patel' in full)\nprint(full.replace('Rafaliya', 'Patel'))\nprint(bin(a))\nprint(hex(a))\nx = 5\nprint(x)\nprint(bin(x))\ncomplex = a + 5.0j\nprint(complex)\ny = 3\nq = a + y\nprint(q)\nw = a - y\nprint(w)\ne = a * y\nprint(e)\nr = a / y\nprint(r)\nt = a // y\nprint(t)\ng = a ** y\nprint(g)\nm = a % y\nprint(m)\nPI = 3.14\nprint(abs(PI))\nprint(round(PI))\nno = -8.56\nprint(math.floor(no))\nprint(math.ceil(no))\nage = 10\nif age >= 21:\n print('Adult')\nelif age >= 13:\n print('Teenager')\nelse:\n print('Child')\nprint('Adult' if age >= 21 else 'Teenager')\nfor p in 'Dipesh':\n print(p)\nfor l in range(0, 10, 2):\n print(l)\nanswer = 10\nguess = 1\nwhile answer != guess:\n guess = int(input('Enter your Guess:: '))\nelse:\n pass\n\n\ndef evenodd(numb):\n if numb % 2 == 0:\n return 'even'\n else:\n return 'odd'\n\n\nprint('The Number is ' + evenodd(20))\n\n\ndef rows(**ro):\n print(ro)\n\n\nrows(name='Dipesh', id=1)\n",
"step-4": "import math\na = 5.0\nprint(id(a))\na = 10\nprint('hello.....')\nprint(type(a))\nprint(id(a))\nb = [5, 6, 7]\nprint(id(b))\nb.append(10)\nprint(id(b))\nname = input('Enter Your Name:: ')\nprint(name)\nprint(len(name))\nprint(name[2])\nprint(name[0:3])\nprint(name[-2:])\nmessage = 'Python \"Programming\"'\nprint(message)\nmessage = \"\"\"Python \nNew Line..\nProgrammin\"\"\"\nprint(message)\nlastname = input('Enter Your Last Name:: ')\nprint(lastname)\nprint(name + ' ' + lastname)\nfull = f'{name} {lastname}'\nprint('Another way of writing... \\n' + full)\nprint(full.upper())\nprint(full.find('ip'))\nprint('Dipesh' in full)\nprint('Patel' in full)\nprint(full.replace('Rafaliya', 'Patel'))\nprint(bin(a))\nprint(hex(a))\nx = 5\nprint(x)\nprint(bin(x))\ncomplex = a + 5.0j\nprint(complex)\ny = 3\nq = a + y\nprint(q)\nw = a - y\nprint(w)\ne = a * y\nprint(e)\nr = a / y\nprint(r)\nt = a // y\nprint(t)\ng = a ** y\nprint(g)\nm = a % y\nprint(m)\nPI = 3.14\nprint(abs(PI))\nprint(round(PI))\nno = -8.56\nprint(math.floor(no))\nprint(math.ceil(no))\nage = 10\nif age >= 21:\n print('Adult')\nelif age >= 13:\n print('Teenager')\nelse:\n print('Child')\nprint('Adult' if age >= 21 else 'Teenager')\nfor p in 'Dipesh':\n print(p)\nfor l in range(0, 10, 2):\n print(l)\nanswer = 10\nguess = 1\nwhile answer != guess:\n guess = int(input('Enter your Guess:: '))\nelse:\n pass\n\n\ndef evenodd(numb):\n if numb % 2 == 0:\n return 'even'\n else:\n return 'odd'\n\n\nprint('The Number is ' + evenodd(20))\n\n\ndef rows(**ro):\n print(ro)\n\n\nrows(name='Dipesh', id=1)\n",
"step-5": "import math\r\n\r\n# type defining of the variable and playing with variables.\r\na = 5.0\r\nprint(id(a))\r\na = 10\r\nprint(\"hello.....\")\r\nprint(type(a))\r\nprint(id(a))\r\n\r\n# locating addresses...\r\nb = [5, 6, 7]\r\nprint(id(b))\r\nb.append(10)\r\nprint(id(b))\r\n\r\n# Strings...\r\n\r\nname = input(\"Enter Your Name:: \") # iNPUTTING AS NAME\r\nprint(name)\r\nprint(len(name))\r\nprint(name[2])\r\nprint(name[0:3])\r\nprint(name[-2:])\r\n\r\n# Escape Sequence\r\n# \\'\r\n# \\\"\r\n# \\\\\r\n# \\n\r\nmessage = 'Python \"Programming\"'\r\nprint(message)\r\nmessage = \"\"\"Python \r\nNew Line..\r\nProgrammin\"\"\"\r\nprint(message)\r\n# string Concatenation\r\n\r\nlastname = input(\"Enter Your Last Name:: \") # iNPUTTING AS NAME\r\nprint(lastname)\r\nprint(name + \" \" + lastname)\r\n\r\nfull = f\"{name} {lastname}\"\r\nprint(\"Another way of writing... \\n\" + full)\r\nprint(full.upper()) # converts into upper case.\r\nprint(full.find(\"ip\")) # finding location of specific char. Returns index number.\r\n\r\nprint(\"Dipesh\" in full) # returns Boolean value either true or false..\r\nprint(\"Patel\" in full)\r\nprint(full.replace(\"Rafaliya\", \"Patel\"))\r\n\r\n# Binary representation of any number...\r\nprint(bin(a)) # binary of a = 10\r\nprint(hex(a)) # Hexadecimal of a..\r\n\r\nx = 0b0101\r\nprint((x)) # binary num a\r\nprint(bin(x)) # binary printing of a\r\n\r\n# complex Number...\r\ncomplex = a + 5j\r\nprint(complex) # printing complex number\r\ny = 3\r\n# operations\r\nq = a + y # addition\r\nprint(q)\r\nw = a - y # substraction\r\nprint(w)\r\ne = a * y # multiplication\r\nprint(e)\r\nr = a / y # division\r\nprint(r)\r\nt = a // y # division but only print integer value\r\nprint(t)\r\ng = a ** y # to the power of\r\nprint(g)\r\nm = a % y # remainder\r\nprint(m)\r\n\r\n# constants variables..\r\nPI = 3.14 # this is a var with a constant value\r\nprint(abs(PI)) # absolute value of PI\r\nprint(round(PI)) # round up value of PI\r\nno = -8.56\r\nprint(math.floor(no)) # floor value of no\r\nprint(math.ceil(no)) # ceiling value of no\r\n\r\n# if-elif-else loop\r\nage = 10\r\nif age >= 21:\r\n print(\"Adult\")\r\nelif age >= 13:\r\n print(\"Teenager\")\r\nelse:\r\n print(\"Child\")\r\n\r\n# ternary operator\r\nprint(\"Adult\" if age >= 21 else \"Teenager\")\r\n\r\n# for loops\r\nfor p in \"Dipesh\":\r\n print(p)\r\n\r\nfor l in range(0, 10, 2): # range is a kind of list...\r\n print(l)\r\n\r\nanswer = 10\r\nguess = 1\r\nwhile answer != guess: # while loop for guessing\r\n guess = int(input(\"Enter your Guess:: \"))\r\nelse:\r\n pass # this is used to break the loop...\r\n\r\n# defining a function ... Number is even or odd..\r\ndef evenodd(numb):\r\n if numb % 2 == 0:\r\n return \"even\"\r\n else:\r\n return \"odd\"\r\n\r\n\r\nprint(\"The Number is \" + evenodd(20))\r\n\r\n# printing the row at a time...\r\ndef rows(**ro):\r\n print(ro)\r\n\r\n\r\nrows(name=\"Dipesh\", id=1)\r\n\r\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import json
import struct
import pymel.core as pmc
import os.path
def exportVSSD(path, camName, wantTris=False, renderdata=None):
mainFileDict = {}
mainFilePath = path
mainFileStem = os.path.basename(path)[:-5]
mainFileDir = os.path.dirname(path)
resolution = pmc.ls('defaultResolution')[0]
renderWidth = resolution.width.get()
renderHeight = resolution.height.get()
if renderdata is not None:
mainFileDict['render'] = {'width': renderWidth, 'height':
renderHeight, 'spp': renderdata['spp']}
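    # capture the render camera's focal length, film gate, aspect ratio and world-space eye/up/look vectors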
cam = pmc.ls(camName)[0].getShape()
mainFileDict['camera'] = {'focal': cam.getFocalLength(), 'gate': cam.
getVerticalFilmAperture(), 'aspect': renderWidth / renderHeight,
'eye': list(cam.getEyePoint(space='world')), 'up': list(cam.
upDirection(space='world')), 'look': list(cam.viewDirection(space=
'world'))}
bufPath = os.path.join(mainFileDir, '{}.bin'.format(mainFileStem))
geomList = pmc.ls(type='mesh', visible=True)
mainFileGeoms = []
offset = 0
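    # all mesh buffers are appended to one shared .bin side-car file; 'offset' tracks each buffer's byte position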
with open(bufPath, 'wb') as bufFd:
for geom in geomList:
print('Processing {}...'.format(geom))
smoothLevel = pmc.displaySmoothness(geom, q=True, po=0)[0]
isSmooth = smoothLevel > 1
print('Smooth level {}'.format(smoothLevel))
faceBuf = ''
idxBuf = ''
vtxBuf = ''
nidxs = 0
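            # pack each face's vertex count and its flattened vertex indices (triangle-only export rejects n-gons)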
for face in geom.f:
vtxidxs = face.getVertices()
nvtxidxs = len(vtxidxs)
if not isSmooth and wantTris:
if nvtxidxs > 3:
print(
'Non-triangulated face. Triangulate before exporting'
)
return
else:
faceBuf += struct.pack('<I', nvtxidxs)
nidxs += nvtxidxs
for vtxidx in vtxidxs:
idxBuf += struct.pack('<I', vtxidx)
for vertex in geom.vtx:
p = vertex.getPosition('world')
vtxBuf += struct.pack('<fff', p.x, p.y, p.z)
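            # for smoothed meshes, record edge-crease indices and values reported by polyCrease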
hasCreases = False
if isSmooth:
edges = geom.edges
creaseIdxBuf = ''
creaseValBuf = ''
creases = pmc.modeling.polyCrease(edges, q=True, v=0)
for e in range(0, len(edges)):
c = creases[e]
if c > 0:
hasCreases = True
vtxs = edges[e].connectedVertices()
creaseIdxBuf += struct.pack('<I', vtxs[0].index())
creaseIdxBuf += struct.pack('<I', vtxs[1].index())
creaseValBuf += struct.pack('<f', c)
buffers = [(idxBuf, 'indices'), (vtxBuf, 'vertices')]
if not wantTris:
buffers += [(faceBuf, 'faces')]
if hasCreases:
buffers += [(creaseIdxBuf, 'creaseindices'), (creaseValBuf,
'creasevalues')]
buffersList = []
for b in buffers:
print('Writing buffer {}'.format(b[1]))
bufFd.write(b[0])
s = len(b[0])
buffersList.append({'offset': offset, 'size': s, 'type': b[1]})
offset += s
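            # read albedo/emittance from the surface shader wired into the mesh's shading group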
sg = geom.connections(t='shadingEngine')[0]
mat = sg.surfaceShader.connections()[0]
albedo = mat.color.get()
emittance = mat.incandescence.get()
geomDict = {'triangles': wantTris, 'smooth': isSmooth,
'buffers': buffersList, 'material': {'albedo': list(albedo),
'emittance': list(emittance)}}
mainFileGeoms.append(geomDict)
mainFileDict['geometries'] = mainFileGeoms
mainFileJson = json.dumps(mainFileDict, indent=2)
with open(mainFilePath, 'w') as fd:
fd.write(mainFileJson)
print('Done')
|
normal
|
{
"blob_id": "004a9cd0e459116bf3f88f3546ff4eded3dfb2a8",
"index": 2512,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef exportVSSD(path, camName, wantTris=False, renderdata=None):\n mainFileDict = {}\n mainFilePath = path\n mainFileStem = os.path.basename(path)[:-5]\n mainFileDir = os.path.dirname(path)\n resolution = pmc.ls('defaultResolution')[0]\n renderWidth = resolution.width.get()\n renderHeight = resolution.height.get()\n if renderdata is not None:\n mainFileDict['render'] = {'width': renderWidth, 'height':\n renderHeight, 'spp': renderdata['spp']}\n cam = pmc.ls(camName)[0].getShape()\n mainFileDict['camera'] = {'focal': cam.getFocalLength(), 'gate': cam.\n getVerticalFilmAperture(), 'aspect': renderWidth / renderHeight,\n 'eye': list(cam.getEyePoint(space='world')), 'up': list(cam.\n upDirection(space='world')), 'look': list(cam.viewDirection(space=\n 'world'))}\n bufPath = os.path.join(mainFileDir, '{}.bin'.format(mainFileStem))\n geomList = pmc.ls(type='mesh', visible=True)\n mainFileGeoms = []\n offset = 0\n with open(bufPath, 'wb') as bufFd:\n for geom in geomList:\n print('Processing {}...'.format(geom))\n smoothLevel = pmc.displaySmoothness(geom, q=True, po=0)[0]\n isSmooth = smoothLevel > 1\n print('Smooth level {}'.format(smoothLevel))\n faceBuf = ''\n idxBuf = ''\n vtxBuf = ''\n nidxs = 0\n for face in geom.f:\n vtxidxs = face.getVertices()\n nvtxidxs = len(vtxidxs)\n if not isSmooth and wantTris:\n if nvtxidxs > 3:\n print(\n 'Non-triangulated face. Triangulate before exporting'\n )\n return\n else:\n faceBuf += struct.pack('<I', nvtxidxs)\n nidxs += nvtxidxs\n for vtxidx in vtxidxs:\n idxBuf += struct.pack('<I', vtxidx)\n for vertex in geom.vtx:\n p = vertex.getPosition('world')\n vtxBuf += struct.pack('<fff', p.x, p.y, p.z)\n hasCreases = False\n if isSmooth:\n edges = geom.edges\n creaseIdxBuf = ''\n creaseValBuf = ''\n creases = pmc.modeling.polyCrease(edges, q=True, v=0)\n for e in range(0, len(edges)):\n c = creases[e]\n if c > 0:\n hasCreases = True\n vtxs = edges[e].connectedVertices()\n creaseIdxBuf += struct.pack('<I', vtxs[0].index())\n creaseIdxBuf += struct.pack('<I', vtxs[1].index())\n creaseValBuf += struct.pack('<f', c)\n buffers = [(idxBuf, 'indices'), (vtxBuf, 'vertices')]\n if not wantTris:\n buffers += [(faceBuf, 'faces')]\n if hasCreases:\n buffers += [(creaseIdxBuf, 'creaseindices'), (creaseValBuf,\n 'creasevalues')]\n buffersList = []\n for b in buffers:\n print('Writing buffer {}'.format(b[1]))\n bufFd.write(b[0])\n s = len(b[0])\n buffersList.append({'offset': offset, 'size': s, 'type': b[1]})\n offset += s\n sg = geom.connections(t='shadingEngine')[0]\n mat = sg.surfaceShader.connections()[0]\n albedo = mat.color.get()\n emittance = mat.incandescence.get()\n geomDict = {'triangles': wantTris, 'smooth': isSmooth,\n 'buffers': buffersList, 'material': {'albedo': list(albedo),\n 'emittance': list(emittance)}}\n mainFileGeoms.append(geomDict)\n mainFileDict['geometries'] = mainFileGeoms\n mainFileJson = json.dumps(mainFileDict, indent=2)\n with open(mainFilePath, 'w') as fd:\n fd.write(mainFileJson)\n print('Done')\n",
"step-3": "import json\nimport struct\nimport pymel.core as pmc\nimport os.path\n\n\ndef exportVSSD(path, camName, wantTris=False, renderdata=None):\n mainFileDict = {}\n mainFilePath = path\n mainFileStem = os.path.basename(path)[:-5]\n mainFileDir = os.path.dirname(path)\n resolution = pmc.ls('defaultResolution')[0]\n renderWidth = resolution.width.get()\n renderHeight = resolution.height.get()\n if renderdata is not None:\n mainFileDict['render'] = {'width': renderWidth, 'height':\n renderHeight, 'spp': renderdata['spp']}\n cam = pmc.ls(camName)[0].getShape()\n mainFileDict['camera'] = {'focal': cam.getFocalLength(), 'gate': cam.\n getVerticalFilmAperture(), 'aspect': renderWidth / renderHeight,\n 'eye': list(cam.getEyePoint(space='world')), 'up': list(cam.\n upDirection(space='world')), 'look': list(cam.viewDirection(space=\n 'world'))}\n bufPath = os.path.join(mainFileDir, '{}.bin'.format(mainFileStem))\n geomList = pmc.ls(type='mesh', visible=True)\n mainFileGeoms = []\n offset = 0\n with open(bufPath, 'wb') as bufFd:\n for geom in geomList:\n print('Processing {}...'.format(geom))\n smoothLevel = pmc.displaySmoothness(geom, q=True, po=0)[0]\n isSmooth = smoothLevel > 1\n print('Smooth level {}'.format(smoothLevel))\n faceBuf = ''\n idxBuf = ''\n vtxBuf = ''\n nidxs = 0\n for face in geom.f:\n vtxidxs = face.getVertices()\n nvtxidxs = len(vtxidxs)\n if not isSmooth and wantTris:\n if nvtxidxs > 3:\n print(\n 'Non-triangulated face. Triangulate before exporting'\n )\n return\n else:\n faceBuf += struct.pack('<I', nvtxidxs)\n nidxs += nvtxidxs\n for vtxidx in vtxidxs:\n idxBuf += struct.pack('<I', vtxidx)\n for vertex in geom.vtx:\n p = vertex.getPosition('world')\n vtxBuf += struct.pack('<fff', p.x, p.y, p.z)\n hasCreases = False\n if isSmooth:\n edges = geom.edges\n creaseIdxBuf = ''\n creaseValBuf = ''\n creases = pmc.modeling.polyCrease(edges, q=True, v=0)\n for e in range(0, len(edges)):\n c = creases[e]\n if c > 0:\n hasCreases = True\n vtxs = edges[e].connectedVertices()\n creaseIdxBuf += struct.pack('<I', vtxs[0].index())\n creaseIdxBuf += struct.pack('<I', vtxs[1].index())\n creaseValBuf += struct.pack('<f', c)\n buffers = [(idxBuf, 'indices'), (vtxBuf, 'vertices')]\n if not wantTris:\n buffers += [(faceBuf, 'faces')]\n if hasCreases:\n buffers += [(creaseIdxBuf, 'creaseindices'), (creaseValBuf,\n 'creasevalues')]\n buffersList = []\n for b in buffers:\n print('Writing buffer {}'.format(b[1]))\n bufFd.write(b[0])\n s = len(b[0])\n buffersList.append({'offset': offset, 'size': s, 'type': b[1]})\n offset += s\n sg = geom.connections(t='shadingEngine')[0]\n mat = sg.surfaceShader.connections()[0]\n albedo = mat.color.get()\n emittance = mat.incandescence.get()\n geomDict = {'triangles': wantTris, 'smooth': isSmooth,\n 'buffers': buffersList, 'material': {'albedo': list(albedo),\n 'emittance': list(emittance)}}\n mainFileGeoms.append(geomDict)\n mainFileDict['geometries'] = mainFileGeoms\n mainFileJson = json.dumps(mainFileDict, indent=2)\n with open(mainFilePath, 'w') as fd:\n fd.write(mainFileJson)\n print('Done')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class SpiderMain(object):
<|reserved_special_token_0|>
def craw(self, root_url):
count = 1
self.urls.add_new_url(root_url)
while self.urls.has_new_url():
try:
new_url = self.urls.get_new_url()
print('craw %d : %s' % (count, new_url))
html_cont = self.downloader.download(new_url)
new_urls, new_data = self.parser.parse(new_url, html_cont)
self.urls.add_new_urls(new_urls)
self.outputer.collect_data(new_data)
if count == 20:
break
count = count + 1
except:
print('craw failed')
self.outputer.output_html()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SpiderMain(object):
def __init__(self):
self.urls = url_manager.UrlManager()
self.downloader = html_downloader.HtmlDownloader()
self.parser = html_parser.HtmlParser()
self.outputer = html_outputer.HtmlOutputer()
def craw(self, root_url):
count = 1
self.urls.add_new_url(root_url)
while self.urls.has_new_url():
try:
new_url = self.urls.get_new_url()
print('craw %d : %s' % (count, new_url))
html_cont = self.downloader.download(new_url)
new_urls, new_data = self.parser.parse(new_url, html_cont)
self.urls.add_new_urls(new_urls)
self.outputer.collect_data(new_data)
if count == 20:
break
count = count + 1
except:
print('craw failed')
self.outputer.output_html()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('—— 7. Python crawler hands-on: crawling data from 1000 Baidu Baike pages ——')
print('—— 7.2 The scheduler ——')
print('————— Python crawler: 1. Main tutorial program ———————————————')
<|reserved_special_token_0|>
class SpiderMain(object):
def __init__(self):
self.urls = url_manager.UrlManager()
self.downloader = html_downloader.HtmlDownloader()
self.parser = html_parser.HtmlParser()
self.outputer = html_outputer.HtmlOutputer()
def craw(self, root_url):
count = 1
self.urls.add_new_url(root_url)
while self.urls.has_new_url():
try:
new_url = self.urls.get_new_url()
print('craw %d : %s' % (count, new_url))
html_cont = self.downloader.download(new_url)
new_urls, new_data = self.parser.parse(new_url, html_cont)
self.urls.add_new_urls(new_urls)
self.outputer.collect_data(new_data)
if count == 20:
break
count = count + 1
except:
print('craw failed')
self.outputer.output_html()
if __name__ == '__main__':
root_url = 'https://baike.baidu.com/item/Python/407313'
obj_spider = SpiderMain()
obj_spider.craw(root_url)
<|reserved_special_token_1|>
print('—— 7. Python crawler hands-on: crawling data from 1000 Baidu Baike pages ——')
print('—— 7.2 The scheduler ——')
print('————— Python crawler: 1. Main tutorial program ———————————————')
from Reptilian.baike_spider import url_manager, html_downloader, html_parser, html_outputer
class SpiderMain(object):
def __init__(self):
self.urls = url_manager.UrlManager()
self.downloader = html_downloader.HtmlDownloader()
self.parser = html_parser.HtmlParser()
self.outputer = html_outputer.HtmlOutputer()
def craw(self, root_url):
count = 1
self.urls.add_new_url(root_url)
while self.urls.has_new_url():
try:
new_url = self.urls.get_new_url()
print('craw %d : %s' % (count, new_url))
html_cont = self.downloader.download(new_url)
new_urls, new_data = self.parser.parse(new_url, html_cont)
self.urls.add_new_urls(new_urls)
self.outputer.collect_data(new_data)
if count == 20:
break
count = count + 1
except:
print('craw failed')
self.outputer.output_html()
if __name__ == '__main__':
root_url = 'https://baike.baidu.com/item/Python/407313'
obj_spider = SpiderMain()
obj_spider.craw(root_url)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
print ("—— 七、Python爬虫实战演练:爬取百度百科1000个页面的数据 ——");
print ("—— 7.2、调度程序 ——");
print ("————— Python爬虫:1、总教程程序 ———————————————");
from Reptilian.baike_spider import url_manager, html_downloader, html_parser, html_outputer
class SpiderMain(object):
    # Constructor initialises each component object
def __init__(self):
        self.urls = url_manager.UrlManager() # URL manager
        self.downloader = html_downloader.HtmlDownloader() # downloader
        self.parser = html_parser.HtmlParser() # parser
        self.outputer = html_outputer.HtmlOutputer() # outputer
    # The crawler's scheduling routine
def craw(self, root_url):
        # Bookkeeping: use count to track which URL number is currently being crawled
count = 1
        # Add the entry URL to the manager
self.urls.add_new_url(root_url)
        # (While there are URLs left to crawl) keep pulling URLs from the manager
while self.urls.has_new_url():
try:
                # Get one pending URL (the URL to crawl now)
new_url = self.urls.get_new_url()
print ('craw %d : %s' % (count, new_url) )
                # Start the downloader and fetch the page (page data)
html_cont = self.downloader.download(new_url)
                # Once downloaded, call the parser on the page data --> a list of new URLs and new data
new_urls, new_data = self.parser.parse(new_url, html_cont)
                # Add the URLs to the URL manager; collect the data
self.urls.add_new_urls(new_urls)
self.outputer.collect_data(new_data)
if count == 20:
break
count = count + 1
except:
print ('craw failed')
        # Output the collected data
self.outputer.output_html()
# 1. Write the main function
if __name__=="__main__":
    # Set the entry URL
root_url = "https://baike.baidu.com/item/Python/407313"
    # Create the spider
obj_spider = SpiderMain()
    # Start the crawler
obj_spider.craw(root_url)
|
flexible
|
{
"blob_id": "e99a81a5600aad6111bb2694cbda02021ccfd71c",
"index": 2817,
"step-1": "<mask token>\n\n\nclass SpiderMain(object):\n <mask token>\n\n def craw(self, root_url):\n count = 1\n self.urls.add_new_url(root_url)\n while self.urls.has_new_url():\n try:\n new_url = self.urls.get_new_url()\n print('craw %d : %s' % (count, new_url))\n html_cont = self.downloader.download(new_url)\n new_urls, new_data = self.parser.parse(new_url, html_cont)\n self.urls.add_new_urls(new_urls)\n self.outputer.collect_data(new_data)\n if count == 20:\n break\n count = count + 1\n except:\n print('craw failed')\n self.outputer.output_html()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SpiderMain(object):\n\n def __init__(self):\n self.urls = url_manager.UrlManager()\n self.downloader = html_downloader.HtmlDownloader()\n self.parser = html_parser.HtmlParser()\n self.outputer = html_outputer.HtmlOutputer()\n\n def craw(self, root_url):\n count = 1\n self.urls.add_new_url(root_url)\n while self.urls.has_new_url():\n try:\n new_url = self.urls.get_new_url()\n print('craw %d : %s' % (count, new_url))\n html_cont = self.downloader.download(new_url)\n new_urls, new_data = self.parser.parse(new_url, html_cont)\n self.urls.add_new_urls(new_urls)\n self.outputer.collect_data(new_data)\n if count == 20:\n break\n count = count + 1\n except:\n print('craw failed')\n self.outputer.output_html()\n\n\n<mask token>\n",
"step-3": "print('—— 七、Python爬虫实战演练:爬取百度百科1000个页面的数据 ——')\nprint('—— 7.2、调度程序 ——')\nprint('————— Python爬虫:1、总教程程序 ———————————————')\n<mask token>\n\n\nclass SpiderMain(object):\n\n def __init__(self):\n self.urls = url_manager.UrlManager()\n self.downloader = html_downloader.HtmlDownloader()\n self.parser = html_parser.HtmlParser()\n self.outputer = html_outputer.HtmlOutputer()\n\n def craw(self, root_url):\n count = 1\n self.urls.add_new_url(root_url)\n while self.urls.has_new_url():\n try:\n new_url = self.urls.get_new_url()\n print('craw %d : %s' % (count, new_url))\n html_cont = self.downloader.download(new_url)\n new_urls, new_data = self.parser.parse(new_url, html_cont)\n self.urls.add_new_urls(new_urls)\n self.outputer.collect_data(new_data)\n if count == 20:\n break\n count = count + 1\n except:\n print('craw failed')\n self.outputer.output_html()\n\n\nif __name__ == '__main__':\n root_url = 'https://baike.baidu.com/item/Python/407313'\n obj_spider = SpiderMain()\n obj_spider.craw(root_url)\n",
"step-4": "print('—— 七、Python爬虫实战演练:爬取百度百科1000个页面的数据 ——')\nprint('—— 7.2、调度程序 ——')\nprint('————— Python爬虫:1、总教程程序 ———————————————')\nfrom Reptilian.baike_spider import url_manager, html_downloader, html_parser, html_outputer\n\n\nclass SpiderMain(object):\n\n def __init__(self):\n self.urls = url_manager.UrlManager()\n self.downloader = html_downloader.HtmlDownloader()\n self.parser = html_parser.HtmlParser()\n self.outputer = html_outputer.HtmlOutputer()\n\n def craw(self, root_url):\n count = 1\n self.urls.add_new_url(root_url)\n while self.urls.has_new_url():\n try:\n new_url = self.urls.get_new_url()\n print('craw %d : %s' % (count, new_url))\n html_cont = self.downloader.download(new_url)\n new_urls, new_data = self.parser.parse(new_url, html_cont)\n self.urls.add_new_urls(new_urls)\n self.outputer.collect_data(new_data)\n if count == 20:\n break\n count = count + 1\n except:\n print('craw failed')\n self.outputer.output_html()\n\n\nif __name__ == '__main__':\n root_url = 'https://baike.baidu.com/item/Python/407313'\n obj_spider = SpiderMain()\n obj_spider.craw(root_url)\n",
"step-5": "# -*- coding: utf-8 -*-\nprint (\"—— 七、Python爬虫实战演练:爬取百度百科1000个页面的数据 ——\");\nprint (\"—— 7.2、调度程序 ——\");\n\nprint (\"————— Python爬虫:1、总教程程序 ———————————————\");\n\nfrom Reptilian.baike_spider import url_manager, html_downloader, html_parser, html_outputer\n\nclass SpiderMain(object):\n # 构造函数初始化各个对象\n def __init__(self):\n self.urls = url_manager.UrlManager() # Url管理器\n self.downloader = html_downloader.HtmlDownloader() # 下载器\n self.parser = html_parser.HtmlParser() # 解析器\n self.outputer = html_outputer.HtmlOutputer() # 输出器\n\n # 爬虫的调度程序\n def craw(self, root_url):\n # 添加辅助信息,用count判断当前爬取的是第几个url\n count = 1\n # 入口url添加到管理器\n self.urls.add_new_url(root_url)\n # (如果有待爬取的url)遍历url管理器获取url\n while self.urls.has_new_url():\n try:\n # 获取一个待爬取的url(当前爬取的url)\n new_url = self.urls.get_new_url()\n print ('craw %d : %s' % (count, new_url) )\n # 启动下载器下载页面(页面数据)\n html_cont = self.downloader.download(new_url)\n # 下载好页面,调用解析器解析页面数据-->得到新的url列表和新的数据\n new_urls, new_data = self.parser.parse(new_url, html_cont)\n # url添加进url管理器;收集数据\n self.urls.add_new_urls(new_urls)\n self.outputer.collect_data(new_data)\n\n if count == 20:\n break\n count = count + 1\n except:\n print ('craw failed')\n\n # 输出收集好的数据\n self.outputer.output_html()\n\n# 1、编写main函数\nif __name__==\"__main__\":\n # 编写入口url\n root_url = \"https://baike.baidu.com/item/Python/407313\"\n # 创建spider\n obj_spider = SpiderMain()\n # 启动爬虫\n obj_spider.craw(root_url)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
line_numbers = input().split(", ")
print("Positive:", ", ".join(list(filter((lambda x: int(x) > -1), line_numbers))))
print("Negative:", ", ".join((list(filter((lambda x: int(x) < 0), line_numbers)))))
print("Even:", ", ".join((list(filter((lambda x: int(x) % 2 == 0), line_numbers)))))
print("Odd:", ", ".join((list(filter((lambda x: int(x) % 2 != 0), line_numbers)))))
# # INPUT 1
# 1, -2, 0, 5, 3, 4, -100, -20, 12, 19, -33
# # OUTPUT 1
# Positive: 1, 0, 5, 3, 4, 12, 19
# Negative: -2, -100, -20, -33
# Even: -2, 0, 4, -100, -20, 12
# Odd: 1, 5, 3, 19, -33
|
normal
|
{
"blob_id": "e4845e5aa949ec523515efc4d7996d647fddabdb",
"index": 7060,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Positive:', ', '.join(list(filter(lambda x: int(x) > -1, line_numbers)))\n )\nprint('Negative:', ', '.join(list(filter(lambda x: int(x) < 0, line_numbers))))\nprint('Even:', ', '.join(list(filter(lambda x: int(x) % 2 == 0, line_numbers)))\n )\nprint('Odd:', ', '.join(list(filter(lambda x: int(x) % 2 != 0, line_numbers))))\n",
"step-3": "line_numbers = input().split(', ')\nprint('Positive:', ', '.join(list(filter(lambda x: int(x) > -1, line_numbers)))\n )\nprint('Negative:', ', '.join(list(filter(lambda x: int(x) < 0, line_numbers))))\nprint('Even:', ', '.join(list(filter(lambda x: int(x) % 2 == 0, line_numbers)))\n )\nprint('Odd:', ', '.join(list(filter(lambda x: int(x) % 2 != 0, line_numbers))))\n",
"step-4": "line_numbers = input().split(\", \")\nprint(\"Positive:\", \", \".join(list(filter((lambda x: int(x) > -1), line_numbers))))\nprint(\"Negative:\", \", \".join((list(filter((lambda x: int(x) < 0), line_numbers)))))\nprint(\"Even:\", \", \".join((list(filter((lambda x: int(x) % 2 == 0), line_numbers)))))\nprint(\"Odd:\", \", \".join((list(filter((lambda x: int(x) % 2 != 0), line_numbers)))))\n# # INPUT 1\n# 1, -2, 0, 5, 3, 4, -100, -20, 12, 19, -33\n# # OUTPUT 1\n# Positive: 1, 0, 5, 3, 4, 12, 19\n# Negative: -2, -100, -20, -33\n# Even: -2, 0, 4, -100, -20, 12\n# Odd: 1, 5, 3, 19, -33\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def latex_formula(form):
latex = form.simplify().to_latex(outer=True)
if latex:
display(Math(latex))
display(Markdown('<details><pre>$' + latex + '$</pre></details>'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def latex_formula(form):
latex = form.simplify().to_latex(outer=True)
if latex:
display(Math(latex))
display(Markdown('<details><pre>$' + latex + '$</pre></details>'))
<|reserved_special_token_0|>
def latex_amatrix(M, labels=None):
if len(M.shape) > 2:
raise ValueError('array can at most display two dimensions')
lines = str(M).replace('[', '').replace(']', '').splitlines()
if labels and len(labels) == 2:
result = ['(\\mathbf{' + labels[0] + '} | \\vec ' + labels[1] + ') = ']
else:
result = ['']
result += ['\\left[\\begin{array}{ccc|c}']
result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\') for l in
lines]
result += ['\\end{array}\\right]']
display(Math('\n'.join(result)))
display(Markdown('<details><pre>$' + ' '.join(result) +
'$</pre></details>'))
def latex_msquare(sq):
if sq.shape != (3, 3):
raise ValueError('Geen magisch vierkant')
lines = str(sq).replace('[', '').replace(']', '').splitlines()
result = ['\\begin{array}{|c|c|c|}\\hline']
result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\hline'
) for l in lines]
result += ['\\end{array}']
display(Math('\n'.join(result)))
display(Markdown('<details><pre>$' + ' '.join(result) +
'$</pre></details>'))
def latex_ratio(x):
"""Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string
geconverteerd."""
if isinstance(x, int):
return str(x)
else:
n, d = x.as_integer_ratio()
return ('-' if n < 0 else '') + '\\frac{' + str(abs(n)) + '}{' + str(d
) + '}'
def latex_polynomial(poly):
terms, label, var, primes = poly
def power(exp):
"""Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.
In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd."""
if exp is 1:
return var
elif exp is 0:
return ''
else:
return var + '^{' + latex_ratio(exp) + '}'
result = label + ('^{' + '\\prime' * primes + '}' if primes > 0 else ''
) + '(' + var + ') = '
first = True
for k, v in reversed(sorted(terms.items())):
if v > 0 and not first:
result += '+'
elif v < 0:
result += '-'
if v != 0:
first = False
if k is 0:
result += str(v)
elif abs(v) is 1:
result += str(power(k))
elif v != 0:
result += latex_ratio(abs(v)) + str(power(k))
display(Math(result))
display(Markdown('<details><pre>$' + result + '$</pre></details>'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def show_num(x):
return re.compile('\\.(?!\\d)').sub('\x01', x)
def latex_formula(form):
latex = form.simplify().to_latex(outer=True)
if latex:
display(Math(latex))
display(Markdown('<details><pre>$' + latex + '$</pre></details>'))
def latex_bmatrix(M, label=None):
if len(M.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
lines = str(M).replace('[', '').replace(']', '').splitlines()
if label:
result = [label + ' = ']
else:
result = ['']
result += ['\\begin{bmatrix}']
result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\') for l in
lines]
result += ['\\end{bmatrix}']
display(Math('\n'.join(result)))
display(Markdown('<details><pre>$' + ' '.join(result) +
'$</pre></details>'))
def latex_amatrix(M, labels=None):
if len(M.shape) > 2:
raise ValueError('array can at most display two dimensions')
lines = str(M).replace('[', '').replace(']', '').splitlines()
if labels and len(labels) == 2:
result = ['(\\mathbf{' + labels[0] + '} | \\vec ' + labels[1] + ') = ']
else:
result = ['']
result += ['\\left[\\begin{array}{ccc|c}']
result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\') for l in
lines]
result += ['\\end{array}\\right]']
display(Math('\n'.join(result)))
display(Markdown('<details><pre>$' + ' '.join(result) +
'$</pre></details>'))
def latex_msquare(sq):
if sq.shape != (3, 3):
raise ValueError('Geen magisch vierkant')
lines = str(sq).replace('[', '').replace(']', '').splitlines()
result = ['\\begin{array}{|c|c|c|}\\hline']
result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\hline'
) for l in lines]
result += ['\\end{array}']
display(Math('\n'.join(result)))
display(Markdown('<details><pre>$' + ' '.join(result) +
'$</pre></details>'))
def latex_ratio(x):
"""Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string
geconverteerd."""
if isinstance(x, int):
return str(x)
else:
n, d = x.as_integer_ratio()
return ('-' if n < 0 else '') + '\\frac{' + str(abs(n)) + '}{' + str(d
) + '}'
def latex_polynomial(poly):
terms, label, var, primes = poly
def power(exp):
"""Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.
In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd."""
if exp is 1:
return var
elif exp is 0:
return ''
else:
return var + '^{' + latex_ratio(exp) + '}'
result = label + ('^{' + '\\prime' * primes + '}' if primes > 0 else ''
) + '(' + var + ') = '
first = True
for k, v in reversed(sorted(terms.items())):
if v > 0 and not first:
result += '+'
elif v < 0:
result += '-'
if v != 0:
first = False
if k is 0:
result += str(v)
elif abs(v) is 1:
result += str(power(k))
elif v != 0:
result += latex_ratio(abs(v)) + str(power(k))
display(Math(result))
display(Markdown('<details><pre>$' + result + '$</pre></details>'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'Brian van der Bijl'
__copyright__ = 'Copyright 2020, Hogeschool Utrecht'
from IPython.display import display, Math, Markdown
import re
def show_num(x):
return re.compile('\\.(?!\\d)').sub('\x01', x)
def latex_formula(form):
latex = form.simplify().to_latex(outer=True)
if latex:
display(Math(latex))
display(Markdown('<details><pre>$' + latex + '$</pre></details>'))
def latex_bmatrix(M, label=None):
if len(M.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
lines = str(M).replace('[', '').replace(']', '').splitlines()
if label:
result = [label + ' = ']
else:
result = ['']
result += ['\\begin{bmatrix}']
result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\') for l in
lines]
result += ['\\end{bmatrix}']
display(Math('\n'.join(result)))
display(Markdown('<details><pre>$' + ' '.join(result) +
'$</pre></details>'))
def latex_amatrix(M, labels=None):
if len(M.shape) > 2:
raise ValueError('array can at most display two dimensions')
lines = str(M).replace('[', '').replace(']', '').splitlines()
if labels and len(labels) == 2:
result = ['(\\mathbf{' + labels[0] + '} | \\vec ' + labels[1] + ') = ']
else:
result = ['']
result += ['\\left[\\begin{array}{ccc|c}']
result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\') for l in
lines]
result += ['\\end{array}\\right]']
display(Math('\n'.join(result)))
display(Markdown('<details><pre>$' + ' '.join(result) +
'$</pre></details>'))
def latex_msquare(sq):
if sq.shape != (3, 3):
raise ValueError('Geen magisch vierkant')
lines = str(sq).replace('[', '').replace(']', '').splitlines()
result = ['\\begin{array}{|c|c|c|}\\hline']
result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\hline'
) for l in lines]
result += ['\\end{array}']
display(Math('\n'.join(result)))
display(Markdown('<details><pre>$' + ' '.join(result) +
'$</pre></details>'))
def latex_ratio(x):
"""Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string
geconverteerd."""
if isinstance(x, int):
return str(x)
else:
n, d = x.as_integer_ratio()
return ('-' if n < 0 else '') + '\\frac{' + str(abs(n)) + '}{' + str(d
) + '}'
def latex_polynomial(poly):
terms, label, var, primes = poly
def power(exp):
"""Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.
In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd."""
if exp is 1:
return var
elif exp is 0:
return ''
else:
return var + '^{' + latex_ratio(exp) + '}'
result = label + ('^{' + '\\prime' * primes + '}' if primes > 0 else ''
) + '(' + var + ') = '
first = True
for k, v in reversed(sorted(terms.items())):
if v > 0 and not first:
result += '+'
elif v < 0:
result += '-'
if v != 0:
first = False
if k is 0:
result += str(v)
elif abs(v) is 1:
result += str(power(k))
elif v != 0:
result += latex_ratio(abs(v)) + str(power(k))
display(Math(result))
display(Markdown('<details><pre>$' + result + '$</pre></details>'))
<|reserved_special_token_1|>
#!/usr/bin/env python
"""Diverse wiskundige structuren weergeven in LaTeX in Jupyter Notebook."""
__author__ = "Brian van der Bijl"
__copyright__ = "Copyright 2020, Hogeschool Utrecht"
from IPython.display import display, Math, Markdown
import re
def show_num(x):
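    # Replace a '.' that is not followed by a digit (e.g. numpy's trailing dot on whole floats);
    # note that "\1" in a non-raw string is the control character chr(1), not a regex backreference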
return re.compile(r"\.(?!\d)").sub("\1",x)
def latex_formula(form):
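    # Simplify the formula object, render its LaTeX with Math(), and add a collapsible <details> block holding the raw source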
latex = form.simplify().to_latex(outer=True)
if latex:
display(Math(latex))
display(Markdown("<details><pre>$" + latex + "$</pre></details>"))
def latex_bmatrix(M, label=None): # Based on https://stackoverflow.com/questions/17129290/numpy-2d-and-1d-array-to-latex-bmatrix
if len(M.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
lines = str(M).replace("[", "").replace("]", "").splitlines()
if label:
result = [label + " = "]
else:
result = [""]
result += [r"\begin{bmatrix}"]
result += [" " + " & ".join(map(show_num, l.split())) + r"\\" for l in lines]
result += [r"\end{bmatrix}"]
display(Math("\n".join(result)))
display(Markdown("<details><pre>$" + " ".join(result) + "$</pre></details>"))
def latex_amatrix(M, labels=None):
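    # Render an augmented matrix [A | b] as an array with a vertical rule (three coefficient columns assumed);
    # labels, if given, become the "(A | b) = " prefix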
if len(M.shape) > 2:
raise ValueError('array can at most display two dimensions')
lines = str(M).replace("[", "").replace("]", "").splitlines()
if labels and len(labels) == 2:
result = [r"(\mathbf{" + labels[0] + r"} | \vec " + labels[1] + ") = "]
else:
result = [""]
result += [r"\left[\begin{array}{ccc|c}"]
result += [" " + " & ".join(map(show_num, l.split())) + r"\\" for l in lines]
result += [r"\end{array}\right]"]
display(Math("\n".join(result)))
display(Markdown("<details><pre>$" + " ".join(result) + "$</pre></details>"))
def latex_msquare(sq):
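    # Render a 3x3 magic square as a fully ruled LaTeX array (\hline after every row)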
if sq.shape != (3,3):
raise ValueError('Geen magisch vierkant')
lines = str(sq).replace("[", "").replace("]", "").splitlines()
result = [r"\begin{array}{|c|c|c|}\hline"]
result += [" " + " & ".join(map(show_num, l.split())) + r"\\\hline" for l in lines]
result += [r"\end{array}"]
display(Math("\n".join(result)))
display(Markdown("<details><pre>$" + " ".join(result) + "$</pre></details>"))
def latex_ratio(x):
"""Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string
geconverteerd."""
if isinstance(x, int):
return str(x)
else:
        n, d = x.as_integer_ratio() # pull the sign out in front of the fraction
return ("-" if n < 0 else "") + r"\frac{" + str(abs(n)) + "}{" + str(d) + "}"
def latex_polynomial(poly):
terms, label, var, primes = poly # Bind parameters uit tuple
def power(exp):
"""Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.
In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd."""
if exp is 1:
return var
elif exp is 0:
return ""
else:
return (var + r"^{" + latex_ratio(exp) + "}")
    # Print f(x) with the right number of primes
result = label + ("^{" + r"\prime"*primes + "}" if primes > 0 else "") + "(" + var + ") = "
    first = True # after the first term, a "+" must join the terms
    for k, v in reversed(sorted(terms.items())): # for every term, from large (high exponent) to small
        if v > 0 and not first: # join with a plus, unless it is the first term
result += "+"
        elif v < 0: # join with a minus when the term is negative, even for the first term
result += "-"
        if v != 0: # set first to False after the first emitted term
first = False
if k is 0:
result += str(v)
        elif abs(v) is 1: # print x instead of 1x and -x instead of -1x
result += str(power(k))
        elif v != 0: # print every term that is not 0 or ±1 in the usual way, without the minus sign,
            result += latex_ratio(abs(v)) + str(power(k)) # since that was already emitted above
display(Math(result))
display(Markdown("<details><pre>$" + result + "$</pre></details>"))
|
flexible
|
{
"blob_id": "7f7bd2e9ec1932ccfd8aa900956ce85473ee8dbd",
"index": 4668,
"step-1": "<mask token>\n\n\ndef latex_formula(form):\n latex = form.simplify().to_latex(outer=True)\n if latex:\n display(Math(latex))\n display(Markdown('<details><pre>$' + latex + '$</pre></details>'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef latex_formula(form):\n latex = form.simplify().to_latex(outer=True)\n if latex:\n display(Math(latex))\n display(Markdown('<details><pre>$' + latex + '$</pre></details>'))\n\n\n<mask token>\n\n\ndef latex_amatrix(M, labels=None):\n if len(M.shape) > 2:\n raise ValueError('array can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if labels and len(labels) == 2:\n result = ['(\\\\mathbf{' + labels[0] + '} | \\\\vec ' + labels[1] + ') = ']\n else:\n result = ['']\n result += ['\\\\left[\\\\begin{array}{ccc|c}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{array}\\\\right]']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_msquare(sq):\n if sq.shape != (3, 3):\n raise ValueError('Geen magisch vierkant')\n lines = str(sq).replace('[', '').replace(']', '').splitlines()\n result = ['\\\\begin{array}{|c|c|c|}\\\\hline']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\\\\\hline'\n ) for l in lines]\n result += ['\\\\end{array}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_ratio(x):\n \"\"\"Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string\n geconverteerd.\"\"\"\n if isinstance(x, int):\n return str(x)\n else:\n n, d = x.as_integer_ratio()\n return ('-' if n < 0 else '') + '\\\\frac{' + str(abs(n)) + '}{' + str(d\n ) + '}'\n\n\ndef latex_polynomial(poly):\n terms, label, var, primes = poly\n\n def power(exp):\n \"\"\"Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.\n In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd.\"\"\"\n if exp is 1:\n return var\n elif exp is 0:\n return ''\n else:\n return var + '^{' + latex_ratio(exp) + '}'\n result = label + ('^{' + '\\\\prime' * primes + '}' if primes > 0 else ''\n ) + '(' + var + ') = '\n first = True\n for k, v in reversed(sorted(terms.items())):\n if v > 0 and not first:\n result += '+'\n elif v < 0:\n result += '-'\n if v != 0:\n first = False\n if k is 0:\n result += str(v)\n elif abs(v) is 1:\n result += str(power(k))\n elif v != 0:\n result += latex_ratio(abs(v)) + str(power(k))\n display(Math(result))\n display(Markdown('<details><pre>$' + result + '$</pre></details>'))\n",
"step-3": "<mask token>\n\n\ndef show_num(x):\n return re.compile('\\\\.(?!\\\\d)').sub('\\x01', x)\n\n\ndef latex_formula(form):\n latex = form.simplify().to_latex(outer=True)\n if latex:\n display(Math(latex))\n display(Markdown('<details><pre>$' + latex + '$</pre></details>'))\n\n\ndef latex_bmatrix(M, label=None):\n if len(M.shape) > 2:\n raise ValueError('bmatrix can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if label:\n result = [label + ' = ']\n else:\n result = ['']\n result += ['\\\\begin{bmatrix}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{bmatrix}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_amatrix(M, labels=None):\n if len(M.shape) > 2:\n raise ValueError('array can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if labels and len(labels) == 2:\n result = ['(\\\\mathbf{' + labels[0] + '} | \\\\vec ' + labels[1] + ') = ']\n else:\n result = ['']\n result += ['\\\\left[\\\\begin{array}{ccc|c}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{array}\\\\right]']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_msquare(sq):\n if sq.shape != (3, 3):\n raise ValueError('Geen magisch vierkant')\n lines = str(sq).replace('[', '').replace(']', '').splitlines()\n result = ['\\\\begin{array}{|c|c|c|}\\\\hline']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\\\\\hline'\n ) for l in lines]\n result += ['\\\\end{array}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_ratio(x):\n \"\"\"Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string\n geconverteerd.\"\"\"\n if isinstance(x, int):\n return str(x)\n else:\n n, d = x.as_integer_ratio()\n return ('-' if n < 0 else '') + '\\\\frac{' + str(abs(n)) + '}{' + str(d\n ) + '}'\n\n\ndef latex_polynomial(poly):\n terms, label, var, primes = poly\n\n def power(exp):\n \"\"\"Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.\n In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd.\"\"\"\n if exp is 1:\n return var\n elif exp is 0:\n return ''\n else:\n return var + '^{' + latex_ratio(exp) + '}'\n result = label + ('^{' + '\\\\prime' * primes + '}' if primes > 0 else ''\n ) + '(' + var + ') = '\n first = True\n for k, v in reversed(sorted(terms.items())):\n if v > 0 and not first:\n result += '+'\n elif v < 0:\n result += '-'\n if v != 0:\n first = False\n if k is 0:\n result += str(v)\n elif abs(v) is 1:\n result += str(power(k))\n elif v != 0:\n result += latex_ratio(abs(v)) + str(power(k))\n display(Math(result))\n display(Markdown('<details><pre>$' + result + '$</pre></details>'))\n",
"step-4": "<mask token>\n__author__ = 'Brian van der Bijl'\n__copyright__ = 'Copyright 2020, Hogeschool Utrecht'\nfrom IPython.display import display, Math, Markdown\nimport re\n\n\ndef show_num(x):\n return re.compile('\\\\.(?!\\\\d)').sub('\\x01', x)\n\n\ndef latex_formula(form):\n latex = form.simplify().to_latex(outer=True)\n if latex:\n display(Math(latex))\n display(Markdown('<details><pre>$' + latex + '$</pre></details>'))\n\n\ndef latex_bmatrix(M, label=None):\n if len(M.shape) > 2:\n raise ValueError('bmatrix can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if label:\n result = [label + ' = ']\n else:\n result = ['']\n result += ['\\\\begin{bmatrix}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{bmatrix}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_amatrix(M, labels=None):\n if len(M.shape) > 2:\n raise ValueError('array can at most display two dimensions')\n lines = str(M).replace('[', '').replace(']', '').splitlines()\n if labels and len(labels) == 2:\n result = ['(\\\\mathbf{' + labels[0] + '} | \\\\vec ' + labels[1] + ') = ']\n else:\n result = ['']\n result += ['\\\\left[\\\\begin{array}{ccc|c}']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\') for l in\n lines]\n result += ['\\\\end{array}\\\\right]']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_msquare(sq):\n if sq.shape != (3, 3):\n raise ValueError('Geen magisch vierkant')\n lines = str(sq).replace('[', '').replace(']', '').splitlines()\n result = ['\\\\begin{array}{|c|c|c|}\\\\hline']\n result += [(' ' + ' & '.join(map(show_num, l.split())) + '\\\\\\\\\\\\hline'\n ) for l in lines]\n result += ['\\\\end{array}']\n display(Math('\\n'.join(result)))\n display(Markdown('<details><pre>$' + ' '.join(result) +\n '$</pre></details>'))\n\n\ndef latex_ratio(x):\n \"\"\"Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string\n geconverteerd.\"\"\"\n if isinstance(x, int):\n return str(x)\n else:\n n, d = x.as_integer_ratio()\n return ('-' if n < 0 else '') + '\\\\frac{' + str(abs(n)) + '}{' + str(d\n ) + '}'\n\n\ndef latex_polynomial(poly):\n terms, label, var, primes = poly\n\n def power(exp):\n \"\"\"Print een term (e.g. x^2). x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.\n In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd.\"\"\"\n if exp is 1:\n return var\n elif exp is 0:\n return ''\n else:\n return var + '^{' + latex_ratio(exp) + '}'\n result = label + ('^{' + '\\\\prime' * primes + '}' if primes > 0 else ''\n ) + '(' + var + ') = '\n first = True\n for k, v in reversed(sorted(terms.items())):\n if v > 0 and not first:\n result += '+'\n elif v < 0:\n result += '-'\n if v != 0:\n first = False\n if k is 0:\n result += str(v)\n elif abs(v) is 1:\n result += str(power(k))\n elif v != 0:\n result += latex_ratio(abs(v)) + str(power(k))\n display(Math(result))\n display(Markdown('<details><pre>$' + result + '$</pre></details>'))\n",
"step-5": "#!/usr/bin/env python\r\n\r\n\"\"\"Diverse wiskundige structuren weergeven in LaTeX in Jupyter Notebook.\"\"\"\r\n\r\n__author__ = \"Brian van der Bijl\"\r\n__copyright__ = \"Copyright 2020, Hogeschool Utrecht\"\r\n\r\nfrom IPython.display import display, Math, Markdown\r\nimport re\r\n\r\ndef show_num(x):\r\n return re.compile(r\"\\.(?!\\d)\").sub(\"\\1\",x)\r\n\r\ndef latex_formula(form):\r\n latex = form.simplify().to_latex(outer=True)\r\n if latex:\r\n display(Math(latex))\r\n display(Markdown(\"<details><pre>$\" + latex + \"$</pre></details>\"))\r\n\r\ndef latex_bmatrix(M, label=None): # Gebaseerd op https://stackoverflow.com/questions/17129290/numpy-2d-and-1d-array-to-latex-bmatrix\r\n if len(M.shape) > 2:\r\n raise ValueError('bmatrix can at most display two dimensions')\r\n lines = str(M).replace(\"[\", \"\").replace(\"]\", \"\").splitlines()\r\n if label:\r\n result = [label + \" = \"]\r\n else:\r\n result = [\"\"]\r\n result += [r\"\\begin{bmatrix}\"]\r\n result += [\" \" + \" & \".join(map(show_num, l.split())) + r\"\\\\\" for l in lines]\r\n result += [r\"\\end{bmatrix}\"]\r\n display(Math(\"\\n\".join(result)))\r\n display(Markdown(\"<details><pre>$\" + \" \".join(result) + \"$</pre></details>\"))\r\n\r\ndef latex_amatrix(M, labels=None):\r\n if len(M.shape) > 2:\r\n raise ValueError('array can at most display two dimensions')\r\n lines = str(M).replace(\"[\", \"\").replace(\"]\", \"\").splitlines()\r\n if labels and len(labels) == 2:\r\n result = [r\"(\\mathbf{\" + labels[0] + r\"} | \\vec \" + labels[1] + \") = \"]\r\n else:\r\n result = [\"\"]\r\n result += [r\"\\left[\\begin{array}{ccc|c}\"]\r\n result += [\" \" + \" & \".join(map(show_num, l.split())) + r\"\\\\\" for l in lines]\r\n result += [r\"\\end{array}\\right]\"]\r\n display(Math(\"\\n\".join(result)))\r\n display(Markdown(\"<details><pre>$\" + \" \".join(result) + \"$</pre></details>\"))\r\n\r\ndef latex_msquare(sq):\r\n if sq.shape != (3,3):\r\n raise ValueError('Geen magisch vierkant')\r\n lines = str(sq).replace(\"[\", \"\").replace(\"]\", \"\").splitlines()\r\n result = [r\"\\begin{array}{|c|c|c|}\\hline\"]\r\n result += [\" \" + \" & \".join(map(show_num, l.split())) + r\"\\\\\\hline\" for l in lines]\r\n result += [r\"\\end{array}\"]\r\n display(Math(\"\\n\".join(result)))\r\n display(Markdown(\"<details><pre>$\" + \" \".join(result) + \"$</pre></details>\"))\r\n\r\ndef latex_ratio(x):\r\n \"\"\"Helper functie om breuken naar LaTeX te converteren; getallen worden alleen naar string\r\n geconverteerd.\"\"\"\r\n if isinstance(x, int):\r\n return str(x)\r\n else:\r\n n, d = x.as_integer_ratio() # Nul buiten de breuk halen\r\n return (\"-\" if n < 0 else \"\") + r\"\\frac{\" + str(abs(n)) + \"}{\" + str(d) + \"}\"\r\n\r\ndef latex_polynomial(poly):\r\n terms, label, var, primes = poly # Bind parameters uit tuple\r\n\r\n def power(exp):\r\n \"\"\"Print een term (e.g. x^2). 
x^1 is gewoon x, x^0 is 1, maar n × 1 is gewoon n dus verberg de 1.\r\n In alle andere gevallen wordt de variabele met het juiste exponent opgeleverd.\"\"\"\r\n if exp is 1:\r\n return var\r\n elif exp is 0:\r\n return \"\"\r\n else:\r\n return (var + r\"^{\" + latex_ratio(exp) + \"}\")\r\n\r\n # Print f(x) met het juiste aantal primes \r\n result = label + (\"^{\" + r\"\\prime\"*primes + \"}\" if primes > 0 else \"\") + \"(\" + var + \") = \"\r\n first = True # Na de eerste moet er \"+\" tussen de termen komen\r\n\r\n for k, v in reversed(sorted(terms.items())): # Voor iedere term, van groot (hoog exponent) naar klein\r\n if v > 0 and not first: # Koppel met een plus, tenzij het de eerste term is\r\n result += \"+\"\r\n elif v < 0: # Koppel met een min als de term negatief is, ook de eerste term\r\n result += \"-\"\r\n\r\n if v != 0: # Zet first op False na de eerste keer\r\n first = False\r\n\r\n if k is 0:\r\n result += str(v)\r\n elif abs(v) is 1: # Print x in plaats van 1x en -x in plaats van -1x\r\n result += str(power(k))\r\n elif v != 0: # Print iedere term die niet 0 of 1 is op de gebruikelijke manier, zonder min want die staat\r\n result += latex_ratio(abs(v)) + str(power(k)) # erboven al\r\n\r\n display(Math(result))\r\n display(Markdown(\"<details><pre>$\" + result + \"$</pre></details>\"))\r\n",
"step-ids": [
1,
5,
7,
9,
10
]
}
|
[
1,
5,
7,
9,
10
] |
import json
import os
from subprocess import PIPE, Popen as popen
from unittest import TestCase
from substra.commands import Config
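# Expected payloads for the "substra list <asset>" calls exercised below; each test compares the CLI's JSON output against these fixtures.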
objective = [[{
'descriptionStorageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/description/',
'key': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',
'name': 'macro-average recall',
'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'},
'name': 'Skin Lesion Classification Challenge',
'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
'testDataKeys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1']}, {
'descriptionStorageAddress': 'http://owkin.substrabac:8000/objective/6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c/description/',
'key': '6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',
'metrics': {'hash': '0bc732c26bafdc41321c2bffd35b6835aa35f7371a4eb02994642c2c3a688f60',
'name': 'macro-average recall',
'storageAddress': 'http://owkin.substrabac:8000/objective/6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c/metrics/'},
'name': 'Simplified skin lesion classification',
'owner': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55', 'permissions': 'all',
'testDataKeys': ['2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e',
'533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1']}]]
data_manager = [[{'objectiveKeys': [],
'description': {'hash': '7a90514f88c70002608a9868681dd1589ea598e78d00a8cd7783c3ea0f9ceb09',
'storageAddress': 'http://chunantes.substrabac:8001/data_manager/ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994/description/'},
'key': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994', 'name': 'ISIC 2018',
'nbData': 2,
'openerStorageAddress': 'http://chunantes.substrabac:8001/data_manager/ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994/opener/',
'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
'size': 553113, 'type': 'Images'}, {
'objectiveKeys': ['6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',
'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f'],
'description': {'hash': '258bef187a166b3fef5cb86e68c8f7e154c283a148cd5bc344fec7e698821ad3',
'storageAddress': 'http://owkin.substrabac:8000/data_manager/b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0/description/'},
'key': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0',
'name': 'Simplified ISIC 2018', 'nbData': 6,
'openerStorageAddress': 'http://owkin.substrabac:8000/data_manager/b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0/opener/',
'owner': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55', 'permissions': 'all',
'size': 1415097, 'type': 'Images'}]]
data = [{'pkhash': 'e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1', 'validated': True,
'file': 'http://owkin.substrabac:8000/media/data/e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1/0024900.zip'},
{'pkhash': '4b5152871b181d10ee774c10458c064c70710f4ba35938f10c0b7aa51f7dc010', 'validated': True,
'file': 'http://owkin.substrabac:8000/media/data/4b5152871b181d10ee774c10458c064c70710f4ba35938f10c0b7aa51f7dc010/0024701.zip'},
{'pkhash': '93e4b1e040b08cfa8a68b13f9dddb95a6672e8a377378545b2b1254691cfc060', 'validated': True,
'file': 'http://owkin.substrabac:8000/media/data/93e4b1e040b08cfa8a68b13f9dddb95a6672e8a377378545b2b1254691cfc060/0024317.zip'},
{'pkhash': 'eed4c6ea09babe7ca6428377fff6e54102ef5cdb0cae593732ddbe3f224217cb', 'validated': True,
'file': 'http://owkin.substrabac:8000/media/data/eed4c6ea09babe7ca6428377fff6e54102ef5cdb0cae593732ddbe3f224217cb/0024316.zip'},
{'pkhash': '2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e', 'validated': True,
'file': 'http://owkin.substrabac:8000/media/data/2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e/0024315.zip'},
{'pkhash': '533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1', 'validated': True,
'file': 'http://owkin.substrabac:8000/media/data/533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1/0024318.zip'}]
algo = [[{'objectiveKey': '6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',
'description': {'hash': '3b1281cbdd6ebfec650d0a9f932a64e45a27262848065d7cecf11fd7191b4b1f',
'storageAddress': 'http://chunantes.substrabac:8001/algo/7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0/description/'},
'key': '7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0',
'name': 'Logistic regression for balanced problem',
'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
'storageAddress': 'http://chunantes.substrabac:8001/algo/7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0/file/'},
{'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
'description': {'hash': 'b9463411a01ea00869bdffce6e59a5c100a4e635c0a9386266cad3c77eb28e9e',
'storageAddress': 'http://chunantes.substrabac:8001/algo/0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f/description/'},
'key': '0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f', 'name': 'Neural Network',
'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
'storageAddress': 'http://chunantes.substrabac:8001/algo/0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f/file/'},
{'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
'description': {'hash': '124a0425b746d7072282d167b53cb6aab3a31bf1946dae89135c15b0126ebec3',
'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/description/'},
'key': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f', 'name': 'Logistic regression',
'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},
{'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
'description': {'hash': '4acea40c4b51996c88ef279c5c9aa41ab77b97d38c5ca167e978a98b2e402675',
'storageAddress': 'http://chunantes.substrabac:8001/algo/f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284/description/'},
'key': 'f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284', 'name': 'Random Forest',
'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
'storageAddress': 'http://chunantes.substrabac:8001/algo/f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284/file/'}]]
model = [[{'algo': {'hash': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f',
'name': 'Logistic regression',
'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},
'objective': {'hash': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',
'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'}},
'creator': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f',
'endModel': {'hash': 'fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb',
'storageAddress': 'http://chunantes.substrabac:8001/model/fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb/file/'},
'key': '3e1a9e122765b2976f393322ab9d1c59fb113b35e2531900e06c9ae0f41e8afb',
'log': 'Train - CPU:100.23 % - Mem:0.14 GB - GPU:0.00 % - GPU Mem:0.00 GB; Test - CPU:0.00 % - Mem:0.00 GB - GPU:0.00 % - GPU Mem:0.00 GB; ',
'permissions': 'all', 'startModel': None, 'status': 'done',
'testData': {'keys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1'],
'openerHash': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0', 'perf': 1,
'worker': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55'}, 'trainData': {
'keys': ['62fb3263208d62c7235a046ee1d80e25512fe782254b730a9e566276b8c0ef3a',
'42303efa663015e729159833a12ffb510ff92a6e386b8152f90f6fb14ddc94c9'],
'openerHash': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994', 'perf': 1,
'worker': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f'}}]]
traintuple = [{'algo': {'hash': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f',
'name': 'Logistic regression',
'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},
'objective': {'hash': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',
'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'}},
'creator': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f',
'endModel': {'hash': 'fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb',
'storageAddress': 'http://chunantes.substrabac:8001/model/fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb/file/'},
'key': '3e1a9e122765b2976f393322ab9d1c59fb113b35e2531900e06c9ae0f41e8afb',
'log': 'Train - CPU:100.23 % - Mem:0.14 GB - GPU:0.00 % - GPU Mem:0.00 GB; Test - CPU:0.00 % - Mem:0.00 GB - GPU:0.00 % - GPU Mem:0.00 GB; ',
'permissions': 'all', 'startModel': None, 'status': 'done',
'testData': {'keys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1'],
'openerHash': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0', 'perf': 1,
'worker': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55'},
'trainData': {'keys': ['62fb3263208d62c7235a046ee1d80e25512fe782254b730a9e566276b8c0ef3a',
'42303efa663015e729159833a12ffb510ff92a6e386b8152f90f6fb14ddc94c9'],
'openerHash': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994',
'perf': 1, 'worker': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f'}}]
# Run this test only after an e2e multi orgs
class TestList(TestCase):
def setUp(self):
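        # Write a throwaway substra CLI config pointing at the owkin backend, with credentials taken from the environment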
Config({
'<url>': 'http://owkin.substrabac:8000',
'<version>': '0.0',
'<user>': os.environ.get('BACK_AUTH_USER', ''),
'<password>': os.environ.get('BACK_AUTH_PASSWORD', ''),
'--config': '/tmp/.substra_e2e'
}).run()
def tearDown(self):
try:
os.remove('/tmp/.substra_e2e')
except:
pass
def test_list_objective(self):
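        # Shell out to the substra CLI and parse its stdout as JSON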
output = popen(['substra', 'list', 'objective', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]
res = output.decode('utf-8')
self.assertTrue(json.loads(res) == objective)
def test_list_data_manager(self):
output = popen(['substra', 'list', 'data_manager', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]
res = output.decode('utf-8')
self.assertTrue(json.loads(res) == data_manager)
def test_list_data(self):
output = popen(['substra', 'list', 'data_sample', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]
res = output.decode('utf-8')
self.assertTrue(json.loads(res) == data)
def test_list_algo(self):
output = popen(['substra', 'list', 'algo', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]
res = output.decode('utf-8')
self.assertTrue(json.loads(res) == algo)
def test_list_model(self):
output = popen(['substra', 'list', 'model', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]
res = output.decode('utf-8')
self.assertTrue(json.loads(res) == model)
def test_list_traintuple(self):
output = popen(['substra', 'list', 'traintuple', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]
res = output.decode('utf-8')
self.assertTrue(json.loads(res) == traintuple)
|
normal
|
{
"blob_id": "c55b768466309d2e655c9222e0674a6bc2a958b3",
"index": 9899,
"step-1": "<mask token>\n\n\nclass TestList(TestCase):\n <mask token>\n <mask token>\n\n def test_list_objective(self):\n output = popen(['substra', 'list', 'objective',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == objective)\n\n def test_list_data_manager(self):\n output = popen(['substra', 'list', 'data_manager',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data_manager)\n\n def test_list_data(self):\n output = popen(['substra', 'list', 'data_sample',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data)\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestList(TestCase):\n <mask token>\n\n def tearDown(self):\n try:\n os.remove('/tmp/.substra_e2e')\n except:\n pass\n\n def test_list_objective(self):\n output = popen(['substra', 'list', 'objective',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == objective)\n\n def test_list_data_manager(self):\n output = popen(['substra', 'list', 'data_manager',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data_manager)\n\n def test_list_data(self):\n output = popen(['substra', 'list', 'data_sample',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data)\n\n def test_list_algo(self):\n output = popen(['substra', 'list', 'algo',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == algo)\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestList(TestCase):\n\n def setUp(self):\n Config({'<url>': 'http://owkin.substrabac:8000', '<version>': '0.0',\n '<user>': os.environ.get('BACK_AUTH_USER', ''), '<password>':\n os.environ.get('BACK_AUTH_PASSWORD', ''), '--config':\n '/tmp/.substra_e2e'}).run()\n\n def tearDown(self):\n try:\n os.remove('/tmp/.substra_e2e')\n except:\n pass\n\n def test_list_objective(self):\n output = popen(['substra', 'list', 'objective',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == objective)\n\n def test_list_data_manager(self):\n output = popen(['substra', 'list', 'data_manager',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data_manager)\n\n def test_list_data(self):\n output = popen(['substra', 'list', 'data_sample',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data)\n\n def test_list_algo(self):\n output = popen(['substra', 'list', 'algo',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == algo)\n <mask token>\n\n def test_list_traintuple(self):\n output = popen(['substra', 'list', 'traintuple',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == traintuple)\n",
"step-4": "<mask token>\n\n\nclass TestList(TestCase):\n\n def setUp(self):\n Config({'<url>': 'http://owkin.substrabac:8000', '<version>': '0.0',\n '<user>': os.environ.get('BACK_AUTH_USER', ''), '<password>':\n os.environ.get('BACK_AUTH_PASSWORD', ''), '--config':\n '/tmp/.substra_e2e'}).run()\n\n def tearDown(self):\n try:\n os.remove('/tmp/.substra_e2e')\n except:\n pass\n\n def test_list_objective(self):\n output = popen(['substra', 'list', 'objective',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == objective)\n\n def test_list_data_manager(self):\n output = popen(['substra', 'list', 'data_manager',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data_manager)\n\n def test_list_data(self):\n output = popen(['substra', 'list', 'data_sample',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == data)\n\n def test_list_algo(self):\n output = popen(['substra', 'list', 'algo',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == algo)\n\n def test_list_model(self):\n output = popen(['substra', 'list', 'model',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == model)\n\n def test_list_traintuple(self):\n output = popen(['substra', 'list', 'traintuple',\n '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n self.assertTrue(json.loads(res) == traintuple)\n",
"step-5": "import json\nimport os\nfrom subprocess import PIPE, Popen as popen\nfrom unittest import TestCase\n\nfrom substra.commands import Config\n\nobjective = [[{\n 'descriptionStorageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/description/',\n 'key': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',\n 'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',\n 'name': 'macro-average recall',\n 'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'},\n 'name': 'Skin Lesion Classification Challenge',\n 'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',\n 'testDataKeys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1']}, {\n 'descriptionStorageAddress': 'http://owkin.substrabac:8000/objective/6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c/description/',\n 'key': '6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',\n 'metrics': {'hash': '0bc732c26bafdc41321c2bffd35b6835aa35f7371a4eb02994642c2c3a688f60',\n 'name': 'macro-average recall',\n 'storageAddress': 'http://owkin.substrabac:8000/objective/6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c/metrics/'},\n 'name': 'Simplified skin lesion classification',\n 'owner': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55', 'permissions': 'all',\n 'testDataKeys': ['2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e',\n '533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1']}]]\n\ndata_manager = [[{'objectiveKeys': [],\n 'description': {'hash': '7a90514f88c70002608a9868681dd1589ea598e78d00a8cd7783c3ea0f9ceb09',\n 'storageAddress': 'http://chunantes.substrabac:8001/data_manager/ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994/description/'},\n 'key': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994', 'name': 'ISIC 2018',\n 'nbData': 2,\n 'openerStorageAddress': 'http://chunantes.substrabac:8001/data_manager/ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994/opener/',\n 'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',\n 'size': 553113, 'type': 'Images'}, {\n 'objectiveKeys': ['6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',\n 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f'],\n 'description': {'hash': '258bef187a166b3fef5cb86e68c8f7e154c283a148cd5bc344fec7e698821ad3',\n 'storageAddress': 'http://owkin.substrabac:8000/data_manager/b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0/description/'},\n 'key': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0',\n 'name': 'Simplified ISIC 2018', 'nbData': 6,\n 'openerStorageAddress': 'http://owkin.substrabac:8000/data_manager/b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0/opener/',\n 'owner': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55', 'permissions': 'all',\n 'size': 1415097, 'type': 'Images'}]]\n\ndata = [{'pkhash': 'e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1', 'validated': True,\n 'file': 'http://owkin.substrabac:8000/media/data/e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1/0024900.zip'},\n {'pkhash': '4b5152871b181d10ee774c10458c064c70710f4ba35938f10c0b7aa51f7dc010', 'validated': True,\n 'file': 
'http://owkin.substrabac:8000/media/data/4b5152871b181d10ee774c10458c064c70710f4ba35938f10c0b7aa51f7dc010/0024701.zip'},\n {'pkhash': '93e4b1e040b08cfa8a68b13f9dddb95a6672e8a377378545b2b1254691cfc060', 'validated': True,\n 'file': 'http://owkin.substrabac:8000/media/data/93e4b1e040b08cfa8a68b13f9dddb95a6672e8a377378545b2b1254691cfc060/0024317.zip'},\n {'pkhash': 'eed4c6ea09babe7ca6428377fff6e54102ef5cdb0cae593732ddbe3f224217cb', 'validated': True,\n 'file': 'http://owkin.substrabac:8000/media/data/eed4c6ea09babe7ca6428377fff6e54102ef5cdb0cae593732ddbe3f224217cb/0024316.zip'},\n {'pkhash': '2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e', 'validated': True,\n 'file': 'http://owkin.substrabac:8000/media/data/2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e/0024315.zip'},\n {'pkhash': '533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1', 'validated': True,\n 'file': 'http://owkin.substrabac:8000/media/data/533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1/0024318.zip'}]\n\nalgo = [[{'objectiveKey': '6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',\n 'description': {'hash': '3b1281cbdd6ebfec650d0a9f932a64e45a27262848065d7cecf11fd7191b4b1f',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0/description/'},\n 'key': '7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0',\n 'name': 'Logistic regression for balanced problem',\n 'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0/file/'},\n {'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',\n 'description': {'hash': 'b9463411a01ea00869bdffce6e59a5c100a4e635c0a9386266cad3c77eb28e9e',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f/description/'},\n 'key': '0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f', 'name': 'Neural Network',\n 'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f/file/'},\n {'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',\n 'description': {'hash': '124a0425b746d7072282d167b53cb6aab3a31bf1946dae89135c15b0126ebec3',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/description/'},\n 'key': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f', 'name': 'Logistic regression',\n 'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},\n {'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',\n 'description': {'hash': '4acea40c4b51996c88ef279c5c9aa41ab77b97d38c5ca167e978a98b2e402675',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284/description/'},\n 'key': 'f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284', 'name': 'Random Forest',\n 'owner': 
'91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284/file/'}]]\n\nmodel = [[{'algo': {'hash': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f',\n 'name': 'Logistic regression',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},\n 'objective': {'hash': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',\n 'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',\n 'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'}},\n 'creator': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f',\n 'endModel': {'hash': 'fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb',\n 'storageAddress': 'http://chunantes.substrabac:8001/model/fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb/file/'},\n 'key': '3e1a9e122765b2976f393322ab9d1c59fb113b35e2531900e06c9ae0f41e8afb',\n 'log': 'Train - CPU:100.23 % - Mem:0.14 GB - GPU:0.00 % - GPU Mem:0.00 GB; Test - CPU:0.00 % - Mem:0.00 GB - GPU:0.00 % - GPU Mem:0.00 GB; ',\n 'permissions': 'all', 'startModel': None, 'status': 'done',\n 'testData': {'keys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1'],\n 'openerHash': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0', 'perf': 1,\n 'worker': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55'}, 'trainData': {\n 'keys': ['62fb3263208d62c7235a046ee1d80e25512fe782254b730a9e566276b8c0ef3a',\n '42303efa663015e729159833a12ffb510ff92a6e386b8152f90f6fb14ddc94c9'],\n 'openerHash': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994', 'perf': 1,\n 'worker': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f'}}]]\n\ntraintuple = [{'algo': {'hash': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f',\n 'name': 'Logistic regression',\n 'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},\n 'objective': {'hash': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',\n 'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',\n 'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'}},\n 'creator': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f',\n 'endModel': {'hash': 'fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb',\n 'storageAddress': 'http://chunantes.substrabac:8001/model/fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb/file/'},\n 'key': '3e1a9e122765b2976f393322ab9d1c59fb113b35e2531900e06c9ae0f41e8afb',\n 'log': 'Train - CPU:100.23 % - Mem:0.14 GB - GPU:0.00 % - GPU Mem:0.00 GB; Test - CPU:0.00 % - Mem:0.00 GB - GPU:0.00 % - GPU Mem:0.00 GB; ',\n 'permissions': 'all', 'startModel': None, 'status': 'done',\n 'testData': {'keys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1'],\n 'openerHash': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0', 'perf': 1,\n 'worker': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55'},\n 'trainData': {'keys': 
['62fb3263208d62c7235a046ee1d80e25512fe782254b730a9e566276b8c0ef3a',\n '42303efa663015e729159833a12ffb510ff92a6e386b8152f90f6fb14ddc94c9'],\n 'openerHash': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994',\n 'perf': 1, 'worker': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f'}}]\n\n\n# Run this test only after an e2e multi orgs\nclass TestList(TestCase):\n\n def setUp(self):\n Config({\n '<url>': 'http://owkin.substrabac:8000',\n '<version>': '0.0',\n '<user>': os.environ.get('BACK_AUTH_USER', ''),\n '<password>': os.environ.get('BACK_AUTH_PASSWORD', ''),\n '--config': '/tmp/.substra_e2e'\n }).run()\n\n def tearDown(self):\n try:\n os.remove('/tmp/.substra_e2e')\n except:\n pass\n\n def test_list_objective(self):\n output = popen(['substra', 'list', 'objective', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n\n self.assertTrue(json.loads(res) == objective)\n\n def test_list_data_manager(self):\n output = popen(['substra', 'list', 'data_manager', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n\n self.assertTrue(json.loads(res) == data_manager)\n\n def test_list_data(self):\n output = popen(['substra', 'list', 'data_sample', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n\n self.assertTrue(json.loads(res) == data)\n\n def test_list_algo(self):\n output = popen(['substra', 'list', 'algo', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n\n self.assertTrue(json.loads(res) == algo)\n\n def test_list_model(self):\n output = popen(['substra', 'list', 'model', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n\n self.assertTrue(json.loads(res) == model)\n\n def test_list_traintuple(self):\n output = popen(['substra', 'list', 'traintuple', '--config=/tmp/.substra_e2e'], stdout=PIPE).communicate()[0]\n res = output.decode('utf-8')\n\n self.assertTrue(json.loads(res) == traintuple)\n",
"step-ids": [
4,
6,
8,
9,
12
]
}
|
[
4,
6,
8,
9,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WebApiAppConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WebApiAppConfig(AppConfig):
name = 'WebApiApp'
<|reserved_special_token_1|>
from django.apps import AppConfig
class WebApiAppConfig(AppConfig):
name = 'WebApiApp'
|
flexible
|
{
"blob_id": "cc97f70b9d41357f020ea9c59d8b149392a336cc",
"index": 9656,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass WebApiAppConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass WebApiAppConfig(AppConfig):\n name = 'WebApiApp'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass WebApiAppConfig(AppConfig):\n name = 'WebApiApp'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def check_version(ctx, _, value):
"""
Print current version, and check for latest version.
Called via 'geocube --version'
:param ctx: Application context object (click.Context)
:param value: Passed in by Click
:return None
"""
if not value or ctx.resilient_parsing:
return
click.echo(f"geocube v{importlib.metadata.version('geocube')}")
ctx.exit()
def cli_show_version(ctx, _, value):
"""
Print debugging version information.
:param ctx: Application context object (click.Context)
:param value: Passed in by Click
:return None
"""
if not value or ctx.resilient_parsing:
return
show_versions()
ctx.exit()
@group(context_settings=CONTEXT_SETTINGS)
@click.option('-v', '--version', is_flag=True, is_eager=True, expose_value=
False, callback=check_version, help='Show the current version')
@click.option('--show-versions', is_flag=True, is_eager=True, expose_value=
False, callback=cli_show_version, help='Show debugging version information'
)
def geocube():
"""Top-level command and entry point into the GeoCube CLI"""
def _add_subcommands():
"""
Individual commands (and sub-commands) are encapsulated in separate files
under /commands. Collect these command groups, and add them underneath the
top-level command (geocube).
"""
geocube.add_command(cmd_modules.make_geocube.make_geocube)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def check_version(ctx, _, value):
"""
Print current version, and check for latest version.
Called via 'geocube --version'
:param ctx: Application context object (click.Context)
:param value: Passed in by Click
:return None
"""
if not value or ctx.resilient_parsing:
return
click.echo(f"geocube v{importlib.metadata.version('geocube')}")
ctx.exit()
def cli_show_version(ctx, _, value):
"""
Print debugging version information.
:param ctx: Application context object (click.Context)
:param value: Passed in by Click
:return None
"""
if not value or ctx.resilient_parsing:
return
show_versions()
ctx.exit()
@group(context_settings=CONTEXT_SETTINGS)
@click.option('-v', '--version', is_flag=True, is_eager=True, expose_value=
False, callback=check_version, help='Show the current version')
@click.option('--show-versions', is_flag=True, is_eager=True, expose_value=
False, callback=cli_show_version, help='Show debugging version information'
)
def geocube():
"""Top-level command and entry point into the GeoCube CLI"""
def _add_subcommands():
"""
Individual commands (and sub-commands) are encapsulated in separate files
under /commands. Collect these command groups, and add them underneath the
top-level command (geocube).
"""
geocube.add_command(cmd_modules.make_geocube.make_geocube)
_add_subcommands()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CONTEXT_SETTINGS = {'help_option_names': ['-h', '--help'],
'token_normalize_func': lambda x: x.replace('-', '_')}
def check_version(ctx, _, value):
"""
Print current version, and check for latest version.
Called via 'geocube --version'
:param ctx: Application context object (click.Context)
:param value: Passed in by Click
:return None
"""
if not value or ctx.resilient_parsing:
return
click.echo(f"geocube v{importlib.metadata.version('geocube')}")
ctx.exit()
def cli_show_version(ctx, _, value):
"""
Print debugging version information.
:param ctx: Application context object (click.Context)
:param value: Passed in by Click
:return None
"""
if not value or ctx.resilient_parsing:
return
show_versions()
ctx.exit()
@group(context_settings=CONTEXT_SETTINGS)
@click.option('-v', '--version', is_flag=True, is_eager=True, expose_value=
False, callback=check_version, help='Show the current version')
@click.option('--show-versions', is_flag=True, is_eager=True, expose_value=
False, callback=cli_show_version, help='Show debugging version information'
)
def geocube():
"""Top-level command and entry point into the GeoCube CLI"""
def _add_subcommands():
"""
Individual commands (and sub-commands) are encapsulated in separate files
under /commands. Collect these command groups, and add them underneath the
top-level command (geocube).
"""
geocube.add_command(cmd_modules.make_geocube.make_geocube)
_add_subcommands()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import importlib.metadata
import click
from click import group
import geocube.cli.commands as cmd_modules
from geocube import show_versions
CONTEXT_SETTINGS = {'help_option_names': ['-h', '--help'],
'token_normalize_func': lambda x: x.replace('-', '_')}
def check_version(ctx, _, value):
"""
Print current version, and check for latest version.
Called via 'geocube --version'
:param ctx: Application context object (click.Context)
:param value: Passed in by Click
:return None
"""
if not value or ctx.resilient_parsing:
return
click.echo(f"geocube v{importlib.metadata.version('geocube')}")
ctx.exit()
def cli_show_version(ctx, _, value):
"""
Print debugging version information.
:param ctx: Application context object (click.Context)
:param value: Passed in by Click
:return None
"""
if not value or ctx.resilient_parsing:
return
show_versions()
ctx.exit()
@group(context_settings=CONTEXT_SETTINGS)
@click.option('-v', '--version', is_flag=True, is_eager=True, expose_value=
False, callback=check_version, help='Show the current version')
@click.option('--show-versions', is_flag=True, is_eager=True, expose_value=
False, callback=cli_show_version, help='Show debugging version information'
)
def geocube():
"""Top-level command and entry point into the GeoCube CLI"""
def _add_subcommands():
"""
Individual commands (and sub-commands) are encapsulated in separate files
under /commands. Collect these command groups, and add them underneath the
top-level command (geocube).
"""
geocube.add_command(cmd_modules.make_geocube.make_geocube)
_add_subcommands()
<|reserved_special_token_1|>
"""
Main CLI endpoint for GeoCube
"""
import importlib.metadata
import click
from click import group
import geocube.cli.commands as cmd_modules
from geocube import show_versions
CONTEXT_SETTINGS = {
"help_option_names": ["-h", "--help"],
"token_normalize_func": lambda x: x.replace("-", "_"),
}
def check_version(ctx, _, value):
"""
Print current version, and check for latest version.
Called via 'geocube --version'
:param ctx: Application context object (click.Context)
:param value: Passed in by Click
:return None
"""
if not value or ctx.resilient_parsing:
return
click.echo(f"geocube v{importlib.metadata.version('geocube')}")
ctx.exit()
def cli_show_version(ctx, _, value):
"""
Print debugging version information.
:param ctx: Application context object (click.Context)
:param value: Passed in by Click
:return None
"""
if not value or ctx.resilient_parsing:
return
show_versions()
ctx.exit()
@group(context_settings=CONTEXT_SETTINGS)
@click.option(
"-v",
"--version",
is_flag=True,
is_eager=True,
expose_value=False,
callback=check_version,
help="Show the current version",
)
@click.option(
"--show-versions",
is_flag=True,
is_eager=True,
expose_value=False,
callback=cli_show_version,
help="Show debugging version information",
)
def geocube():
"""Top-level command and entry point into the GeoCube CLI"""
def _add_subcommands():
"""
Individual commands (and sub-commands) are encapsulated in separate files
under /commands. Collect these command groups, and add them underneath the
top-level command (geocube).
"""
geocube.add_command(cmd_modules.make_geocube.make_geocube)
_add_subcommands()
|
flexible
|
{
"blob_id": "0964121d88fad2906311de7532eac52ff784fff6",
"index": 8306,
"step-1": "<mask token>\n\n\ndef check_version(ctx, _, value):\n \"\"\"\n Print current version, and check for latest version.\n\n Called via 'geocube --version'\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n ctx.exit()\n\n\ndef cli_show_version(ctx, _, value):\n \"\"\"\n Print debugging version information.\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n show_versions()\n ctx.exit()\n\n\n@group(context_settings=CONTEXT_SETTINGS)\[email protected]('-v', '--version', is_flag=True, is_eager=True, expose_value=\n False, callback=check_version, help='Show the current version')\[email protected]('--show-versions', is_flag=True, is_eager=True, expose_value=\n False, callback=cli_show_version, help='Show debugging version information'\n )\ndef geocube():\n \"\"\"Top-level command and entry point into the GeoCube CLI\"\"\"\n\n\ndef _add_subcommands():\n \"\"\"\n Individual commands (and sub-commands) are encapsulated in separate files\n under /commands. Collect these command groups, and add them underneath the\n top-level command (geocube).\n \"\"\"\n geocube.add_command(cmd_modules.make_geocube.make_geocube)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_version(ctx, _, value):\n \"\"\"\n Print current version, and check for latest version.\n\n Called via 'geocube --version'\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n ctx.exit()\n\n\ndef cli_show_version(ctx, _, value):\n \"\"\"\n Print debugging version information.\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n show_versions()\n ctx.exit()\n\n\n@group(context_settings=CONTEXT_SETTINGS)\[email protected]('-v', '--version', is_flag=True, is_eager=True, expose_value=\n False, callback=check_version, help='Show the current version')\[email protected]('--show-versions', is_flag=True, is_eager=True, expose_value=\n False, callback=cli_show_version, help='Show debugging version information'\n )\ndef geocube():\n \"\"\"Top-level command and entry point into the GeoCube CLI\"\"\"\n\n\ndef _add_subcommands():\n \"\"\"\n Individual commands (and sub-commands) are encapsulated in separate files\n under /commands. Collect these command groups, and add them underneath the\n top-level command (geocube).\n \"\"\"\n geocube.add_command(cmd_modules.make_geocube.make_geocube)\n\n\n_add_subcommands()\n",
"step-3": "<mask token>\nCONTEXT_SETTINGS = {'help_option_names': ['-h', '--help'],\n 'token_normalize_func': lambda x: x.replace('-', '_')}\n\n\ndef check_version(ctx, _, value):\n \"\"\"\n Print current version, and check for latest version.\n\n Called via 'geocube --version'\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n ctx.exit()\n\n\ndef cli_show_version(ctx, _, value):\n \"\"\"\n Print debugging version information.\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n show_versions()\n ctx.exit()\n\n\n@group(context_settings=CONTEXT_SETTINGS)\[email protected]('-v', '--version', is_flag=True, is_eager=True, expose_value=\n False, callback=check_version, help='Show the current version')\[email protected]('--show-versions', is_flag=True, is_eager=True, expose_value=\n False, callback=cli_show_version, help='Show debugging version information'\n )\ndef geocube():\n \"\"\"Top-level command and entry point into the GeoCube CLI\"\"\"\n\n\ndef _add_subcommands():\n \"\"\"\n Individual commands (and sub-commands) are encapsulated in separate files\n under /commands. Collect these command groups, and add them underneath the\n top-level command (geocube).\n \"\"\"\n geocube.add_command(cmd_modules.make_geocube.make_geocube)\n\n\n_add_subcommands()\n",
"step-4": "<mask token>\nimport importlib.metadata\nimport click\nfrom click import group\nimport geocube.cli.commands as cmd_modules\nfrom geocube import show_versions\nCONTEXT_SETTINGS = {'help_option_names': ['-h', '--help'],\n 'token_normalize_func': lambda x: x.replace('-', '_')}\n\n\ndef check_version(ctx, _, value):\n \"\"\"\n Print current version, and check for latest version.\n\n Called via 'geocube --version'\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n ctx.exit()\n\n\ndef cli_show_version(ctx, _, value):\n \"\"\"\n Print debugging version information.\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n show_versions()\n ctx.exit()\n\n\n@group(context_settings=CONTEXT_SETTINGS)\[email protected]('-v', '--version', is_flag=True, is_eager=True, expose_value=\n False, callback=check_version, help='Show the current version')\[email protected]('--show-versions', is_flag=True, is_eager=True, expose_value=\n False, callback=cli_show_version, help='Show debugging version information'\n )\ndef geocube():\n \"\"\"Top-level command and entry point into the GeoCube CLI\"\"\"\n\n\ndef _add_subcommands():\n \"\"\"\n Individual commands (and sub-commands) are encapsulated in separate files\n under /commands. Collect these command groups, and add them underneath the\n top-level command (geocube).\n \"\"\"\n geocube.add_command(cmd_modules.make_geocube.make_geocube)\n\n\n_add_subcommands()\n",
"step-5": "\"\"\"\nMain CLI endpoint for GeoCube\n\"\"\"\nimport importlib.metadata\n\nimport click\nfrom click import group\n\nimport geocube.cli.commands as cmd_modules\nfrom geocube import show_versions\n\nCONTEXT_SETTINGS = {\n \"help_option_names\": [\"-h\", \"--help\"],\n \"token_normalize_func\": lambda x: x.replace(\"-\", \"_\"),\n}\n\n\ndef check_version(ctx, _, value):\n \"\"\"\n Print current version, and check for latest version.\n\n Called via 'geocube --version'\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n\n ctx.exit()\n\n\ndef cli_show_version(ctx, _, value):\n \"\"\"\n Print debugging version information.\n\n :param ctx: Application context object (click.Context)\n :param value: Passed in by Click\n :return None\n \"\"\"\n if not value or ctx.resilient_parsing:\n return\n\n show_versions()\n\n ctx.exit()\n\n\n@group(context_settings=CONTEXT_SETTINGS)\[email protected](\n \"-v\",\n \"--version\",\n is_flag=True,\n is_eager=True,\n expose_value=False,\n callback=check_version,\n help=\"Show the current version\",\n)\[email protected](\n \"--show-versions\",\n is_flag=True,\n is_eager=True,\n expose_value=False,\n callback=cli_show_version,\n help=\"Show debugging version information\",\n)\ndef geocube():\n \"\"\"Top-level command and entry point into the GeoCube CLI\"\"\"\n\n\ndef _add_subcommands():\n \"\"\"\n Individual commands (and sub-commands) are encapsulated in separate files\n under /commands. Collect these command groups, and add them underneath the\n top-level command (geocube).\n \"\"\"\n geocube.add_command(cmd_modules.make_geocube.make_geocube)\n\n\n_add_subcommands()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class Test_PW_Functions(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_pw_long_enough_min(self):
sample_pass = 'abcdadcaabc'
expected_result = False
result = validate_pw_long(sample_pass)
self.assertEqual(expected_result, result)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test_PW_Functions(unittest.TestCase):
def test_pw_not_long_enough_min(self):
sample_pass = 'abcd'
expected_result = False
result = validate_pw_long(sample_pass)
self.assertEqual(expected_result, result)
<|reserved_special_token_0|>
def test_pw_long_enough_min(self):
sample_pass = 'abcdadcaabc'
expected_result = False
result = validate_pw_long(sample_pass)
self.assertEqual(expected_result, result)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test_PW_Functions(unittest.TestCase):
def test_pw_not_long_enough_min(self):
sample_pass = 'abcd'
expected_result = False
result = validate_pw_long(sample_pass)
self.assertEqual(expected_result, result)
def test_pw_just_long_enough_min(self):
sample_pass = 'abcdadca'
expected_result = False
result = validate_pw_long(sample_pass)
self.assertEqual(expected_result, result)
def test_pw_long_enough_min(self):
sample_pass = 'abcdadcaabc'
expected_result = False
result = validate_pw_long(sample_pass)
self.assertEqual(expected_result, result)
<|reserved_special_token_1|>
import unittest
from validate_pw_complexity import *
class Test_PW_Functions(unittest.TestCase):
def test_pw_not_long_enough_min(self):
sample_pass = 'abcd'
expected_result = False
result = validate_pw_long(sample_pass)
self.assertEqual(expected_result, result)
def test_pw_just_long_enough_min(self):
sample_pass = 'abcdadca'
expected_result = False
result = validate_pw_long(sample_pass)
self.assertEqual(expected_result, result)
def test_pw_long_enough_min(self):
sample_pass = 'abcdadcaabc'
expected_result = False
result = validate_pw_long(sample_pass)
self.assertEqual(expected_result, result)
<|reserved_special_token_1|>
import unittest
from validate_pw_complexity import *
class Test_PW_Functions(unittest.TestCase):
def test_pw_not_long_enough_min(self):
sample_pass ="abcd"
expected_result = False
result = validate_pw_long(sample_pass)
self.assertEqual(expected_result, result)
def test_pw_just_long_enough_min(self):
sample_pass = "abcdadca"
expected_result = False
result = validate_pw_long(sample_pass)
self.assertEqual(expected_result, result)
def test_pw_long_enough_min(self):
sample_pass = "abcdadcaabc"
expected_result = False
result = validate_pw_long(sample_pass)
self.assertEqual(expected_result, result)
|
flexible
|
{
"blob_id": "dc7d75bf43f1ba55673a43f863dd08e99a1c0e0f",
"index": 4820,
"step-1": "<mask token>\n\n\nclass Test_PW_Functions(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_pw_long_enough_min(self):\n sample_pass = 'abcdadcaabc'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n",
"step-2": "<mask token>\n\n\nclass Test_PW_Functions(unittest.TestCase):\n\n def test_pw_not_long_enough_min(self):\n sample_pass = 'abcd'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n <mask token>\n\n def test_pw_long_enough_min(self):\n sample_pass = 'abcdadcaabc'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n",
"step-3": "<mask token>\n\n\nclass Test_PW_Functions(unittest.TestCase):\n\n def test_pw_not_long_enough_min(self):\n sample_pass = 'abcd'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n\n def test_pw_just_long_enough_min(self):\n sample_pass = 'abcdadca'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n\n def test_pw_long_enough_min(self):\n sample_pass = 'abcdadcaabc'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n",
"step-4": "import unittest\nfrom validate_pw_complexity import *\n\n\nclass Test_PW_Functions(unittest.TestCase):\n\n def test_pw_not_long_enough_min(self):\n sample_pass = 'abcd'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n\n def test_pw_just_long_enough_min(self):\n sample_pass = 'abcdadca'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n\n def test_pw_long_enough_min(self):\n sample_pass = 'abcdadcaabc'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n",
"step-5": "import unittest\n\nfrom validate_pw_complexity import *\n\nclass Test_PW_Functions(unittest.TestCase):\n\n def test_pw_not_long_enough_min(self):\n sample_pass =\"abcd\"\n expected_result = False\n\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n\n def test_pw_just_long_enough_min(self):\n sample_pass = \"abcdadca\"\n expected_result = False\n\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n\n def test_pw_long_enough_min(self):\n sample_pass = \"abcdadcaabc\"\n expected_result = False\n\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Communication(Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def receive(self, length):
if not isinstance(length, int):
raise Exception('Receive length must be an integer.')
return self.port.read(length)
<|reserved_special_token_0|>
@property
def port(self):
return self._port
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Communication(Module):
def __init__(self, parent, port_name, baud_rate):
self.parent = parent
if not isinstance(port_name, str):
raise Exception('Port name must be a string.')
if not isinstance(baud_rate, int):
raise Exception('Baud rate must be an integer.')
if baud_rate not in BAUD_RATES:
raise Exception(
'%d is not a valid baud rate; check the SCI Specification for acceptable values.'
% baud_rate)
self.port = serial.Serial(port_name, baud_rate)
def send(self, data):
if not isinstance(data, str):
raise Exception('Data must be a string.')
self.port.write(data)
def receive(self, length):
if not isinstance(length, int):
raise Exception('Receive length must be an integer.')
return self.port.read(length)
<|reserved_special_token_0|>
@property
def port(self):
return self._port
@port.setter
def port(self, value):
self._port = value
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Communication(Module):
def __init__(self, parent, port_name, baud_rate):
self.parent = parent
if not isinstance(port_name, str):
raise Exception('Port name must be a string.')
if not isinstance(baud_rate, int):
raise Exception('Baud rate must be an integer.')
if baud_rate not in BAUD_RATES:
raise Exception(
'%d is not a valid baud rate; check the SCI Specification for acceptable values.'
% baud_rate)
self.port = serial.Serial(port_name, baud_rate)
def send(self, data):
if not isinstance(data, str):
raise Exception('Data must be a string.')
self.port.write(data)
def receive(self, length):
if not isinstance(length, int):
raise Exception('Receive length must be an integer.')
return self.port.read(length)
_port = None
@property
def port(self):
return self._port
@port.setter
def port(self, value):
self._port = value
<|reserved_special_token_1|>
from common import *
import serial
CMD_BAUD = chr(129)
BAUD_RATES = [300, 600, 1200, 2400, 4800, 9600, 14400, 19200, 28800, 38400,
57600, 115200]
class Communication(Module):
def __init__(self, parent, port_name, baud_rate):
self.parent = parent
if not isinstance(port_name, str):
raise Exception('Port name must be a string.')
if not isinstance(baud_rate, int):
raise Exception('Baud rate must be an integer.')
if baud_rate not in BAUD_RATES:
raise Exception(
'%d is not a valid baud rate; check the SCI Specification for acceptable values.'
% baud_rate)
self.port = serial.Serial(port_name, baud_rate)
def send(self, data):
if not isinstance(data, str):
raise Exception('Data must be a string.')
self.port.write(data)
def receive(self, length):
if not isinstance(length, int):
raise Exception('Receive length must be an integer.')
return self.port.read(length)
_port = None
@property
def port(self):
return self._port
@port.setter
def port(self, value):
self._port = value
<|reserved_special_token_1|>
from common import *
import serial
CMD_BAUD = chr(129)
BAUD_RATES = [300, 600, 1200, 2400, 4800, 9600, 14400, 19200, 28800, 38400, 57600, 115200]
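# Baud rates accepted by the SCI (see the SCI Specification).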
class Communication(Module):
def __init__(self, parent, port_name, baud_rate):
self.parent = parent
if not isinstance(port_name, str):
raise Exception("Port name must be a string.")
if not isinstance(baud_rate, int):
raise Exception("Baud rate must be an integer.")
if baud_rate not in BAUD_RATES:
raise Exception("%d is not a valid baud rate; check the SCI Specification for acceptable values." % baud_rate)
self.port = serial.Serial(port_name, baud_rate)
def send(self, data):
if not isinstance(data, str):
raise Exception("Data must be a string.")
self.port.write(data)
def receive(self, length):
if not isinstance(length, int):
raise Exception("Receive length must be an integer.")
return self.port.read(length)
_port = None
@property
def port(self):
return self._port
@port.setter
def port(self, value):
self._port = value
|
flexible
|
{
"blob_id": "eab5bf4776582349615ad56ee1ed93bc8f868565",
"index": 768,
"step-1": "<mask token>\n\n\nclass Communication(Module):\n <mask token>\n <mask token>\n\n def receive(self, length):\n if not isinstance(length, int):\n raise Exception('Receive length must be an integer.')\n return self.port.read(length)\n <mask token>\n\n @property\n def port(self):\n return self._port\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Communication(Module):\n\n def __init__(self, parent, port_name, baud_rate):\n self.parent = parent\n if not isinstance(port_name, str):\n raise Exception('Port name must be a string.')\n if not isinstance(baud_rate, int):\n raise Exception('Baud rate must be an integer.')\n if baud_rate not in BAUD_RATES:\n raise Exception(\n '%d is not a valid baud rate; check the SCI Specification for acceptable values.'\n % baud_rate)\n self.port = serial.Serial(port_name, baud_rate)\n\n def send(self, data):\n if not isinstance(data, str):\n raise Exception('Data must be a string.')\n self.port.write(data)\n\n def receive(self, length):\n if not isinstance(length, int):\n raise Exception('Receive length must be an integer.')\n return self.port.read(length)\n <mask token>\n\n @property\n def port(self):\n return self._port\n\n @port.setter\n def port(self, value):\n self._port = value\n",
"step-3": "<mask token>\n\n\nclass Communication(Module):\n\n def __init__(self, parent, port_name, baud_rate):\n self.parent = parent\n if not isinstance(port_name, str):\n raise Exception('Port name must be a string.')\n if not isinstance(baud_rate, int):\n raise Exception('Baud rate must be an integer.')\n if baud_rate not in BAUD_RATES:\n raise Exception(\n '%d is not a valid baud rate; check the SCI Specification for acceptable values.'\n % baud_rate)\n self.port = serial.Serial(port_name, baud_rate)\n\n def send(self, data):\n if not isinstance(data, str):\n raise Exception('Data must be a string.')\n self.port.write(data)\n\n def receive(self, length):\n if not isinstance(length, int):\n raise Exception('Receive length must be an integer.')\n return self.port.read(length)\n _port = None\n\n @property\n def port(self):\n return self._port\n\n @port.setter\n def port(self, value):\n self._port = value\n",
"step-4": "from common import *\nimport serial\nCMD_BAUD = chr(129)\nBAUD_RATES = [300, 600, 1200, 2400, 4800, 9600, 14400, 19200, 28800, 38400,\n 57600, 115200]\n\n\nclass Communication(Module):\n\n def __init__(self, parent, port_name, baud_rate):\n self.parent = parent\n if not isinstance(port_name, str):\n raise Exception('Port name must be a string.')\n if not isinstance(baud_rate, int):\n raise Exception('Baud rate must be an integer.')\n if baud_rate not in BAUD_RATES:\n raise Exception(\n '%d is not a valid baud rate; check the SCI Specification for acceptable values.'\n % baud_rate)\n self.port = serial.Serial(port_name, baud_rate)\n\n def send(self, data):\n if not isinstance(data, str):\n raise Exception('Data must be a string.')\n self.port.write(data)\n\n def receive(self, length):\n if not isinstance(length, int):\n raise Exception('Receive length must be an integer.')\n return self.port.read(length)\n _port = None\n\n @property\n def port(self):\n return self._port\n\n @port.setter\n def port(self, value):\n self._port = value\n",
"step-5": "from common import *\n\nimport serial\n\nCMD_BAUD = chr(129)\n\nBAUD_RATES = [300, 600, 1200, 2400, 4800, 9600, 14400, 19200, 28800, 38400, 57600, 115200]\n\nclass Communication(Module):\n def __init__(self, parent, port_name, baud_rate):\n self.parent = parent\n\n if not isinstance(port_name, str):\n raise Exception(\"Port name must be a string.\")\n if not isinstance(baud_rate, int):\n raise Exception(\"Baud rate must be an integer.\")\n if baud_rate not in BAUD_RATES:\n raise Exception(\"%d is not a valid baud rate; check the SCI Specification for acceptable values.\" % baud_rate)\n\n self.port = serial.Serial(port_name, baud_rate)\n\n def send(self, data):\n if not isinstance(data, str):\n raise Exception(\"Data must be a string.\")\n self.port.write(data)\n\n def receive(self, length):\n if not isinstance(length, int):\n raise Exception(\"Receive length must be an integer.\")\n return self.port.read(length)\n\n _port = None\n @property\n def port(self):\n return self._port\n @port.setter\n def port(self, value):\n self._port = value\n",
"step-ids": [
3,
6,
7,
9,
10
]
}
|
[
3,
6,
7,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(f'The approximate volume is {approxVolume:.2f} liters')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
width = float(input('Enter the width of the tire in mm (ex 205): '))
aspectRatio = float(input('Enter the aspect ratio of the tire (ex 60): '))
diameter = float(input('Enter the diameter of the wheel in inches (ex 15): '))
approxVolume = pi * width ** 2 * aspectRatio * (width * aspectRatio + 2540 *
diameter) / 10000000000
print(f'The approximate volume is {approxVolume:.2f} liters')
<|reserved_special_token_1|>
from math import pi
width = float(input('Enter the width of the tire in mm (ex 205): '))
aspectRatio = float(input('Enter the aspect ratio of the tire (ex 60): '))
diameter = float(input('Enter the diameter of the wheel in inches (ex 15): '))
approxVolume = pi * width ** 2 * aspectRatio * (width * aspectRatio + 2540 *
diameter) / 10000000000
print(f'The approximate volume is {approxVolume:.2f} liters')
<|reserved_special_token_1|>
from math import pi
width = float(input("Enter the width of the tire in mm (ex 205): "))
aspectRatio = float(input("Enter the aspect ratio of the tire (ex 60): "))
diameter = float(input("Enter the diameter of the wheel in inches (ex 15): "))
approxVolume = (pi * (width ** 2) * aspectRatio * ((width * aspectRatio) + (2540 * diameter)))/10000000000
print(f"The apporximate volume is {approxVolume:.2f} liters")
|
flexible
|
{
"blob_id": "65752c8ac50205df0fea105123935110e4a30aba",
"index": 7913,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(f'The apporximate volume is {approxVolume:.2f} liters')\n",
"step-3": "<mask token>\nwidth = float(input('Enter the width of the tire in mm (ex 205): '))\naspectRatio = float(input('Enter the aspect ratio of the tire (ex 60): '))\ndiameter = float(input('Enter the diameter of the wheel in inches (ex 15): '))\napproxVolume = pi * width ** 2 * aspectRatio * (width * aspectRatio + 2540 *\n diameter) / 10000000000\nprint(f'The apporximate volume is {approxVolume:.2f} liters')\n",
"step-4": "from math import pi\nwidth = float(input('Enter the width of the tire in mm (ex 205): '))\naspectRatio = float(input('Enter the aspect ratio of the tire (ex 60): '))\ndiameter = float(input('Enter the diameter of the wheel in inches (ex 15): '))\napproxVolume = pi * width ** 2 * aspectRatio * (width * aspectRatio + 2540 *\n diameter) / 10000000000\nprint(f'The apporximate volume is {approxVolume:.2f} liters')\n",
"step-5": "from math import pi\n\nwidth = float(input(\"Enter the width of the tire in mm (ex 205): \"))\naspectRatio = float(input(\"Enter the aspect ratio of the tire (ex 60): \"))\ndiameter = float(input(\"Enter the diameter of the wheel in inches (ex 15): \"))\n\napproxVolume = (pi * (width ** 2) * aspectRatio * ((width * aspectRatio) + (2540 * diameter)))/10000000000\n\nprint(f\"The apporximate volume is {approxVolume:.2f} liters\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import glob
import os
import partition
import pickle
import matplotlib.pyplot as plt
import numpy as np
from Cluster import fishermans_algorithm
import argparse
parser = argparse.ArgumentParser()
plt.ion()
parser.add_argument("--fish", help="flag for using fisherman's algorithm")
parser.add_argument("--heat", help="flag for using heatmap")
parser.add_argument("--object", help="flag for dumping the clusters")
args = parser.parse_args()
print(args)
print(args.fish)
print(args.object)
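# Process each pickled cell table found under ./examples.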
for file in glob.glob("./examples/*.p"):
print(file)
name = file[11:-2]
recover = open("./examples/" + name + ".p", "rb")
input_list = pickle.load(recover)
print("Loaded ...")
cancer_cells = []
T_cells = []
cyto_T_cells = []
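    # Columns 0-3 are the cell record; columns 4-6 flag cancer, T and cytotoxic T cells. Non-numeric rows are skipped.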
for i, row in enumerate(input_list):
try:
row = [int(x) for x in row]
except ValueError:
continue
if row[4] > 0:
cancer_cells.append([row[0], row[1], row[2], row[3]])
if row[5] > 0:
T_cells.append([row[0], row[1], row[2], row[3]])
if row[6] > 0:
cyto_T_cells.append([row[0], row[1], row[2], row[3]])
cancer_cells = np.asarray(cancer_cells)
T_cells = np.asarray(T_cells)
cyto_T_cells = np.asarray(cyto_T_cells)
print("Separated ...")
t = 25
partitioned_cancer_cells, windows, w, h = partition.partition(cancer_cells, tile_size=t, to_list=True)
print("Cancer cells partitioned ...")
if args.heat:
spatial_distribution = np.zeros_like(partitioned_cancer_cells)
for i in range(t):
for j in range(t):
spatial_distribution[i][j] = len(partitioned_cancer_cells[i][j])
with open("./inputs/spatial/" + name + ".txt", "w", newline="") as dest:
dest.write(str(spatial_distribution))
if args.fish:
result = fishermans_algorithm(partitioned_cancer_cells, (t, t), windows, w, h)
print("Result retrieved ...")
if args.object:
with open("./inputs/object/" + name + ".p", "wb") as dest:
pickle.dump(result, dest)
dups = set()
histogram = np.zeros(21, dtype=np.uint32)
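        # Cluster-size histogram: bins for sizes 1-20, plus a final bin for clusters larger than 20 cells.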
for cluster in result:
dups.add(cluster)
total_cluster_cells = 0
clusters_sum = 0
dups_length = len(dups)
for i in dups:
value = len(i.cells)
clusters_sum += value
total_cluster_cells += len(i.cells)
if value > 20:
histogram[20] += 1
else:
histogram[value - 1] += 1
print("Histogram retrieved ...")
clusters_avg = clusters_sum / dups_length
assert(total_cluster_cells == len(cancer_cells))
y = np.array(histogram)
x = np.arange(21) + 1
plt.bar(x, y)
plt.xlabel("Value")
plt.ylabel("Frequency")
# plt.savefig("./inputs/" + name + ".png", bbox_inches='tight')
plt.show()
plt.close()
if args.object:
with open("./inputs/object/" + name + ".txt", "w", newline="") as dest:
dest.write("Average size of cluster: " + str(clusters_avg) + "\n")
dest.write("Number of clusters: " + str(len(dups)) + "\n")
dest.write("Total number of cells: " + str(total_cluster_cells) + "\n")
dest.write("Cluster counts: " + "\n")
for i, x in enumerate(histogram):
dest.write(str(i) + ", " + str(x) + "\n")
os.system('say "All pickle files done in this batch."')
# End of file
|
normal
|
{
"blob_id": "805bc144a4945b46b398853e79ded17370ada380",
"index": 3940,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.ion()\nparser.add_argument('--fish', help=\"flag for using fisherman's algorithm\")\nparser.add_argument('--heat', help='flag for using heatmap')\nparser.add_argument('--object', help='flag for dumping the clusters')\n<mask token>\nprint(args)\nprint(args.fish)\nprint(args.object)\nfor file in glob.glob('./examples/*.p'):\n print(file)\n name = file[11:-2]\n recover = open('./examples/' + name + '.p', 'rb')\n input_list = pickle.load(recover)\n print('Loaded ...')\n cancer_cells = []\n T_cells = []\n cyto_T_cells = []\n for i, row in enumerate(input_list):\n try:\n row = [int(x) for x in row]\n except ValueError:\n continue\n if row[4] > 0:\n cancer_cells.append([row[0], row[1], row[2], row[3]])\n if row[5] > 0:\n T_cells.append([row[0], row[1], row[2], row[3]])\n if row[6] > 0:\n cyto_T_cells.append([row[0], row[1], row[2], row[3]])\n cancer_cells = np.asarray(cancer_cells)\n T_cells = np.asarray(T_cells)\n cyto_T_cells = np.asarray(cyto_T_cells)\n print('Separated ...')\n t = 25\n partitioned_cancer_cells, windows, w, h = partition.partition(cancer_cells,\n tile_size=t, to_list=True)\n print('Cancer cells partitioned ...')\n if args.heat:\n spatial_distribution = np.zeros_like(partitioned_cancer_cells)\n for i in range(t):\n for j in range(t):\n spatial_distribution[i][j] = len(partitioned_cancer_cells[i][j]\n )\n with open('./inputs/spatial/' + name + '.txt', 'w', newline=''\n ) as dest:\n dest.write(str(spatial_distribution))\n if args.fish:\n result = fishermans_algorithm(partitioned_cancer_cells, (t, t),\n windows, w, h)\n print('Result retrieved ...')\n if args.object:\n with open('./inputs/object/' + name + '.p', 'wb') as dest:\n pickle.dump(result, dest)\n dups = set()\n histogram = np.zeros(21, dtype=np.uint32)\n for cluster in result:\n dups.add(cluster)\n total_cluster_cells = 0\n clusters_sum = 0\n dups_length = len(dups)\n for i in dups:\n value = len(i.cells)\n clusters_sum += value\n total_cluster_cells += len(i.cells)\n if value > 20:\n histogram[20] += 1\n else:\n histogram[value - 1] += 1\n print('Histogram retrieved ...')\n clusters_avg = clusters_sum / dups_length\n assert total_cluster_cells == len(cancer_cells)\n y = np.array(histogram)\n x = np.arange(21) + 1\n plt.bar(x, y)\n plt.xlabel('Value')\n plt.ylabel('Frequency')\n plt.show()\n plt.close()\n if args.object:\n with open('./inputs/object/' + name + '.txt', 'w', newline=''\n ) as dest:\n dest.write('Average size of cluster: ' + str(clusters_avg) +\n '\\n')\n dest.write('Number of clusters: ' + str(len(dups)) + '\\n')\n dest.write('Total number of cells: ' + str(\n total_cluster_cells) + '\\n')\n dest.write('Cluster counts: ' + '\\n')\n for i, x in enumerate(histogram):\n dest.write(str(i) + ', ' + str(x) + '\\n')\nos.system('say \"All pickle files done in this batch.\"')\n",
"step-3": "<mask token>\nparser = argparse.ArgumentParser()\nplt.ion()\nparser.add_argument('--fish', help=\"flag for using fisherman's algorithm\")\nparser.add_argument('--heat', help='flag for using heatmap')\nparser.add_argument('--object', help='flag for dumping the clusters')\nargs = parser.parse_args()\nprint(args)\nprint(args.fish)\nprint(args.object)\nfor file in glob.glob('./examples/*.p'):\n print(file)\n name = file[11:-2]\n recover = open('./examples/' + name + '.p', 'rb')\n input_list = pickle.load(recover)\n print('Loaded ...')\n cancer_cells = []\n T_cells = []\n cyto_T_cells = []\n for i, row in enumerate(input_list):\n try:\n row = [int(x) for x in row]\n except ValueError:\n continue\n if row[4] > 0:\n cancer_cells.append([row[0], row[1], row[2], row[3]])\n if row[5] > 0:\n T_cells.append([row[0], row[1], row[2], row[3]])\n if row[6] > 0:\n cyto_T_cells.append([row[0], row[1], row[2], row[3]])\n cancer_cells = np.asarray(cancer_cells)\n T_cells = np.asarray(T_cells)\n cyto_T_cells = np.asarray(cyto_T_cells)\n print('Separated ...')\n t = 25\n partitioned_cancer_cells, windows, w, h = partition.partition(cancer_cells,\n tile_size=t, to_list=True)\n print('Cancer cells partitioned ...')\n if args.heat:\n spatial_distribution = np.zeros_like(partitioned_cancer_cells)\n for i in range(t):\n for j in range(t):\n spatial_distribution[i][j] = len(partitioned_cancer_cells[i][j]\n )\n with open('./inputs/spatial/' + name + '.txt', 'w', newline=''\n ) as dest:\n dest.write(str(spatial_distribution))\n if args.fish:\n result = fishermans_algorithm(partitioned_cancer_cells, (t, t),\n windows, w, h)\n print('Result retrieved ...')\n if args.object:\n with open('./inputs/object/' + name + '.p', 'wb') as dest:\n pickle.dump(result, dest)\n dups = set()\n histogram = np.zeros(21, dtype=np.uint32)\n for cluster in result:\n dups.add(cluster)\n total_cluster_cells = 0\n clusters_sum = 0\n dups_length = len(dups)\n for i in dups:\n value = len(i.cells)\n clusters_sum += value\n total_cluster_cells += len(i.cells)\n if value > 20:\n histogram[20] += 1\n else:\n histogram[value - 1] += 1\n print('Histogram retrieved ...')\n clusters_avg = clusters_sum / dups_length\n assert total_cluster_cells == len(cancer_cells)\n y = np.array(histogram)\n x = np.arange(21) + 1\n plt.bar(x, y)\n plt.xlabel('Value')\n plt.ylabel('Frequency')\n plt.show()\n plt.close()\n if args.object:\n with open('./inputs/object/' + name + '.txt', 'w', newline=''\n ) as dest:\n dest.write('Average size of cluster: ' + str(clusters_avg) +\n '\\n')\n dest.write('Number of clusters: ' + str(len(dups)) + '\\n')\n dest.write('Total number of cells: ' + str(\n total_cluster_cells) + '\\n')\n dest.write('Cluster counts: ' + '\\n')\n for i, x in enumerate(histogram):\n dest.write(str(i) + ', ' + str(x) + '\\n')\nos.system('say \"All pickle files done in this batch.\"')\n",
"step-4": "import glob\nimport os\nimport partition\nimport pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom Cluster import fishermans_algorithm\nimport argparse\nparser = argparse.ArgumentParser()\nplt.ion()\nparser.add_argument('--fish', help=\"flag for using fisherman's algorithm\")\nparser.add_argument('--heat', help='flag for using heatmap')\nparser.add_argument('--object', help='flag for dumping the clusters')\nargs = parser.parse_args()\nprint(args)\nprint(args.fish)\nprint(args.object)\nfor file in glob.glob('./examples/*.p'):\n print(file)\n name = file[11:-2]\n recover = open('./examples/' + name + '.p', 'rb')\n input_list = pickle.load(recover)\n print('Loaded ...')\n cancer_cells = []\n T_cells = []\n cyto_T_cells = []\n for i, row in enumerate(input_list):\n try:\n row = [int(x) for x in row]\n except ValueError:\n continue\n if row[4] > 0:\n cancer_cells.append([row[0], row[1], row[2], row[3]])\n if row[5] > 0:\n T_cells.append([row[0], row[1], row[2], row[3]])\n if row[6] > 0:\n cyto_T_cells.append([row[0], row[1], row[2], row[3]])\n cancer_cells = np.asarray(cancer_cells)\n T_cells = np.asarray(T_cells)\n cyto_T_cells = np.asarray(cyto_T_cells)\n print('Separated ...')\n t = 25\n partitioned_cancer_cells, windows, w, h = partition.partition(cancer_cells,\n tile_size=t, to_list=True)\n print('Cancer cells partitioned ...')\n if args.heat:\n spatial_distribution = np.zeros_like(partitioned_cancer_cells)\n for i in range(t):\n for j in range(t):\n spatial_distribution[i][j] = len(partitioned_cancer_cells[i][j]\n )\n with open('./inputs/spatial/' + name + '.txt', 'w', newline=''\n ) as dest:\n dest.write(str(spatial_distribution))\n if args.fish:\n result = fishermans_algorithm(partitioned_cancer_cells, (t, t),\n windows, w, h)\n print('Result retrieved ...')\n if args.object:\n with open('./inputs/object/' + name + '.p', 'wb') as dest:\n pickle.dump(result, dest)\n dups = set()\n histogram = np.zeros(21, dtype=np.uint32)\n for cluster in result:\n dups.add(cluster)\n total_cluster_cells = 0\n clusters_sum = 0\n dups_length = len(dups)\n for i in dups:\n value = len(i.cells)\n clusters_sum += value\n total_cluster_cells += len(i.cells)\n if value > 20:\n histogram[20] += 1\n else:\n histogram[value - 1] += 1\n print('Histogram retrieved ...')\n clusters_avg = clusters_sum / dups_length\n assert total_cluster_cells == len(cancer_cells)\n y = np.array(histogram)\n x = np.arange(21) + 1\n plt.bar(x, y)\n plt.xlabel('Value')\n plt.ylabel('Frequency')\n plt.show()\n plt.close()\n if args.object:\n with open('./inputs/object/' + name + '.txt', 'w', newline=''\n ) as dest:\n dest.write('Average size of cluster: ' + str(clusters_avg) +\n '\\n')\n dest.write('Number of clusters: ' + str(len(dups)) + '\\n')\n dest.write('Total number of cells: ' + str(\n total_cluster_cells) + '\\n')\n dest.write('Cluster counts: ' + '\\n')\n for i, x in enumerate(histogram):\n dest.write(str(i) + ', ' + str(x) + '\\n')\nos.system('say \"All pickle files done in this batch.\"')\n",
"step-5": "import glob\nimport os\nimport partition\nimport pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom Cluster import fishermans_algorithm\nimport argparse\n\nparser = argparse.ArgumentParser()\n\nplt.ion()\n\nparser.add_argument(\"--fish\", help=\"flag for using fisherman's algorithm\")\nparser.add_argument(\"--heat\", help=\"flag for using heatmap\")\nparser.add_argument(\"--object\", help=\"flag for dumping the clusters\")\nargs = parser.parse_args()\n\nprint(args)\nprint(args.fish)\nprint(args.object)\n\nfor file in glob.glob(\"./examples/*.p\"):\n print(file)\n name = file[11:-2]\n recover = open(\"./examples/\" + name + \".p\", \"rb\")\n input_list = pickle.load(recover)\n print(\"Loaded ...\")\n\n cancer_cells = []\n T_cells = []\n cyto_T_cells = []\n\n for i, row in enumerate(input_list):\n try:\n row = [int(x) for x in row]\n except ValueError:\n continue\n\n if row[4] > 0:\n cancer_cells.append([row[0], row[1], row[2], row[3]])\n if row[5] > 0:\n T_cells.append([row[0], row[1], row[2], row[3]])\n if row[6] > 0:\n cyto_T_cells.append([row[0], row[1], row[2], row[3]])\n\n cancer_cells = np.asarray(cancer_cells)\n T_cells = np.asarray(T_cells)\n cyto_T_cells = np.asarray(cyto_T_cells)\n\n print(\"Separated ...\")\n\n t = 25\n partitioned_cancer_cells, windows, w, h = partition.partition(cancer_cells, tile_size=t, to_list=True)\n print(\"Cancer cells partitioned ...\")\n\n if args.heat:\n spatial_distribution = np.zeros_like(partitioned_cancer_cells)\n\n for i in range(t):\n for j in range(t):\n spatial_distribution[i][j] = len(partitioned_cancer_cells[i][j])\n\n with open(\"./inputs/spatial/\" + name + \".txt\", \"w\", newline=\"\") as dest:\n dest.write(str(spatial_distribution))\n\n if args.fish:\n result = fishermans_algorithm(partitioned_cancer_cells, (t, t), windows, w, h)\n print(\"Result retrieved ...\")\n\n if args.object:\n with open(\"./inputs/object/\" + name + \".p\", \"wb\") as dest:\n pickle.dump(result, dest)\n\n dups = set()\n histogram = np.zeros(21, dtype=np.uint32)\n\n for cluster in result:\n dups.add(cluster)\n\n total_cluster_cells = 0\n\n clusters_sum = 0\n dups_length = len(dups)\n\n for i in dups:\n value = len(i.cells)\n clusters_sum += value\n total_cluster_cells += len(i.cells)\n if value > 20:\n histogram[20] += 1\n else:\n histogram[value - 1] += 1\n\n print(\"Histogram retrieved ...\")\n\n clusters_avg = clusters_sum / dups_length\n\n assert(total_cluster_cells == len(cancer_cells))\n\n y = np.array(histogram)\n x = np.arange(21) + 1\n\n plt.bar(x, y)\n plt.xlabel(\"Value\")\n plt.ylabel(\"Frequency\")\n # plt.savefig(\"./inputs/\" + name + \".png\", bbox_inches='tight')\n plt.show()\n plt.close()\n\n if args.object:\n with open(\"./inputs/object/\" + name + \".txt\", \"w\", newline=\"\") as dest:\n dest.write(\"Average size of cluster: \" + str(clusters_avg) + \"\\n\")\n dest.write(\"Number of clusters: \" + str(len(dups)) + \"\\n\")\n dest.write(\"Total number of cells: \" + str(total_cluster_cells) + \"\\n\")\n dest.write(\"Cluster counts: \" + \"\\n\")\n for i, x in enumerate(histogram):\n dest.write(str(i) + \", \" + str(x) + \"\\n\")\n\nos.system('say \"All pickle files done in this batch.\"')\n\n\n\n\n\n\n\n# End of file\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
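A note on the record above: the three argparse options are declared without action='store_true', so args.fish, args.heat and args.object hold plain strings and are only truthy when a value is supplied on the command line. A minimal sketch of that behaviour, with the argument list passed explicitly because the script's file name is not given in the record:

import argparse

# Reproduces the flag handling from the record above (assumption: the same
# three optional, string-valued options).
parser = argparse.ArgumentParser()
parser.add_argument('--fish', help="flag for using fisherman's algorithm")
parser.add_argument('--heat', help='flag for using heatmap')
parser.add_argument('--object', help='flag for dumping the clusters')

args = parser.parse_args(['--fish', '1'])  # e.g. `--fish 1` on the command line
print(bool(args.fish), bool(args.heat))    # True False: unset options stay None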
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class MovingAverage(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class MovingAverage(object):
def __init__(self, size):
"""
Initialize your data structure here.
:type size: int
"""
self.size = size
self.q = collections.deque()
self.sum_ = 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class MovingAverage(object):
def __init__(self, size):
"""
Initialize your data structure here.
:type size: int
"""
self.size = size
self.q = collections.deque()
self.sum_ = 0
def next(self, val):
"""
:type val: int
:rtype: float
"""
if len(self.q) == self.size:
a = self.q.popleft()
self.sum_ -= a
self.q.append(val)
self.sum_ += val
return float(self.sum_) / len(self.q)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# @Author: Lich_Amnesia
# @Email: [email protected]
# @Date: 2016-11-17 11:00:33
# @Last Modified time: 2016-11-17 11:00:34
# @FileName: 346.py
class MovingAverage(object):
def __init__(self, size):
"""
Initialize your data structure here.
:type size: int
"""
self.size = size
self.q = collections.deque()
self.sum_ = 0
def next(self, val):
"""
:type val: int
:rtype: float
"""
if len(self.q) == self.size:
a = self.q.popleft()
self.sum_ -= a
self.q.append(val)
self.sum_ += val
return float(self.sum_) / len(self.q)
# Your MovingAverage object will be instantiated and called as such:
# obj = MovingAverage(size)
# param_1 = obj.next(val)
|
flexible
|
{
"blob_id": "9e37b728d8045726aef7625fccc14111ecb0e1c8",
"index": 5578,
"step-1": "<mask token>\n",
"step-2": "class MovingAverage(object):\n <mask token>\n <mask token>\n",
"step-3": "class MovingAverage(object):\n\n def __init__(self, size):\n \"\"\"\n Initialize your data structure here.\n :type size: int\n \"\"\"\n self.size = size\n self.q = collections.deque()\n self.sum_ = 0\n <mask token>\n",
"step-4": "class MovingAverage(object):\n\n def __init__(self, size):\n \"\"\"\n Initialize your data structure here.\n :type size: int\n \"\"\"\n self.size = size\n self.q = collections.deque()\n self.sum_ = 0\n\n def next(self, val):\n \"\"\"\n :type val: int\n :rtype: float\n \"\"\"\n if len(self.q) == self.size:\n a = self.q.popleft()\n self.sum_ -= a\n self.q.append(val)\n self.sum_ += val\n return float(self.sum_) / len(self.q)\n",
"step-5": "# -*- coding: utf-8 -*-\n# @Author: Lich_Amnesia\n# @Email: [email protected]\n# @Date: 2016-11-17 11:00:33\n# @Last Modified time: 2016-11-17 11:00:34\n# @FileName: 346.py\n\n\nclass MovingAverage(object):\n\n def __init__(self, size):\n \"\"\"\n Initialize your data structure here.\n :type size: int\n \"\"\"\n self.size = size\n self.q = collections.deque()\n self.sum_ = 0\n\n def next(self, val):\n \"\"\"\n :type val: int\n :rtype: float\n \"\"\"\n if len(self.q) == self.size:\n a = self.q.popleft()\n self.sum_ -= a\n self.q.append(val)\n self.sum_ += val\n return float(self.sum_) / len(self.q)\n\n# Your MovingAverage object will be instantiated and called as such:\n# obj = MovingAverage(size)\n# param_1 = obj.next(val)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
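The moving-average record above relies on collections.deque, but none of its steps imports the collections module, so the class raises a NameError when instantiated on its own. A self-contained sketch with the missing import added and a short usage run:

import collections


class MovingAverage(object):
    def __init__(self, size):
        # window size, queued values, and running sum of the current window
        self.size = size
        self.q = collections.deque()
        self.sum_ = 0

    def next(self, val):
        # drop the oldest value once the window is full
        if len(self.q) == self.size:
            a = self.q.popleft()
            self.sum_ -= a
        self.q.append(val)
        self.sum_ += val
        return float(self.sum_) / len(self.q)


obj = MovingAverage(3)
print(obj.next(1), obj.next(10), obj.next(3), obj.next(5))  # 1.0 5.5 4.666... 6.0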
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while abs(t0 - time.time() < 60):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
t0 = time.time()
while abs(t0 - time.time() < 60):
pass
<|reserved_special_token_1|>
import time
t0 = time.time()
while abs(t0 - time.time() < 60):
pass
|
flexible
|
{
"blob_id": "9a0e37aaa41f3b21ed7ad11096cd6c5dd0bb8564",
"index": 5608,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile abs(t0 - time.time() < 60):\n pass\n",
"step-3": "<mask token>\nt0 = time.time()\nwhile abs(t0 - time.time() < 60):\n pass\n",
"step-4": "import time\nt0 = time.time()\nwhile abs(t0 - time.time() < 60):\n pass\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
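In the record above the comparison sits inside the abs() call: `abs(t0 - time.time() < 60)` evaluates the boolean first, so the condition is always abs(True) == 1 and the loop never terminates. The presumably intended form, busy-waiting for 60 seconds, moves the closing parenthesis:

import time

t0 = time.time()
# abs() now wraps the elapsed-time expression, not the comparison result
while abs(t0 - time.time()) < 60:
    pass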
<|reserved_special_token_0|>
@pytest.mark.usefixtures('driver')
class Test_001_ShedulePage:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_001_elements_exists(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
найти кнопку отмены, кнопку карты, поле поиска"""
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
self.event_detail_page.click(*self.event_detail_page_locators.
btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
self.shedule_page.find_element(*self.shedule_locators.btn_back)
self.shedule_page.find_element(*self.shedule_locators.btn_map)
self.shedule_page.find_element(*self.shedule_locators.search_field)
def test_002_valid_filters(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
проверить соответствие фильтров и ответа сервера
проверить порядок фильтров"""
dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.
url_creations_movie_schedule_filter)
try:
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
sleep(5)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
sleep(5)
self.event_detail_page.click(*self.
event_detail_page_locators.btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
sleep(5)
self.shedule_page.check_rows_filters(dbg_api)
finally:
dbg_api.kill()
def test_003_check_time_ticket_filter(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
проверять соответствие времени на билетах с выставленными фильтрами"""
dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.
url_creations_movie_schedule_filter)
try:
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
sleep(10)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
self.event_detail_page.click(*self.
event_detail_page_locators.btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
sleep(2)
self.shedule_page.compare_tickets_datetime_options_second_filter(
dbg_api)
finally:
dbg_api.kill()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.usefixtures('driver')
class Test_001_ShedulePage:
<|reserved_special_token_0|>
@staticmethod
def teardown_class(cls):
enable_proxy(mode=False)
def test_001_elements_exists(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
найти кнопку отмены, кнопку карты, поле поиска"""
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
self.event_detail_page.click(*self.event_detail_page_locators.
btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
self.shedule_page.find_element(*self.shedule_locators.btn_back)
self.shedule_page.find_element(*self.shedule_locators.btn_map)
self.shedule_page.find_element(*self.shedule_locators.search_field)
def test_002_valid_filters(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
проверить соответствие фильтров и ответа сервера
проверить порядок фильтров"""
dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.
url_creations_movie_schedule_filter)
try:
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
sleep(5)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
sleep(5)
self.event_detail_page.click(*self.
event_detail_page_locators.btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
sleep(5)
self.shedule_page.check_rows_filters(dbg_api)
finally:
dbg_api.kill()
def test_003_check_time_ticket_filter(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
проверять соответствие времени на билетах с выставленными фильтрами"""
dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.
url_creations_movie_schedule_filter)
try:
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
sleep(10)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
self.event_detail_page.click(*self.
event_detail_page_locators.btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
sleep(2)
self.shedule_page.compare_tickets_datetime_options_second_filter(
dbg_api)
finally:
dbg_api.kill()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.usefixtures('driver')
class Test_001_ShedulePage:
@classmethod
def setup_class(cls):
cls.movies_locators = MoviesPageLocators()
cls.shedule_locators = ShedulePageLocators()
cls.event_detail_page_locators = MoviesDetailsPageLocators()
@staticmethod
def teardown_class(cls):
enable_proxy(mode=False)
def test_001_elements_exists(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
найти кнопку отмены, кнопку карты, поле поиска"""
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
self.event_detail_page.click(*self.event_detail_page_locators.
btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
self.shedule_page.find_element(*self.shedule_locators.btn_back)
self.shedule_page.find_element(*self.shedule_locators.btn_map)
self.shedule_page.find_element(*self.shedule_locators.search_field)
def test_002_valid_filters(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
проверить соответствие фильтров и ответа сервера
проверить порядок фильтров"""
dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.
url_creations_movie_schedule_filter)
try:
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
sleep(5)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
sleep(5)
self.event_detail_page.click(*self.
event_detail_page_locators.btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
sleep(5)
self.shedule_page.check_rows_filters(dbg_api)
finally:
dbg_api.kill()
def test_003_check_time_ticket_filter(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
проверять соответствие времени на билетах с выставленными фильтрами"""
dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.
url_creations_movie_schedule_filter)
try:
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
sleep(10)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
self.event_detail_page.click(*self.
event_detail_page_locators.btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
sleep(2)
self.shedule_page.compare_tickets_datetime_options_second_filter(
dbg_api)
finally:
dbg_api.kill()
<|reserved_special_token_1|>
from time import sleep
import pytest
import allure
from app.debug_api import DebugAPI
from app.check_api import HandlersAPI
from locators.movies_details_locators import MoviesDetailsPageLocators
from locators.movies_locators import MoviesPageLocators
from locators.shedule_locators import ShedulePageLocators
from screens.MoviesPage import MoviesPage
from screens.MoviesDetailsPage import MoviesDetailsPage
from screens.ShedulePage import ShedulePage
from utils.internet import enable_proxy
@pytest.mark.usefixtures('driver')
class Test_001_ShedulePage:
@classmethod
def setup_class(cls):
cls.movies_locators = MoviesPageLocators()
cls.shedule_locators = ShedulePageLocators()
cls.event_detail_page_locators = MoviesDetailsPageLocators()
@staticmethod
def teardown_class(cls):
enable_proxy(mode=False)
def test_001_elements_exists(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
найти кнопку отмены, кнопку карты, поле поиска"""
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
self.event_detail_page.click(*self.event_detail_page_locators.
btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
self.shedule_page.find_element(*self.shedule_locators.btn_back)
self.shedule_page.find_element(*self.shedule_locators.btn_map)
self.shedule_page.find_element(*self.shedule_locators.search_field)
def test_002_valid_filters(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
проверить соответствие фильтров и ответа сервера
проверить порядок фильтров"""
dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.
url_creations_movie_schedule_filter)
try:
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
sleep(5)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
sleep(5)
self.event_detail_page.click(*self.
event_detail_page_locators.btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
sleep(5)
self.shedule_page.check_rows_filters(dbg_api)
finally:
dbg_api.kill()
def test_003_check_time_ticket_filter(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
проверять соответствие времени на билетах с выставленными фильтрами"""
dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.
url_creations_movie_schedule_filter)
try:
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
sleep(10)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
self.event_detail_page.click(*self.
event_detail_page_locators.btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
sleep(2)
self.shedule_page.compare_tickets_datetime_options_second_filter(
dbg_api)
finally:
dbg_api.kill()
|
flexible
|
{
"blob_id": "c7c412fe4e2d53af1b4f2a55bd3453496767890d",
"index": 975,
"step-1": "<mask token>\n\n\[email protected]('driver')\nclass Test_001_ShedulePage:\n <mask token>\n <mask token>\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(\n dbg_api)\n finally:\n dbg_api.kill()\n",
"step-2": "<mask token>\n\n\[email protected]('driver')\nclass Test_001_ShedulePage:\n <mask token>\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(\n dbg_api)\n finally:\n dbg_api.kill()\n",
"step-3": "<mask token>\n\n\[email protected]('driver')\nclass Test_001_ShedulePage:\n\n @classmethod\n def setup_class(cls):\n cls.movies_locators = MoviesPageLocators()\n cls.shedule_locators = ShedulePageLocators()\n cls.event_detail_page_locators = MoviesDetailsPageLocators()\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(\n dbg_api)\n finally:\n dbg_api.kill()\n",
"step-4": "from time import sleep\nimport pytest\nimport allure\nfrom app.debug_api import DebugAPI\nfrom app.check_api import HandlersAPI\nfrom locators.movies_details_locators import MoviesDetailsPageLocators\nfrom locators.movies_locators import MoviesPageLocators\nfrom locators.shedule_locators import ShedulePageLocators\nfrom screens.MoviesPage import MoviesPage\nfrom screens.MoviesDetailsPage import MoviesDetailsPage\nfrom screens.ShedulePage import ShedulePage\nfrom utils.internet import enable_proxy\n\n\[email protected]('driver')\nclass Test_001_ShedulePage:\n\n @classmethod\n def setup_class(cls):\n cls.movies_locators = MoviesPageLocators()\n cls.shedule_locators = ShedulePageLocators()\n cls.event_detail_page_locators = MoviesDetailsPageLocators()\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(\n dbg_api)\n finally:\n 
dbg_api.kill()\n",
"step-5": null,
"step-ids": [
4,
5,
6,
7
]
}
|
[
4,
5,
6,
7
] |
from .models import Owner, Vehicle
from rest_framework import viewsets, permissions
from .serializers import OwnerSerializer, VehicleSerializer
class OwnerViewSet(viewsets.ModelViewSet):
queryset = Owner.objects.all().order_by('id')
serializer_class = OwnerSerializer
permission_classes = [permissions.IsAuthenticated]
class VehicleViewSet(viewsets.ModelViewSet):
queryset = Vehicle.objects.all().order_by('id')
serializer_class = VehicleSerializer
permission_classes = [permissions.IsAuthenticated]
|
normal
|
{
"blob_id": "9290294b5df081ef0cae5450a9ea3baef789c041",
"index": 6421,
"step-1": "<mask token>\n\n\nclass VehicleViewSet(viewsets.ModelViewSet):\n queryset = Vehicle.objects.all().order_by('id')\n serializer_class = VehicleSerializer\n permission_classes = [permissions.IsAuthenticated]\n",
"step-2": "<mask token>\n\n\nclass OwnerViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass VehicleViewSet(viewsets.ModelViewSet):\n queryset = Vehicle.objects.all().order_by('id')\n serializer_class = VehicleSerializer\n permission_classes = [permissions.IsAuthenticated]\n",
"step-3": "<mask token>\n\n\nclass OwnerViewSet(viewsets.ModelViewSet):\n queryset = Owner.objects.all().order_by('id')\n serializer_class = OwnerSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n\nclass VehicleViewSet(viewsets.ModelViewSet):\n queryset = Vehicle.objects.all().order_by('id')\n serializer_class = VehicleSerializer\n permission_classes = [permissions.IsAuthenticated]\n",
"step-4": "from .models import Owner, Vehicle\nfrom rest_framework import viewsets, permissions\nfrom .serializers import OwnerSerializer, VehicleSerializer\n\n\nclass OwnerViewSet(viewsets.ModelViewSet):\n queryset = Owner.objects.all().order_by('id')\n serializer_class = OwnerSerializer\n permission_classes = [permissions.IsAuthenticated]\n\n\nclass VehicleViewSet(viewsets.ModelViewSet):\n queryset = Vehicle.objects.all().order_by('id')\n serializer_class = VehicleSerializer\n permission_classes = [permissions.IsAuthenticated]\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
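The two viewsets in the record above only become reachable once they are registered with a router. A companion sketch for urls.py, where the app module name ('vehicles') and the 'api/' prefix are assumptions not present in the record:

from django.urls import include, path
from rest_framework import routers

from vehicles.views import OwnerViewSet, VehicleViewSet  # assumed module path

# Register both viewsets so DRF generates the standard list/detail routes.
router = routers.DefaultRouter()
router.register(r'owners', OwnerViewSet)
router.register(r'vehicles', VehicleViewSet)

urlpatterns = [
    path('api/', include(router.urls)),
]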
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class DuplicatedBlockException(Exception):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class DuplicatedBlockException(Exception):
code = 'duplicated_block_exception'
|
flexible
|
{
"blob_id": "983e3b2902fe3bc701167da2f308fdaed612ae84",
"index": 1784,
"step-1": "<mask token>\n",
"step-2": "class DuplicatedBlockException(Exception):\n <mask token>\n",
"step-3": "class DuplicatedBlockException(Exception):\n code = 'duplicated_block_exception'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.asyncio
async def test_ping_once():
async with pair_of_connected_hosts() as (host_a, host_b):
stream = await host_b.new_stream(host_a.get_id(), (ID,))
some_ping = secrets.token_bytes(PING_LENGTH)
await stream.write(some_ping)
some_pong = await stream.read(PING_LENGTH)
assert some_ping == some_pong
await stream.close()
<|reserved_special_token_0|>
@pytest.mark.asyncio
async def test_ping_several():
async with pair_of_connected_hosts() as (host_a, host_b):
stream = await host_b.new_stream(host_a.get_id(), (ID,))
for _ in range(SOME_PING_COUNT):
some_ping = secrets.token_bytes(PING_LENGTH)
await stream.write(some_ping)
some_pong = await stream.read(PING_LENGTH)
assert some_ping == some_pong
await asyncio.sleep(0)
await stream.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.asyncio
async def test_ping_once():
async with pair_of_connected_hosts() as (host_a, host_b):
stream = await host_b.new_stream(host_a.get_id(), (ID,))
some_ping = secrets.token_bytes(PING_LENGTH)
await stream.write(some_ping)
some_pong = await stream.read(PING_LENGTH)
assert some_ping == some_pong
await stream.close()
SOME_PING_COUNT = 3
@pytest.mark.asyncio
async def test_ping_several():
async with pair_of_connected_hosts() as (host_a, host_b):
stream = await host_b.new_stream(host_a.get_id(), (ID,))
for _ in range(SOME_PING_COUNT):
some_ping = secrets.token_bytes(PING_LENGTH)
await stream.write(some_ping)
some_pong = await stream.read(PING_LENGTH)
assert some_ping == some_pong
await asyncio.sleep(0)
await stream.close()
<|reserved_special_token_1|>
import asyncio
import secrets
import pytest
from libp2p.host.ping import ID, PING_LENGTH
from libp2p.tools.factories import pair_of_connected_hosts
@pytest.mark.asyncio
async def test_ping_once():
async with pair_of_connected_hosts() as (host_a, host_b):
stream = await host_b.new_stream(host_a.get_id(), (ID,))
some_ping = secrets.token_bytes(PING_LENGTH)
await stream.write(some_ping)
some_pong = await stream.read(PING_LENGTH)
assert some_ping == some_pong
await stream.close()
SOME_PING_COUNT = 3
@pytest.mark.asyncio
async def test_ping_several():
async with pair_of_connected_hosts() as (host_a, host_b):
stream = await host_b.new_stream(host_a.get_id(), (ID,))
for _ in range(SOME_PING_COUNT):
some_ping = secrets.token_bytes(PING_LENGTH)
await stream.write(some_ping)
some_pong = await stream.read(PING_LENGTH)
assert some_ping == some_pong
await asyncio.sleep(0)
await stream.close()
<|reserved_special_token_1|>
import asyncio
import secrets
import pytest
from libp2p.host.ping import ID, PING_LENGTH
from libp2p.tools.factories import pair_of_connected_hosts
@pytest.mark.asyncio
async def test_ping_once():
async with pair_of_connected_hosts() as (host_a, host_b):
stream = await host_b.new_stream(host_a.get_id(), (ID,))
some_ping = secrets.token_bytes(PING_LENGTH)
await stream.write(some_ping)
some_pong = await stream.read(PING_LENGTH)
assert some_ping == some_pong
await stream.close()
SOME_PING_COUNT = 3
@pytest.mark.asyncio
async def test_ping_several():
async with pair_of_connected_hosts() as (host_a, host_b):
stream = await host_b.new_stream(host_a.get_id(), (ID,))
for _ in range(SOME_PING_COUNT):
some_ping = secrets.token_bytes(PING_LENGTH)
await stream.write(some_ping)
some_pong = await stream.read(PING_LENGTH)
assert some_ping == some_pong
# NOTE: simulate some time to sleep to mirror a real
# world usage where a peer sends pings on some periodic interval
# NOTE: this interval can be `0` for this test.
await asyncio.sleep(0)
await stream.close()
|
flexible
|
{
"blob_id": "0233b46da3b9351f110ffc7f8622ca8f9ee9944d",
"index": 3000,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\nasync def test_ping_once():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await stream.close()\n\n\n<mask token>\n\n\[email protected]\nasync def test_ping_several():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n for _ in range(SOME_PING_COUNT):\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await asyncio.sleep(0)\n await stream.close()\n",
"step-3": "<mask token>\n\n\[email protected]\nasync def test_ping_once():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await stream.close()\n\n\nSOME_PING_COUNT = 3\n\n\[email protected]\nasync def test_ping_several():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n for _ in range(SOME_PING_COUNT):\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await asyncio.sleep(0)\n await stream.close()\n",
"step-4": "import asyncio\nimport secrets\nimport pytest\nfrom libp2p.host.ping import ID, PING_LENGTH\nfrom libp2p.tools.factories import pair_of_connected_hosts\n\n\[email protected]\nasync def test_ping_once():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await stream.close()\n\n\nSOME_PING_COUNT = 3\n\n\[email protected]\nasync def test_ping_several():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n for _ in range(SOME_PING_COUNT):\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await asyncio.sleep(0)\n await stream.close()\n",
"step-5": "import asyncio\nimport secrets\n\nimport pytest\n\nfrom libp2p.host.ping import ID, PING_LENGTH\nfrom libp2p.tools.factories import pair_of_connected_hosts\n\n\[email protected]\nasync def test_ping_once():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await stream.close()\n\n\nSOME_PING_COUNT = 3\n\n\[email protected]\nasync def test_ping_several():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n for _ in range(SOME_PING_COUNT):\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n # NOTE: simulate some time to sleep to mirror a real\n # world usage where a peer sends pings on some periodic interval\n # NOTE: this interval can be `0` for this test.\n await asyncio.sleep(0)\n await stream.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
carpeta = Carpeta(settings.folder_sat)
sentinela = SentinelSat(carpeta)
sentinela.start_Monitoring()
<|reserved_special_token_1|>
from LibTools.filesystem import Carpeta
from slaves import SentinelSat
import settings
if __name__ == '__main__':
carpeta = Carpeta(settings.folder_sat)
sentinela = SentinelSat(carpeta)
sentinela.start_Monitoring()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from LibTools.filesystem import Carpeta
from slaves import SentinelSat
import settings
if __name__ == '__main__':
carpeta = Carpeta(settings.folder_sat)
sentinela = SentinelSat(carpeta)
sentinela.start_Monitoring()
|
flexible
|
{
"blob_id": "9e3f4484542c2629d636fcb4166584ba52bebe21",
"index": 2196,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n carpeta = Carpeta(settings.folder_sat)\n sentinela = SentinelSat(carpeta)\n sentinela.start_Monitoring()\n",
"step-3": "from LibTools.filesystem import Carpeta\nfrom slaves import SentinelSat\nimport settings\nif __name__ == '__main__':\n carpeta = Carpeta(settings.folder_sat)\n sentinela = SentinelSat(carpeta)\n sentinela.start_Monitoring()\n",
"step-4": "# -*- coding: utf-8 -*-\nfrom LibTools.filesystem import Carpeta\nfrom slaves import SentinelSat\n\nimport settings\n\nif __name__ == '__main__':\n\n carpeta = Carpeta(settings.folder_sat)\n sentinela = SentinelSat(carpeta)\n sentinela.start_Monitoring()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
# @File : config.py
# @Author: TT
# @Email : [email protected]
# @Date : 2018/12/4
# @Desc : config file
from utils.general import getchromdriver_version
from chromedriver.path import path
import os
import sys
chromedriver = os.path.abspath(os.path.dirname(__file__)) + "\\chromedriver\\"+ getchromdriver_version()
download_path = os.path.abspath(os.path.dirname(__file__)) + "\\"
Suffix_name = ['.bin', '.rar', '.zip', '.7z']
|
normal
|
{
"blob_id": "5b4a196de60a3a30bc571c559fe5f211563b8999",
"index": 5449,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nchromedriver = os.path.abspath(os.path.dirname(__file__)\n ) + '\\\\chromedriver\\\\' + getchromdriver_version()\ndownload_path = os.path.abspath(os.path.dirname(__file__)) + '\\\\'\nSuffix_name = ['.bin', '.rar', '.zip', '.7z']\n",
"step-3": "from utils.general import getchromdriver_version\nfrom chromedriver.path import path\nimport os\nimport sys\nchromedriver = os.path.abspath(os.path.dirname(__file__)\n ) + '\\\\chromedriver\\\\' + getchromdriver_version()\ndownload_path = os.path.abspath(os.path.dirname(__file__)) + '\\\\'\nSuffix_name = ['.bin', '.rar', '.zip', '.7z']\n",
"step-4": "# -*- coding: utf-8 -*-\n# @File : config.py\n# @Author: TT\n# @Email : [email protected]\n# @Date : 2018/12/4\n# @Desc : config file\nfrom utils.general import getchromdriver_version\nfrom chromedriver.path import path\nimport os\nimport sys\n\nchromedriver = os.path.abspath(os.path.dirname(__file__)) + \"\\\\chromedriver\\\\\"+ getchromdriver_version()\n\ndownload_path = os.path.abspath(os.path.dirname(__file__)) + \"\\\\\"\n\nSuffix_name = ['.bin', '.rar', '.zip', '.7z']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
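The record above builds the chromedriver path with hard-coded Windows backslashes and string concatenation. A portability sketch using os.path.join; the stub version string stands in for the record's getchromdriver_version() helper:

import os


def getchromdriver_version():
    # placeholder return value; the original calls utils.general.getchromdriver_version()
    return 'chromedriver.exe'


BASE_DIR = os.path.abspath(os.path.dirname(__file__))
chromedriver = os.path.join(BASE_DIR, 'chromedriver', getchromdriver_version())
download_path = BASE_DIR + os.sep
Suffix_name = ['.bin', '.rar', '.zip', '.7z']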
<|reserved_special_token_0|>
class Window:
def __init__(self, width, height, title='MyWindow', resizable=(False,
False), icon='resources/feather.ico'):
self.root = Tk()
self.root.title(title)
self.root.geometry('+600+300')
if icon:
self.root.iconbitmap(icon)
self.text = ScrolledText(self.root)
<|reserved_special_token_0|>
def draw_widgets(self):
self.draw_menu()
self.text.pack()
def draw_menu(self):
menu_bar = Menu(self.root)
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label='Открыть', command=self.open_file)
file_menu.add_command(label='Сохранить как', command=self.save_file)
file_menu.add_command(label='Отркыть папку', command=self.open_dir)
file_menu.add_separator()
file_menu.add_command(label='Выйти', command=self.exit)
info_menu = Menu(menu_bar, tearoff=0)
info_menu.add_command(label='О приложении', command=self.show_info)
menu_bar.add_cascade(label='Файл', menu=file_menu)
menu_bar.add_cascade(label='Справка', menu=info_menu)
self.root.configure(menu=menu_bar)
def open_file(self):
file_names = fd.askopenfilenames()
self.text.insert(END, str(file_names))
def save_file(self):
name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (
'Py files', '*.py')))
if name:
self.text.insert(END, f'Сохранить файл по пути {name}\n')
def open_dir(self):
path = fd.askdirectory(mustexist=True)
self.text.insert(END, f'Папка {path}\n')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def create_child(self, width, height, title='Child', resizable=(False,
False), icon=None):
ChildWindow(self.root, width, height, title, resizable, icon)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Window:
def __init__(self, width, height, title='MyWindow', resizable=(False,
False), icon='resources/feather.ico'):
self.root = Tk()
self.root.title(title)
self.root.geometry('+600+300')
if icon:
self.root.iconbitmap(icon)
self.text = ScrolledText(self.root)
<|reserved_special_token_0|>
def draw_widgets(self):
self.draw_menu()
self.text.pack()
def draw_menu(self):
menu_bar = Menu(self.root)
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label='Открыть', command=self.open_file)
file_menu.add_command(label='Сохранить как', command=self.save_file)
file_menu.add_command(label='Отркыть папку', command=self.open_dir)
file_menu.add_separator()
file_menu.add_command(label='Выйти', command=self.exit)
info_menu = Menu(menu_bar, tearoff=0)
info_menu.add_command(label='О приложении', command=self.show_info)
menu_bar.add_cascade(label='Файл', menu=file_menu)
menu_bar.add_cascade(label='Справка', menu=info_menu)
self.root.configure(menu=menu_bar)
def open_file(self):
file_names = fd.askopenfilenames()
self.text.insert(END, str(file_names))
def save_file(self):
name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (
'Py files', '*.py')))
if name:
self.text.insert(END, f'Сохранить файл по пути {name}\n')
def open_dir(self):
path = fd.askdirectory(mustexist=True)
self.text.insert(END, f'Папка {path}\n')
<|reserved_special_token_0|>
def exit(self):
choice = mb.askyesno('Quit', 'Do you want to quit?')
if choice:
self.root.destroy()
def create_child(self, width, height, title='Child', resizable=(False,
False), icon=None):
ChildWindow(self.root, width, height, title, resizable, icon)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Window:
def __init__(self, width, height, title='MyWindow', resizable=(False,
False), icon='resources/feather.ico'):
self.root = Tk()
self.root.title(title)
self.root.geometry('+600+300')
if icon:
self.root.iconbitmap(icon)
self.text = ScrolledText(self.root)
<|reserved_special_token_0|>
def draw_widgets(self):
self.draw_menu()
self.text.pack()
def draw_menu(self):
menu_bar = Menu(self.root)
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label='Открыть', command=self.open_file)
file_menu.add_command(label='Сохранить как', command=self.save_file)
file_menu.add_command(label='Отркыть папку', command=self.open_dir)
file_menu.add_separator()
file_menu.add_command(label='Выйти', command=self.exit)
info_menu = Menu(menu_bar, tearoff=0)
info_menu.add_command(label='О приложении', command=self.show_info)
menu_bar.add_cascade(label='Файл', menu=file_menu)
menu_bar.add_cascade(label='Справка', menu=info_menu)
self.root.configure(menu=menu_bar)
def open_file(self):
file_names = fd.askopenfilenames()
self.text.insert(END, str(file_names))
def save_file(self):
name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (
'Py files', '*.py')))
if name:
self.text.insert(END, f'Сохранить файл по пути {name}\n')
def open_dir(self):
path = fd.askdirectory(mustexist=True)
self.text.insert(END, f'Папка {path}\n')
def show_info(self):
mb.showinfo('Информация', 'Лучшее графическое приложение на свете')
def exit(self):
choice = mb.askyesno('Quit', 'Do you want to quit?')
if choice:
self.root.destroy()
def create_child(self, width, height, title='Child', resizable=(False,
False), icon=None):
ChildWindow(self.root, width, height, title, resizable, icon)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Window:
def __init__(self, width, height, title='MyWindow', resizable=(False,
False), icon='resources/feather.ico'):
self.root = Tk()
self.root.title(title)
self.root.geometry('+600+300')
if icon:
self.root.iconbitmap(icon)
self.text = ScrolledText(self.root)
def run(self):
self.draw_widgets()
self.root.mainloop()
def draw_widgets(self):
self.draw_menu()
self.text.pack()
def draw_menu(self):
menu_bar = Menu(self.root)
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label='Открыть', command=self.open_file)
file_menu.add_command(label='Сохранить как', command=self.save_file)
file_menu.add_command(label='Отркыть папку', command=self.open_dir)
file_menu.add_separator()
file_menu.add_command(label='Выйти', command=self.exit)
info_menu = Menu(menu_bar, tearoff=0)
info_menu.add_command(label='О приложении', command=self.show_info)
menu_bar.add_cascade(label='Файл', menu=file_menu)
menu_bar.add_cascade(label='Справка', menu=info_menu)
self.root.configure(menu=menu_bar)
def open_file(self):
file_names = fd.askopenfilenames()
self.text.insert(END, str(file_names))
def save_file(self):
name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (
'Py files', '*.py')))
if name:
self.text.insert(END, f'Сохранить файл по пути {name}\n')
def open_dir(self):
path = fd.askdirectory(mustexist=True)
self.text.insert(END, f'Папка {path}\n')
def show_info(self):
mb.showinfo('Информация', 'Лучшее графическое приложение на свете')
def exit(self):
choice = mb.askyesno('Quit', 'Do you want to quit?')
if choice:
self.root.destroy()
def create_child(self, width, height, title='Child', resizable=(False,
False), icon=None):
ChildWindow(self.root, width, height, title, resizable, icon)
if __name__ == '__main__':
window = Window(500, 500, 'TKINTER')
window.run()
<|reserved_special_token_1|>
from tkinter import *
from tkinter import messagebox as mb
from tkinter.scrolledtext import ScrolledText
from tkinter import filedialog as fd
from child_window import ChildWindow
# from PIL import Image as PilImage
# from PIL import ImageTk, ImageOps
class Window:
def __init__(self, width, height, title="MyWindow", resizable=(False, False), icon=r"resources/feather.ico"):
self.root = Tk()
self.root.title(title)
# self.root.geometry(f"{width}x{height}+200+200")
self.root.geometry("+600+300")
# self.root.resizable(resizable[0], resizable[1])
if icon:
self.root.iconbitmap(icon)
self.text = ScrolledText(self.root)
def run(self):
self.draw_widgets()
self.root.mainloop()
def draw_widgets(self):
self.draw_menu()
self.text.pack()
def draw_menu(self):
menu_bar = Menu(self.root)
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label="Открыть", command=self.open_file)
file_menu.add_command(label="Сохранить как", command=self.save_file)
file_menu.add_command(label="Отркыть папку", command=self.open_dir)
file_menu.add_separator()
file_menu.add_command(label="Выйти", command=self.exit)
info_menu = Menu(menu_bar, tearoff=0)
info_menu.add_command(label="О приложении", command=self.show_info)
menu_bar.add_cascade(label="Файл", menu=file_menu)
menu_bar.add_cascade(label="Справка", menu=info_menu)
self.root.configure(menu=menu_bar)
def open_file(self):
# wanted_files = (
# ("IMAGES", "*.jpeg;*.png;*.gif"),
# ("TEXT files", "*.txt;*.log"),
# ("PY files", "*.py"),
# ("ALL", "*.*")
# )
#
# file_name = fd.askopenfilename(initialdir="D:/", title="FIND A FILE", filetypes=wanted_files)
# self.text.insert(END, f"Надо открыть файл: {file_name}\nСодержимое:\n")
# if file_name:
# with open(file_name, "r") as f:
# self.text.insert(END, f.read())
# file = fd.askopenfile()
# self.text.insert(END, file.read())
# file.close()
file_names = fd.askopenfilenames()
self.text.insert(END, str(file_names))
def save_file(self):
name = fd.asksaveasfilename(filetypes=(("TEXT files", "*.txt"), ("Py files", "*.py")))
if name:
self.text.insert(END, f"Сохранить файл по пути {name}\n")
# with open(name, "w") as f:
# f.write("123")
# file = fd.asksaveasfile()
# file.write("123")
# file.close()
def open_dir(self):
path = fd.askdirectory(mustexist=True)
self.text.insert(END, f"Папка {path}\n")
def show_info(self):
mb.showinfo("Информация", "Лучшее графическое приложение на свете")
def exit(self):
choice = mb.askyesno("Quit", "Do you want to quit?")
if choice:
self.root.destroy()
def create_child(self, width, height, title="Child", resizable=(False, False), icon=None):
ChildWindow(self.root, width, height, title, resizable, icon)
if __name__ == "__main__":
window = Window(500, 500, "TKINTER")
# window.create_child(200, 100)
window.run()
|
flexible
|
{
"blob_id": "02d4e1ddb0b4cf75c9902e13263c5a80417de01b",
"index": 6530,
"step-1": "<mask token>\n\n\nclass Window:\n\n def __init__(self, width, height, title='MyWindow', resizable=(False, \n False), icon='resources/feather.ico'):\n self.root = Tk()\n self.root.title(title)\n self.root.geometry('+600+300')\n if icon:\n self.root.iconbitmap(icon)\n self.text = ScrolledText(self.root)\n <mask token>\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label='Открыть', command=self.open_file)\n file_menu.add_command(label='Сохранить как', command=self.save_file)\n file_menu.add_command(label='Отркыть папку', command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label='Выйти', command=self.exit)\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label='О приложении', command=self.show_info)\n menu_bar.add_cascade(label='Файл', menu=file_menu)\n menu_bar.add_cascade(label='Справка', menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (\n 'Py files', '*.py')))\n if name:\n self.text.insert(END, f'Сохранить файл по пути {name}\\n')\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f'Папка {path}\\n')\n <mask token>\n <mask token>\n\n def create_child(self, width, height, title='Child', resizable=(False, \n False), icon=None):\n ChildWindow(self.root, width, height, title, resizable, icon)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Window:\n\n def __init__(self, width, height, title='MyWindow', resizable=(False, \n False), icon='resources/feather.ico'):\n self.root = Tk()\n self.root.title(title)\n self.root.geometry('+600+300')\n if icon:\n self.root.iconbitmap(icon)\n self.text = ScrolledText(self.root)\n <mask token>\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label='Открыть', command=self.open_file)\n file_menu.add_command(label='Сохранить как', command=self.save_file)\n file_menu.add_command(label='Отркыть папку', command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label='Выйти', command=self.exit)\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label='О приложении', command=self.show_info)\n menu_bar.add_cascade(label='Файл', menu=file_menu)\n menu_bar.add_cascade(label='Справка', menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (\n 'Py files', '*.py')))\n if name:\n self.text.insert(END, f'Сохранить файл по пути {name}\\n')\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f'Папка {path}\\n')\n <mask token>\n\n def exit(self):\n choice = mb.askyesno('Quit', 'Do you want to quit?')\n if choice:\n self.root.destroy()\n\n def create_child(self, width, height, title='Child', resizable=(False, \n False), icon=None):\n ChildWindow(self.root, width, height, title, resizable, icon)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Window:\n\n def __init__(self, width, height, title='MyWindow', resizable=(False, \n False), icon='resources/feather.ico'):\n self.root = Tk()\n self.root.title(title)\n self.root.geometry('+600+300')\n if icon:\n self.root.iconbitmap(icon)\n self.text = ScrolledText(self.root)\n <mask token>\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label='Открыть', command=self.open_file)\n file_menu.add_command(label='Сохранить как', command=self.save_file)\n file_menu.add_command(label='Отркыть папку', command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label='Выйти', command=self.exit)\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label='О приложении', command=self.show_info)\n menu_bar.add_cascade(label='Файл', menu=file_menu)\n menu_bar.add_cascade(label='Справка', menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (\n 'Py files', '*.py')))\n if name:\n self.text.insert(END, f'Сохранить файл по пути {name}\\n')\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f'Папка {path}\\n')\n\n def show_info(self):\n mb.showinfo('Информация', 'Лучшее графическое приложение на свете')\n\n def exit(self):\n choice = mb.askyesno('Quit', 'Do you want to quit?')\n if choice:\n self.root.destroy()\n\n def create_child(self, width, height, title='Child', resizable=(False, \n False), icon=None):\n ChildWindow(self.root, width, height, title, resizable, icon)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Window:\n\n def __init__(self, width, height, title='MyWindow', resizable=(False, \n False), icon='resources/feather.ico'):\n self.root = Tk()\n self.root.title(title)\n self.root.geometry('+600+300')\n if icon:\n self.root.iconbitmap(icon)\n self.text = ScrolledText(self.root)\n\n def run(self):\n self.draw_widgets()\n self.root.mainloop()\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label='Открыть', command=self.open_file)\n file_menu.add_command(label='Сохранить как', command=self.save_file)\n file_menu.add_command(label='Отркыть папку', command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label='Выйти', command=self.exit)\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label='О приложении', command=self.show_info)\n menu_bar.add_cascade(label='Файл', menu=file_menu)\n menu_bar.add_cascade(label='Справка', menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (\n 'Py files', '*.py')))\n if name:\n self.text.insert(END, f'Сохранить файл по пути {name}\\n')\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f'Папка {path}\\n')\n\n def show_info(self):\n mb.showinfo('Информация', 'Лучшее графическое приложение на свете')\n\n def exit(self):\n choice = mb.askyesno('Quit', 'Do you want to quit?')\n if choice:\n self.root.destroy()\n\n def create_child(self, width, height, title='Child', resizable=(False, \n False), icon=None):\n ChildWindow(self.root, width, height, title, resizable, icon)\n\n\nif __name__ == '__main__':\n window = Window(500, 500, 'TKINTER')\n window.run()\n",
"step-5": "from tkinter import *\nfrom tkinter import messagebox as mb\nfrom tkinter.scrolledtext import ScrolledText\nfrom tkinter import filedialog as fd\nfrom child_window import ChildWindow\n# from PIL import Image as PilImage\n# from PIL import ImageTk, ImageOps\n\n\nclass Window:\n def __init__(self, width, height, title=\"MyWindow\", resizable=(False, False), icon=r\"resources/feather.ico\"):\n self.root = Tk()\n self.root.title(title)\n # self.root.geometry(f\"{width}x{height}+200+200\")\n self.root.geometry(\"+600+300\")\n # self.root.resizable(resizable[0], resizable[1])\n if icon:\n self.root.iconbitmap(icon)\n\n self.text = ScrolledText(self.root)\n\n def run(self):\n self.draw_widgets()\n self.root.mainloop()\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label=\"Открыть\", command=self.open_file)\n file_menu.add_command(label=\"Сохранить как\", command=self.save_file)\n file_menu.add_command(label=\"Отркыть папку\", command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label=\"Выйти\", command=self.exit)\n\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label=\"О приложении\", command=self.show_info)\n\n menu_bar.add_cascade(label=\"Файл\", menu=file_menu)\n menu_bar.add_cascade(label=\"Справка\", menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n # wanted_files = (\n # (\"IMAGES\", \"*.jpeg;*.png;*.gif\"),\n # (\"TEXT files\", \"*.txt;*.log\"),\n # (\"PY files\", \"*.py\"),\n # (\"ALL\", \"*.*\")\n # )\n #\n # file_name = fd.askopenfilename(initialdir=\"D:/\", title=\"FIND A FILE\", filetypes=wanted_files)\n # self.text.insert(END, f\"Надо открыть файл: {file_name}\\nСодержимое:\\n\")\n # if file_name:\n # with open(file_name, \"r\") as f:\n # self.text.insert(END, f.read())\n\n # file = fd.askopenfile()\n # self.text.insert(END, file.read())\n # file.close()\n\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=((\"TEXT files\", \"*.txt\"), (\"Py files\", \"*.py\")))\n if name:\n self.text.insert(END, f\"Сохранить файл по пути {name}\\n\")\n # with open(name, \"w\") as f:\n # f.write(\"123\")\n\n # file = fd.asksaveasfile()\n # file.write(\"123\")\n # file.close()\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f\"Папка {path}\\n\")\n\n def show_info(self):\n mb.showinfo(\"Информация\", \"Лучшее графическое приложение на свете\")\n\n def exit(self):\n choice = mb.askyesno(\"Quit\", \"Do you want to quit?\")\n if choice:\n self.root.destroy()\n\n def create_child(self, width, height, title=\"Child\", resizable=(False, False), icon=None):\n ChildWindow(self.root, width, height, title, resizable, icon)\n\n\nif __name__ == \"__main__\":\n window = Window(500, 500, \"TKINTER\")\n # window.create_child(200, 100)\n window.run()\n\n",
"step-ids": [
8,
9,
10,
12,
14
]
}