code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def SVM():
    '''data1 -- linear classification'''
    data1 = spio.loadmat('data1.mat')
    X = data1['X']
    y = data1['y']
    y = np.ravel(y)
    plot_data(X, y)
    model = svm.SVC(C=1.0, kernel='linear').fit(X, y) # use a linear kernel
    plot_decisionBoundary(X, y, model) # plot the decision boundary
    '''data2 -- nonlinear classification'''
    data2 = spio.loadmat('data2.mat')
    X = data2['X']
    y = data2['y']
    y = np.ravel(y)
    plt = plot_data(X, y)
    plt.show()
    model = svm.SVC(gamma=100).fit(X, y) # gamma is the kernel coefficient; larger values fit the training data more tightly (and risk overfitting)
    plot_decisionBoundary(X, y, model, class_='notLinear') # plot the decision boundary | data1 -- linear classification | SVM | python | lawlite19/MachineLearning_Python | SVM/SVM_scikit-learn.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/SVM/SVM_scikit-learn.py | MIT |
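The helpers plot_data and plot_decisionBoundary come from elsewhere in this repository and are not shown here. As a rough, hypothetical sketch consistent with how they are called above (the real implementations may differ):

import numpy as np
import matplotlib.pyplot as plt

def plot_data(X, y):
    """Scatter-plot the two classes (y in {0, 1})."""
    plt.plot(X[y == 1, 0], X[y == 1, 1], 'k+')
    plt.plot(X[y == 0, 0], X[y == 0, 1], 'yo')
    return plt

def plot_decisionBoundary(X, y, model, class_='linear'):
    """Overlay the fitted SVM's decision boundary on the data."""
    plot_data(X, y)
    if class_ == 'linear':
        # for a linear kernel: w0*x1 + w1*x2 + b = 0  =>  x2 = -(w0*x1 + b)/w1
        w, b = model.coef_[0], model.intercept_[0]
        xp = np.linspace(X[:, 0].min(), X[:, 0].max(), 100)
        plt.plot(xp, -(w[0] * xp + b) / w[1], 'b-')
    else:
        # otherwise, draw the zero contour of the decision function on a grid
        x1, x2 = np.meshgrid(np.linspace(X[:, 0].min(), X[:, 0].max(), 200),
                             np.linspace(X[:, 1].min(), X[:, 1].max(), 200))
        vals = model.decision_function(np.c_[x1.ravel(), x2.ravel()])
        plt.contour(x1, x2, vals.reshape(x1.shape), levels=[0], colors='b')
    plt.show()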
def PCA_2d_example():
    '''load the data and plot it'''
    data = spio.loadmat('data.mat')
    X = data['X']
    plt = plot_data_2d(X,'bo')
    plt.axis('square')
    plt.title('original data')
    plt.show()
    '''normalize the data and plot it'''
    scaler = StandardScaler()
    scaler.fit(X)
    x_train = scaler.transform(X)
    plot_data_2d(x_train, 'bo')
    plt.axis('square')
    plt.title('scaler data')
    plt.show()
    '''fit the model'''
    K = 1 # target dimensionality
    model = pca.PCA(n_components=K).fit(x_train) # fit the data; n_components sets the target dimensionality
    Z = model.transform(x_train) # transform performs the dimensionality reduction
    '''recover the data and plot it'''
    Ureduce = model.components_ # the principal directions Ureduce used for the projection
    x_rec = np.dot(Z,Ureduce) # reconstruct (approximately recover) the data
    plot_data_2d(x_rec,'bo')
    plt.plot()
    plt.axis('square')
    plt.title('recover data')
    plt.show() | load the data and plot it | PCA_2d_example | python | lawlite19/MachineLearning_Python | PCA/PCA_scikit-learn.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/PCA/PCA_scikit-learn.py | MIT |
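A design note on the recovery step above: np.dot(Z, Ureduce) is the same reconstruction scikit-learn exposes as inverse_transform, which computes Z @ components_ + mean_; the two agree here because StandardScaler already centered the data, so mean_ is numerically zero. A small sketch, assuming model and x_train from the function above:

import numpy as np
Z = model.transform(x_train)
x_rec_manual = np.dot(Z, model.components_)  # manual reconstruction
x_rec_sklearn = model.inverse_transform(Z)   # library equivalent: Z @ components_ + mean_
assert np.allclose(x_rec_manual, x_rec_sklearn)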
def PCA_face_example():
    '''load the data and display it'''
    image_data = spio.loadmat('data_faces.mat')
    X = image_data['X']
    display_imageData(X[0:100,:]) # display the first 100 original images
    '''normalize the data'''
    scaler = StandardScaler()
    scaler.fit(X)
    x_train = scaler.transform(X)
    '''fit the model'''
    K = 100
    model = pca.PCA(n_components=K).fit(x_train)
    Z = model.transform(x_train)
    Ureduce = model.components_
    display_imageData(Ureduce[0:36,:]) # visualize some of the principal components U
    x_rec = np.dot(Z,Ureduce)
    display_imageData(x_rec[0:100,:]) # display the recovered data | load the data and display it | PCA_face_example | python | lawlite19/MachineLearning_Python | PCA/PCA_scikit-learn.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/PCA/PCA_scikit-learn.py | MIT |
def display_imageData(imgData):
    count = 0
    '''
    Display 100 images. (Drawing them one at a time would be very slow; instead,
    arrange the images to draw into a single matrix and display that matrix.)
    - initialize a 2-D array
    - reshape each row of data into an image matrix and place it into the array
    - display the array
    '''
    m,n = imgData.shape
    width = np.int32(np.round(np.sqrt(n)))
    height = np.int32(n/width)
    rows_count = np.int32(np.floor(np.sqrt(m)))
    cols_count = np.int32(np.ceil(m/rows_count))
    pad = 1
    display_array = -np.ones((pad+rows_count*(height+pad),pad+cols_count*(width+pad)))
    for i in range(rows_count):
        for j in range(cols_count):
            if count >= m: # the grid may have more cells than images; exit the inner loop
                break
            max_val = np.max(np.abs(imgData[count,:]))
            display_array[pad+i*(height+pad):pad+i*(height+pad)+height,pad+j*(width+pad):pad+j*(width+pad)+width] = imgData[count,:].reshape(height,width,order="F")/max_val # order="F" means column-major, as in MATLAB; NumPy defaults to row-major, so it must be given explicitly
            count += 1
        if count >= m: # exit the outer loop as well
            break
    plt.imshow(display_array,cmap='gray') # display as a grayscale image
    plt.axis('off')
    plt.show() | Display 100 images. (Drawing them one at a time would be very slow; instead, arrange the images to draw into a single matrix and display that matrix.)
- initialize a 2-D array
- reshape each row of data into an image matrix and place it into the array
- display the array | display_imageData | python | lawlite19/MachineLearning_Python | PCA/PCA_scikit-learn.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/PCA/PCA_scikit-learn.py | MIT |
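A quick usage sketch of display_imageData, assuming the function above is in scope; 100 rows of fake flattened 20x20 images render as a 10 x 10 grid:

import numpy as np
fake_images = np.random.rand(100, 400)  # 100 flattened 20x20 "images", one per row
display_imageData(fake_images)          # renders them as a 10 x 10 grid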
def PCA_2D():
    data_2d = spio.loadmat("data.mat")
    X = data_2d['X']
    m = X.shape[0]
    plt = plot_data_2d(X,'bo') # plot the 2-D data
    plt.show()
    X_copy = X.copy()
    X_norm,mu,sigma = featureNormalize(X_copy) # normalize the data
    #plot_data_2d(X_norm) # plot the normalized data
    #plt.show()
    Sigma = np.dot(np.transpose(X_norm),X_norm)/m # compute the covariance matrix Sigma
    U,S,V = np.linalg.svd(Sigma) # singular value decomposition of Sigma
    plt = plot_data_2d(X,'bo') # plot the original data
    drawline(plt, mu, mu+S[0]*(U[:,0]), 'r-') # draw the line showing the projection direction
    plt.axis('square')
    plt.show()
    K = 1 # target dimensionality (from 2-D down to 1-D here)
    '''project the data (dimensionality reduction)'''
    Z = projectData(X_norm,U,K) # project
    '''recover the data'''
    X_rec = recoverData(Z,U,K) # recover
    '''plot the original data against the recovered data'''
    plt = plot_data_2d(X_norm,'bo')
    plot_data_2d(X_rec,'ro')
    for i in range(X_norm.shape[0]):
        drawline(plt, X_norm[i,:], X_rec[i,:], '--k')
    plt.axis('square')
    plt.show() | project the data (dimensionality reduction) | PCA_2D | python | lawlite19/MachineLearning_Python | PCA/PCA.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/PCA/PCA.py | MIT |
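The helpers projectData and recoverData used by PCA_2D are not shown in this excerpt. A minimal sketch, following the usual PCA convention that the columns of U are the principal directions (the repository's code may differ):

import numpy as np

def projectData(X_norm, U, K):
    """Project the data onto the top-K principal directions."""
    Ureduce = U[:, 0:K]             # n x K
    return np.dot(X_norm, Ureduce)  # m x K

def recoverData(Z, U, K):
    """Map projected data back to the original space (an approximation)."""
    Ureduce = U[:, 0:K]
    return np.dot(Z, np.transpose(Ureduce))  # m x n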
def featureNormalize(X):
    '''(each value - the column mean) / the column standard deviation'''
    n = X.shape[1]
    mu = np.zeros((1,n))
    sigma = np.zeros((1,n))
    mu = np.mean(X,axis=0) # axis=0 operates column-wise
    sigma = np.std(X,axis=0)
    for i in range(n):
        X[:,i] = (X[:,i]-mu[i])/sigma[i]
    return X,mu,sigma | (each value - the column mean) / the column standard deviation | featureNormalize | python | lawlite19/MachineLearning_Python | PCA/PCA.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/PCA/PCA.py | MIT |
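A design note: the per-column loop in featureNormalize is equivalent to a single broadcast expression, since NumPy broadcasts the 1-D mu and sigma across the rows. A vectorized sketch:

import numpy as np

def featureNormalize_vectorized(X):
    mu = np.mean(X, axis=0)
    sigma = np.std(X, axis=0)
    return (X - mu) / sigma, mu, sigma  # broadcasting replaces the per-column loop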
def display_imageData(imgData):
    count = 0
    '''
    Display 100 images. (Drawing them one at a time would be very slow; instead,
    arrange the images to draw into a single matrix and display that matrix.)
    - initialize a 2-D array
    - reshape each row of data into an image matrix and place it into the array
    - display the array
    '''
    m,n = imgData.shape
    width = np.int32(np.round(np.sqrt(n)))
    height = np.int32(n/width)
    rows_count = np.int32(np.floor(np.sqrt(m)))
    cols_count = np.int32(np.ceil(m/rows_count))
    pad = 1
    display_array = -np.ones((pad+rows_count*(height+pad),pad+cols_count*(width+pad)))
    for i in range(rows_count):
        for j in range(cols_count):
            if count >= m: # the grid may have more cells than images; exit the inner loop
                break
            max_val = np.max(np.abs(imgData[count,:]))
            display_array[pad+i*(height+pad):pad+i*(height+pad)+height,pad+j*(width+pad):pad+j*(width+pad)+width] = imgData[count,:].reshape(height,width,order="F")/max_val # order="F" means column-major, as in MATLAB; NumPy defaults to row-major, so it must be given explicitly
            count += 1
        if count >= m: # exit the outer loop as well
            break
    plt.imshow(display_array,cmap='gray') # display as a grayscale image
    plt.axis('off')
    plt.show() | Display 100 images. (Drawing them one at a time would be very slow; instead, arrange the images to draw into a single matrix and display that matrix.)
- initialize a 2-D array
- reshape each row of data into an image matrix and place it into the array
- display the array | display_imageData | python | lawlite19/MachineLearning_Python | PCA/PCA.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/PCA/PCA.py | MIT |
def KMeans():
    '''demonstration of the clustering process on 2-D data'''
    print(u'Demonstrating the clustering process...\n')
    data = spio.loadmat("data.mat")
    X = data['X']
    K = 3 # total number of clusters
    initial_centroids = np.array([[3,3],[6,2],[8,5]]) # initialize the cluster centroids
    max_iters = 10
    runKMeans(X,initial_centroids,max_iters,True) # run the K-Means clustering algorithm
    '''
    image compression
    '''
    print(u'Compressing an image with K-Means\n')
    img_data = misc.imread("bird.png") # read the image pixel data (note: scipy.misc.imread was removed in SciPy >= 1.2; imageio.v2.imread is a modern replacement)
    img_data = img_data/255.0 # map pixel values to [0, 1]
    img_size = img_data.shape
    X = img_data.reshape(img_size[0]*img_size[1],3) # reshape into an N*3 matrix, where N is the total number of pixels
    K = 16
    max_iters = 5
    initial_centroids = kMeansInitCentroids(X,K)
    centroids,idx = runKMeans(X, initial_centroids, max_iters, False)
    print(u'\nK-Means finished\n')
    print(u'\nCompressing the image...\n')
    idx = findClosestCentroids(X, centroids)
    X_recovered = centroids[idx,:]
    X_recovered = X_recovered.reshape(img_size[0],img_size[1],3)
    print(u'Plotting the images...\n')
    plt.subplot(1,2,1)
    plt.imshow(img_data)
    plt.title(u"Original image",fontproperties=font)
    plt.subplot(1,2,2)
    plt.imshow(X_recovered)
    plt.title(u"Compressed image",fontproperties=font)
    plt.show()
    print(u'Done!') | demonstration of the clustering process on 2-D data | KMeans | python | lawlite19/MachineLearning_Python | K-Means/K-Menas.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/K-Means/K-Menas.py | MIT |
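The helper kMeansInitCentroids used above is not shown in this excerpt. A minimal sketch of the standard initialization, picking K distinct data points at random (the repository's implementation may differ):

import numpy as np

def kMeansInitCentroids(X, K):
    """Choose K distinct rows of X at random as the initial centroids."""
    rand_indices = np.random.permutation(X.shape[0])[:K]
    return X[rand_indices, :]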
def findClosestCentroids(X,initial_centroids):
    m = X.shape[0] # number of data points
    K = initial_centroids.shape[0] # total number of clusters
    dis = np.zeros((m,K)) # distance from each point to each of the K centroids
    idx = np.zeros((m,1)) # cluster assignment to return for each data point
    '''compute the distance from each point to each centroid'''
    for i in range(m):
        for j in range(K):
            dis[i,j] = np.dot((X[i,:]-initial_centroids[j,:]).reshape(1,-1),(X[i,:]-initial_centroids[j,:]).reshape(-1,1))
    '''return the column index of the minimum in each row of dis, which is the assigned cluster
    - np.min(dis, axis=1) returns the minimum of each row
    - np.where(dis == np.min(dis, axis=1).reshape(-1,1)) returns the coordinates of those minima
    - note: a row's minimum may occur in several columns and where finds them all, so return only the first m entries (with ties, either cluster is acceptable)
    '''
    dummy,idx = np.where(dis == np.min(dis, axis=1).reshape(-1,1))
    return idx[0:dis.shape[0]] # note: truncate to the first m entries | compute the distance from each point to each centroid | findClosestCentroids | python | lawlite19/MachineLearning_Python | K-Means/K-Menas.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/K-Means/K-Menas.py | MIT |
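A design note: the min/where dance above, including the tie truncation, collapses to a single np.argmin call, which always resolves ties to the first minimum. A vectorized sketch of the same assignment step:

import numpy as np

def findClosestCentroids_argmin(X, centroids):
    # squared distance of every point to every centroid, shape m x K
    diff = X[:, np.newaxis, :] - centroids[np.newaxis, :, :]
    dis = (diff ** 2).sum(axis=2)
    return np.argmin(dis, axis=1)  # ties resolve to the first minimum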
def neuralNetwork(input_layer_size,hidden_layer_size,out_put_layer):
    data_img = loadmat_data("data_digits.mat")
    X = data_img['X']
    y = data_img['y']
    '''scaler = StandardScaler()
    scaler.fit(X)
    X = scaler.transform(X)'''
    m,n = X.shape
    """digits = datasets.load_digits()
    X = digits.data
    y = digits.target
    m,n = X.shape
    scaler = StandardScaler()
    scaler.fit(X)
    X = scaler.transform(X)"""
    ## randomly display a few rows of data
    rand_indices = np.random.randint(0, m, 100) # generate 100 random integers in [0, m)
    display_data(X[rand_indices,:]) # display 100 digits
    #nn_params = np.vstack((Theta1.reshape(-1,1),Theta2.reshape(-1,1)))
    Lambda = 1
    initial_Theta1 = randInitializeWeights(input_layer_size,hidden_layer_size)
    initial_Theta2 = randInitializeWeights(hidden_layer_size,out_put_layer)
    initial_nn_params = np.vstack((initial_Theta1.reshape(-1,1),initial_Theta2.reshape(-1,1))) # unroll the thetas into one vector
    #np.savetxt("testTheta.csv",initial_nn_params,delimiter=",")
    start = time.time()
    result = optimize.fmin_cg(nnCostFunction, initial_nn_params, fprime=nnGradient, args=(input_layer_size,hidden_layer_size,out_put_layer,X,y,Lambda), maxiter=100)
    print(u'Elapsed time:',time.time()-start)
    print(result)
    '''visualize Theta1'''
    length = result.shape[0]
    Theta1 = result[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1)
    Theta2 = result[hidden_layer_size*(input_layer_size+1):length].reshape(out_put_layer,hidden_layer_size+1)
    display_data(Theta1[:,1:]) # drop the bias column (the original slice 1:length is clipped by NumPy to the same thing)
    display_data(Theta2[:,1:])
    '''predict'''
    p = predict(Theta1,Theta2,X)
    print(u"Prediction accuracy: %f%%"%np.mean(np.float64(p == y.reshape(-1,1))*100))
    res = np.hstack((p,y.reshape(-1,1)))
    np.savetxt("predict.csv", res, delimiter=',') | scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X) | neuralNetwork | python | lawlite19/MachineLearning_Python | NeuralNetwok/NeuralNetwork.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/NeuralNetwok/NeuralNetwork.py | MIT |
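The helper randInitializeWeights is not shown in this excerpt. A minimal sketch following the usual symmetry-breaking convention (weights uniform in [-epsilon, epsilon], with an extra column for the bias); the repository's epsilon value is an assumption here:

import numpy as np

def randInitializeWeights(L_in, L_out):
    """Random weights in [-epsilon, epsilon]; shape L_out x (L_in + 1) for the bias column."""
    epsilon = 0.12  # a common default, assumed here
    return np.random.rand(L_out, L_in + 1) * 2 * epsilon - epsilon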
def display_data(imgData):
    count = 0
    '''
    Display 100 digits. (Drawing them one at a time would be very slow; instead,
    arrange the digits to draw into a single matrix and display that matrix.)
    - initialize a 2-D array
    - reshape each row of data into an image matrix and place it into the array
    - display the array
    '''
    m,n = imgData.shape
    width = np.int32(np.round(np.sqrt(n)))
    height = np.int32(n/width)
    rows_count = np.int32(np.floor(np.sqrt(m)))
    cols_count = np.int32(np.ceil(m/rows_count))
    pad = 1
    display_array = -np.ones((pad+rows_count*(height+pad),pad+cols_count*(width+pad)))
    for i in range(rows_count):
        for j in range(cols_count):
            if count >= m: # past the number of rows; exit the inner loop
                break
            display_array[pad+i*(height+pad):pad+i*(height+pad)+height,pad+j*(width+pad):pad+j*(width+pad)+width] = imgData[count,:].reshape(height,width,order="F") # order="F" means column-major, as in MATLAB; NumPy defaults to row-major, so it must be given explicitly
            count += 1
        if count >= m: # past the number of rows; exit the outer loop
            break
    plt.imshow(display_array,cmap='gray') # display as a grayscale image
    plt.axis('off')
    plt.show() | Display 100 digits. (Drawing them one at a time would be very slow; instead, arrange the digits to draw into a single matrix and display that matrix.)
- initialize a 2-D array
- reshape each row of data into an image matrix and place it into the array
- display the array | display_data | python | lawlite19/MachineLearning_Python | NeuralNetwok/NeuralNetwork.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/NeuralNetwok/NeuralNetwork.py | MIT |
def nnCostFunction(nn_params,input_layer_size,hidden_layer_size,num_labels,X,y,Lambda):
    length = nn_params.shape[0] # total length of the unrolled thetas
    # restore Theta1 and Theta2
    Theta1 = nn_params[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1)
    Theta2 = nn_params[hidden_layer_size*(input_layer_size+1):length].reshape(num_labels,hidden_layer_size+1)
    # np.savetxt("Theta1.csv",Theta1,delimiter=',')
    m = X.shape[0]
    class_y = np.zeros((m,num_labels)) # y takes values 0-9 and must be mapped to one-hot 0/1 vectors
    # map y
    for i in range(num_labels):
        class_y[:,i] = np.int32(y==i).reshape(1,-1) # note: reshape(1,-1) is required for the assignment
    '''drop the first column of Theta1 and Theta2, since regularization starts from index 1 (the bias is not regularized)'''
    Theta1_colCount = Theta1.shape[1]
    Theta1_x = Theta1[:,1:Theta1_colCount]
    Theta2_colCount = Theta2.shape[1]
    Theta2_x = Theta2[:,1:Theta2_colCount]
    # regularization term: sum of theta^2
    term = np.dot(np.transpose(np.vstack((Theta1_x.reshape(-1,1),Theta2_x.reshape(-1,1)))),np.vstack((Theta1_x.reshape(-1,1),Theta2_x.reshape(-1,1))))
    '''forward propagation; a column of ones (the bias) must be prepended at each layer'''
    a1 = np.hstack((np.ones((m,1)),X))
    z2 = np.dot(a1,np.transpose(Theta1))
    a2 = sigmoid(z2)
    a2 = np.hstack((np.ones((m,1)),a2))
    z3 = np.dot(a2,np.transpose(Theta2))
    h = sigmoid(z3)
    '''cost'''
    J = -(np.dot(np.transpose(class_y.reshape(-1,1)),np.log(h.reshape(-1,1)))+np.dot(np.transpose(1-class_y.reshape(-1,1)),np.log(1-h.reshape(-1,1)))-Lambda*term/2)/m
    #temp1 = (h.reshape(-1,1)-class_y.reshape(-1,1))
    #temp2 = (temp1**2).sum()
    #J = 1/(2*m)*temp2
    return np.ravel(J) | drop the first column of Theta1 and Theta2, since regularization starts from index 1 (the bias is not regularized) | nnCostFunction | python | lawlite19/MachineLearning_Python | NeuralNetwok/NeuralNetwork.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/NeuralNetwok/NeuralNetwork.py | MIT |
def nnGradient(nn_params,input_layer_size,hidden_layer_size,num_labels,X,y,Lambda):
    length = nn_params.shape[0]
    Theta1 = nn_params[0:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size,input_layer_size+1).copy() # use copy() here; otherwise modifying Theta below would also modify nn_params
    Theta2 = nn_params[hidden_layer_size*(input_layer_size+1):length].reshape(num_labels,hidden_layer_size+1).copy()
    m = X.shape[0]
    class_y = np.zeros((m,num_labels)) # y takes values 0-9 and must be mapped to one-hot 0/1 vectors
    # map y
    for i in range(num_labels):
        class_y[:,i] = np.int32(y==i).reshape(1,-1) # note: reshape(1,-1) is required for the assignment
    '''drop the first column of Theta1 and Theta2, since regularization starts from index 1 (the bias is not regularized)'''
    Theta1_colCount = Theta1.shape[1]
    Theta1_x = Theta1[:,1:Theta1_colCount]
    Theta2_colCount = Theta2.shape[1]
    Theta2_x = Theta2[:,1:Theta2_colCount]
    Theta1_grad = np.zeros((Theta1.shape)) # gradient of the weights from layer 1 to layer 2
    Theta2_grad = np.zeros((Theta2.shape)) # gradient of the weights from layer 2 to layer 3
    '''forward propagation; a column of ones (the bias) must be prepended at each layer'''
    a1 = np.hstack((np.ones((m,1)),X))
    z2 = np.dot(a1,np.transpose(Theta1))
    a2 = sigmoid(z2)
    a2 = np.hstack((np.ones((m,1)),a2))
    z3 = np.dot(a2,np.transpose(Theta2))
    h = sigmoid(z3)
    '''backpropagation; delta is the error term'''
    delta3 = np.zeros((m,num_labels))
    delta2 = np.zeros((m,hidden_layer_size))
    for i in range(m):
        #delta3[i,:] = (h[i,:]-class_y[i,:])*sigmoidGradient(z3[i,:]) # error term for mean squared error
        delta3[i,:] = h[i,:]-class_y[i,:] # error term for cross-entropy
        Theta2_grad = Theta2_grad+np.dot(np.transpose(delta3[i,:].reshape(1,-1)),a2[i,:].reshape(1,-1))
        delta2[i,:] = np.dot(delta3[i,:].reshape(1,-1),Theta2_x)*sigmoidGradient(z2[i,:])
        Theta1_grad = Theta1_grad+np.dot(np.transpose(delta2[i,:].reshape(1,-1)),a1[i,:].reshape(1,-1))
    Theta1[:,0] = 0
    Theta2[:,0] = 0
    '''gradient'''
    grad = (np.vstack((Theta1_grad.reshape(-1,1),Theta2_grad.reshape(-1,1)))+Lambda*np.vstack((Theta1.reshape(-1,1),Theta2.reshape(-1,1))))/m
    return np.ravel(grad) | drop the first column of Theta1 and Theta2, since regularization starts from index 1 (the bias is not regularized) | nnGradient | python | lawlite19/MachineLearning_Python | NeuralNetwok/NeuralNetwork.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/NeuralNetwok/NeuralNetwork.py | MIT |
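Both nnCostFunction and nnGradient rely on sigmoid and sigmoidGradient helpers that are not shown in this excerpt. A minimal sketch of the standard definitions:

import numpy as np

def sigmoid(z):
    """Logistic function, applied element-wise."""
    return 1.0 / (1.0 + np.exp(-z))

def sigmoidGradient(z):
    """Derivative of the sigmoid: g(z) * (1 - g(z))."""
    g = sigmoid(z)
    return g * (1 - g)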
def checkGradient(Lambda = 0):
    '''build a small neural network for verification, since computing gradients numerically is very slow; once the gradients are verified correct, the check is no longer needed'''
    input_layer_size = 3
    hidden_layer_size = 5
    num_labels = 3
    m = 5
    initial_Theta1 = debugInitializeWeights(input_layer_size,hidden_layer_size)
    initial_Theta2 = debugInitializeWeights(hidden_layer_size,num_labels)
    X = debugInitializeWeights(input_layer_size-1,m)
    y = np.transpose(np.mod(np.arange(1,m+1), num_labels)) # initialize y
    y = y.reshape(-1,1)
    nn_params = np.vstack((initial_Theta1.reshape(-1,1),initial_Theta2.reshape(-1,1))) # unroll the thetas
    '''gradient via backpropagation'''
    grad = nnGradient(nn_params, input_layer_size, hidden_layer_size,
                      num_labels, X, y, Lambda)
    '''gradient via the numerical method (finite differences)'''
    num_grad = np.zeros((nn_params.shape[0]))
    step = np.zeros((nn_params.shape[0]))
    e = 1e-4
    for i in range(nn_params.shape[0]):
        step[i] = e
        loss1 = nnCostFunction(nn_params-step.reshape(-1,1), input_layer_size, hidden_layer_size,
                               num_labels, X, y,
                               Lambda)
        loss2 = nnCostFunction(nn_params+step.reshape(-1,1), input_layer_size, hidden_layer_size,
                               num_labels, X, y,
                               Lambda)
        num_grad[i] = (loss2-loss1)/(2*e)
        step[i] = 0
    # show the two columns side by side for comparison
    res = np.hstack((num_grad.reshape(-1,1),grad.reshape(-1,1)))
    print("Gradient check results: the first column is the numerical gradient, the second is from backpropagation:")
    print(res) | build a small neural network for verification, since computing gradients numerically is very slow; once the gradients are verified correct, the check is no longer needed | checkGradient | python | lawlite19/MachineLearning_Python | NeuralNetwok/NeuralNetwork.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/NeuralNetwok/NeuralNetwork.py | MIT |
def predict(Theta1,Theta2,X):
    m = X.shape[0]
    num_labels = Theta2.shape[0]
    #p = np.zeros((m,1))
    '''forward propagation to get the predictions'''
    X = np.hstack((np.ones((m,1)),X))
    h1 = sigmoid(np.dot(X,np.transpose(Theta1)))
    h1 = np.hstack((np.ones((m,1)),h1))
    h2 = sigmoid(np.dot(h1,np.transpose(Theta2)))
    '''
    return the column index of the maximum in each row of h
    - np.max(h, axis=1) returns each row's maximum (the highest class probability)
    - where then finds the column of that maximum (the column index is the predicted digit)
    '''
    #np.savetxt("h2.csv",h2,delimiter=',')
    p = np.array(np.where(h2[0,:] == np.max(h2, axis=1)[0]))
    for i in np.arange(1, m):
        t = np.array(np.where(h2[i,:] == np.max(h2, axis=1)[i]))
        p = np.vstack((p,t))
    return p | forward propagation to get the predictions | predict | python | lawlite19/MachineLearning_Python | NeuralNetwok/NeuralNetwork.py | https://github.com/lawlite19/MachineLearning_Python/blob/master/NeuralNetwok/NeuralNetwork.py | MIT |
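A design note: the per-row max/where loop in predict is equivalent to a single np.argmax call, since the column of the largest activation is the predicted digit. A vectorized sketch, assuming the sigmoid helper shown earlier:

import numpy as np

def predict_argmax(Theta1, Theta2, X):
    m = X.shape[0]
    a1 = np.hstack((np.ones((m, 1)), X))
    a2 = np.hstack((np.ones((m, 1)), sigmoid(np.dot(a1, Theta1.T))))
    h = sigmoid(np.dot(a2, Theta2.T))
    return np.argmax(h, axis=1).reshape(-1, 1)  # index of the highest probability per row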
def compiler_archs(compiler: str):
"""Discovers what platforms the given compiler supports; intended for MacOS use"""
import tempfile
import subprocess
print(f"Compiler: {compiler}")
arch_flags = []
# see also the architectures tested for in .github/workflows/build-and-upload.yml
for arch in ['x86_64', 'arm64', 'arm64e']:
with tempfile.TemporaryDirectory() as tmpdir:
            cpp = Path(tmpdir) / 'test.cxx'
            cpp.write_text('int main() {return 0;}\n')
out = Path(tmpdir) / 'a.out'
p = subprocess.run([compiler, "-arch", arch, str(cpp), "-o", str(out)], capture_output=True)
if p.returncode == 0:
arch_flags += ['-arch', arch]
print(f"Discovered {compiler} arch flags: {arch_flags}")
    return arch_flags | Discovers which architectures the given compiler supports; intended for macOS use | compiler_archs | python | plasma-umass/scalene | setup.py | https://github.com/plasma-umass/scalene/blob/master/setup.py | Apache-2.0 |
def extra_compile_args():
"""Returns extra compiler args for platform."""
if sys.platform == 'win32':
return ['/std:c++14'] # for Visual Studio C++
return ['-std=c++14'] | Returns extra compiler args for platform. | extra_compile_args | python | plasma-umass/scalene | setup.py | https://github.com/plasma-umass/scalene/blob/master/setup.py | Apache-2.0 |
def dll_suffix():
"""Returns the file suffix ("extension") of a DLL"""
if (sys.platform == 'win32'): return '.dll'
if (sys.platform == 'darwin'): return '.dylib'
return '.so' | Returns the file suffix ("extension") of a DLL | dll_suffix | python | plasma-umass/scalene | setup.py | https://github.com/plasma-umass/scalene/blob/master/setup.py | Apache-2.0 |
def read_file(name):
"""Returns a file's contents"""
with open(path.join(path.dirname(__file__), name), encoding="utf-8") as f:
return f.read() | Returns a file's contents | read_file | python | plasma-umass/scalene | setup.py | https://github.com/plasma-umass/scalene/blob/master/setup.py | Apache-2.0 |
def output_profile_line(
self,
json: ScaleneJSON,
fname: Filename,
line_no: LineNumber,
line: SyntaxLine,
console: Console,
tbl: Table,
stats: ScaleneStatistics,
profile_this_code: Callable[[Filename, LineNumber], bool],
force_print: bool = False,
suppress_lineno_print: bool = False,
is_function_summary: bool = False,
profile_memory: bool = False,
reduced_profile: bool = False,
) -> bool:
"""Print at most one line of the profile (true == printed one)."""
obj = json.output_profile_line(
fname=fname,
fname_print=fname,
line_no=line_no,
line=str(line),
stats=stats,
profile_this_code=profile_this_code,
force_print=force_print,
)
if not obj:
return False
if -1 < obj["n_peak_mb"] < 1:
# Don't print out "-0" or anything below 1.
obj["n_peak_mb"] = 0
# Finally, print results.
n_cpu_percent_c_str: str = (
""
if obj["n_cpu_percent_c"] < 1
else f"{obj['n_cpu_percent_c']:5.0f}%"
)
n_gpu_percent_str: str = (
"" if obj["n_gpu_percent"] < 1 else f"{obj['n_gpu_percent']:3.0f}%"
)
n_cpu_percent_python_str: str = (
""
if obj["n_cpu_percent_python"] < 1
else f"{obj['n_cpu_percent_python']:5.0f}%"
)
n_growth_mem_str = ""
if obj["n_peak_mb"] < 1024:
n_growth_mem_str = (
""
if (not obj["n_peak_mb"] and not obj["n_usage_fraction"])
else f"{obj['n_peak_mb']:5.0f}M"
)
else:
n_growth_mem_str = (
""
if (not obj["n_peak_mb"] and not obj["n_usage_fraction"])
else f"{(obj['n_peak_mb'] / 1024):5.2f}G"
)
# Only report utilization where there is more than 1% CPU total usage.
sys_str: str = (
"" if obj["n_sys_percent"] < 1 else f"{obj['n_sys_percent']:4.0f}%"
)
if not is_function_summary:
print_line_no = "" if suppress_lineno_print else str(line_no)
else:
print_line_no = (
""
if fname not in stats.firstline_map
else str(stats.firstline_map[fname])
)
if profile_memory:
spark_str: str = ""
# Scale the sparkline by the usage fraction.
samples = obj["memory_samples"]
# Randomly downsample to ScaleneOutput.max_sparkline_len_line.
if len(samples) > ScaleneOutput.max_sparkline_len_line:
random_samples = sorted(
random.sample(
samples, ScaleneOutput.max_sparkline_len_line
)
)
else:
random_samples = samples
sparkline_samples = [
random_samples[i][1] * obj["n_usage_fraction"]
for i in range(len(random_samples))
]
if random_samples:
_, _, spark_str = sparkline.generate(
sparkline_samples, 0, stats.max_footprint
)
# Red highlight
ncpps: Any = ""
ncpcs: Any = ""
nufs: Any = ""
ngpus: Any = ""
n_usage_fraction_str: str = (
""
if obj["n_usage_fraction"] < 0.01
else f"{(100 * obj['n_usage_fraction']):4.0f}%"
)
if (
obj["n_usage_fraction"] >= self.highlight_percentage
or (
obj["n_cpu_percent_c"]
+ obj["n_cpu_percent_python"]
+ obj["n_gpu_percent"]
)
>= self.highlight_percentage
):
ncpps = Text.assemble(
(n_cpu_percent_python_str, self.highlight_color)
)
ncpcs = Text.assemble(
(n_cpu_percent_c_str, self.highlight_color)
)
nufs = Text.assemble(
(spark_str + n_usage_fraction_str, self.highlight_color)
)
ngpus = Text.assemble(
(n_gpu_percent_str, self.highlight_color)
)
else:
ncpps = n_cpu_percent_python_str
ncpcs = n_cpu_percent_c_str
ngpus = n_gpu_percent_str
nufs = spark_str + n_usage_fraction_str
if reduced_profile and not ncpps + ncpcs + nufs + ngpus:
return False
n_python_fraction_str: str = (
""
if obj["n_python_fraction"] < 0.01
else f"{(obj['n_python_fraction'] * 100):4.0f}%"
)
n_copy_mb_s_str: str = (
""
if obj["n_copy_mb_s"] < 0.5
else f"{obj['n_copy_mb_s']:6.0f}"
)
if self.gpu:
tbl.add_row(
print_line_no,
ncpps, # n_cpu_percent_python_str,
ncpcs, # n_cpu_percent_c_str,
sys_str,
ngpus,
n_python_fraction_str,
n_growth_mem_str,
nufs, # spark_str + n_usage_fraction_str,
n_copy_mb_s_str,
line,
)
else:
tbl.add_row(
print_line_no,
ncpps, # n_cpu_percent_python_str,
ncpcs, # n_cpu_percent_c_str,
sys_str,
n_python_fraction_str,
n_growth_mem_str,
nufs, # spark_str + n_usage_fraction_str,
n_copy_mb_s_str,
line,
)
else:
# Red highlight
if (
obj["n_cpu_percent_c"]
+ obj["n_cpu_percent_python"]
+ obj["n_gpu_percent"]
) >= self.highlight_percentage:
ncpps = Text.assemble(
(n_cpu_percent_python_str, self.highlight_color)
)
ncpcs = Text.assemble(
(n_cpu_percent_c_str, self.highlight_color)
)
ngpus = Text.assemble(
(n_gpu_percent_str, self.highlight_color)
)
else:
ncpps = n_cpu_percent_python_str
ncpcs = n_cpu_percent_c_str
ngpus = n_gpu_percent_str
if reduced_profile and not ncpps + ncpcs + ngpus:
return False
if self.gpu:
tbl.add_row(
print_line_no,
ncpps, # n_cpu_percent_python_str,
ncpcs, # n_cpu_percent_c_str,
sys_str,
ngpus, # n_gpu_percent_str
line,
)
else:
tbl.add_row(
print_line_no,
ncpps, # n_cpu_percent_python_str,
ncpcs, # n_cpu_percent_c_str,
sys_str,
line,
)
return True | Print at most one line of the profile (true == printed one). | output_profile_line | python | plasma-umass/scalene | scalene/scalene_output.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_output.py | Apache-2.0 |
def output_profiles(
self,
column_width: int,
stats: ScaleneStatistics,
pid: int,
profile_this_code: Callable[[Filename, LineNumber], bool],
python_alias_dir: Path,
program_path: Filename,
program_args: Optional[List[str]],
profile_memory: bool = True,
reduced_profile: bool = False,
) -> bool:
"""Write the profile out."""
# Get the children's stats, if any.
json = ScaleneJSON()
json.gpu = self.gpu
if not pid:
stats.merge_stats(python_alias_dir)
# If we've collected any samples, dump them.
if (
not stats.total_cpu_samples
and not stats.total_memory_malloc_samples
and not stats.total_memory_free_samples
):
# Nothing to output.
return False
# Collect all instrumented filenames.
all_instrumented_files: List[Filename] = list(
set(
list(stats.cpu_samples_python.keys())
+ list(stats.cpu_samples_c.keys())
+ list(stats.memory_free_samples.keys())
+ list(stats.memory_malloc_samples.keys())
)
)
if not all_instrumented_files:
# We didn't collect samples in source files.
return False
mem_usage_line: Union[Text, str] = ""
growth_rate = 0.0
if profile_memory:
samples = stats.memory_footprint_samples
if len(samples) > 0:
# Randomly downsample samples
if len(samples) > ScaleneOutput.max_sparkline_len_file:
random_samples = sorted(
random.sample(
samples, ScaleneOutput.max_sparkline_len_file
)
)
else:
random_samples = samples
sparkline_samples = [item[1] for item in random_samples]
# Output a sparkline as a summary of memory usage over time.
_, _, spark_str = sparkline.generate(
sparkline_samples[: ScaleneOutput.max_sparkline_len_file],
0,
stats.max_footprint,
)
# Compute growth rate (slope), between 0 and 1.
if stats.allocation_velocity[1] > 0:
growth_rate = (
100.0
* stats.allocation_velocity[0]
/ stats.allocation_velocity[1]
)
mem_usage_line = Text.assemble(
"Memory usage: ",
((spark_str, self.memory_color)),
(
f" (max: {ScaleneJSON.memory_consumed_str(stats.max_footprint)}, growth rate: {growth_rate:3.0f}%)\n"
),
)
null = tempfile.TemporaryFile(mode="w+")
console = Console(
width=column_width,
record=True,
force_terminal=True,
file=null,
force_jupyter=False,
)
# Build a list of files we will actually report on.
report_files: List[Filename] = []
# Sort in descending order of CPU cycles, and then ascending order by filename
for fname in sorted(
all_instrumented_files,
key=lambda f: (-(stats.cpu_samples[f]), f),
):
fname = Filename(fname)
try:
percent_cpu_time = (
100 * stats.cpu_samples[fname] / stats.total_cpu_samples
)
except ZeroDivisionError:
percent_cpu_time = 0
# Ignore files responsible for less than some percent of execution time and fewer than a threshold # of mallocs.
if (
stats.malloc_samples[fname] < ScaleneJSON.malloc_threshold
and percent_cpu_time < ScaleneJSON.cpu_percent_threshold
):
continue
report_files.append(fname)
# Don't actually output the profile if we are a child process.
# Instead, write info to disk for the main process to collect.
if pid:
stats.output_stats(pid, python_alias_dir)
return True
if not report_files:
return False
for fname in report_files:
# If the file was actually a Jupyter (IPython) cell,
# restore its name, as in "[12]".
fname_print = fname
import re
if result := re.match("_ipython-input-([0-9]+)-.*", fname_print):
fname_print = Filename(f"[{result.group(1)}]")
# Print header.
percent_cpu_time = (
(100 * stats.cpu_samples[fname] / stats.total_cpu_samples)
if stats.total_cpu_samples
else 0
)
new_title = mem_usage_line + (
f"{fname_print}: % of time = {percent_cpu_time:6.2f}% ({ScaleneJSON.time_consumed_str(percent_cpu_time / 100.0 * stats.elapsed_time * 1e3)}) out of {ScaleneJSON.time_consumed_str(stats.elapsed_time * 1e3)}."
)
# Only display total memory usage once.
mem_usage_line = ""
tbl = Table(
box=box.MINIMAL_HEAVY_HEAD,
title=new_title,
collapse_padding=True,
width=column_width - 1,
)
tbl.add_column(
Markdown("Line", style="dim"),
style="dim",
justify="right",
no_wrap=True,
width=4,
)
tbl.add_column(
Markdown("Time " + "\n" + "_Python_", style="blue"),
style="blue",
no_wrap=True,
width=6,
)
tbl.add_column(
Markdown("–––––– \n_native_", style="blue"),
style="blue",
no_wrap=True,
width=6,
)
tbl.add_column(
Markdown("–––––– \n_system_", style="blue"),
style="blue",
no_wrap=True,
width=6,
)
if self.gpu:
tbl.add_column(
Markdown("–––––– \n_GPU_", style=self.gpu_color),
style=self.gpu_color,
no_wrap=True,
width=6,
)
other_columns_width = 0 # Size taken up by all columns BUT code
if profile_memory:
tbl.add_column(
Markdown("Memory \n_Python_", style=self.memory_color),
style=self.memory_color,
no_wrap=True,
width=7,
)
tbl.add_column(
Markdown("–––––– \n_peak_", style=self.memory_color),
style=self.memory_color,
no_wrap=True,
width=6,
)
tbl.add_column(
Markdown(
"––––––––––– \n_timeline_/%", style=self.memory_color
),
style=self.memory_color,
no_wrap=True,
width=15,
)
tbl.add_column(
Markdown("Copy \n_(MB/s)_", style=self.copy_volume_color),
style=self.copy_volume_color,
no_wrap=True,
width=6,
)
other_columns_width = 75 + (6 if self.gpu else 0)
else:
other_columns_width = 37 + (5 if self.gpu else 0)
tbl.add_column(
"\n" + fname_print,
width=column_width - other_columns_width,
no_wrap=True,
)
            # Skip bogus or empty filenames.
if fname == "<BOGUS>":
continue
if not fname:
continue
# Print out the profile for the source, line by line.
full_fname = os.path.normpath(os.path.join(program_path, fname))
try:
with open(full_fname, "r") as source_file:
code_lines = source_file.read()
except (FileNotFoundError, OSError):
continue
# We track whether we should put in ellipsis (for reduced profiles)
# or not.
did_print = True # did we print a profile line last time?
# Generate syntax highlighted version for the whole file,
# which we will consume a line at a time.
# See https://github.com/willmcgugan/rich/discussions/965#discussioncomment-314233
syntax_highlighted = Syntax(
code_lines,
"python",
theme="default" if self.html else "vim",
line_numbers=False,
code_width=None,
)
capture_console = Console(
width=column_width - other_columns_width,
force_terminal=True,
)
formatted_lines = [
SyntaxLine(segments)
for segments in capture_console.render_lines(
syntax_highlighted
)
]
for line_no, line in enumerate(formatted_lines, start=1):
old_did_print = did_print
did_print = self.output_profile_line(
json=json,
fname=fname,
line_no=LineNumber(line_no),
line=line,
console=console,
tbl=tbl,
stats=stats,
profile_this_code=profile_this_code,
profile_memory=profile_memory,
force_print=False,
suppress_lineno_print=False,
is_function_summary=False,
reduced_profile=reduced_profile,
)
if old_did_print and not did_print:
# We are skipping lines, so add an ellipsis.
tbl.add_row("...")
old_did_print = did_print
# Potentially print a function summary.
fn_stats = stats.build_function_stats(fname)
print_fn_summary = False
# Check CPU samples and memory samples.
all_samples = set()
all_samples |= set(fn_stats.cpu_samples_python.keys())
all_samples |= set(fn_stats.cpu_samples_c.keys())
all_samples |= set(fn_stats.memory_malloc_samples.keys())
all_samples |= set(fn_stats.memory_free_samples.keys())
for fn_name in all_samples:
if fn_name == fname:
continue
print_fn_summary = True
break
if print_fn_summary:
try:
tbl.add_row(None, end_section=True)
except TypeError: # rich < 9.4.0 compatibility
tbl.add_row(None)
txt = Text.assemble(
f"function summary for {fname_print}", style="bold italic"
)
if profile_memory:
if self.gpu:
tbl.add_row("", "", "", "", "", "", "", "", "", txt)
else:
tbl.add_row("", "", "", "", "", "", "", "", txt)
elif self.gpu:
tbl.add_row("", "", "", "", "", txt)
else:
tbl.add_row("", "", "", "", txt)
for fn_name in sorted(
fn_stats.cpu_samples_python,
key=lambda k: stats.firstline_map[k],
):
if fn_name == fname:
continue
syntax_highlighted = Syntax(
fn_name,
"python",
theme="default" if self.html else "vim",
line_numbers=False,
code_width=None,
)
# force print, suppress line numbers
self.output_profile_line(
json=json,
fname=fn_name,
line_no=LineNumber(1),
line=syntax_highlighted, # type: ignore
console=console,
tbl=tbl,
stats=fn_stats,
profile_this_code=profile_this_code,
profile_memory=profile_memory,
force_print=True,
suppress_lineno_print=True,
is_function_summary=True,
reduced_profile=reduced_profile,
)
console.print(tbl)
# Compute AVERAGE memory consumption.
avg_mallocs: Dict[LineNumber, float] = defaultdict(float)
for line_no in stats.bytei_map[fname]:
n_malloc_mb = stats.memory_aggregate_footprint[fname][line_no]
if count := stats.memory_malloc_count[fname][line_no]:
avg_mallocs[line_no] = n_malloc_mb / count
else:
# Setting to n_malloc_mb addresses the edge case where this allocation is the last line executed.
avg_mallocs[line_no] = n_malloc_mb
avg_mallocs = OrderedDict(
sorted(avg_mallocs.items(), key=itemgetter(1), reverse=True)
)
# Compute (really, aggregate) PEAK memory consumption.
peak_mallocs: Dict[LineNumber, float] = defaultdict(float)
for line_no in stats.bytei_map[fname]:
peak_mallocs[line_no] = stats.memory_max_footprint[fname][
line_no
]
peak_mallocs = OrderedDict(
sorted(peak_mallocs.items(), key=itemgetter(1), reverse=True)
)
# Print the top N lines by AVERAGE memory consumption, as long
# as they are above some threshold MB in size.
self.output_top_memory(
"Top AVERAGE memory consumption, by line:",
console,
avg_mallocs,
)
# Print the top N lines by PEAK memory consumption, as long
# as they are above some threshold MB in size.
self.output_top_memory(
"Top PEAK memory consumption, by line:", console, peak_mallocs
)
# Only report potential leaks if the allocation velocity (growth rate) is above some threshold.
leaks = ScaleneLeakAnalysis.compute_leaks(
growth_rate, stats, avg_mallocs, fname
)
if len(leaks) > 0:
# Report in descending order by least likelihood
for leak in sorted(leaks, key=itemgetter(1), reverse=True):
output_str = f"Possible memory leak identified at line {str(leak[0])} (estimated likelihood: {(leak[1] * 100):3.0f}%, velocity: {(leak[2] / stats.elapsed_time):3.0f} MB/s)"
console.print(output_str)
if self.html:
# Write HTML file.
md = Markdown(
"generated by the [scalene](https://github.com/plasma-umass/scalene) profiler"
)
console.print(md)
if not self.output_file:
self.output_file = "/dev/stdout"
console.save_html(self.output_file, clear=False)
elif self.output_file:
# Don't output styles to text file.
console.save_text(self.output_file, styles=False, clear=False)
else:
# No output file specified: write to stdout.
sys.stdout.write(console.export_text(styles=True))
return True | Write the profile out. | output_profiles | python | plasma-umass/scalene | scalene/scalene_output.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_output.py | Apache-2.0 |
def replacement_process_join(self, timeout: float = -1) -> None: # type: ignore
"""
A drop-in replacement for multiprocessing.Process.join
that periodically yields to handle signals
"""
# print(multiprocessing.process.active_children())
if minor_version >= 7:
self._check_closed()
assert self._parent_pid == os.getpid(), "can only join a child process"
assert self._popen is not None, "can only join a started process"
tident = threading.get_ident()
if timeout < 0:
interval = sys.getswitchinterval()
else:
interval = min(timeout, sys.getswitchinterval())
start_time = time.perf_counter()
while True:
scalene.set_thread_sleeping(tident)
res = self._popen.wait(interval)
if res is not None:
from multiprocessing.process import _children # type: ignore
scalene.remove_child_pid(self.pid)
_children.discard(self)
return
scalene.reset_thread_sleeping(tident)
        # Note: arguably this should be timeout:
        # interval is the sleep time per tick, but timeout
        # determines whether the call returns.
if timeout != -1:
end_time = time.perf_counter()
if end_time - start_time >= timeout:
from multiprocessing.process import ( # type: ignore
_children,
)
_children.discard(self)
return | A drop-in replacement for multiprocessing.Process.join
that periodically yields to handle signals | replacement_pjoin.replacement_process_join | python | plasma-umass/scalene | scalene/replacement_pjoin.py | https://github.com/plasma-umass/scalene/blob/master/scalene/replacement_pjoin.py | Apache-2.0 |
def replacement_pjoin(scalene: Scalene) -> None:
def replacement_process_join(self, timeout: float = -1) -> None: # type: ignore
"""
A drop-in replacement for multiprocessing.Process.join
that periodically yields to handle signals
"""
# print(multiprocessing.process.active_children())
if minor_version >= 7:
self._check_closed()
assert self._parent_pid == os.getpid(), "can only join a child process"
assert self._popen is not None, "can only join a started process"
tident = threading.get_ident()
if timeout < 0:
interval = sys.getswitchinterval()
else:
interval = min(timeout, sys.getswitchinterval())
start_time = time.perf_counter()
while True:
scalene.set_thread_sleeping(tident)
res = self._popen.wait(interval)
if res is not None:
from multiprocessing.process import _children # type: ignore
scalene.remove_child_pid(self.pid)
_children.discard(self)
return
scalene.reset_thread_sleeping(tident)
            # Note: arguably this should be timeout:
            # interval is the sleep time per tick, but timeout
            # determines whether the call returns.
if timeout != -1:
end_time = time.perf_counter()
if end_time - start_time >= timeout:
from multiprocessing.process import ( # type: ignore
_children,
)
_children.discard(self)
return
multiprocessing.Process.join = replacement_process_join # type: ignore | A drop-in replacement for multiprocessing.Process.join
that periodically yields to handle signals | replacement_pjoin | python | plasma-umass/scalene | scalene/replacement_pjoin.py | https://github.com/plasma-umass/scalene/blob/master/scalene/replacement_pjoin.py | Apache-2.0 |
def is_native(package_name: str) -> bool:
"""
Returns whether a package is native or not.
"""
result = False
try:
package = importlib.import_module(package_name)
if package.__file__:
package_dir = os.path.dirname(package.__file__)
for root, dirs, files in os.walk(package_dir):
for filename in files:
if filename.endswith(".so") or filename.endswith(
".pyd"
):
return True
result = False
except ImportError:
# This module is not installed or something else went wrong; fail gracefully.
result = False
except AttributeError:
# No __file__, meaning it's built-in. Let's call it native.
result = True
except TypeError:
# __file__ is there, but empty (os.path.dirname() returns TypeError). Let's call it native.
result = True
return result | Returns whether a package is native or not. | is_native | python | plasma-umass/scalene | scalene/scalene_analysis.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_analysis.py | Apache-2.0 |
def get_imported_modules(source: str) -> List[str]:
"""
Extracts a list of imported modules from the given source code.
Parameters:
- source (str): The source code to be analyzed.
Returns:
- imported_modules (list[str]): A list of import statements.
"""
# Parse the source code into an abstract syntax tree
source = ScaleneAnalysis.strip_magic_line(source)
tree = ast.parse(source)
imported_modules = []
# Iterate through the nodes in the syntax tree
for node in ast.walk(tree):
# Check if the node represents an import statement
if isinstance(node, (ast.Import, ast.ImportFrom)):
imported_modules.append(ast.unparse(node))
return imported_modules | Extracts a list of imported modules from the given source code.
Parameters:
- source (str): The source code to be analyzed.
Returns:
- imported_modules (list[str]): A list of import statements. | get_imported_modules | python | plasma-umass/scalene | scalene/scalene_analysis.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_analysis.py | Apache-2.0 |
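A quick usage sketch, assuming these are static methods on a ScaleneAnalysis class (as the ScaleneAnalysis.is_native calls elsewhere suggest) and Python >= 3.9 for ast.unparse:

src = "import os\nfrom typing import List\nprint('hi')\n"
print(ScaleneAnalysis.get_imported_modules(src))
# expected: ['import os', 'from typing import List']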
def get_native_imported_modules(source: str) -> List[str]:
"""
Extracts a list of **native** imported modules from the given source code.
Parameters:
- source (str): The source code to be analyzed.
Returns:
- imported_modules (list[str]): A list of import statements.
"""
# Parse the source code into an abstract syntax tree
source = ScaleneAnalysis.strip_magic_line(source)
tree = ast.parse(source)
imported_modules = []
# Add the module name to the list if it's native.
for node in ast.walk(tree):
if isinstance(node, ast.Import):
# Iterate through the imported modules in the statement
for alias in node.names:
if ScaleneAnalysis.is_native(alias.name):
imported_modules.append(ast.unparse(node))
# Check if the node represents an import from statement
elif isinstance(node, ast.ImportFrom):
node.module = cast(str, node.module)
if ScaleneAnalysis.is_native(node.module):
imported_modules.append(ast.unparse(node))
return imported_modules | Extracts a list of **native** imported modules from the given source code.
Parameters:
- source (str): The source code to be analyzed.
Returns:
- imported_modules (list[str]): A list of import statements. | get_native_imported_modules | python | plasma-umass/scalene | scalene/scalene_analysis.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_analysis.py | Apache-2.0 |
def find_regions(src: str) -> Dict[int, Tuple[int, int]]:
"""This function collects the start and end lines of all loops and functions in the AST, and then uses these to determine the narrowest region containing each line in the source code (that is, loops take precedence over functions."""
# Filter out the first line if in a Jupyter notebook and it starts with a magic (% or %%).
src = ScaleneAnalysis.strip_magic_line(src)
srclines = src.split("\n")
tree = ast.parse(src)
regions = {}
loops = {}
functions = {}
classes = {}
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef):
assert node.end_lineno
for line in range(node.lineno, node.end_lineno + 1):
classes[line] = (node.lineno, node.end_lineno)
if isinstance(node, (ast.For, ast.While)):
assert node.end_lineno
for line in range(node.lineno, node.end_lineno + 1):
loops[line] = (node.lineno, node.end_lineno)
if isinstance(node, ast.FunctionDef):
assert node.end_lineno
for line in range(node.lineno, node.end_lineno + 1):
functions[line] = (node.lineno, node.end_lineno)
for lineno, _ in enumerate(srclines, 1):
if lineno in loops:
regions[lineno] = loops[lineno]
elif lineno in functions:
regions[lineno] = functions[lineno]
elif lineno in classes:
regions[lineno] = classes[lineno]
else:
regions[lineno] = (lineno, lineno)
    return regions | This function collects the start and end lines of all loops and functions in the AST, and then uses these to determine the narrowest region containing each line in the source code (that is, loops take precedence over functions). | find_regions | python | plasma-umass/scalene | scalene/scalene_analysis.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_analysis.py | MIT |
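A quick usage sketch of find_regions under the same static-method assumption; note how the loop's region wins over the enclosing function for lines 2-3:

src = "def f():\n    for i in range(3):\n        print(i)\nx = 1"
print(ScaleneAnalysis.find_regions(src))
# expected: {1: (1, 3), 2: (2, 3), 3: (2, 3), 4: (4, 4)}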
def close(self) -> None:
"""Close the map file."""
self._signal_fd.close()
self._lock_fd.close() | Close the map file. | close | python | plasma-umass/scalene | scalene/scalene_mapfile.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_mapfile.py | Apache-2.0 |
def cleanup(self) -> None:
"""Remove all map files."""
try:
os.remove(self._init_filename)
os.remove(self._signal_filename)
except FileNotFoundError:
pass | Remove all map files. | cleanup | python | plasma-umass/scalene | scalene/scalene_mapfile.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_mapfile.py | Apache-2.0 |
def read(self) -> Any:
"""Read a line from the map file."""
if sys.platform == "win32":
return False
if not self._signal_mmap:
return False
return get_line_atomic.get_line_atomic(
self._lock_mmap, self._signal_mmap, self._buf, self._lastpos
) | Read a line from the map file. | read | python | plasma-umass/scalene | scalene/scalene_mapfile.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_mapfile.py | Apache-2.0 |
def get_str(self) -> str:
"""Get the string from the buffer."""
map_str = self._buf.rstrip(b"\x00").split(b"\n")[0].decode("ascii")
return map_str | Get the string from the buffer. | get_str | python | plasma-umass/scalene | scalene/scalene_mapfile.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_mapfile.py | Apache-2.0 |
def replacement_poll_selector(scalene: Scalene) -> None:
"""
A replacement for selectors.PollSelector that
periodically wakes up to accept signals
"""
class ReplacementPollSelector(selectors.PollSelector):
def select(
self, timeout: Optional[float] = -1
) -> List[Tuple[selectors.SelectorKey, int]]:
tident = threading.get_ident()
start_time = time.perf_counter()
if not timeout or timeout < 0:
interval = sys.getswitchinterval()
else:
interval = min(timeout, sys.getswitchinterval())
while True:
scalene.set_thread_sleeping(tident)
selected = super().select(interval)
scalene.reset_thread_sleeping(tident)
if selected or timeout == 0:
return selected
end_time = time.perf_counter()
if timeout and timeout != -1:
if end_time - start_time >= timeout:
return [] # None
ReplacementPollSelector.__qualname__ = (
"replacement_poll_selector.ReplacementPollSelector"
)
selectors.PollSelector = ReplacementPollSelector # type: ignore | A replacement for selectors.PollSelector that
periodically wakes up to accept signals | replacement_poll_selector | python | plasma-umass/scalene | scalene/replacement_poll_selector.py | https://github.com/plasma-umass/scalene/blob/master/scalene/replacement_poll_selector.py | Apache-2.0 |
def is_port_available(port: int) -> bool:
"""
Check if a given TCP port is available to start a server on the local machine.
:param port: Port number as an integer.
:return: True if the port is available, False otherwise.
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.bind(("localhost", port))
return True
except socket.error:
return False | Check if a given TCP port is available to start a server on the local machine.
:param port: Port number as an integer.
:return: True if the port is available, False otherwise. | is_port_available | python | plasma-umass/scalene | scalene/launchbrowser.py | https://github.com/plasma-umass/scalene/blob/master/scalene/launchbrowser.py | Apache-2.0 |
def generate_html(profile_fname: Filename, output_fname: Filename) -> None:
"""Apply a template to generate a single HTML payload containing the current profile."""
try:
# Load the profile
profile_file = pathlib.Path(profile_fname)
profile = profile_file.read_text()
except FileNotFoundError:
assert profile_fname == "demo"
profile = "{}"
# return
# Load the GUI JavaScript file.
scalene_dir = os.path.dirname(__file__)
file_contents = {
"scalene_gui_js_text": read_file_content(
scalene_dir, "scalene-gui", "scalene-gui-bundle.js"
),
"prism_css_text": read_file_content(
scalene_dir, "scalene-gui", "prism.css"
),
}
# Put the profile and everything else into the template.
environment = Environment(
loader=FileSystemLoader(os.path.join(scalene_dir, "scalene-gui"))
)
template = environment.get_template("index.html.template")
try:
import scalene_config
except ModuleNotFoundError:
import scalene.scalene_config as scalene_config
rendered_content = template.render(
profile=profile,
gui_js=file_contents["scalene_gui_js_text"],
prism_css=file_contents["prism_css_text"],
scalene_version=scalene_config.scalene_version,
scalene_date=scalene_config.scalene_date,
)
# Write the rendered content to the specified output file.
try:
with open(output_fname, "w", encoding="utf-8") as f:
f.write(rendered_content)
except OSError:
pass | Apply a template to generate a single HTML payload containing the current profile. | generate_html | python | plasma-umass/scalene | scalene/launchbrowser.py | https://github.com/plasma-umass/scalene/blob/master/scalene/launchbrowser.py | Apache-2.0 |
def replacement_exit(scalene: Scalene) -> None:
"""
Shims out the unconditional exit with
the "neat exit" (which raises the SystemExit error and
allows Scalene to exit neatly)
"""
# Note: MyPy doesn't like this, but it works because passing an int
# to sys.exit does the right thing
os._exit = sys.exit # type: ignore | Shims out the unconditional exit with
the "neat exit" (which raises the SystemExit error and
allows Scalene to exit neatly) | replacement_exit | python | plasma-umass/scalene | scalene/replacement_exit.py | https://github.com/plasma-umass/scalene/blob/master/scalene/replacement_exit.py | Apache-2.0 |
def setup_preload(args: argparse.Namespace) -> bool:
"""
Ensures that Scalene runs with libscalene preloaded, if necessary,
as well as any other required environment variables.
Returns true iff we had to run another process.
"""
# First, check that we are on a supported platform.
# (x86-64 and ARM only for now.)
if args.memory and (
platform.machine() not in ["x86_64", "AMD64", "arm64", "aarch64"]
or struct.calcsize("P") != 8
):
args.memory = False
print(
"Scalene warning: currently only 64-bit x86-64 and ARM platforms are supported for memory and copy profiling."
)
with contextlib.suppress(Exception):
from IPython import get_ipython
if get_ipython():
sys.exit = Scalene.clean_exit # type: ignore
sys._exit = Scalene.clean_exit # type: ignore
# Start a subprocess with the required environment variables,
# which may include preloading libscalene
req_env = ScalenePreload.get_preload_environ(args)
if any(k_v not in os.environ.items() for k_v in req_env.items()):
os.environ.update(req_env)
new_args = [
sys.executable,
"-m",
"scalene",
] + sys.argv[1:]
result = subprocess.Popen(new_args, close_fds=True, shell=False)
with contextlib.suppress(Exception):
# If running in the background, print the PID.
if os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()):
# In the background.
print(f"Scalene now profiling process {result.pid}")
print(
f" to disable profiling: python3 -m scalene.profile --off --pid {result.pid}"
)
print(
f" to resume profiling: python3 -m scalene.profile --on --pid {result.pid}"
)
try:
result.wait()
except subprocess.TimeoutExpired:
print("Scalene failure. Please try again.")
return False
except KeyboardInterrupt:
result.returncode = 0
if result.returncode < 0:
print(
"Scalene error: received signal",
signal.Signals(-result.returncode).name,
)
sys.exit(result.returncode)
return True
return False | Ensures that Scalene runs with libscalene preloaded, if necessary,
as well as any other required environment variables.
Returns true iff we had to run another process. | setup_preload | python | plasma-umass/scalene | scalene/scalene_preload.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_preload.py | Apache-2.0 |
def add_stack(
frame: FrameType,
should_trace: Callable[[Filename, str], bool],
stacks: Dict[Any, Any],
python_time: float,
c_time: float,
cpu_samples: float,
) -> None:
"""Add one to the stack starting from this frame."""
stk: List[Tuple[str, str, int]] = list()
f: Optional[FrameType] = frame
while f:
if should_trace(Filename(f.f_code.co_filename), f.f_code.co_name):
stk.insert(0, (f.f_code.co_filename, get_fully_qualified_name(f), f.f_lineno))
f = f.f_back
if tuple(stk) not in stacks:
stacks[tuple(stk)] = (1, python_time, c_time, cpu_samples)
else:
(prev_count, prev_python_time, prev_c_time, prev_cpu_samples) = stacks[
tuple(stk)
]
stacks[tuple(stk)] = (
prev_count + 1,
prev_python_time + python_time,
prev_c_time + c_time,
prev_cpu_samples + cpu_samples,
) | Add one to the stack starting from this frame. | add_stack | python | plasma-umass/scalene | scalene/scalene_utility.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_utility.py | Apache-2.0 |
def on_stack(
frame: FrameType, fname: Filename, lineno: LineNumber
) -> Optional[FrameType]:
"""Find a frame matching the given filename and line number, if any.
Used for checking whether we are still executing the same line
of code or not in invalidate_lines (for per-line memory
accounting).
"""
f = frame
current_file_and_line = (fname, lineno)
while f:
if (f.f_code.co_filename, f.f_lineno) == current_file_and_line:
return f
f = cast(FrameType, f.f_back)
return None | Find a frame matching the given filename and line number, if any.
Used for checking whether we are still executing the same line
of code or not in invalidate_lines (for per-line memory
accounting). | on_stack | python | plasma-umass/scalene | scalene/scalene_utility.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_utility.py | Apache-2.0 |
def flamegraph_format(stacks: Dict[Tuple[Any], Any]) -> str:
"""Converts stacks to a string suitable for input to Brendan Gregg's flamegraph.pl script."""
output = ""
for stk in stacks.keys():
for item in stk:
(fname, fn_name, lineno) = item
output += f"{fname} {fn_name}:{lineno};"
output += " " + str(stacks[stk][0])
output += "\n"
return output | Converts stacks to a string suitable for input to Brendan Gregg's flamegraph.pl script. | flamegraph_format | python | plasma-umass/scalene | scalene/scalene_utility.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_utility.py | Apache-2.0 |
def generate_html(profile_fname: Filename, output_fname: Filename) -> None:
"""Apply a template to generate a single HTML payload containing the current profile."""
def read_file_content(
directory: str, subdirectory: str, filename: str
) -> str:
file_path = os.path.join(directory, subdirectory, filename)
file_content = ""
try:
file_content = pathlib.Path(file_path).read_text(encoding="utf-8")
except UnicodeDecodeError as e:
raise UnicodeDecodeError(
f"Failed to decode file {file_path}. Ensure the file is UTF-8 encoded."
) from e
return file_content
try:
# Load the profile
profile_file = pathlib.Path(profile_fname)
profile = ""
try:
profile = profile_file.read_text(encoding="utf-8")
except UnicodeDecodeError as e:
raise UnicodeDecodeError(
f"Failed to decode file {profile_file}. Ensure the file is UTF-8 encoded."
) from e
except FileNotFoundError:
assert profile_fname == "demo"
profile = ""
# return
# Load the GUI JavaScript file.
scalene_dir = os.path.dirname(__file__)
file_contents = {
"scalene_gui_js_text": read_file_content(
scalene_dir, "scalene-gui", "scalene-gui-bundle.js"
),
"prism_css_text": read_file_content(
scalene_dir, "scalene-gui", "prism.css"
),
}
# Put the profile and everything else into the template.
environment = Environment(
loader=FileSystemLoader(os.path.join(scalene_dir, "scalene-gui"))
)
template = environment.get_template("index.html.template")
rendered_content = template.render(
profile=profile,
gui_js=file_contents["scalene_gui_js_text"],
prism_css=file_contents["prism_css_text"],
scalene_version=scalene_version,
scalene_date=scalene_date,
)
# Write the rendered content to the specified output file.
try:
with open(output_fname, "w", encoding="utf-8") as f:
f.write(rendered_content)
except OSError:
pass | Apply a template to generate a single HTML payload containing the current profile. | generate_html | python | plasma-umass/scalene | scalene/scalene_utility.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_utility.py | Apache-2.0 |
def clear(self) -> None:
"""Reset for new samples"""
self._n = 0
self._m1 = self._m2 = self._m3 = self._m4 = 0.0
self._peak = 0.0 | Reset for new samples | clear | python | plasma-umass/scalene | scalene/runningstats.py | https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py | Apache-2.0 |
def push(self, x: float) -> None:
"""Add a sample"""
if x > self._peak:
self._peak = x
n1 = self._n
self._n += 1
delta = x - self._m1
delta_n = delta / self._n
delta_n2 = delta_n * delta_n
term1 = delta * delta_n * n1
self._m1 += delta_n
self._m4 += (
term1 * delta_n2 * (self._n * self._n - 3 * self._n + 3)
+ 6 * delta_n2 * self._m2
- 4 * delta_n * self._m3
)
self._m3 += term1 * delta_n * (self._n - 2) - 3 * delta_n * self._m2
self._m2 += term1 | Add a sample | push | python | plasma-umass/scalene | scalene/runningstats.py | https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py | Apache-2.0 |
def peak(self) -> float:
"""The maximum sample seen."""
return self._peak | The maximum sample seen. | peak | python | plasma-umass/scalene | scalene/runningstats.py | https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py | Apache-2.0 |
def size(self) -> int:
"""The number of samples"""
return self._n | The number of samples | size | python | plasma-umass/scalene | scalene/runningstats.py | https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py | Apache-2.0 |
def mean(self) -> float:
"""Arithmetic mean, a.k.a. average"""
return self._m1 | Arithmetic mean, a.k.a. average | mean | python | plasma-umass/scalene | scalene/runningstats.py | https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py | Apache-2.0 |
def var(self) -> float:
"""Variance"""
return self._m2 / (self._n - 1.0) | Variance | var | python | plasma-umass/scalene | scalene/runningstats.py | https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py | Apache-2.0 |
def std(self) -> float:
"""Standard deviation"""
return math.sqrt(self.var()) | Standard deviation | std | python | plasma-umass/scalene | scalene/runningstats.py | https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py | Apache-2.0 |
def sem(self) -> float:
"""Standard error of the mean"""
return self.std() / math.sqrt(self._n) | Standard error of the mean | sem | python | plasma-umass/scalene | scalene/runningstats.py | https://github.com/plasma-umass/scalene/blob/master/scalene/runningstats.py | Apache-2.0 |
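A quick usage sketch of this running-statistics class, assuming the enclosing class is named RunningStats (the push method above is the standard single-pass online-moments update):

rs = RunningStats()
for x in [1.0, 2.0, 3.0, 4.0]:
    rs.push(x)
print(rs.mean(), rs.var(), rs.std(), rs.sem(), rs.peak())
# expected: 2.5 1.666... 1.290... 0.645... 4.0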
def _find_apple_gpu_service() -> io_registry_entry_t:
"""
Grabs the first service matching "IOAccelerator" (integrated GPU).
Returns None if not found.
"""
matching = IOServiceMatching(b"IOAccelerator")
if not matching:
return None
service_obj = IOServiceGetMatchingService(kIOMasterPortDefault, matching)
# service_obj is automatically retained if found.
# No need to release 'matching' (it is CFTypeRef, but handled by the system).
return service_obj | Grabs the first service matching "IOAccelerator" (integrated GPU).
Returns None if not found. | _find_apple_gpu_service | python | plasma-umass/scalene | scalene/scalene_apple_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_apple_gpu.py | Apache-2.0 |
def _read_gpu_core_count(service_obj: io_registry_entry_t) -> int:
"""
Reads the top-level "gpu-core-count" from the service.
(Only needed once, as it shouldn't change.)
"""
if not service_obj:
return 0
cf_core_count = IORegistryEntryCreateCFProperty(service_obj, cf_str_gpu_core_count, None, 0)
if not cf_core_count or (CFGetTypeID(cf_core_count) != CFNumberGetTypeID()):
if cf_core_count:
IOObjectRelease(cf_core_count)
return 0
val_container_64 = ctypes.c_longlong(0)
success = CFNumberGetValue(cf_core_count, kCFNumberSInt64Type, ctypes.byref(val_container_64))
IOObjectRelease(cf_core_count)
return val_container_64.value if success else 0 | Reads the top-level "gpu-core-count" from the service.
(Only needed once, as it shouldn't change.) | _read_gpu_core_count | python | plasma-umass/scalene | scalene/scalene_apple_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_apple_gpu.py | Apache-2.0 |
def _read_perf_stats(service_obj: io_registry_entry_t) -> Tuple[float, float]:
"""
Returns (utilization [0..1], in_use_mem_MB).
Reads the "PerformanceStatistics" sub-dict via IORegistryEntryCreateCFProperty.
"""
if not service_obj:
return (0.0, 0.0)
# Grab the PerformanceStatistics dictionary
perf_dict_ref = IORegistryEntryCreateCFProperty(service_obj, cf_str_perf_stats, None, 0)
if not perf_dict_ref or (CFGetTypeID(perf_dict_ref) != CFDictionaryGetTypeID()):
if perf_dict_ref:
IOObjectRelease(perf_dict_ref)
return (0.0, 0.0)
# Device Utilization
device_util = 0.0
util_val_ref = CFDictionaryGetValue(perf_dict_ref, cf_str_device_util)
if util_val_ref and (CFGetTypeID(util_val_ref) == CFNumberGetTypeID()):
val64 = ctypes.c_longlong(0)
if CFNumberGetValue(util_val_ref, kCFNumberSInt64Type, ctypes.byref(val64)):
device_util = val64.value / 100.0
# In-use memory
in_use_mem = 0.0
mem_val_ref = CFDictionaryGetValue(perf_dict_ref, cf_str_inuse_mem)
if mem_val_ref and (CFGetTypeID(mem_val_ref) == CFNumberGetTypeID()):
val64 = ctypes.c_longlong(0)
if CFNumberGetValue(mem_val_ref, kCFNumberSInt64Type, ctypes.byref(val64)):
in_use_mem = float(val64.value) / 1048576.0 # convert bytes -> MB
IOObjectRelease(perf_dict_ref)
return (device_util, in_use_mem) | Returns (utilization [0..1], in_use_mem_MB).
Reads the "PerformanceStatistics" sub-dict via IORegistryEntryCreateCFProperty. | _read_perf_stats | python | plasma-umass/scalene | scalene/scalene_apple_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_apple_gpu.py | Apache-2.0 |
def has_gpu(self) -> bool:
"""Return True if we found an Apple integrated GPU service."""
return bool(self._service_obj) | Return True if we found an Apple integrated GPU service. | has_gpu | python | plasma-umass/scalene | scalene/scalene_apple_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_apple_gpu.py | Apache-2.0 |
def reinit(self) -> None:
"""No-op for compatibility with other GPU wrappers."""
pass | No-op for compatibility with other GPU wrappers. | reinit | python | plasma-umass/scalene | scalene/scalene_apple_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_apple_gpu.py | Apache-2.0 |
def get_stats(self) -> Tuple[float, float]:
"""Return (util%, memory_in_use_MB)."""
if not self.has_gpu():
return (0.0, 0.0)
try:
util, mem = _read_perf_stats(self._service_obj)
return (util, mem)
except Exception:
return (0.0, 0.0) | Return (util%, memory_in_use_MB). | get_stats | python | plasma-umass/scalene | scalene/scalene_apple_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_apple_gpu.py | Apache-2.0 |
def __del__(self):
"""Release the service object if it exists."""
if self._service_obj:
IOObjectRelease(self._service_obj)
self._service_obj = None | Release the service object if it exists. | __del__ | python | plasma-umass/scalene | scalene/scalene_apple_gpu.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_apple_gpu.py | Apache-2.0 |
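A usage sketch for the wrapper whose methods appear above. The class name ScaleneAppleGPU is an assumption inferred from the file path (scalene/scalene_apple_gpu.py), and this only reports anything useful on macOS with an Apple integrated GPU:
from scalene.scalene_apple_gpu import ScaleneAppleGPU  # assumed class name

gpu = ScaleneAppleGPU()
if gpu.has_gpu():
    util, mem_mb = gpu.get_stats()  # (utilization in [0..1], in-use memory in MB)
    print(f"GPU utilization: {util:.0%}, in-use memory: {mem_mb:.1f} MB")
else:
    print("no Apple integrated GPU service found")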
def find_browser(browserClass: Optional[str] = None) -> Optional[str]:
"""Find the default system browser, excluding text browsers.
If you want a specific browser, pass its class as an argument."""
text_browsers = [
"browsh",
"elinks",
"links",
"lynx",
"w3m",
]
try:
# Get the default browser object
browser = webbrowser.get(browserClass)
browser_name = (
browser.name if browser.name else browser.__class__.__name__
)
return browser_name if browser_name not in text_browsers else None
except AttributeError:
# https://github.com/plasma-umass/scalene/issues/790
# https://github.com/python/cpython/issues/105545
# MacOSXOSAScript._name was deprecated but for pre-Python 3.11,
# we need to refer to it as such to prevent this error:
# 'MacOSXOSAScript' object has no attribute 'name'
browser = webbrowser.get(browserClass)
return browser._name if browser._name not in text_browsers else None # type: ignore[attr-defined]
except webbrowser.Error:
# Return None if there is an error in getting the browser
return None | Find the default system browser, excluding text browsers.
If you want a specific browser, pass its class as an argument. | find_browser | python | plasma-umass/scalene | scalene/find_browser.py | https://github.com/plasma-umass/scalene/blob/master/scalene/find_browser.py | Apache-2.0 |
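A small usage sketch (hypothetical caller code) showing the intended decision: auto-open the profile only when a graphical browser is available.
from scalene.find_browser import find_browser

name = find_browser()  # or e.g. find_browser("firefox") for a specific browser class
if name is not None:
    print(f"default browser: {name}; will open the profile automatically")
else:
    print("no graphical browser found; just writing profile.html")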
def scalene(self, line: str, cell: str = "") -> None:
"""%%scalene magic: see https://github.com/plasma-umass/scalene for usage info."""
if line:
sys.argv = ["scalene", "--ipython", *line.split()]
(args, _left) = ScaleneParseArgs.parse_args()
# print(f"{args=}, {_left=}")
else:
args = ScaleneArguments()
# print(f"{args=}")
if args and cell:
# Preface with a "\n" to drop the first line (%%scalene).
self.run_code(args, "\n" + cell) # type: ignore | %%scalene magic: see https://github.com/plasma-umass/scalene for usage info. | scalene | python | plasma-umass/scalene | scalene/scalene_magics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_magics.py | Apache-2.0 |
def scrun(self, line: str = "") -> None:
"""%scrun magic: see https://github.com/plasma-umass/scalene for usage info."""
if line:
sys.argv = ["scalene", "--ipython", *line.split()]
(args, left) = ScaleneParseArgs.parse_args()
if args:
self.run_code(args, " ".join(left)) # type: ignore | %scrun magic: see https://github.com/plasma-umass/scalene for usage info. | scrun | python | plasma-umass/scalene | scalene/scalene_magics.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_magics.py | Apache-2.0 |
def find_available_port(start_port: int, end_port: int) -> Optional[int]:
"""
Finds an available port within a given range.
Parameters:
- start_port (int): the starting port number to search from
- end_port (int): the ending port number to search up to (inclusive)
Returns:
- int: the first available port number found in the given range, or None if no ports are available
"""
for port in range(start_port, end_port + 1):
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("", port))
return port
except OSError:
continue
return None | Finds an available port within a given range.
Parameters:
- start_port (int): the starting port number to search from
- end_port (int): the ending port number to search up to (inclusive)
Returns:
- int: the first available port number found in the given range, or None if no ports are available | find_available_port | python | plasma-umass/scalene | scalene/scalene_jupyter.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_jupyter.py | Apache-2.0 |
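A usage sketch; treating find_available_port as directly callable is an assumption (in Scalene it may live on a class), and note that the probe-then-bind pattern is inherently racy — the port can be taken between the two steps.
import socket

port = find_available_port(8181, 8200)  # the helper defined above
if port is None:
    raise RuntimeError("no free port in range 8181-8200")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
    server.bind(("", port))  # racy: another process could grab the port first
    server.listen(1)
    print(f"listening on port {port}")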
def log_message(self, format: str, *args: Any) -> None:
"""overriding log_message to disable all messages from webserver"""
pass | overriding log_message to disable all messages from webserver | display_profile.log_message | python | plasma-umass/scalene | scalene/scalene_jupyter.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_jupyter.py | Apache-2.0 |
def display_profile(port: int, profile_fname: str) -> None:
# Display the profile in a cell. Starts a webserver to host the iframe holding the profile.html file,
# which lets JavaScript run (can't do this with `display`, which strips out JavaScript), and then
# tears down the server.
from IPython.core.display import display
from IPython.display import IFrame
class RequestHandler(BaseHTTPRequestHandler):
def _send_response(self, content: str) -> None:
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes(content, "utf8"))
def log_message(self, format: str, *args: Any) -> None:
"""overriding log_message to disable all messages from webserver"""
pass
def do_GET(self) -> None:
if self.path == "/":
try:
with open(profile_fname) as f:
content = f.read()
self._send_response(content)
except FileNotFoundError:
print("Scalene error: profile file not found.")
elif self.path == "/shutdown":
self.server.should_shutdown = True # type: ignore
self.send_response(204)
# self._send_response("Server is shutting down...")
else:
self.send_response(404)
class MyHTTPServer(HTTPServer):
"""Redefine to check `should_shutdown` flag."""
def serve_forever(self, poll_interval: float = 0.5) -> None:
self.should_shutdown = False
while not self.should_shutdown:
# Poll interval currently disabled below to avoid
# interfering with existing functionality.
# time.sleep(poll_interval)
self.handle_request()
class local_server:
def run_server(self) -> None:
try:
server_address = ("", port)
self.httpd = MyHTTPServer(server_address, RequestHandler)
self.httpd.serve_forever()
except BaseException as be:
print("server failure", be)
pass
the_server = local_server()
server_thread = Thread(target=the_server.run_server)
server_thread.start()
# Display the profile and then shutdown the server.
display(
IFrame(src=f"http://localhost:{port}", width="100%", height="400")
)
Thread(target=lambda: server_thread.join()).start()
# Wait 2 seconds to ensure that the page is rendered, then kill the cell.
import time
time.sleep(2)
import sys
sys.exit() | Display the profile in a cell: start a local webserver to host the profile HTML in an IFrame (so its JavaScript can run), then tear the server down. | display_profile | python | plasma-umass/scalene | scalene/scalene_jupyter.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_jupyter.py | Apache-2.0
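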
def redirect_python(
preface: str, cmdline: str, python_alias_dir: pathlib.Path
) -> str:
"""
Redirects Python calls to a different command with a preface and cmdline.
Args:
preface: A string to be prefixed to the Python command.
cmdline: Additional command line arguments to be appended.
python_alias_dir: The directory where the alias scripts will be stored.
"""
base_python_extension = ".exe" if sys.platform == "win32" else ""
all_python_names = [
"python" + base_python_extension,
f"python{sys.version_info.major}{base_python_extension}",
f"python{sys.version_info.major}.{sys.version_info.minor}{base_python_extension}",
]
shebang = "@echo off" if sys.platform == "win32" else "#!/bin/bash"
all_args = "%*" if sys.platform == "win32" else '"$@"'
payload = f"{shebang}\n{preface} {sys.executable} -m scalene {cmdline} {all_args}\n"
for name in all_python_names:
fname = python_alias_dir / name
if sys.platform == "win32":
fname = fname.with_suffix(".bat")
try:
with open(fname, "w") as file:
file.write(payload)
if sys.platform != "win32":
os.chmod(fname, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR)
except IOError as e:
print(f"Error writing to {fname}: {e}")
sys.path.insert(0, str(python_alias_dir))
os.environ["PATH"] = f"{python_alias_dir}{os.pathsep}{os.environ['PATH']}"
orig_sys_executable = sys.executable
# Compute the new sys executable path
sys_executable_path = python_alias_dir / all_python_names[0]
# On Windows, adjust the path to use a .bat file instead of .exe
if sys.platform == "win32" and sys_executable_path.suffix == ".exe":
sys_executable_path = sys_executable_path.with_suffix(".bat")
sys.executable = str(sys_executable_path)
return orig_sys_executable | Redirects Python calls to a different command with a preface and cmdline.
Args:
preface: A string to be prefixed to the Python command.
cmdline: Additional command line arguments to be appended.
python_alias_dir: The directory where the alias scripts will be stored. | redirect_python | python | plasma-umass/scalene | scalene/redirect_python.py | https://github.com/plasma-umass/scalene/blob/master/scalene/redirect_python.py | Apache-2.0 |
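For intuition, here is roughly what one generated alias script looks like after redirect_python("", "--cpu --memory", dir) on a POSIX system — a sketch assembled from the payload logic above (the interpreter path is illustrative; the leading space before it comes from joining the empty preface):
#!/bin/bash
 /usr/local/bin/python3 -m scalene --cpu --memory "$@"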
def _get_module_details(
mod_name: str,
error: Type[Exception] = ImportError,
) -> Tuple[str, ModuleSpec, CodeType]:
"""Copy of `runpy._get_module_details`, but not private."""
if mod_name.startswith("."):
raise error("Relative module names not supported")
pkg_name, _, _ = mod_name.rpartition(".")
if pkg_name:
# Try importing the parent to avoid catching initialization errors
try:
__import__(pkg_name)
except ImportError as e:
# If the parent or higher ancestor package is missing, let the
# error be raised by find_spec() below and then be caught. But do
# not allow other errors to be caught.
if e.name is None or (
e.name != pkg_name and not pkg_name.startswith(e.name + ".")
):
raise
# Warn if the module has already been imported under its normal name
existing = sys.modules.get(mod_name)
if existing is not None and not hasattr(existing, "__path__"):
from warnings import warn
msg = (
"{mod_name!r} found in sys.modules after import of "
"package {pkg_name!r}, but prior to execution of "
"{mod_name!r}; this may result in unpredictable "
"behaviour".format(mod_name=mod_name, pkg_name=pkg_name)
)
warn(RuntimeWarning(msg))
try:
spec = importlib.util.find_spec(mod_name)
except (ImportError, AttributeError, TypeError, ValueError) as ex:
# This hack fixes an impedance mismatch between pkgutil and
# importlib, where the latter raises other errors for cases where
# pkgutil previously raised ImportError
msg = "Error while finding module specification for {!r} ({}: {})"
if mod_name.endswith(".py"):
msg += (
f". Try using '{mod_name[:-3]}' instead of "
f"'{mod_name}' as the module name."
)
raise error(msg.format(mod_name, type(ex).__name__, ex)) from ex
if spec is None:
raise error("No module named %s" % mod_name)
if spec.submodule_search_locations is not None:
if mod_name == "__main__" or mod_name.endswith(".__main__"):
raise error("Cannot use package as __main__ module")
try:
pkg_main_name = mod_name + ".__main__"
return _get_module_details(pkg_main_name, error)
except error as e:
if mod_name not in sys.modules:
raise # No module loaded; being a package is irrelevant
raise error(
("%s; %r is a package and cannot " + "be directly executed")
% (e, mod_name)
)
loader = spec.loader
# use isinstance instead of `is None` to placate mypy
if not isinstance(loader, SourceLoader):
raise error(
"%r is a namespace package and cannot be executed" % mod_name
)
try:
code = loader.get_code(mod_name)
except ImportError as e:
raise error(format(e)) from e
if code is None:
raise error("No code object available for %s" % mod_name)
return mod_name, spec, code | Copy of `runpy._get_module_details`, but not private. | _get_module_details | python | plasma-umass/scalene | scalene/get_module_details.py | https://github.com/plasma-umass/scalene/blob/master/scalene/get_module_details.py | Apache-2.0 |
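A usage sketch; the import path mirrors the file path given for this row (scalene/get_module_details.py) and is an assumption:
from scalene.get_module_details import _get_module_details

# Resolve a runnable module the same way `python -m` would.
mod_name, spec, code = _get_module_details("json.tool")
print(mod_name)          # json.tool
print(spec.origin)       # .../json/tool.py
print(code.co_filename)  # same path as spec.origin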
def thread_join_replacement(
self: threading.Thread, timeout: Optional[float] = None
) -> None:
"""We replace threading.Thread.join with this method which always
periodically yields."""
start_time = time.perf_counter()
interval = sys.getswitchinterval()
while self.is_alive():
scalene.set_thread_sleeping(threading.get_ident())
orig_thread_join(self, interval)
scalene.reset_thread_sleeping(threading.get_ident())
# If a timeout was specified, check to see if it's expired.
if timeout is not None:
end_time = time.perf_counter()
if end_time - start_time >= timeout:
return None
return None | We replace threading.Thread.join with this method which always
periodically yields. | replacement_thread_join.thread_join_replacement | python | plasma-umass/scalene | scalene/replacement_thread_join.py | https://github.com/plasma-umass/scalene/blob/master/scalene/replacement_thread_join.py | Apache-2.0 |
def replacement_thread_join(scalene: Scalene) -> None:
orig_thread_join = threading.Thread.join
def thread_join_replacement(
self: threading.Thread, timeout: Optional[float] = None
) -> None:
"""We replace threading.Thread.join with this method which always
periodically yields."""
start_time = time.perf_counter()
interval = sys.getswitchinterval()
while self.is_alive():
scalene.set_thread_sleeping(threading.get_ident())
orig_thread_join(self, interval)
scalene.reset_thread_sleeping(threading.get_ident())
# If a timeout was specified, check to see if it's expired.
if timeout is not None:
end_time = time.perf_counter()
if end_time - start_time >= timeout:
return None
return None
threading.Thread.join = thread_join_replacement # type: ignore | We replace threading.Thread.join with this method which always
periodically yields. | replacement_thread_join | python | plasma-umass/scalene | scalene/replacement_thread_join.py | https://github.com/plasma-umass/scalene/blob/master/scalene/replacement_thread_join.py | Apache-2.0 |
def set_timer_signals(self, use_virtual_time: bool = True) -> None:
"""
Set up timer signals for CPU profiling.
use_virtual_time: bool, default True
If True, sets virtual timer signals, otherwise sets real timer signals.
"""
if sys.platform == "win32":
self.cpu_timer_signal = signal.SIGBREAK # Note: on Windows, this is unused, so any signal will do
self.cpu_signal = signal.SIGBREAK
return
if use_virtual_time:
self.cpu_timer_signal = signal.ITIMER_VIRTUAL
self.cpu_signal = signal.SIGVTALRM
else:
self.cpu_timer_signal = signal.ITIMER_REAL
self.cpu_signal = signal.SIGALRM | Set up timer signals for CPU profiling.
use_virtual_time: bool, default True
If True, sets virtual timer signals, otherwise sets real timer signals. | set_timer_signals | python | plasma-umass/scalene | scalene/scalene_signals.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_signals.py | Apache-2.0 |
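For context, this is how a virtual timer/signal pair like the one selected above gets armed on POSIX — a generic sketch of the sampling-clock pattern, not Scalene's actual wiring:
import signal

def on_tick(signum, frame):
    pass  # a sampling profiler would record a stack sample here

signal.signal(signal.SIGVTALRM, on_tick)
# First fire after 10ms of CPU time, then every 10ms of CPU time thereafter.
signal.setitimer(signal.ITIMER_VIRTUAL, 0.01, 0.01)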
def get_timer_signals(self) -> Tuple[int, signal.Signals]:
"""
Returns 2-tuple of the integers representing the CPU timer signal and the CPU signal.
"""
return self.cpu_timer_signal, self.cpu_signal | Returns 2-tuple of the integers representing the CPU timer signal and the CPU signal. | get_timer_signals | python | plasma-umass/scalene | scalene/scalene_signals.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_signals.py | Apache-2.0 |
def get_all_signals(self) -> List[signal.Signals]:
"""
Return all the signals used for controlling profiling, except the CPU timer.
"""
return [
self.start_profiling_signal,
self.stop_profiling_signal,
self.memcpy_signal,
self.malloc_signal,
self.free_signal,
self.cpu_signal,
] | Return all the signals used for controlling profiling, except the CPU timer. | get_all_signals | python | plasma-umass/scalene | scalene/scalene_signals.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_signals.py | Apache-2.0 |
def memory_consumed_str(size_in_mb: float) -> str:
"""Return a string corresponding to amount of memory consumed."""
gigabytes = size_in_mb // 1024
terabytes = gigabytes // 1024
if terabytes > 0:
return f"{(size_in_mb / 1048576):3.3f} TB"
elif gigabytes > 0:
return f"{(size_in_mb / 1024):3.3f} GB"
else:
return f"{size_in_mb:3.3f} MB" | Return a string corresponding to amount of memory consumed. | memory_consumed_str | python | plasma-umass/scalene | scalene/scalene_json.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_json.py | Apache-2.0 |
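A few worked values (hypothetical quick checks) for the thresholds above — the unit switches at 1024 MB and again at 1024 GB (1048576 MB):
assert memory_consumed_str(512.0) == "512.000 MB"
assert memory_consumed_str(1536.0) == "1.500 GB"         # 1536 // 1024 == 1 -> GB branch
assert memory_consumed_str(2 * 1048576.0) == "2.000 TB"  # 2 TB expressed in MB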
def output_profile_line(
self,
*,
fname: Filename,
fname_print: Filename,
line_no: LineNumber,
line: str,
stats: ScaleneStatistics,
profile_this_code: Callable[[Filename, LineNumber], bool],
profile_memory: bool = False,
force_print: bool = False,
) -> Dict[str, Any]:
"""Print at most one line of the profile (true == printed one)."""
if not force_print and not profile_this_code(fname, line_no):
return {
"lineno": line_no,
"line": line,
"n_core_utilization": 0,
"n_cpu_percent_c": 0,
"n_cpu_percent_python": 0,
"n_sys_percent": 0,
"n_gpu_percent": 0,
"n_gpu_avg_memory_mb": 0,
"n_gpu_peak_memory_mb": 0,
"n_peak_mb": 0,
"n_growth_mb": 0,
"n_avg_mb": 0,
"n_mallocs": 0,
"n_malloc_mb": 0,
"n_usage_fraction": 0,
"n_python_fraction": 0,
"n_copy_mb_s": 0,
"memory_samples": [],
}
# Prepare output values.
n_cpu_samples_c = stats.cpu_samples_c[fname][line_no]
# Correct for negative CPU sample counts. This can happen
# because of floating point inaccuracies, since we perform
# subtraction to compute it.
n_cpu_samples_c = max(0, n_cpu_samples_c)
n_cpu_samples_python = stats.cpu_samples_python[fname][line_no]
n_gpu_samples = stats.gpu_samples[fname][line_no]
n_gpu_mem_samples = stats.gpu_mem_samples[fname][line_no]
# Compute percentages of CPU time.
if stats.total_cpu_samples:
n_cpu_percent_c = n_cpu_samples_c * 100 / stats.total_cpu_samples
n_cpu_percent_python = (
n_cpu_samples_python * 100 / stats.total_cpu_samples
)
else:
n_cpu_percent_c = 0
n_cpu_percent_python = 0
if True:
if stats.n_gpu_samples[fname][line_no]:
n_gpu_percent = (
n_gpu_samples * 100 / stats.n_gpu_samples[fname][line_no]
) # total_gpu_samples
else:
n_gpu_percent = 0
# Now, memory stats.
# Total volume of memory allocated.
n_malloc_mb = stats.memory_malloc_samples[fname][line_no]
# Number of distinct allocation calls (those from the same line are counted as 1).
n_mallocs = stats.memory_malloc_count[fname][line_no]
# Total volume of memory allocated by Python (not native code).
n_python_malloc_mb = stats.memory_python_samples[fname][line_no]
n_usage_fraction = (
0
if not stats.total_memory_malloc_samples
else n_malloc_mb / stats.total_memory_malloc_samples
)
n_python_fraction = (
0 if not n_malloc_mb else n_python_malloc_mb / n_malloc_mb
)
# Average memory consumed by this line.
n_avg_mb = (
stats.memory_aggregate_footprint[fname][line_no]
if n_mallocs == 0
else stats.memory_aggregate_footprint[fname][line_no] / n_mallocs
)
# Peak memory consumed by this line.
n_peak_mb = stats.memory_max_footprint[fname][line_no]
# Force the reporting of average to be no more than peak.
# In principle, this should never happen, but...
# assert n_avg_mb <= n_peak_mb
if n_avg_mb > n_peak_mb:
n_avg_mb = n_peak_mb
n_cpu_percent = n_cpu_percent_c + n_cpu_percent_python
# Adjust CPU time by utilization.
mean_cpu_util = stats.cpu_utilization[fname][line_no].mean()
mean_core_util = stats.core_utilization[fname][line_no].mean()
n_sys_percent = n_cpu_percent * (1.0 - mean_cpu_util)
n_cpu_percent_python *= mean_cpu_util
n_cpu_percent_c *= mean_cpu_util
del mean_cpu_util
n_copy_b = stats.memcpy_samples[fname][line_no]
if stats.elapsed_time:
n_copy_mb_s = n_copy_b / (1024 * 1024 * stats.elapsed_time)
else:
n_copy_mb_s = 0
stats.per_line_footprint_samples[fname][line_no] = (
self.compress_samples(
stats.per_line_footprint_samples[fname][line_no],
stats.max_footprint,
)
)
payload = {
"line": line,
"lineno": line_no,
"memory_samples": stats.per_line_footprint_samples[fname][line_no],
"n_avg_mb": n_avg_mb,
"n_copy_mb_s": n_copy_mb_s,
"n_core_utilization": mean_core_util,
"n_cpu_percent_c": n_cpu_percent_c,
"n_cpu_percent_python": n_cpu_percent_python,
"n_gpu_avg_memory_mb": n_gpu_mem_samples.mean(),
"n_gpu_peak_memory_mb": n_gpu_mem_samples.peak(),
"n_gpu_percent": n_gpu_percent,
"n_growth_mb": n_peak_mb, # For backwards compatibility
"n_peak_mb": n_peak_mb,
"n_malloc_mb": n_malloc_mb,
"n_mallocs": n_mallocs,
"n_python_fraction": n_python_fraction,
"n_sys_percent": n_sys_percent,
"n_usage_fraction": n_usage_fraction,
}
try:
FunctionDetail(**payload)
except ValidationError as e:
print("Warning: JSON failed validation:")
print(e)
return payload | Print at most one line of the profile (true == printed one). | output_profile_line | python | plasma-umass/scalene | scalene/scalene_json.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_json.py | Apache-2.0 |
def output_profiles(
self,
program: Filename,
stats: ScaleneStatistics,
pid: int,
profile_this_code: Callable[[Filename, LineNumber], bool],
python_alias_dir: Path,
program_path: Filename,
entrypoint_dir: Filename,
program_args: Optional[List[str]],
profile_memory: bool = True,
reduced_profile: bool = False,
) -> Dict[str, Any]:
"""Write the profile out."""
# Get the children's stats, if any.
if not pid:
stats.merge_stats(python_alias_dir)
# If we've collected any samples, dump them.
if (
not stats.total_cpu_samples
and not stats.total_memory_malloc_samples
and not stats.total_memory_free_samples
and not stats.total_gpu_samples
):
# Nothing to output.
return {}
# Collect all instrumented filenames.
all_instrumented_files: List[Filename] = list(
set(
list(stats.cpu_samples_python.keys())
+ list(stats.cpu_samples_c.keys())
+ list(stats.memory_free_samples.keys())
+ list(stats.memory_malloc_samples.keys())
+ list(stats.gpu_samples.keys())
)
)
if not all_instrumented_files:
# We didn't collect samples in source files.
return {}
growth_rate = 0.0
if profile_memory:
stats.memory_footprint_samples = self.compress_samples(
stats.memory_footprint_samples, stats.max_footprint
)
# Compute growth rate (slope), between 0 and 1.
if stats.allocation_velocity[1] > 0:
growth_rate = (
100.0
* stats.allocation_velocity[0]
/ stats.allocation_velocity[1]
)
else:
stats.memory_footprint_samples = []
# Adjust the program name if it was a Jupyter cell.
result = re.match(r"_ipython-input-([0-9]+)-.*", program)
if result:
program = Filename("[" + result.group(1) + "]")
# Process the stacks to normalize by total number of CPU samples.
for stk in stats.stacks.keys():
(count, python_time, c_time, cpu_samples) = stats.stacks[stk]
stats.stacks[stk] = (
count,
python_time / stats.total_cpu_samples,
c_time / stats.total_cpu_samples,
cpu_samples / stats.total_cpu_samples,
)
# Convert stacks into a representation suitable for JSON dumping.
stks = []
for stk in stats.stacks.keys():
this_stk: List[str] = []
this_stk.extend(stk)
stks.append((this_stk, stats.stacks[stk]))
output: Dict[str, Any] = {
"program": program,
"entrypoint_dir": entrypoint_dir,
"args": program_args,
"filename": program_path,
"alloc_samples": stats.alloc_samples,
"elapsed_time_sec": stats.elapsed_time,
"growth_rate": growth_rate,
"max_footprint_mb": stats.max_footprint,
"max_footprint_python_fraction": stats.max_footprint_python_fraction,
"max_footprint_fname": (
stats.max_footprint_loc[0] if stats.max_footprint_loc else None
),
"max_footprint_lineno": (
stats.max_footprint_loc[1] if stats.max_footprint_loc else None
),
"files": {},
"gpu": self.gpu,
"gpu_device": self.gpu_device,
"memory": profile_memory,
"samples": stats.memory_footprint_samples,
"stacks": stks,
}
# Build a list of files we will actually report on.
report_files: List[Filename] = []
# Sort in descending order of CPU cycles, and then ascending order by filename
for fname in sorted(
all_instrumented_files,
key=lambda f: (-(stats.cpu_samples[f]), f),
):
fname = Filename(fname)
try:
percent_cpu_time = (
100
* stats.cpu_samples[fname]
/ stats.elapsed_time
# 100 * stats.cpu_samples[fname] / stats.total_cpu_samples
)
except ZeroDivisionError:
percent_cpu_time = 0
# Ignore files responsible for less than some percent of execution time and fewer than a threshold # of mallocs.
if (
stats.malloc_samples[fname] < self.malloc_threshold
and percent_cpu_time < self.cpu_percent_threshold
):
continue
report_files.append(fname)
# Don't actually output the profile if we are a child process.
# Instead, write info to disk for the main process to collect.
if pid:
stats.output_stats(pid, python_alias_dir)
# Return a value to indicate that the stats were successfully
# output to the proper directory
return {"is_child": True}
if len(report_files) == 0:
return {}
for fname in report_files:
# If the file was actually a Jupyter (IPython) cell,
# restore its name, as in "[12]".
fname_print = fname
result = re.match(r"_ipython-input-([0-9]+)-.*", fname_print)
if result:
fname_print = Filename("[" + result.group(1) + "]")
# Leak analysis
# First, compute AVERAGE memory consumption.
avg_mallocs: Dict[LineNumber, float] = defaultdict(float)
for line_no in stats.bytei_map[fname]:
n_malloc_mb = stats.memory_aggregate_footprint[fname][line_no]
count = stats.memory_malloc_count[fname][line_no]
if count:
avg_mallocs[line_no] = n_malloc_mb / count
else:
# Setting to n_malloc_mb addresses the edge case where this allocation is the last line executed.
avg_mallocs[line_no] = n_malloc_mb
avg_mallocs = OrderedDict(
sorted(avg_mallocs.items(), key=itemgetter(1), reverse=True)
)
# Now only report potential leaks if the allocation
# velocity (growth rate) is above some threshold.
leaks = ScaleneLeakAnalysis.compute_leaks(
growth_rate, stats, avg_mallocs, fname
)
# Sort in descending order by least likelihood
leaks = sorted(leaks, key=itemgetter(1), reverse=True)
reported_leaks = {}
for leak_lineno, leak_likelihood, leak_velocity in leaks:
reported_leaks[str(leak_lineno)] = {
"likelihood": leak_likelihood,
"velocity_mb_s": leak_velocity / stats.elapsed_time,
}
# Print header.
if not stats.total_cpu_samples:
percent_cpu_time = 0
else:
percent_cpu_time = (
100 * stats.cpu_samples[fname] / stats.total_cpu_samples
)
# Print out the profile for the source, line by line.
full_fname = fname
try:
with open(full_fname, "r", encoding="utf-8") as source_file:
code_lines = source_file.readlines()
except (FileNotFoundError, OSError):
continue
# Find all enclosing regions (loops or function defs) for each line of code.
code_str = "".join(code_lines)
enclosing_regions = ScaleneAnalysis.find_regions(code_str)
outer_loop = ScaleneAnalysis.find_outermost_loop(code_str)
imports = ScaleneAnalysis.get_native_imported_modules(code_str)
output["files"][fname_print] = {
"percent_cpu_time": percent_cpu_time,
"lines": [],
"leaks": reported_leaks,
"imports": imports,
}
for lineno, line in enumerate(code_lines, start=1):
# Protect against JS 'injection' in Python comments by replacing some characters with Unicode.
# This gets unescaped in scalene-gui.js.
line = line.replace("&", "\\u0026")
line = line.replace("<", "\\u003c")
line = line.replace(">", "\\u003e")
profile_line = self.output_profile_line(
fname=fname,
fname_print=fname_print,
line_no=LineNumber(lineno),
line=line,
stats=stats,
profile_this_code=profile_this_code,
profile_memory=profile_memory,
force_print=False,
)
if profile_line:
profile_line["start_region_line"] = enclosing_regions[
lineno
][0]
profile_line["end_region_line"] = enclosing_regions[
lineno
][1]
profile_line["start_outermost_loop"] = outer_loop[lineno][
0
]
profile_line["end_outermost_loop"] = outer_loop[lineno][1]
try:
LineDetail(**profile_line)
except ValidationError as e:
print("Warning: JSON failed validation:")
print(e)
# When reduced-profile set, only output if the payload for the line is non-zero.
if reduced_profile:
profile_line_copy = copy.copy(profile_line)
del profile_line_copy["line"]
del profile_line_copy["lineno"]
if not any(profile_line_copy.values()):
continue
output["files"][fname_print]["lines"].append(profile_line)
fn_stats = stats.build_function_stats(fname)
# Check CPU samples and memory samples.
print_fn_summary = False
all_samples = set()
all_samples |= set(fn_stats.cpu_samples_python.keys())
all_samples |= set(fn_stats.cpu_samples_c.keys())
all_samples |= set(fn_stats.memory_malloc_samples.keys())
all_samples |= set(fn_stats.memory_free_samples.keys())
all_samples |= set(fn_stats.gpu_samples.keys())
print_fn_summary = any(fn != fname for fn in all_samples)
output["files"][fname_print]["functions"] = []
if print_fn_summary:
for fn_name in sorted(
all_samples,
key=lambda k: stats.firstline_map[k],
):
if fn_name == fname:
continue
profile_line = self.output_profile_line(
fname=fn_name,
fname_print=fn_name,
# line 1 is where function stats are
# accumulated; see
# ScaleneStatistics.build_function_stats
line_no=LineNumber(1),
line=fn_name, # Set the source line to just the function name.
stats=fn_stats,
profile_this_code=profile_this_code,
profile_memory=profile_memory,
force_print=True,
)
if profile_line:
# Fix the line number to point to the first line of the function.
profile_line["lineno"] = stats.firstline_map[fn_name]
output["files"][fname_print]["functions"].append(
profile_line
)
# Validate the schema
try:
ScaleneJSONSchema(**output)
except ValidationError as e:
print("Warning: JSON failed validation:")
print(e)
return output | Write the profile out. | output_profiles | python | plasma-umass/scalene | scalene/scalene_json.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_json.py | Apache-2.0 |
def replacement_fork(scalene: Scalene) -> None:
"""
Executes Scalene fork() handling.
Works just like os.register_at_fork(), but unlike that also provides the child PID.
"""
orig_fork = os.fork
def fork_replacement() -> int:
scalene.before_fork()
child_pid = orig_fork()
if child_pid == 0:
scalene.after_fork_in_child()
else:
scalene.after_fork_in_parent(child_pid)
return child_pid
os.fork = fork_replacement | Executes Scalene fork() handling.
Works just like os.register_at_fork(), but unlike that also provides the child PID. | replacement_fork | python | plasma-umass/scalene | scalene/replacement_fork.py | https://github.com/plasma-umass/scalene/blob/master/scalene/replacement_fork.py | Apache-2.0 |
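For reference, the stdlib mechanism the docstring contrasts against: os.register_at_fork runs callbacks around fork() but never hands the parent the child PID, which is why the wrapper above patches os.fork itself.
import os

os.register_at_fork(
    before=lambda: print("about to fork"),
    after_in_parent=lambda: print("in parent (child pid not provided here)"),
    after_in_child=lambda: print("in child"),
)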
def _in_wsl() -> bool:
"""Are we in Windows Subsystem for Linux?"""
return "WSL_DISTRO_NAME" in os.environ | Are we in Windows Subsystem for Linux? | _in_wsl | python | plasma-umass/scalene | scalene/sparkline.py | https://github.com/plasma-umass/scalene/blob/master/scalene/sparkline.py | Apache-2.0 |
def _in_windows_terminal() -> bool:
"""Are we in Windows Terminal?
https://aka.ms/windowsterminal
"""
return "WT_PROFILE_ID" in os.environ | Are we in Windows Terminal?
https://aka.ms/windowsterminal | _in_windows_terminal | python | plasma-umass/scalene | scalene/sparkline.py | https://github.com/plasma-umass/scalene/blob/master/scalene/sparkline.py | Apache-2.0 |
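A sketch of how two environment probes like these are typically combined (the exact policy here is an assumption, not necessarily Scalene's rule): plain WSL consoles historically render Unicode block characters poorly, while Windows Terminal handles them fine.
use_unicode_sparklines = (not _in_wsl()) or _in_windows_terminal()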
def clean_exit(code: object = 0) -> NoReturn:
"""Replacement for sys.exit that exits cleanly from within Jupyter notebooks."""
raise StopJupyterExecution | Replacement for sys.exit that exits cleanly from within Jupyter notebooks. | clean_exit | python | plasma-umass/scalene | scalene/scalene_parseargs.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_parseargs.py | Apache-2.0 |
def parse_args() -> Tuple[argparse.Namespace, List[str]]:
# In IPython, intercept exit cleanly (because sys.exit triggers a backtrace).
with contextlib.suppress(BaseException):
from IPython import get_ipython
if get_ipython():
sys.exit = ScaleneParseArgs.clean_exit
sys._exit = ScaleneParseArgs.clean_exit # type: ignore
defaults = ScaleneArguments()
usage = dedent(
rf"""[b]Scalene[/b]: a high-precision CPU and memory profiler, version {scalene_version} ({scalene_date})
[link=https://github.com/plasma-umass/scalene]https://github.com/plasma-umass/scalene[/link]
command-line:
% [b]scalene \[options] your_program.py \[--- --your_program_args] [/b]
or
% [b]python3 -m scalene \[options] your_program.py \[--- --your_program_args] [/b]
in Jupyter, line mode:
[b] %scrun \[options] statement[/b]
in Jupyter, cell mode:
[b] %%scalene \[options]
your code here
[/b]
"""
)
# NOTE: below is only displayed on non-Windows platforms.
epilog = dedent(
"""When running Scalene in the background, you can suspend/resume profiling
for the process ID that Scalene reports. For example:
% python3 -m scalene [options] yourprogram.py &
Scalene now profiling process 12345
to suspend profiling: python3 -m scalene.profile --off --pid 12345
to resume profiling: python3 -m scalene.profile --on --pid 12345
"""
)
parser = RichArgParser( # argparse.ArgumentParser(
prog="scalene",
description=usage,
epilog=epilog if sys.platform != "win32" else "",
formatter_class=argparse.RawTextHelpFormatter,
allow_abbrev=False,
)
parser.add_argument(
"--version",
dest="version",
action="store_const",
const=True,
help="prints the version number for this release of Scalene and exits",
)
parser.add_argument(
"--column-width",
dest="column_width",
type=int,
default=defaults.column_width,
help=f"Column width for profile output (default: [blue]{defaults.column_width}[/blue])",
)
parser.add_argument(
"--outfile",
type=str,
default=defaults.outfile,
help="file to hold profiler output (default: [blue]"
+ ("stdout" if not defaults.outfile else defaults.outfile)
+ "[/blue])",
)
parser.add_argument(
"--html",
dest="html",
action="store_const",
const=True,
default=defaults.html,
help="output as HTML (default: [blue]"
+ str("html" if defaults.html else "web")
+ "[/blue])",
)
parser.add_argument(
"--json",
dest="json",
action="store_const",
const=True,
default=defaults.json,
help="output as JSON (default: [blue]"
+ str("json" if defaults.json else "web")
+ "[/blue])",
)
parser.add_argument(
"--cli",
dest="cli",
action="store_const",
const=True,
default=defaults.cli,
help="forces use of the command-line",
)
parser.add_argument(
"--stacks",
dest="stacks",
action="store_const",
const=True,
default=defaults.stacks,
help="collect stack traces",
)
parser.add_argument(
"--web",
dest="web",
action="store_const",
const=True,
default=defaults.web,
help="opens a web tab to view the profile (saved as 'profile.html')",
)
parser.add_argument(
"--no-browser",
dest="no_browser",
action="store_const",
const=True,
default=defaults.no_browser,
help="doesn't open a web tab; just saves the profile ('profile.html')",
)
parser.add_argument(
"--viewer",
dest="viewer",
action="store_const",
const=True,
default=False,
help="opens the Scalene web UI.",
)
parser.add_argument(
"--reduced-profile",
dest="reduced_profile",
action="store_const",
const=True,
default=defaults.reduced_profile,
help=f"generate a reduced profile, with non-zero lines only (default: [blue]{defaults.reduced_profile}[/blue])",
)
parser.add_argument(
"--profile-interval",
type=float,
default=defaults.profile_interval,
help=f"output profiles every so many seconds (default: [blue]{defaults.profile_interval}[/blue])",
)
parser.add_argument(
"--cpu",
dest="cpu",
action="store_const",
const=True,
default=None,
help="profile CPU time (default: [blue] True [/blue])",
)
parser.add_argument(
"--cpu-only",
dest="cpu",
action="store_const",
const=True,
default=None,
help="profile CPU time ([red]deprecated: use --cpu [/red])",
)
parser.add_argument(
"--gpu",
dest="gpu",
action="store_const",
const=True,
default=None,
help="profile GPU time and memory (default: [blue]"
+ (str(defaults.gpu))
+ " [/blue])",
)
if sys.platform == "win32":
memory_profile_message = (
"profile memory (not supported on this platform)"
)
else:
memory_profile_message = (
"profile memory (default: [blue]"
+ (str(defaults.memory))
+ " [/blue])"
)
parser.add_argument(
"--memory",
dest="memory",
action="store_const",
const=True,
default=None,
help=memory_profile_message,
)
parser.add_argument(
"--profile-all",
dest="profile_all",
action="store_const",
const=True,
default=defaults.profile_all,
help="profile all executed code, not just the target program (default: [blue]"
+ (
"all code"
if defaults.profile_all
else "only the target program"
)
+ "[/blue])",
)
parser.add_argument(
"--profile-only",
dest="profile_only",
type=str,
default=defaults.profile_only,
help="profile only code in filenames that contain the given strings, separated by commas (default: [blue]"
+ (
"no restrictions"
if not defaults.profile_only
else defaults.profile_only
)
+ "[/blue])",
)
parser.add_argument(
"--profile-exclude",
dest="profile_exclude",
type=str,
default=defaults.profile_exclude,
help="do not profile code in filenames that contain the given strings, separated by commas (default: [blue]"
+ (
"no restrictions"
if not defaults.profile_exclude
else defaults.profile_exclude
)
+ "[/blue])",
)
parser.add_argument(
"--use-virtual-time",
dest="use_virtual_time",
action="store_const",
const=True,
default=defaults.use_virtual_time,
help=f"measure only CPU time, not time spent in I/O or blocking (default: [blue]{defaults.use_virtual_time}[/blue])",
)
parser.add_argument(
"--cpu-percent-threshold",
dest="cpu_percent_threshold",
type=float,
default=defaults.cpu_percent_threshold,
help=f"only report profiles with at least this percent of CPU time (default: [blue]{defaults.cpu_percent_threshold}%%[/blue])",
)
parser.add_argument(
"--cpu-sampling-rate",
dest="cpu_sampling_rate",
type=float,
default=defaults.cpu_sampling_rate,
help=f"CPU sampling rate (default: every [blue]{defaults.cpu_sampling_rate}s[/blue])",
)
parser.add_argument(
"--allocation-sampling-window",
dest="allocation_sampling_window",
type=int,
default=defaults.allocation_sampling_window,
help=f"Allocation sampling window size, in bytes (default: [blue]{defaults.allocation_sampling_window} bytes[/blue])",
)
parser.add_argument(
"--malloc-threshold",
dest="malloc_threshold",
type=int,
default=defaults.malloc_threshold,
help=f"only report profiles with at least this many allocations (default: [blue]{defaults.malloc_threshold}[/blue])",
)
parser.add_argument(
"--program-path",
dest="program_path",
type=str,
default="",
help="The directory containing the code to profile (default: [blue]the path to the profiled program[/blue])",
)
parser.add_argument(
"--memory-leak-detector",
dest="memory_leak_detector",
action="store_true",
default=defaults.memory_leak_detector,
help="EXPERIMENTAL: report likely memory leaks (default: [blue]"
+ (str(defaults.memory_leak_detector))
+ "[/blue])",
)
parser.add_argument(
"--ipython",
dest="ipython",
action="store_const",
const=True,
default=False,
help=argparse.SUPPRESS,
)
if sys.platform != "win32":
# Turning profiling on and off from another process is currently not supported on Windows.
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"--on",
action="store_true",
help="start with profiling on (default)",
)
group.add_argument(
"--off", action="store_true", help="start with profiling off"
)
# the PID of the profiling process (for internal use only)
parser.add_argument(
"--pid", type=int, default=0, help=argparse.SUPPRESS
)
# collect all arguments after "---", which Scalene will ignore
parser.add_argument(
"---",
dest="unused_args",
default=[],
help=argparse.SUPPRESS,
nargs=argparse.REMAINDER,
)
# Parse out all Scalene arguments.
# https://stackoverflow.com/questions/35733262/is-there-any-way-to-instruct-argparse-python-2-7-to-remove-found-arguments-fro
args, left = parser.parse_known_args()
# Validate file/directory arguments
if args.outfile and os.path.isdir(args.outfile):
parser.error(f"outfile {args.outfile} is a directory")
# Hack to simplify functionality for Windows platforms.
if sys.platform == "win32":
args.on = True
args.pid = 0
left += args.unused_args
import re
# Launch the UI if `--viewer` was selected.
if args.viewer:
if find_browser():
assert not args.no_browser
dir = os.path.dirname(__file__)
import scalene.scalene_config
import subprocess
subprocess.Popen(
[
sys.executable,
f"{dir}{os.sep}launchbrowser.py",
"demo",
str(scalene.scalene_config.SCALENE_PORT),
],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
sys.exit(0)
pass
else:
print(
"Scalene: could not open a browser."
) # {scalene_gui_url}.")
sys.exit(0)
# If any of the individual profiling metrics were specified,
# disable the unspecified ones (set as None).
if args.cpu or args.gpu or args.memory:
if not args.memory:
args.memory = False
if not args.gpu:
args.gpu = False
else:
# Nothing specified; use defaults.
args.cpu = defaults.cpu
args.gpu = defaults.gpu
args.memory = defaults.memory
args.cpu = True # Always true
in_jupyter_notebook = len(sys.argv) >= 1 and re.match(
r"_ipython-input-([0-9]+)-.*", sys.argv[0]
)
# If the user did not enter any commands (just `scalene` or `python3 -m scalene`),
# print the usage information and bail.
if not in_jupyter_notebook and (len(sys.argv) + len(left) == 1):
parser.print_help(sys.stderr)
sys.exit(-1)
if args.version:
print(f"Scalene version {scalene_version} ({scalene_date})")
if not args.ipython:
sys.exit(-1)
# Clear out the namespace. We do this to indicate that we should not run further in IPython.
for arg in list(args.__dict__):
delattr(args, arg)
# was:
# args = (
# []
# ) # We use this to indicate that we should not run further in IPython.
return args, left | When running Scalene in the background, you can suspend/resume profiling
for the process ID that Scalene reports. For example:
% python3 -m scalene [options] yourprogram.py &
Scalene now profiling process 12345
to suspend profiling: python3 -m scalene.profile --off --pid 12345
to resume profiling: python3 -m scalene.profile --on --pid 12345 | parse_args | python | plasma-umass/scalene | scalene/scalene_parseargs.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_parseargs.py | Apache-2.0
def put(self, item: Optional[T]) -> None:
"""Add an item to the queue."""
self.queue.put(item) | Add an item to the queue. | put | python | plasma-umass/scalene | scalene/scalene_sigqueue.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_sigqueue.py | Apache-2.0 |
def get(self) -> Optional[T]:
"""Get one item from the queue."""
return self.queue.get() | Get one item from the queue. | get | python | plasma-umass/scalene | scalene/scalene_sigqueue.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_sigqueue.py | Apache-2.0 |
def start(self) -> None:
"""Start processing."""
# We use a daemon thread to defensively avoid hanging if we never join with it
if not self.thread:
self.thread = threading.Thread(target=self.run, daemon=True)
self.thread.start() | Start processing. | start | python | plasma-umass/scalene | scalene/scalene_sigqueue.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_sigqueue.py | Apache-2.0 |
def stop(self) -> None:
"""Stop processing."""
if self.thread:
self.queue.put(None)
# We need to join all threads before a fork() to avoid an inconsistent
# state, locked mutexes, etc.
self.thread.join()
self.thread = None | Stop processing. | stop | python | plasma-umass/scalene | scalene/scalene_sigqueue.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_sigqueue.py | Apache-2.0 |
def run(self) -> None:
"""Run the function processing items until stop is called.
Executed in a separate thread."""
while True:
item = self.queue.get()
if item is None: # None => stop request
break
with self.lock:
self.process(*item) | Run the function processing items until stop is called.
Executed in a separate thread. | run | python | plasma-umass/scalene | scalene/scalene_sigqueue.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_sigqueue.py | Apache-2.0 |
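A usage sketch for the queue whose methods appear above. The class name ScaleneSigQueue and a constructor taking the per-item processing function are assumptions based on the file path:
from scalene.scalene_sigqueue import ScaleneSigQueue  # assumed import

seen = []
q = ScaleneSigQueue(lambda signum, timestamp: seen.append((signum, timestamp)))
q.start()
q.put((10, 0.25))   # each item is the argument tuple for the process function
q.put((10, 0.50))
q.stop()            # enqueues the None sentinel and joins the worker thread
print(seen)         # [(10, 0.25), (10, 0.5)]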
def scalene_redirect_profile(func: Any) -> Any:
"""Handle @profile decorators.
If Scalene encounters any functions decorated by @profile, it will
only report stats for those functions.
"""
return Scalene.profile(func) | Handle @profile decorators.
If Scalene encounters any functions decorated by @profile, it will
only report stats for those functions. | scalene_redirect_profile | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def start() -> None:
"""Start profiling."""
Scalene.start() | Start profiling. | start | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def stop() -> None:
"""Stop profiling."""
Scalene.stop() | Stop profiling. | stop | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def enable_profiling() -> Generator[None, None, None]:
"""Contextmanager that starts and stops profiling"""
start()
yield
stop() | Contextmanager that starts and stops profiling | enable_profiling | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
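Typical programmatic use of the three entry points above: launch with profiling off (scalene --off yourprogram.py) and bracket just the region of interest, either explicitly or with the context manager (assuming enable_profiling is exported from the same module):
from scalene import scalene_profiler

def hot_loop() -> None:  # illustrative workload
    sum(i * i for i in range(1_000_000))

scalene_profiler.start()
hot_loop()
scalene_profiler.stop()

# Or equivalently:
with scalene_profiler.enable_profiling():
    hot_loop()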
def signal_blocking_wrapper(func: Union[BuiltinFunctionType, FunctionType]) -> Any:
"""Wrap a function to block the specified signal during its execution."""
@functools.wraps(func)
def wrapped(*args: Any, **kwargs: Any) -> Any:
# Block the specified signal temporarily
original_sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, [signal_to_block])
try:
return func(*args, **kwargs)
finally:
# Restore original signal mask
signal.pthread_sigmask(signal.SIG_SETMASK, original_sigmask)
return wrapped | Wrap a function to block the specified signal during its execution. | patch_module_functions_with_signal_blocking.signal_blocking_wrapper | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def patch_module_functions_with_signal_blocking(module: ModuleType, signal_to_block: signal.Signals) -> None:
"""Patch all functions in the given module to block the specified signal during execution."""
def signal_blocking_wrapper(func: Union[BuiltinFunctionType, FunctionType]) -> Any:
"""Wrap a function to block the specified signal during its execution."""
@functools.wraps(func)
def wrapped(*args: Any, **kwargs: Any) -> Any:
# Block the specified signal temporarily
original_sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, [signal_to_block])
try:
return func(*args, **kwargs)
finally:
# Restore original signal mask
signal.pthread_sigmask(signal.SIG_SETMASK, original_sigmask)
return wrapped
# Iterate through all attributes of the module
for attr_name in dir(module):
attr = getattr(module, attr_name)
if isinstance(attr, BuiltinFunctionType) or isinstance(attr, FunctionType):
wrapped_attr = signal_blocking_wrapper(attr)
setattr(module, attr_name, wrapped_attr) | Patch all functions in the given module to block the specified signal during execution. | patch_module_functions_with_signal_blocking | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
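A hypothetical call (POSIX-only, since the wrapper relies on signal.pthread_sigmask; the import path is an assumption): defer SIGALRM across every function in the time module.
import signal
import time
from scalene.scalene_profiler import patch_module_functions_with_signal_blocking  # assumed

patch_module_functions_with_signal_blocking(time, signal.SIGALRM)
time.sleep(0.1)  # runs with SIGALRM blocked; the original mask is restored on return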
def last_profiled_tuple() -> Tuple[Filename, LineNumber, ByteCodeIndex]:
"""Helper function to type last profiled information."""
return cast(
Tuple[Filename, LineNumber, ByteCodeIndex], Scalene.__last_profiled
) | Helper function to type last profiled information. | last_profiled_tuple | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def get_original_lock() -> threading.Lock:
"""Return the true lock, which we shim in replacement_lock.py."""
return Scalene.__original_lock() | Return the true lock, which we shim in replacement_lock.py. | get_original_lock | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def get_all_signals_set() -> Set[int]:
"""Return the set of all signals currently set.
Used by replacement_signal_fns.py to shim signals used by the client program.
"""
return set(Scalene.__signals.get_all_signals()) | Return the set of all signals currently set.
Used by replacement_signal_fns.py to shim signals used by the client program. | get_all_signals_set | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def get_timer_signals() -> Tuple[int, signal.Signals]:
"""Return the set of all TIMER signals currently set.
Used by replacement_signal_fns.py to shim timers used by the client program.
"""
return Scalene.__signals.get_timer_signals() | Return the set of all TIMER signals currently set.
Used by replacement_signal_fns.py to shim timers used by the client program. | get_timer_signals | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def set_in_jupyter() -> None:
"""Tell Scalene that it is running inside a Jupyter notebook."""
Scalene.__in_jupyter = True | Tell Scalene that it is running inside a Jupyter notebook. | set_in_jupyter | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def in_jupyter() -> bool:
"""Return whether Scalene is running inside a Jupyter notebook."""
return Scalene.__in_jupyter | Return whether Scalene is running inside a Jupyter notebook. | in_jupyter | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def interruption_handler(
signum: Union[
Callable[[signal.Signals, FrameType], None],
int,
signal.Handlers,
None,
],
this_frame: Optional[FrameType],
) -> None:
"""Handle keyboard interrupts (e.g., Ctrl-C)."""
raise KeyboardInterrupt | Handle keyboard interrupts (e.g., Ctrl-C). | interruption_handler | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def update_line() -> None:
"""Mark a new line by allocating the trigger number of bytes."""
bytearray(scalene.scalene_config.NEWLINE_TRIGGER_LENGTH) | Mark a new line by allocating the trigger number of bytes. | update_line | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def clear_metrics(cls) -> None:
"""Clear the various states for forked processes."""
cls.__stats.clear()
cls.child_pids.clear() | Clear the various states for forked processes. | clear_metrics | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def add_child_pid(cls, pid: int) -> None:
"""Add this pid to the set of children. Used when forking."""
cls.child_pids.add(pid) | Add this pid to the set of children. Used when forking. | add_child_pid | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def remove_child_pid(cls, pid: int) -> None:
"""Remove a child once we have joined with it (used by replacement_pjoin.py)."""
with contextlib.suppress(KeyError):
cls.child_pids.remove(pid) | Remove a child once we have joined with it (used by replacement_pjoin.py). | remove_child_pid | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
def profile(func: Any) -> Any:
"""Record the file and function name.
Replacement @profile decorator function. Scalene tracks which
functions - in which files - have been decorated; if any have,
it then only reports stats for those.
"""
Scalene.__files_to_profile.add(func.__code__.co_filename)
Scalene.__functions_to_profile[func.__code__.co_filename].add(func)
if Scalene.__args.memory:
Scalene.register_files_to_profile()
return func | Record the file and function name.
Replacement @profile decorator function. Scalene tracks which
functions - in which files - have been decorated; if any have,
it then only reports stats for those. | profile | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0
def shim(func: Callable[[Any], Any]) -> Any:
"""Provide a decorator that calls the wrapped function with the
Scalene variant.
Wrapped function must be of type (s: Scalene) -> Any.
This decorator allows for marking a function in a separate
file as a drop-in replacement for an existing library
function. The intention is for these functions to replace a
function that indefinitely blocks (which interferes with
Scalene) with a function that awakens periodically to allow
for signals to be delivered.
"""
func(Scalene)
# Returns the function itself to the calling file for the sake
# of not displaying unusual errors if someone attempts to call
# it
@functools.wraps(func)
def wrapped(*args: Any, **kwargs: Any) -> Any:
return func(*args, **kwargs)
return wrapped | Provide a decorator that calls the wrapped function with the
Scalene variant.
Wrapped function must be of type (s: Scalene) -> Any.
This decorator allows for marking a function in a separate
file as a drop-in replacement for an existing library
function. The intention is for these functions to replace a
function that indefinitely blocks (which interferes with
Scalene) with a function that awakens periodically to allow
for signals to be delivered. | shim | python | plasma-umass/scalene | scalene/scalene_profiler.py | https://github.com/plasma-umass/scalene/blob/master/scalene/scalene_profiler.py | Apache-2.0 |
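A sketch of the pattern the shim decorator enables, modeled on the thread-join replacement earlier in this listing; the sleep example itself is illustrative, not Scalene's actual replacement:
import threading
import time
from scalene.scalene_profiler import Scalene

@Scalene.shim
def replacement_sleep(scalene) -> None:
    orig_sleep = time.sleep

    def sleep_replacement(seconds: float) -> None:
        # Mark the thread as sleeping so Scalene attributes the time correctly.
        scalene.set_thread_sleeping(threading.get_ident())
        orig_sleep(seconds)
        scalene.reset_thread_sleeping(threading.get_ident())

    time.sleep = sleep_replacement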