repo_name (string, 7-90 chars) | path (string, 5-191 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 976-581k chars) | license (string, 15 classes)
---|---|---|---|---|---
deokwooj/DDEA | webgui/pack_cluster.py | 1 | 20586 | #!/usr/bin/python
# To force floating point division
from __future__ import division
"""
Created on Thu Mar 13 16:34:54 2014
Author : Deokwoo Jung
E-mail : [email protected]
"""
import numpy as np
#from numpy.linalg import norm
from sklearn import cluster
from sklearn.cluster import Ward
from sklearn.cluster import KMeans
from scipy.stats import stats
import time
##################################################################
# Custom library
##################################################################
from data_tools import *
from shared_constants import *
from log_util import log
def max_diff_dist_idx(dist_mat, min_dist, max_dist):
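# Overview of the scan below: for every row i of the pairwise distance matrix,
# the distances from node i to all other nodes are split into two groups with a
# 2-cluster Ward fit, and each group's maximum distance serves as its
# "centroid". The row with the largest centroid gap wins: node i becomes the
# exemplar, and the nodes on the near side of its split form max_cluster_idx.
# A node farther than max_dist from every other node is returned immediately as
# a singleton cluster.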
num_nodes = dist_mat.shape[0]
dist_diff = list()
max_diff = -1
max_diff_row = 0
max_diff_label = list()
max_cluster_idx = list()
for i,dist_vals in enumerate(dist_mat):
# exclude its own distance
idx_set=np.r_[np.r_[0:i:1],np.r_[i+1:num_nodes:1]]
#print i,'th row k-mean cluster'
temp=dist_vals[idx_set]
if np.min(temp)>max_dist:
exemplar_idx = i
max_cluster_idx = i
return exemplar_idx, max_cluster_idx
########################################
# K-mean
#_,label,_=cluster.k_means(temp[:,None],2)
# Hierarchical binary clustering
ward = Ward(n_clusters=2).fit(temp[:, None])
label=ward.labels_
#kmean=KMeans(n_clusters=2).fit(temp[:,None])
#label=kmean.labels_
# max is default
centroid=np.zeros(2)
centroid[0]=np.max(temp[label==0])
centroid[1]=np.max(temp[label==1])
#idx0=idx_set[np.nonzero(label==0)]
#idx1=idx_set[np.nonzero(label==1)]
#dist01=np.round([dist_mat[v0,v1] for v0 in idx0 for v1 in idx1],2)
#num_min_dist_violation=len(np.nonzero(dist01<min_dist)[0])
########################################
temp_1=abs(centroid[0]-centroid[1])
cent_diff=centroid[0]-centroid[1]
dist_diff.append(abs(cent_diff))
if max_diff< temp_1:
#if (max_diff< temp_1) and (num_min_dist_violation==0):
max_idx_set=idx_set
max_diff_row=i
max_diff=temp_1
max_diff_label=label
max_cent_diff=cent_diff
cur_cent_idx = set([])
if max_cent_diff>0:
cur_cent_idx=cur_cent_idx| set(np.nonzero(max_diff_label==1)[0])
else:
cur_cent_idx=cur_cent_idx| set(np.nonzero(max_diff_label==0)[0])
max_cluster_idx=list(set(max_idx_set[list(cur_cent_idx)]) |set([max_diff_row]))
exemplar_idx=max_diff_row
return exemplar_idx, max_cluster_idx
def signle_let_cluster_idx(dist_mat, max_dist):
log.info(str(max_dist))
num_nodes=dist_mat.shape[0]
nodes_all_alone = list()
exemplar_idx = list()
max_cluster_idx = list()
for i, dist_vals in enumerate(dist_mat):
# exclude its own distance
idx_set = np.r_[np.r_[0:i:1], np.r_[i+1:num_nodes:1]]
temp = dist_vals[idx_set]
num_nodes_away_more_than_max_dist = len(np.nonzero(temp>max_dist)[0])
#print temp
if num_nodes_away_more_than_max_dist==num_nodes-1:
log.info('-' * 20)
log.info(str(i) +'th node check')
log.info('*** all nodes are away beyond max_dist **')
nodes_all_alone.append(i)
#exemplar_idx.append([i])
exemplar_idx.append(i)
#max_cluster_idx.append([i])
max_cluster_idx.append(i)
return exemplar_idx,max_cluster_idx
def udiag_min(a):
return min([min(a[i,i+1:]) for i in range(a.shape[0]-1)])
def udiag_max(a):
return max([max(a[i,i+1:]) for i in range(a.shape[0]-1)])
def udiag_avg(a):
return sum([sum(a[i,i+1:]) for i in range(a.shape[0]-1)])\
/((a.shape[0]-0)*(a.shape[0]-1)/2)
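# The udiag_* helpers above summarize only the strict upper triangle of a
# symmetric distance matrix (self-distances on the diagonal are excluded);
# udiag_avg divides the sum by the n*(n-1)/2 off-diagonal pairs.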
def max_pack_cluster(DIST_MAT,min_dist=0.3,max_dist=1.0):
# minimum distance for clusters set by max_dist=1.0, min_dist=0.3
# Initialize
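# Greedy packing loop: repeatedly call max_diff_dist_idx to peel off the most
# clearly separated cluster, delete its rows/columns from dist_mat, and assign
# it a new label, until the remaining samples are either all farther apart than
# max_dist (each becomes its own cluster) or all within min_dist (they share a
# single label).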
num_nodes = DIST_MAT.shape[0]
label = np.inf*np.ones(num_nodes)
label_num = 0
remain_index = np.arange(num_nodes)
dist_mat = DIST_MAT.copy()
exemplar_list = list()
while dist_mat.shape[0] > 2:
if udiag_min(dist_mat) > max_dist:
log.info('all samples are separated further than max_dist')
log.info('remaining samples will be individual clusters')
# Assign different labels to all remaining samples
inf_idx=np.nonzero(label == np.inf)[0]
for r in inf_idx:
exemplar_list.append(int(r))
#label[inf_idx]=label_num+np.arange(len(inf_idx))
label[inf_idx] = np.int_(label_num+np.arange(len(inf_idx)))
break
elif udiag_max(dist_mat)<min_dist:
# Assign the same label to all remaining samples
log.info('all samples are separated within min_dist')
log.info('remaining samples will share the same label')
inf_idx=np.nonzero(label==np.inf)[0]
exemplar_list.append(int(inf_idx[0]))
label[inf_idx]=int(label_num)
break
else:
exemplar_idx,max_cluster_idx=max_diff_dist_idx(dist_mat,min_dist,max_dist)
dcluster_idx=remain_index[max_cluster_idx]
exemplar_list.append(np.int_(remain_index[exemplar_idx]))
# Update dist_mat and remain_idx
dist_mat=np.delete(dist_mat, max_cluster_idx, axis=0)
dist_mat=np.delete(dist_mat, max_cluster_idx, axis=1)
remain_index=np.delete(remain_index,max_cluster_idx, axis=0)
# Adding label info
label[dcluster_idx]=label_num;label_num+=1
log.info('dist_mat.max()=' + str(dist_mat.max()))
unassigned_idx=np.nonzero(label==np.inf)[0]
if len(unassigned_idx)>0:
label[unassigned_idx]=label_num+np.arange(len(unassigned_idx))
exemplar_list=exemplar_list+list(unassigned_idx)
#raise NameError('There exist the unassigned: '+str(unassigned_idx))
intra_err_cnt, inter_err_cnt=check_bounded_distance_constraint_condition(DIST_MAT,label,min_dist,max_dist)
return np.int_(exemplar_list),np.int_(label)
def compute_cluster_err(DIST_MAT,m_labels):
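# Cluster-quality summary: intra-cluster min/avg/max distances are weighted by
# each cluster's share of the samples, the inter-cluster distance for a pair of
# clusters is the minimum pairwise distance between their members, and
# validity = (weighted average intra-cluster distance) / (smallest
# inter-cluster distance), so smaller values indicate compact, well-separated
# clusters.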
num_clusters=int(m_labels.max())+1
# Compute Intra-Cluster Distance
c_wgt_set=np.zeros(num_clusters)
c_dist_w_min=np.zeros(num_clusters)
c_dist_w_max=np.zeros(num_clusters)
c_dist_w_avg=np.zeros(num_clusters)
for i in range(num_clusters):
c_idx=np.nonzero(m_labels==i)[0]
c_wgt=c_idx.shape[0]/DIST_MAT.shape[0]
c_wgt_set[i]=c_wgt
if c_idx.shape[0]>1:
# sample weight of the cluster
c_dist_w_min[i]=udiag_min(DIST_MAT[c_idx,:][:,c_idx])
c_dist_w_max[i]=udiag_max(DIST_MAT[c_idx,:][:,c_idx])
c_dist_w_avg[i]=udiag_avg(DIST_MAT[c_idx,:][:,c_idx])
else:
c_dist_w_min[i]=0
c_dist_w_max[i]=0
c_dist_w_avg[i]=0
intra_dist_min=sum(c_dist_w_min*c_wgt_set)
intra_dist_avg=sum(c_dist_w_avg*c_wgt_set)
intra_dist_max=sum(c_dist_w_max*c_wgt_set)
intra_dist_bnd=np.array([intra_dist_min, intra_dist_avg,intra_dist_max])
inter_dist=[]
# Compute Inter-Cluster Distance
if num_clusters>1:
for i in range(num_clusters-1):
for j in range(i+1,num_clusters):
i_idx=np.nonzero(m_labels==i)[0]
j_idx=np.nonzero(m_labels==j)[0]
temp_mat=DIST_MAT[i_idx,:][:,j_idx]
inter_dist.append(temp_mat.min())
inter_dist=np.array(inter_dist)
inter_dist_bnd=np.array([inter_dist.min(), inter_dist.mean(),inter_dist.max()])
validity=intra_dist_avg/inter_dist.min()
else:
validity=0
inter_dist_bnd=0
return validity,intra_dist_bnd,inter_dist_bnd
def show_clusters(exemplars,labels,input_names):
n_labels = labels.max()
for i in range(n_labels + 1):
log.info('Cluster %i: %s' % ((i + 1), ', '.join(input_names[labels == i])))
def plot_label(X_val,X_name,labels,exemplar,label_idx_set):
num_label=len(label_idx_set)
if num_label>15:
fsize=6
elif num_label>10:
fsize=8
elif num_label>5:
fsize=10
else:
fsize=12
for k,label_idx in enumerate(label_idx_set):
fig = plt.figure('Label '+str(label_idx)+' Measurements')
fig.suptitle('Label '+str(label_idx)+' Measurements',fontsize=fsize)
idx=np.nonzero(labels==label_idx)[0]
exemplar_idx=exemplar[label_idx]
num_col=int(np.ceil(np.sqrt(len(idx))))
num_row=num_col
for k,i in enumerate(idx):
ax=plt.subplot(num_col,num_row,k+1)
plt.plot(X_val[:,i])
if exemplar_idx==i:
plt.title('**'+X_name[i]+'**',fontsize=fsize)
else:
plt.title(X_name[i],fontsize=fsize)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(fsize)
if (k<num_col*(num_row-1)):
for tick in ax.xaxis.get_major_ticks():
ax.set_xticklabels( () )
plt.get_current_fig_manager().window.showMaximized()
def check_bounded_distance_constraint_condition(dist_mat,labels,min_dist,max_dist):
intra_err_cnt=0
num_clusters=int(labels.max()+1)
log.info('-' * 80)
log.info('Intra-Cluster distance check.....')
log.info('Condition: intra-cluster distance is upper-bounded by ' + str(round(max_dist,2)))
log.info('-' * 80)
for i in range(num_clusters):
idx_set = np.nonzero(labels==(i))[0]
#print '----------------------------------------------------------'
#print i,'th cluster: ',idx_set
for idx_pair in pair_in_idx(idx_set):
#print idx_pair, 'dist-',round(dist_mat[idx_pair[0],idx_pair[1]],2)
dist_val_=dist_mat[idx_pair[0],idx_pair[1]]
# Rule violation
if dist_val_ > max_dist:
log.info('*** the distance of pairs :' + str(idx_pair) + ' in ' + str(i) + 'th cluster ~' +
str(np.round(dist_val_,2)) + ' > max_dist=' + str(np.round(max_dist,2)) +'***')
intra_err_cnt=intra_err_cnt+1
log.info('-' * 80)
log.info('Inter-Cluster distance check.....')
log.info('Condition: inter-cluster distance is lower-bounded by ' + str(round(min_dist,2)))
log.info('-' * 80)
cluster_pairs=pair_in_idx(range(num_clusters))
inter_err_cnt=0
for c_pair in cluster_pairs:
idx_set_0=np.nonzero(labels==(c_pair[0]))[0]
idx_set_1=np.nonzero(labels==(c_pair[1]))[0]
#print '----------------------------------------------------------'
#print 'The pairwise distance between ',c_pair[0],'th cluster and',c_pair[1],'th cluster'
for idx_pair in pair_in_idx(idx_set_0,idx_set_1):
#print idx_pair, 'dist-',round(dist_mat[idx_pair[0],idx_pair[1]],2)
dist_val_=dist_mat[idx_pair[0],idx_pair[1]]
# Rule violation
if dist_val_<min_dist:
log.info('*** the distance of pairs :' + str(idx_pair[0]) +
' in ' + str(c_pair[0]) + ' and ' + str(idx_pair[1]) +
' in ' + str(c_pair[1]) + ' ~ ' + str(round(dist_val_,2)) +
' < min_dist=' + str(round(min_dist,2)) + '***')
inter_err_cnt += 1
return intra_err_cnt, inter_err_cnt
def cluster_measurement_points(m_matrix, m_name, corr_bnd = [0.1,0.9],alg='aff'):
exemplars_dict = dict()
if m_matrix.shape[1] == 0:
return [], exemplars_dict, [], []
elif m_matrix.shape[1] == 1:
exemplars_ = [0]
labels_= [0]
exemplars_name = m_name
else:
distmat_input = find_norm_dist_matrix(m_matrix)
# Find representative set of sensor measurements
min_dist_ = np.sqrt(2*(1-(corr_bnd[1])))
max_dist_ = np.sqrt(2*(1-(corr_bnd[0])))
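# Distance/correlation conversion (assuming find_norm_dist_matrix returns the
# Euclidean distance between z-scored, length-normalized signals, so that
# d^2 = 2*(1 - corr)): the correlation band [corr_bnd[0], corr_bnd[1]] maps to
# the distance band [sqrt(2*(1-corr_bnd[1])), sqrt(2*(1-corr_bnd[0]))].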
if alg == 'pack':
log.info('use pack clustering algorithm')
exemplars_, labels_ = max_pack_cluster(distmat_input, min_dist=min_dist_, max_dist=max_dist_)
else:
log.info('use affinity clustering algorithm')
SIMM_MAT = 2 - distmat_input
exemplars_, labels_ = cluster.affinity_propagation(SIMM_MAT, damping=0.5)
num_clusters = int(labels_.max()+1)
log.info('-' * 40)
log.info(str(num_clusters) + ' clusters out of ' + str(len(labels_)) + ' measurements')
log.info('-' * 40)
validity, intra_dist, inter_dist = compute_cluster_err(distmat_input, labels_)
log.info('validity: ' + str(round(validity,2)) + ', intra_dist: ' +
str(np.round(intra_dist,2)) + ', inter_dist: ' +
str(np.round(inter_dist,2)))
log.info('-' * 40)
exemplars_name = list(np.array(m_name)[exemplars_])
for label_id, (m_idx, exemplar_label) in enumerate(zip(exemplars_, exemplars_name)):
log.info(str(exemplar_label))
children_set = list(set(np.nonzero(labels_ == label_id)[0]) - set([m_idx]))
log.info('Label ' + str(label_id) + ' : ' + str(m_idx) + '<--' + str(children_set) )
exemplars_dict.update({exemplar_label : list(np.array(m_name)[children_set])})
return m_matrix[:, exemplars_], exemplars_dict, exemplars_, labels_
def CLUSTERING_TEST(distmat_input, min_corr=0.1, max_corr=0.9):
################################################################################
# Unsupervised clustering for sensors given the normalized Euclidean distance
# of sensor data
# Find only a few representative sensors out of many sensors
################################################################################
# exemplars are a set of representative signals for each cluster
# Smaller damping input will generate more clusters, default is 0.5
# 0.5 <= damping <=0.99
################################################################################
print '==========================================================='
print 'Clustering Test'
print '==========================================================='
print 'Pack Clustering'
print '---------------------------'
min_dist_=np.sqrt(2*(1-(max_corr)))
max_dist_=np.sqrt(2*(1-(min_corr)))
pack_exemplars,pack_labels=max_pack_cluster(distmat_input,min_dist=min_dist_,max_dist=max_dist_)
pack_num_clusters=int(pack_labels.max()+1)
print '-------------------------------------------------------------------------'
print pack_num_clusters, 'clusters out of ', len(pack_labels), 'measurements'
print '-------------------------------------------------------------------------'
validity,intra_dist,inter_dist=compute_cluster_err(distmat_input,pack_labels)
print 'validity:',round(validity,2),', intra_dist: ',np.round(intra_dist,2),', inter_dist: ',np.round(inter_dist,2)
print '-------------------------------------------------------------------------'
max_num_clusters=pack_num_clusters
print 'Hierarchical Clustering'
print '---------------------------'
ward_validity_log=[];
ward_intra_dist_log=[];
ward_inter_dist_log=[];
ward_num_clusters_log=[]
for k in range(2,max_num_clusters+1):
ward = Ward(n_clusters=k).fit(distmat_input.T)
ward_labels=ward.labels_
ward_validity,ward_intra_dist,ward_inter_dist=compute_cluster_err(distmat_input,ward_labels)
ward_num_clusters=int(ward_labels.max()+1)
ward_validity_log.append(ward_validity);
ward_intra_dist_log.append(list(ward_intra_dist));
ward_inter_dist_log.append(list(ward_inter_dist));
ward_num_clusters_log.append(ward_num_clusters)
ward_intra_dist_log=np.array(ward_intra_dist_log);
ward_inter_dist_log=np.array(ward_inter_dist_log)
print 'K-Mean Clustering'
print '---------------------------'
kmean_validity_log=[];
kmean_intra_dist_log=[];
kmean_inter_dist_log=[];
kmean_num_clusters_log=[]
for k in range(2,max_num_clusters+1):
kmean=KMeans(n_clusters=k).fit(distmat_input.T)
kmean_labels=kmean.labels_
kmean_validity,kmean_intra_dist,kmean_inter_dist=compute_cluster_err(distmat_input,kmean_labels)
kmean_num_clusters=int(kmean_labels.max()+1)
kmean_validity_log.append(kmean_validity);
kmean_intra_dist_log.append(list(kmean_intra_dist));
kmean_inter_dist_log.append(list(kmean_inter_dist));
kmean_num_clusters_log.append(kmean_num_clusters)
kmean_intra_dist_log=np.array(kmean_intra_dist_log);
kmean_inter_dist_log=np.array(kmean_inter_dist_log)
print 'Affinity Clustering'
print '---------------------------'
SIMM_MAT=2-distmat_input
aff_exemplars, aff_labels = cluster.affinity_propagation(SIMM_MAT,damping=0.5)
aff_num_clusters=int(aff_labels.max()+1)
aff_validity,aff_intra_dist,aff_inter_dist=compute_cluster_err(distmat_input,aff_labels)
fig = plt.figure('Intra_dist')
fig.suptitle('Intra_dist')
plot(pack_num_clusters,intra_dist[0],'s',label='pack')
plot(pack_num_clusters,intra_dist[1],'s',label='pack')
plot(pack_num_clusters,intra_dist[2],'s',label='pack')
plot(ward_num_clusters_log,ward_intra_dist_log[:,0],'-+',label='ward')
plot(ward_num_clusters_log,ward_intra_dist_log[:,1],'-+',label='ward')
plot(ward_num_clusters_log,ward_intra_dist_log[:,2],'-+',label='ward')
plot(kmean_num_clusters_log,kmean_intra_dist_log[:,0],'-v',label='kmean')
plot(kmean_num_clusters_log,kmean_intra_dist_log[:,1],'-v',label='kmean')
plot(kmean_num_clusters_log,kmean_intra_dist_log[:,2],'-v',label='kmean')
plot(aff_num_clusters,aff_intra_dist[0],'*',label='aff')
plot(aff_num_clusters,aff_intra_dist[1],'*',label='aff')
plot(aff_num_clusters,aff_intra_dist[2],'*',label='aff')
plt.legend()
fig = plt.figure('Inter_dist')
fig.suptitle('Inter_dist')
plot(pack_num_clusters,inter_dist[0],'s',label='pack')
plot(pack_num_clusters,inter_dist[1],'s',label='pack')
plot(pack_num_clusters,inter_dist[2],'s',label='pack')
plot(ward_num_clusters_log,ward_inter_dist_log[:,0],'-+',label='ward')
plot(ward_num_clusters_log,ward_inter_dist_log[:,1],'-+',label='ward')
plot(ward_num_clusters_log,ward_inter_dist_log[:,2],'-+',label='ward')
plot(kmean_num_clusters_log,kmean_inter_dist_log[:,0],'-v',label='kmean')
plot(kmean_num_clusters_log,kmean_inter_dist_log[:,1],'-v',label='kmean')
plot(kmean_num_clusters_log,kmean_inter_dist_log[:,2],'-v',label='kmean')
plot(aff_num_clusters,aff_inter_dist[0],'*',label='aff')
plot(aff_num_clusters,aff_inter_dist[1],'*',label='aff')
plot(aff_num_clusters,aff_inter_dist[2],'*',label='aff')
plt.legend()
fig = plt.figure('Validity')
fig.suptitle('Validity')
plot(pack_num_clusters,validity,'s',label='pack')
plot(ward_num_clusters_log,ward_validity_log,'-+',label='ward')
plot(kmean_num_clusters_log,kmean_validity_log,'-v',label='kmean')
plot(aff_num_clusters,aff_validity,'*',label='aff')
plt.legend()
aff_intra_err_cnt, aff_inter_err_cnt=check_bounded_distance_constraint_condition(distmat_input,aff_labels,min_dist_,max_dist_)
ward_intra_err_cnt, ward_inter_err_cnt=check_bounded_distance_constraint_condition(distmat_input,ward_labels,min_dist_,max_dist_)
kmean_intra_err_cnt, kmean_inter_err_cnt=check_bounded_distance_constraint_condition(distmat_input,kmean_labels,min_dist_,max_dist_)
pack_intra_err_cnt, pack_inter_err_cnt=check_bounded_distance_constraint_condition(distmat_input,pack_labels,min_dist_,max_dist_)
print 'error count'
print '-----------------------------'
print 'pack_intra_err_cnt:', pack_intra_err_cnt, 'pack_inter_err_cnt:', pack_inter_err_cnt
print 'aff_intra_err_cnt:', aff_intra_err_cnt, 'aff_inter_err_cnt:', aff_inter_err_cnt
print 'ward_intra_err_cnt:', ward_intra_err_cnt, 'ward_inter_err_cnt:', ward_inter_err_cnt
print 'kmean_intra_err_cnt:', kmean_intra_err_cnt, 'kmean_inter_err_cnt:', kmean_inter_err_cnt
print '==========================================================='
print 'End of Clustering Test'
print '===========================================================' | gpl-2.0 |
MatthewRueben/multiple-explorers | classes/environment.py | 1 | 13566 | #!/usr/bin/env python
from geography import Bounds2D, Location, POI
from rovers import Rover
from roverSettingsStruct import RoverSettings
import random
import itertools
from matplotlib import pyplot
import copy
class World():
def __init__(self, world_bounds, N_poi, poi_bounds, rover_settings, rover_start, rovHeadings):
""" Inputs "world_bounds" and "poi_bounds" are of class "2DBounds". """
# Rover settings attributes:
# .rewardType
# .moveRandomly
# .numAgents
# .sensorRange
# .sensorFov
# .sensorNoiseInt
N_rovers = rover_settings.numAgents
self.world_bounds = world_bounds
self.poi_bounds = poi_bounds
self.rover_start = rover_start
# Init POIs
self.POIs = []
total_val = N_poi * 10
halfVal = total_val / 2.0
leftover_val = halfVal
# hack to make one large valued, and the others no more than half its value
# poiValueList = [float(random.randint(0, halfVal)) for x in range(N_poi-1)]
# for poiValue in poiValueList:
# self.POIs.append(POI(poiValue, d_min=5.0))
# self.POIs.append(POI(total_val, d_min=5.0))
# print poiValueList
bigPOI = POI(halfVal, d_min=5.0)
self.POIs.append(bigPOI)
for poi_index in range(N_poi-1):
# V_choice = random.uniform(V_bounds[0], V_bounds[1])
#poi_value = random.randint(0, leftover_val)
poi_value = 450.0 # <---- HACK!
leftover_val -= poi_value
poi = POI(poi_value, d_min=5.0) # assign POI value & minimum observation distance
# poi = POI(45.0, d_min=5.0)
self.POIs.append(poi)
# Init rovers
self.rovers = []
for rover_index, heading in itertools.izip(range(N_rovers), rovHeadings):
rover = Rover(name='Fred',
x=self.rover_start.x,
y=self.rover_start.y,
heading=heading,
num_sensors=rover_settings.sensorFov,
observation_range=10,
sensor_range=rover_settings.sensorRange,
sensor_noise=rover_settings.sensorNoiseInt,
num_POI=100)
self.rovers.append(rover)
def resetWithClusters(self, headings):
''' Resets with the clusters either against the left wall, right wall, top wall, or bottom wall. '''
clusterLocations = self.buildClusterLocations(self.poi_bounds, len(self.POIs))
for poi, clusterLoc in itertools.izip(self.POIs, clusterLocations):
# poi.place_randomly(self.poi_bounds) # assign POI location
poi.place_location(clusterLoc)
for rover, heading in itertools.izip(self.rovers, headings):
# reset agents to be center of world
rover.reset(self.rover_start.x,
self.rover_start.y,
heading)
def buildClusterLocations(self, bounds, numPois):
quad = random.random()
clusterList = [float(random.randint(0, 60)) for x in range(numPois)]
clusterLocations = []
# if quad < .25:
# # up wall
# y = 100.0
# for cluster in clusterList:
# clusterLocations.append(copy.deepcopy(Location(cluster, y)))
# elif quad < .5:
# # bottom wall
# y = 0
# for cluster in clusterList:
# clusterLocations.append(copy.deepcopy(Location(cluster, y)))
# elif quad < .75:
# # left wall
# x = 0
# for cluster in clusterList:
# clusterLocations.append(copy.deepcopy(Location(x, cluster)))
# else:
# right wall
x = 50
for cluster in clusterList:
clusterLocations.append(copy.deepcopy(Location(x, cluster)))
return clusterLocations
def reset(self, headings):
# for poi in self.POIs:
# poi.place_randomly(self.poi_bounds)
for rover, heading in itertools.izip(self.rovers, headings):
# reset agents to be center of world
rover.reset(self.rover_start.x,
self.rover_start.y,
heading)
def initPOILocs(self):
for poi in self.POIs:
poi.place_randomly(self.poi_bounds)
def get_rewards(self):
rewards = {'POI': [],
'GLOBAL': 0,
'LOCAL': [0]*len(self.rovers),
'DIFFERENCE': [0]*len(self.rovers),
'DIFFERENCE_PO': [0]*len(self.rovers)}
rover_closest_list = []
# Calculate GLOBAL reward
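# G(Z): for each POI only the single closest rover observation over all rovers
# and all time steps counts, and the POI contributes
# V / max(closest squared distance, d_min**2) to the global reward.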
for poi_index in range(len(self.POIs)): # for each POI, figure out which rover is closest and get the appropriate reward
delta_min, rover_closest = self.find_closest(poi_index)
# print 'Closest rover: ', rover_closest
rover_closest_list.append(rover_closest)
poi_reward = self.POIs[poi_index].V / delta_min # the entire reward for this POI
# print ' Poi reward: ', poi_reward
# print
rewards['POI'].append(poi_reward) # keep track of the reward for each POI
rewards['GLOBAL'] += poi_reward
# Calculate LOCAL reward
for rover_index, rover in enumerate(self.rovers):
rewards['LOCAL'][rover_index] = 0
for poi_index, poi in enumerate(self.POIs): # for each POI...
delta_min = 100.0 # start arbitrarily high
for step_index, location in enumerate(rover.location_history): # check each of the rover's steps
delta = (location - poi) # observation distance
if delta < delta_min: # the closest distance counts, even if it's closer than the minimum observation distance
delta_min = delta
delta_min = max(delta_min ** 2, poi.d_min ** 2) # delta is actually the SQUARED Euclidean distance
poi_reward = poi.V / delta_min # the entire reward for this POI (for this rover only)
rewards['LOCAL'][rover_index] += poi_reward
# Calculate DIFFERENCE reward (with counterfactual c = 0)
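# D_i = G(Z) - G(Z_-i): when rover i is removed, only the POIs for which rover
# i was the closest observer change value, so each such POI is re-scored with
# the next-closest rover before the difference is taken.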
for my_rover_index, rover in enumerate(self.rovers):
G_without = rewards['GLOBAL'] # Set G(Z_-i) = G(Z)
closest_to = [poi_index for poi_index, (rover_index, step_index) in enumerate(rover_closest_list) if rover_index == my_rover_index] # find which POIs this rover was closest to
for poi_index in closest_to: # for each of those POIs...
G_without -= rewards['POI'][poi_index] # Subtract its old reward
delta_min_new, rover_closest_new = self.find_closest(poi_index, [my_rover_index]) # Find the next-closest rover to it
#print (rover_closest_list[poi_index], rover_closest_new)
poi_reward_new = self.POIs[poi_index].V / delta_min_new # Calculate its new reward
G_without += poi_reward_new # Add it back in (G_without should be getting smaller)
rewards['DIFFERENCE'][my_rover_index] = rewards['GLOBAL'] - G_without # Calculate D = G(Z) - G(Z_-i)
# print rewards['DIFFERENCE']
# print 'Any DIFFERENCE rewards less than zero?', any([el < 0 for el in rewards['DIFFERENCE']])
# Calculate DIFFERENCE reward with PARTIAL OBSERVABILITY (and c = 0)
"""
# for each rover
# find which rovers this rover can see
# it can see itself! very important
# start with the full-observability POI rewards
# for each POI
# Partial Observability
G_PO -= rewards['POI'][poi_index] # Subtract its old reward
delta_min_new, rover_closest_new = self.find_closest(poi_index, [my_rover_index]) # Find the next-closest rover to it
poi_reward_new = self.POIs[poi_index].V / delta_min_new # Calculate its new reward
G_PO += poi_reward_new # Add it back in (G_without should be getting smaller)
# Without this agent
G_PO_without
delta_min_new, rover_closest_new = self.find_closest(poi_index, [my_rover_index]) # Find the next-closest rover to it
poi_reward_new = self.POIs[poi_index].V / delta_min_new # Calculate its new reward
G_PO_without
rewards['DIFFERENCE_PO'][my_rover_index] = G_PO - G_PO_without # Calculate D_PO
"""
return rewards, rover_closest_list
def find_closest(self, poi_index, not_these_rovers=[]):
""" Finds closest rover to the specified POI.
Returns that rover's index as well as the distance metric. """
poi = self.POIs[poi_index]
delta_min = 100.0 # start arbitrarily high
rover_closest = None
step_closest = None
for rover_index, rover in enumerate(self.rovers):
# Check observation distances for the rover locations we aren't skipping
if rover_index not in not_these_rovers:
for step_index, location in enumerate(self.rovers[rover_index].location_history):
delta = (location - poi) # observation distance
if delta < delta_min: # the closest rover counts, even if it's closer than the minimum observation distance
delta_min = delta
rover_closest = (rover_index, step_index)
delta_min = max(delta_min ** 2, poi.d_min ** 2) # delta is actually the SQUARED Euclidean distance
return delta_min, rover_closest
def test_plot(self, rover_closest_list=[]):
import time
pyplot.ion()
# Plot each rover's trajectory, one by one
for this_rover_index, rover in enumerate(self.rovers):
pyplot.cla() # clear axis
pyplot.title('Rover #' + str(this_rover_index + 1))
# Plot the world, with POIs
for poi in self.POIs:
pyplot.plot(poi.location.x, poi.location.y, 'k*')
pyplot.axis([self.world_bounds.x_lower, self.world_bounds.x_upper,
self.world_bounds.y_lower, self.world_bounds.y_upper])
trajectory_x = [step.x for step in rover.location_history]
trajectory_y = [step.y for step in rover.location_history]
pyplot.plot(trajectory_x, trajectory_y, 'ro-')
# Draw lines to indicate whenever the rover became the closest observer of a POI
if rover_closest_list:
closest_to = [(poi_index, step_index) for poi_index, (rover_index, step_index) in enumerate(rover_closest_list) if rover_index == this_rover_index] # find which POIs this rover was closest to
for (poi_index, step_index) in closest_to: # for each of those POIs...
pyplot.plot([trajectory_x[step_index], self.POIs[poi_index].location.x],
[trajectory_y[step_index], self.POIs[poi_index].location.y])
pyplot.draw()
time.sleep(1.0)
def plot_all(self, rover_closest_list=[]):
pyplot.ion()
# Which step are we at?
step = str(len(self.rovers[0].location_history))
if int(step) < 10:
step = '0' + step
# Get the rewards thus far.
rewards, rover_closest_list = self.get_rewards()
# Plot each rover's trajectory, one by one
pyplot.cla() # clear axis
pyplot.title('Step #' + str(step) + ', System Reward = ' + str(rewards['GLOBAL']))
for this_rover_index, rover in enumerate(self.rovers):
# Plot the world
fig = pyplot.gcf()
pyplot.axis([self.world_bounds.x_lower, self.world_bounds.x_upper,
self.world_bounds.y_lower, self.world_bounds.y_upper])
# Plot rovers
trajectory_x = [point.x for point in rover.location_history]
trajectory_y = [point.y for point in rover.location_history]
pyplot.plot(trajectory_x, trajectory_y, 'r.-')
pyplot.plot(trajectory_x[-1], trajectory_y[-1], 'ro')
for poi_index, poi in enumerate(self.POIs):
#pyplot.plot(poi.location.x, poi.location.y, 'k*')
# Check if a rover has been within the minimum observation distance of this POI
delta_min, rover_closest = self.find_closest(poi_index)
if delta_min < 1.05 * (poi.d_min ** 2): # if within 5% of min. obs. distance (since an == relation might fail due to float math)
color_choice = 'g'
else:
color_choice = '0.5' # lightish gray
# Draw inside circle of POI -- bigger is better
radius = poi.V / 450.0 * 4
circle1 = pyplot.Circle((poi.location.x, poi.location.y), radius, color=color_choice, fill=True)
fig.gca().add_artist(circle1)
# Draw outside circle of POI at minimum observation distance
circle2 = pyplot.Circle((poi.location.x, poi.location.y), 5, color=color_choice, fill=False)
fig.gca().add_artist(circle2)
pyplot.draw()
fig.savefig('Learned01Step' + str(step) + '.png')
| mit |
Srisai85/scikit-learn | sklearn/utils/multiclass.py | 83 | 12343 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
| bsd-3-clause |
xtonyjiang/GNOVA | calculate.py | 1 | 5915 | #!/usr/bin/python
from __future__ import division
import collections
from itertools import product
import numpy as np
import pandas as pd
from sklearn import linear_model
from scipy.stats import norm
from numpy.linalg import inv
def calculate(gwas_snps, ld_scores, annots, N1, N2):
np.seterr(invalid='ignore')
### Clean up data ###
if annots is None:
annot = ld_scores[['SNP']]
annot['ALL_'] = 1
else:
annot = pd.concat(annots)
ld_snps = set(ld_scores['SNP'])
annot = annot.loc[annot['SNP'].isin(ld_snps)].reset_index(drop=True)
ld_scores = ld_scores.drop(['CHR', 'BP', 'CM', 'MAF'], axis=1, errors='ignore').reset_index(drop=True)
annot = annot.drop(['BP', 'SNP', 'CHR', 'CM'], axis=1, errors='ignore')
gwas_snps.drop(['idx'], axis=1, errors='ignore', inplace=True)
num_annotations = len(annot.columns)
merged = pd.merge(gwas_snps,
pd.concat([ld_scores, annot], axis=1),
on=['SNP'])
ld_score_all = merged.iloc[:,4]
if num_annotations == 1: # non-stratified analysis
ld_scores = merged.iloc[:,4:5]
annot = merged.iloc[:,5:6]
else: # we added in an all 1's column in prep step, so exclude that
ld_scores = merged.iloc[:,5:4 + num_annotations]
annot = merged.iloc[:,5 + num_annotations: 4 + 2 * num_annotations]
num_annotations -= 1
### Calculate genetic correlation ###
# Calculate S and W matrix
P = annot.sum()
p0 = len(ld_scores)
S = np.empty([num_annotations, num_annotations])
for i, j in product(range(num_annotations), range(num_annotations)):
S[i][j] = np.sum(ld_scores[annot.iloc[:,i] == 1].iloc[:,j]) / (P[i] * P[j])
W = np.empty([num_annotations, num_annotations])
for i, j in product(range(num_annotations), range(num_annotations)):
W[i][j] = np.sum((annot.iloc[:,i]==1) & (annot.iloc[:,j]==1)) / np.sum(annot.iloc[:,j] == 1)
# Calculate heritability
Z_x, Z_y = merged['Z_x'], merged['Z_y']
if annots is None:
h2_1 = np.array([p0 * (np.mean(Z_x ** 2) - 1) / (N1 * np.mean(ld_score_all))])
h2_2 = np.array([p0 * (np.mean(Z_y ** 2) - 1) / (N2 * np.mean(ld_score_all))])
else:
tau1 = (np.mean((Z_x) ** 2) - 1)/(N1 * np.mean(ld_score_all))
tau2 = (np.mean((Z_y) ** 2) - 1)/(N2 * np.mean(ld_score_all))
w1 = 1 /(ld_score_all * (1 + N1 * tau1 * ld_score_all) ** 2)
w2 = 1 /(ld_score_all * (1 + N2 * tau2 * ld_score_all) ** 2)
w1[(w1 < 0) | (w1 == np.inf) | (w1 == -np.inf)] = 0
w2[(w2 < 0) | (w2 == np.inf) | (w2 == -np.inf)] = 0
m1 = linear_model.LinearRegression().fit(ld_scores, pd.DataFrame((Z_x) ** 2), sample_weight=w1)
m2 = linear_model.LinearRegression().fit(ld_scores, pd.DataFrame((Z_y) ** 2), sample_weight=w2)
h2_1 = np.dot(W, m1.coef_.T * pd.DataFrame(P) / N1)
h2_2 = np.dot(W, m2.coef_.T * pd.DataFrame(P) / N2)
# Calculate sample overlap correction
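# The intercept of the weighted regression of Z_x*Z_y on the LD scores captures
# the confounding from sample overlap / phenotypic correlation; corr_pheno is
# later subtracted from q (scaled by sqrt(N1*N2)) to give rho_corrected.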
if annots is None:
w1 = 1 + N1 * (h2_1 * ld_score_all / len(ld_score_all))
w2 = 1 + N2 * (h2_2 * ld_score_all / len(ld_score_all))
else:
w1 = 1 + p0 * (np.mean(Z_x ** 2) - 1) / np.mean(ld_score_all) * ld_score_all / len(ld_score_all)
w2 = 1 + p0 * (np.mean(Z_y ** 2) - 1) / np.mean(ld_score_all) * ld_score_all / len(ld_score_all)
w3 = np.mean(Z_x * Z_y) * ld_score_all
w = 1 / (w1 * w2 + w3 * w3)
m = linear_model.LinearRegression().fit(pd.DataFrame(ld_score_all), pd.DataFrame(Z_x * Z_y), sample_weight=w)
corr_pheno = m.intercept_[0]
# Calculate Jackknife variance estimate
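# Block jackknife: within each annotation the SNPs are split into nblock
# consecutive blocks, q is re-estimated with one block left out at a time, and
# the covariance of those leave-one-block-out estimates (scaled by nblock - 1)
# feeds the standard errors computed below.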
nblock = 200
q_block = np.empty([num_annotations, nblock])
for i in range(num_annotations):
df_x = Z_x[annot.iloc[:,i] == 1]
df_y = Z_y[annot.iloc[:,i] == 1]
tot = np.dot(df_x, df_y)
for j, (b_x, b_y) in enumerate(zip(np.array_split(df_x, nblock), np.array_split(df_y, nblock))):
q_block[i][j] = (tot - np.dot(b_x, b_y)) / ((len(df_x) - len(b_x) - corr_pheno) * ((N1 * N2) ** 0.5))
q = np.mean(q_block, axis=1)
cov_q = np.cov(q_block, bias=True) * (nblock - 1)
# rho
rho = W.dot(inv(S)).dot(q)
rho_corrected = W.dot(inv(S)).dot(q - corr_pheno / ((N1 * N2) ** 0.5))
# covariance of rho
cov_rho = W.dot(inv(S)).dot(cov_q).dot(inv(S)).dot(W.T)
# genetic correlation
corr = rho / ((h2_1 * h2_2) ** 0.5).T
corr_corrected = rho_corrected / ((h2_1 * h2_2) ** 0.5).T
if np.isnan(corr).any() or np.isnan(corr_corrected).any():
print('Some correlation estimates are NaN because the heritability '
'estimates were negative.')
# p-value and standard error
se_rho = cov_rho.diagonal() ** 0.5
p_value = norm.sf(abs(rho / se_rho)) * 2
p_value_corrected = norm.sf(abs(rho_corrected / se_rho)) * 2
out = pd.DataFrame(collections.OrderedDict(
[('rho', rho),
('rho_corrected', rho_corrected),
('se_rho', se_rho),
('pvalue', p_value),
('pvalue_corrected', p_value_corrected),
('corr', corr[0]),
('corr_corrected', corr_corrected[0]),
('h2_1', h2_1.T[0]),
('h2_2', h2_2.T[0]),
('p', P),
('p0', p0)
]
))
# Check for all-1 annotations and remove them from the output
has_all_ones = False
if len(out) > 1:
for row in out.index:
if annot[row].all():
out.loc[row,:-2] = np.nan
has_all_ones = True
if has_all_ones:
print('NOTE: There is at least one annotation that applies to every SNP. '
'Non-stratified analysis will provide better estimates for the '
'total genetic covariance and genetic correlation, so we have labeled '
'the results for these annotations as "NA" in the output.')
return out
| gpl-3.0 |
ECP-CANDLE/Benchmarks | Pilot1/P1B2/p1b2.py | 1 | 4456 | from __future__ import print_function
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
import os
import sys
import logging
import argparse
try:
import configparser
except ImportError:
import ConfigParser as configparser
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', 'common'))
sys.path.append(lib_path)
lib_path2 = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path2)
import candle
#url_p1b2 = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B2/'
#file_train = 'P1B2.train.csv'
#file_test = 'P1B2.test.csv'
logger = logging.getLogger(__name__)
additional_definitions = [
{'name':'reg_l2',
'type': float,
'default': 0.,
'help':'weight of regularization for l2 norm of nn weights'}
]
required = [
'data_url',
'train_data',
'test_data',
'activation',
'batch_size',
'dense',
'dropout',
'epochs',
'feature_subsample',
'initialization',
'learning_rate',
'loss',
'optimizer',
'reg_l2',
'rng_seed',
'scaling',
'val_split',
'shuffle'
]
class BenchmarkP1B2(candle.Benchmark):
def set_locals(self):
"""Functionality to set variables specific for the benchmark
- required: set of required parameters for the benchmark.
- additional_definitions: list of dictionaries describing the additional parameters for the
benchmark.
"""
if required is not None:
self.required = set(required)
if additional_definitions is not None:
self.additional_definitions = additional_definitions
def extension_from_parameters(params, framework):
"""Construct string for saving model with annotation of parameters"""
ext = framework
ext += '.A={}'.format(params['activation'])
ext += '.B={}'.format(params['batch_size'])
ext += '.D={}'.format(params['dropout'])
ext += '.E={}'.format(params['epochs'])
if params['feature_subsample']:
ext += '.F={}'.format(params['feature_subsample'])
for i, n in enumerate(params['dense']):
if n:
ext += '.D{}={}'.format(i+1, n)
ext += '.S={}'.format(params['scaling'])
return ext
def load_data_one_hot(params, seed):
# fetch data
file_train = candle.fetch_file(params['data_url'] + params['train_data'],subdir='Pilot1')
file_test = candle.fetch_file(params['data_url'] + params['test_data'],subdir='Pilot1')
return candle.load_Xy_one_hot_data2(file_train, file_test, class_col=['cancer_type'],
drop_cols=['case_id', 'cancer_type'],
n_cols=params['feature_subsample'],
shuffle=params['shuffle'],
scaling=params['scaling'],
validation_split=params['val_split'],
dtype=params['data_type'],
seed=seed)
def load_data(params, seed):
# fetch data
file_train = candle.fetch_file(params['data_url'] + params['train_data'],subdir='Pilot1')
file_test = candle.fetch_file(params['data_url'] + params['test_data'],subdir='Pilot1')
return candle.load_Xy_data2(file_train, file_test, class_col=['cancer_type'],
drop_cols=['case_id', 'cancer_type'],
n_cols=params['feature_subsample'],
shuffle=params['shuffle'],
scaling=params['scaling'],
validation_split=params['val_split'],
dtype=params['data_type'],
seed=seed)
def evaluate_accuracy_one_hot(y_pred, y_test):
def map_max_indices(nparray):
maxi = lambda a: a.argmax()
iter_to_na = lambda i: np.fromiter(i, dtype=np.float)
return np.array([maxi(a) for a in nparray])
ya, ypa = tuple(map(map_max_indices, (y_test, y_pred)))
accuracy = accuracy_score(ya, ypa)
# print('Accuracy: {}%'.format(100 * accuracy))
return {'accuracy': accuracy}
def evaluate_accuracy(y_pred, y_test):
accuracy = accuracy_score(y_test, y_pred)
# print('Accuracy: {}%'.format(100 * accuracy))
return {'accuracy': accuracy}
| mit |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/tests/test_compat.py | 9 | 2455 | # -*- coding: utf-8 -*-
"""
Testing that functions from compat work as expected
"""
from pandas.compat import (range, zip, map, filter, lrange, lzip, lmap,
lfilter, builtins, iterkeys, itervalues, iteritems,
next)
import pandas.util.testing as tm
class TestBuiltinIterators(tm.TestCase):
def check_result(self, actual, expected, lengths):
for (iter_res, list_res), exp, length in zip(actual, expected,
lengths):
self.assertNotIsInstance(iter_res, list)
tm.assertIsInstance(list_res, list)
iter_res = list(iter_res)
self.assertEqual(len(list_res), length)
self.assertEqual(len(iter_res), length)
self.assertEqual(iter_res, exp)
self.assertEqual(list_res, exp)
def test_range(self):
actual1 = range(10)
actual2 = lrange(10)
actual = [actual1, actual2],
expected = list(builtins.range(10)),
lengths = 10,
actual1 = range(1, 10, 2)
actual2 = lrange(1, 10, 2)
actual += [actual1, actual2],
lengths += 5,
expected += list(builtins.range(1, 10, 2)),
self.check_result(actual, expected, lengths)
def test_map(self):
func = lambda x, y, z: x + y + z
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
actual1 = map(func, *lst)
actual2 = lmap(func, *lst)
actual = [actual1, actual2],
expected = list(builtins.map(func, *lst)),
lengths = 10,
self.check_result(actual, expected, lengths)
def test_filter(self):
func = lambda x: x
lst = list(builtins.range(10))
actual1 = filter(func, lst)
actual2 = lfilter(func, lst)
actual = [actual1, actual2],
lengths = 9,
expected = list(builtins.filter(func, lst)),
self.check_result(actual, expected, lengths)
def test_zip(self):
lst = [builtins.range(10), builtins.range(10), builtins.range(10)]
actual = [zip(*lst), lzip(*lst)],
expected = list(builtins.zip(*lst)),
lengths = 10,
self.check_result(actual, expected, lengths)
def test_dict_iterators(self):
self.assertEqual(next(itervalues({1: 2})), 2)
self.assertEqual(next(iterkeys({1: 2})), 1)
self.assertEqual(next(iteritems({1: 2})), (1, 2))
| apache-2.0 |
Nyker510/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
stiphyMT/plantcv | plantcv/plantcv/morphology/segment_tangent_angle.py | 2 | 5430 | # Find tangent angles in degrees of skeleton segments
import os
import cv2
import numpy as np
import pandas as pd
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plantcv.plantcv import plot_image
from plantcv.plantcv import print_image
from plantcv.plantcv import find_objects
from plantcv.plantcv import color_palette
from plantcv.plantcv.morphology import _iterative_prune
def _slope_to_intesect_angle(m1, m2):
""" Calculate intersections angle (in degrees) from the slope of two lines
Inputs:
m1 = Slope of line 1
m2 = Slope of line 2
Returns:
angle = Intersection angle (in degrees)
:param m1: float
:param m2: float
:return angle: float
"""
angle = (np.pi - np.absolute(np.arctan(m1) - np.arctan(m2))) * 180 / np.pi
return angle
def segment_tangent_angle(segmented_img, objects, size, label="default"):
""" Find 'tangent' angles in degrees of skeleton segments. Use `size` pixels on either end of
each segment to find a linear regression line, and calculate angle between the two lines
drawn per segment.
Inputs:
segmented_img = Segmented image to plot slope lines and intersection angles on
objects = List of contours
size = Size of ends used to calculate "tangent" lines
label = optional label parameter, modifies the variable name of observations recorded
Returns:
labeled_img = Segmented debugging image with angles labeled
:param segmented_img: numpy.ndarray
:param objects: list
:param size: int
:param label: str
:return labeled_img: numpy.ndarray
"""
# Store debug
debug = params.debug
params.debug = None
labeled_img = segmented_img.copy()
intersection_angles = []
label_coord_x = []
label_coord_y = []
# Create a color scale, use a previously stored scale if available
rand_color = color_palette(num=len(objects), saved=True)
for i, cnt in enumerate(objects):
find_tangents = np.zeros(segmented_img.shape[:2], np.uint8)
cv2.drawContours(find_tangents, objects, i, 255, 1, lineType=8)
cv2.drawContours(labeled_img, objects, i, rand_color[i], params.line_thickness, lineType=8)
pruned_segment = _iterative_prune(find_tangents, size)
segment_ends = find_tangents - pruned_segment
segment_end_obj, segment_end_hierarchy = find_objects(segment_ends, segment_ends)
slopes = []
for j, obj in enumerate(segment_end_obj):
# Find bounds for regression lines to get drawn
rect = cv2.minAreaRect(cnt)
pts = cv2.boxPoints(rect)
df = pd.DataFrame(pts, columns=('x', 'y'))
x_max = int(df['x'].max())
x_min = int(df['x'].min())
# Find line fit to each segment
[vx, vy, x, y] = cv2.fitLine(obj, cv2.DIST_L2, 0, 0.01, 0.01)
slope = -vy / vx
left_list = int(((x - x_min) * slope) + y)
right_list = int(((x - x_max) * slope) + y)
slopes.append(slope)
if slope > 1000000 or slope < -1000000:
print("Slope of contour with ID#", i, "is", slope, "and cannot be plotted.")
else:
# Draw slope lines
cv2.line(labeled_img, (x_max - 1, right_list), (x_min, left_list), rand_color[i], 1)
if len(slopes) < 2:
# If size*2>len(obj) then pruning will remove the segment completely, and
# makes segment_end_objs contain just one contour.
print("Size too large, contour with ID#", i, "got pruned away completely.")
intersection_angles.append("NA")
else:
# Calculate intersection angles
slope1 = slopes[0][0]
slope2 = slopes[1][0]
intersection_angle = _slope_to_intesect_angle(slope1, slope2)
intersection_angles.append(intersection_angle)
# Store coordinates for labels
label_coord_x.append(objects[i][0][0][0])
label_coord_y.append(objects[i][0][0][1])
segment_ids = []
# Reset debug mode
params.debug = debug
for i, cnt in enumerate(objects):
# Label slope lines
w = label_coord_x[i]
h = label_coord_y[i]
if type(intersection_angles[i]) is str:
text = "{}".format(intersection_angles[i])
else:
text = "{:.2f}".format(intersection_angles[i])
cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)
# segment_label = "ID" + str(i)
segment_ids.append(i)
outputs.add_observation(sample=label, variable='segment_tangent_angle', trait='segment tangent angle',
method='plantcv.plantcv.morphology.segment_tangent_angle', scale='degrees', datatype=list,
value=intersection_angles, label=segment_ids)
# Auto-increment device
params.device += 1
if params.debug == 'print':
print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_segment_tangent_angles.png'))
elif params.debug == 'plot':
plot_image(labeled_img)
return labeled_img
| mit |
zhwa/thunder | thunder/rdds/fileio/tifffile.py | 9 | 173192 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2014, Christoph Gohlke
# Copyright (c) 2008-2014, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG and CCITT compressed image data or EXIF, IPTC, GPS,
and XMP metadata is not implemented.
Only primary info records are read for STK, FluoView, MicroManager, and
NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SGI, SEQ, GEL,
and OME-TIFF, are custom extensions defined by Molecular Devices (Universal
Imaging Corporation), Carl Zeiss MicroImaging, Olympus, Silicon Graphics
International, Media Cybernetics, Molecular Dynamics, and the Open Microscopy
Environment consortium respectively.
For command line usage run ``python tifffile.py --help``
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2014.08.24
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.8.2 <http://www.numpy.org>`_
* `Matplotlib 1.4 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2013.11.05 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `Imread <http://luispedro.org/software/imread>`_
* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_ (same as fabio.TiffIO)
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `CellProfiler.bioformats
<https://github.com/CellProfiler/python-bioformats>`_
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis for a bug fix and some read_cz_lsm functions.
* Hadrien Mary for help on reading MicroManager files.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010).
Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011
(5) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
(8) Micro-Manager File Formats.
http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
(9) Tags for TIFF and Related Specifications. Digital Preservation.
http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
Examples
--------
>>> data = numpy.random.rand(5, 301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> numpy.testing.assert_array_equal(image, data)
>>> with TiffFile('temp.tif') as tif:
... images = tif.asarray()
... for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
This version has been modified as follows from the original tifffile.py
by Christoph Gohlke, available at http://www.lfd.uci.edu/~gohlke/:
* warning message about failure to find C extensions suppressed
"""
from __future__ import division, print_function
import sys
import os
import re
import glob
import math
import zlib
import time
import json
import struct
import warnings
import tempfile
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as etree
import numpy
# try:
# import _tifffile
# except ImportError:
# warnings.warn(
# "failed to import the optional _tifffile C extension module.\n"
# "Loading of some compressed images will be slow.\n"
# "Tifffile.c can be obtained at http://www.lfd.uci.edu/~gohlke/")
__version__ = '2014.08.24'
__docformat__ = 'restructuredtext en'
__all__ = ('imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter',
'TiffSequence')
def imsave(filename, data, **kwargs):
"""Write image data to TIFF file.
Refer to the TiffWriter class and member functions for documentation.
Parameters
----------
filename : str
Name of file to write.
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
kwargs : dict
Parameters 'byteorder', 'bigtiff', and 'software' are passed to
the TiffWriter class.
Parameters 'photometric', 'planarconfig', 'resolution',
'description', 'compress', 'volume', and 'extratags' are passed to
the TiffWriter.save function.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> description = u'{"shape": %s}' % str(list(data.shape))
>>> imsave('temp.tif', data, compress=6,
... extratags=[(270, 's', 0, description, True)])
"""
tifargs = {}
for key in ('byteorder', 'bigtiff', 'software', 'writeshape'):
if key in kwargs:
tifargs[key] = kwargs[key]
del kwargs[key]
if 'writeshape' not in kwargs:
kwargs['writeshape'] = True
if 'bigtiff' not in tifargs and data.size*data.dtype.itemsize > 2000*2**20:
tifargs['bigtiff'] = True
with TiffWriter(filename, **tifargs) as tif:
tif.save(data, **kwargs)
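# Usage note (illustrative): a call such as
#     imsave('temp.tif', data, bigtiff=True, compress=6)
# routes 'bigtiff' to the TiffWriter constructor via tifargs, while 'compress'
# stays in kwargs and is forwarded to TiffWriter.save. If data is larger than
# about 2000 MiB, bigtiff is enabled automatically.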
class TiffWriter(object):
"""Write image data to TIFF file.
TiffWriter instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> with TiffWriter('temp.tif', bigtiff=True) as tif:
... for i in range(data.shape[0]):
... tif.save(data[i], compress=6)
"""
TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
TAGS = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324,
'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339,
'image_depth': 32997, 'tile_depth': 32998}
def __init__(self, filename, bigtiff=False, byteorder=None,
software='tifffile.py'):
"""Create a new TIFF file for writing.
Use bigtiff=True when creating files greater than 2 GB.
Parameters
----------
filename : str
Name of file to write.
bigtiff : bool
If True, the BigTIFF format is used.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
software : str
Name of the software used to create the image.
Saved with the first page only.
"""
if byteorder not in (None, '<', '>'):
raise ValueError("invalid byteorder %s" % byteorder)
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
self._byteorder = byteorder
self._software = software
self._fh = open(filename, 'wb')
self._fh.write({'<': b'II', '>': b'MM'}[byteorder])
if bigtiff:
self._bigtiff = True
self._offset_size = 8
self._tag_size = 20
self._numtag_format = 'Q'
self._offset_format = 'Q'
self._val_format = '8s'
self._fh.write(struct.pack(byteorder+'HHH', 43, 8, 0))
else:
self._bigtiff = False
self._offset_size = 4
self._tag_size = 12
self._numtag_format = 'H'
self._offset_format = 'I'
self._val_format = '4s'
self._fh.write(struct.pack(byteorder+'H', 42))
# first IFD
self._ifd_offset = self._fh.tell()
self._fh.write(struct.pack(byteorder+self._offset_format, 0))
def save(self, data, photometric=None, planarconfig=None, resolution=None,
description=None, volume=False, writeshape=False, compress=0,
extratags=()):
"""Write image data to TIFF file.
Image data are written in one stripe per plane.
Dimensions larger than 2 to 4 (depending on photometric mode, planar
configuration, and SGI mode) are flattened and saved as separate pages.
The 'sample_format' and 'bits_per_sample' TIFF tags are derived from
the data type.
Parameters
----------
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
photometric : {'minisblack', 'miniswhite', 'rgb'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
description : str
The subject of the image. Saved with the first page only.
compress : int
Values from 0 to 9 controlling the level of zlib compression.
If 0, data are written uncompressed (default).
volume : bool
If True, volume data are stored in one tile (if applicable) using
the SGI image_depth and tile_depth tags.
Image width and depth must be multiple of 16.
Few software packages can read this format, e.g. MeVisLab.
writeshape : bool
If True, write the data shape to the image_description tag
if necessary and no other description is given.
extratags: sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
count : int
Number of data values. Not used for string values.
value : sequence
'Count' values compatible with 'dtype'.
writeonce : bool
If True, the tag is written to the first page only.
"""
if photometric not in (None, 'minisblack', 'miniswhite', 'rgb'):
raise ValueError("invalid photometric %s" % photometric)
if planarconfig not in (None, 'contig', 'planar'):
raise ValueError("invalid planarconfig %s" % planarconfig)
if not 0 <= compress <= 9:
raise ValueError("invalid compression level %s" % compress)
fh = self._fh
byteorder = self._byteorder
numtag_format = self._numtag_format
val_format = self._val_format
offset_format = self._offset_format
offset_size = self._offset_size
tag_size = self._tag_size
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
# normalize shape of data
samplesperpixel = 1
extrasamples = 0
if volume and data.ndim < 3:
volume = False
if photometric is None:
if planarconfig:
photometric = 'rgb'
elif data.ndim > 2 and shape[-1] in (3, 4):
photometric = 'rgb'
elif volume and data.ndim > 3 and shape[-4] in (3, 4):
photometric = 'rgb'
elif data.ndim > 2 and shape[-3] in (3, 4):
photometric = 'rgb'
else:
photometric = 'minisblack'
if planarconfig and len(shape) <= (3 if volume else 2):
planarconfig = None
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if len(shape) < 4:
volume = False
if planarconfig is None:
if shape[-1] in (3, 4):
planarconfig = 'contig'
elif shape[-4 if volume else -3] in (3, 4):
planarconfig = 'planar'
elif shape[-1] > shape[-4 if volume else -3]:
planarconfig = 'planar'
else:
planarconfig = 'contig'
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
if samplesperpixel > 3:
extrasamples = samplesperpixel - 3
elif planarconfig and len(shape) > (3 if volume else 2):
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
if len(shape) < 3:
volume = False
if False and (
len(shape) > (3 if volume else 2) and shape[-1] < 5 and
all(shape[-1] < i
for i in shape[(-4 if volume else -3):-1])):
# DISABLED: non-standard TIFF, e.g. (220, 320, 2)
planarconfig = 'contig'
samplesperpixel = shape[-1]
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
else:
data = data.reshape(
(-1, 1) + shape[(-3 if volume else -2):] + (1,))
if samplesperpixel == 2:
warnings.warn("writing non-standard TIFF (samplesperpixel 2)")
if volume and (data.shape[-2] % 16 or data.shape[-3] % 16):
warnings.warn("volume width or length are not multiple of 16")
volume = False
data = numpy.swapaxes(data, 1, 2)
data = data.reshape(
(data.shape[0] * data.shape[1],) + data.shape[2:])
# data.shape is now normalized 5D or 6D, depending on volume
# (pages, planar_samples, (depth,) height, width, contig_samples)
assert len(data.shape) in (5, 6)
shape = data.shape
bytestr = bytes if sys.version[0] == '2' else (
lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x)
tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
if volume:
# use tiles to save volume data
tag_byte_counts = TiffWriter.TAGS['tile_byte_counts']
tag_offsets = TiffWriter.TAGS['tile_offsets']
else:
# else use strips
tag_byte_counts = TiffWriter.TAGS['strip_byte_counts']
tag_offsets = TiffWriter.TAGS['strip_offsets']
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def addtag(code, dtype, count, value, writeonce=False):
# Compute ifdentry & ifdvalue bytes from code, dtype, count, value.
# Append (code, ifdentry, ifdvalue, writeonce) to tags list.
code = int(TiffWriter.TAGS.get(code, code))
try:
tifftype = TiffWriter.TYPES[dtype]
except KeyError:
raise ValueError("unknown dtype %s" % dtype)
rawcount = count
if dtype == 's':
value = bytestr(value) + b'\0'
count = rawcount = len(value)
value = (value, )
if len(dtype) > 1:
count *= int(dtype[:-1])
dtype = dtype[-1]
ifdentry = [pack('HH', code, tifftype),
pack(offset_format, rawcount)]
ifdvalue = None
if count == 1:
if isinstance(value, (tuple, list)):
value = value[0]
ifdentry.append(pack(val_format, pack(dtype, value)))
elif struct.calcsize(dtype) * count <= offset_size:
ifdentry.append(pack(val_format,
pack(str(count)+dtype, *value)))
else:
ifdentry.append(pack(offset_format, 0))
ifdvalue = pack(str(count)+dtype, *value)
tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
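# For reference, an illustrative call: addtag('image_width', 'I', 1, 512)
# packs tag code 256 with TIFF type 4 (LONG), count 1, and stores the value
# inline in the IFD entry because it fits within offset_size bytes.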
def rational(arg, max_denominator=1000000):
# return numerator and denominator from a float or two integers
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
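# Illustrative values: rational(300.0) -> (300, 1); rational(96.5) -> (193, 2);
# rational((72, 1)) -> (72, 1).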
if self._software:
addtag('software', 's', 0, self._software, writeonce=True)
self._software = None # only save to first page
if description:
addtag('image_description', 's', 0, description, writeonce=True)
elif writeshape and shape[0] > 1 and shape != data_shape:
addtag('image_description', 's', 0,
"shape=(%s)" % (",".join('%i' % i for i in data_shape)),
writeonce=True)
addtag('datetime', 's', 0,
datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"),
writeonce=True)
addtag('compression', 'H', 1, 32946 if compress else 1)
addtag('orientation', 'H', 1, 1)
addtag('image_width', 'I', 1, shape[-2])
addtag('image_length', 'I', 1, shape[-3])
if volume:
addtag('image_depth', 'I', 1, shape[-4])
addtag('tile_depth', 'I', 1, shape[-4])
addtag('tile_width', 'I', 1, shape[-2])
addtag('tile_length', 'I', 1, shape[-3])
addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
addtag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
addtag('photometric', 'H', 1,
{'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric])
addtag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig and samplesperpixel > 1:
addtag('planar_configuration', 'H', 1, 1
if planarconfig == 'contig' else 2)
addtag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8, ) * samplesperpixel)
else:
addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb' and extrasamples == 1:
addtag('extra_samples', 'H', 1, 1) # associated alpha channel
else:
addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples)
if resolution:
addtag('x_resolution', '2I', 1, rational(resolution[0]))
addtag('y_resolution', '2I', 1, rational(resolution[1]))
addtag('resolution_unit', 'H', 1, 2)
addtag('rows_per_strip', 'I', 1,
shape[-3] * (shape[-4] if volume else 1))
# use one strip or tile per plane
strip_byte_counts = (data[0, 0].size * data.dtype.itemsize,) * shape[1]
addtag(tag_byte_counts, offset_format, shape[1], strip_byte_counts)
addtag(tag_offsets, offset_format, shape[1], (0, ) * shape[1])
# add extra tags from users
for t in extratags:
addtag(*t)
# the entries in an IFD must be sorted in ascending order by tag code
tags = sorted(tags, key=lambda x: x[0])
if not self._bigtiff and (fh.tell() + data.size*data.dtype.itemsize
> 2**31-1):
raise ValueError("data too large for non-bigtiff file")
for pageindex in range(shape[0]):
# update pointer at ifd_offset
pos = fh.tell()
fh.seek(self._ifd_offset)
fh.write(pack(offset_format, pos))
fh.seek(pos)
# write ifdentries
fh.write(pack(numtag_format, len(tags)))
tag_offset = fh.tell()
fh.write(b''.join(t[1] for t in tags))
self._ifd_offset = fh.tell()
fh.write(pack(offset_format, 0)) # offset to next IFD
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(tags):
if tag[2]:
pos = fh.tell()
fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
fh.write(pack(offset_format, pos))
fh.seek(pos)
if tag[0] == tag_offsets:
strip_offsets_offset = pos
elif tag[0] == tag_byte_counts:
strip_byte_counts_offset = pos
fh.write(tag[2])
# write image data
data_offset = fh.tell()
if compress:
strip_byte_counts = []
for plane in data[pageindex]:
plane = zlib.compress(plane, compress)
strip_byte_counts.append(len(plane))
fh.write(plane)
else:
# if this fails, try updating Python/numpy
data[pageindex].tofile(fh)
fh.flush()
# update strip and tile offsets and byte_counts if necessary
pos = fh.tell()
for tagindex, tag in enumerate(tags):
if tag[0] == tag_offsets: # strip or tile offsets
if tag[2]:
fh.seek(strip_offsets_offset)
strip_offset = data_offset
for size in strip_byte_counts:
fh.write(pack(offset_format, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, data_offset))
elif tag[0] == tag_byte_counts: # strip or tile byte_counts
if compress:
if tag[2]:
fh.seek(strip_byte_counts_offset)
for size in strip_byte_counts:
fh.write(pack(offset_format, size))
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, strip_byte_counts[0]))
break
fh.seek(pos)
fh.flush()
# remove tags that should be written only once
if pageindex == 0:
tags = [t for t in tags if not t[-1]]
def close(self):
self._fh.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def imread(files, **kwargs):
"""Return image data from TIFF file(s) as numpy array.
The first image series is returned if no arguments are provided.
Parameters
----------
files : str or list
File name, glob pattern, or list of file names.
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages in file to return as array.
multifile : bool
If True (default), OME-TIFF data may include pages from multiple files.
pattern : str
Regular expression pattern that matches axes names and indices in
file names.
kwargs : dict
Additional parameters passed to the TiffFile or TiffSequence asarray
function.
Examples
--------
>>> im = imread('test.tif', key=0)
>>> im.shape
(256, 256, 4)
>>> ims = imread(['test.tif', 'test.tif'])
>>> ims.shape
(2, 256, 256, 4)
"""
kwargs_file = {}
if 'multifile' in kwargs:
kwargs_file['multifile'] = kwargs['multifile']
del kwargs['multifile']
else:
kwargs_file['multifile'] = True
kwargs_seq = {}
if 'pattern' in kwargs:
kwargs_seq['pattern'] = kwargs['pattern']
del kwargs['pattern']
if isinstance(files, basestring) and any(i in files for i in '?*'):
files = glob.glob(files)
if not files:
raise ValueError('no files found')
if len(files) == 1:
files = files[0]
if isinstance(files, basestring):
with TiffFile(files, **kwargs_file) as tif:
return tif.asarray(**kwargs)
else:
with TiffSequence(files, **kwargs_seq) as imseq:
return imseq.asarray(**kwargs)
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
__slots__ = ('func', )
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
value = self.func(instance)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
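# A minimal usage sketch for lazyattr (the class name below is hypothetical):
#
#     class Cached(object):
#         @lazyattr
#         def pages(self):
#             return expensive_scan()  # runs once; the result then replaces
#                                      # the descriptor as a plain attribute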
class TiffFile(object):
"""Read image and metadata from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Attributes
----------
pages : list
All TIFF pages in file.
series : list of Records(shape, dtype, axes, TiffPages)
TIFF pages with compatible shapes and types.
micromanager_metadata: dict
Extra MicroManager non-TIFF metadata in the file, if exists.
All attributes are read-only.
Examples
--------
>>> with TiffFile('test.tif') as tif:
... data = tif.asarray()
... data.shape
(256, 256, 4)
"""
def __init__(self, arg, name=None, offset=None, size=None,
multifile=True, multifile_close=True):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
multifile : bool
If True (default), series may include pages from multiple files.
Currently applies to OME-TIFF only.
multifile_close : bool
If True (default), keep the handles of other files in multifile
series closed. This is inefficient when few files refer to
many pages. If False, the C runtime may run out of resources.
"""
self._fh = FileHandle(arg, name=name, offset=offset, size=size)
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
self._multifile_close = bool(multifile_close)
self._files = {self._fh.name: self} # cache of TiffFiles
try:
self._fromfile()
except Exception:
self._fh.close()
raise
@property
def filehandle(self):
"""Return file handle."""
return self._fh
@property
def filename(self):
"""Return name of file handle."""
return self._fh.name
def close(self):
"""Close open file handle(s)."""
for tif in self._files.values():
tif._fh.close()
self._files = {}
def _fromfile(self):
"""Read TIFF header and all page records from file."""
self._fh.seek(0)
try:
self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
except KeyError:
raise ValueError("not a valid TIFF file")
version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
if version == 43: # BigTiff
self.offset_size, zero = struct.unpack(self.byteorder+'HH',
self._fh.read(4))
if zero or self.offset_size != 8:
raise ValueError("not a valid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
raise ValueError("not a TIFF file")
self.pages = []
while True:
try:
page = TiffPage(self)
self.pages.append(page)
except StopIteration:
break
if not self.pages:
raise ValueError("empty TIFF file")
if self.is_micromanager:
# MicroManager files contain metadata not stored in TIFF tags.
self.micromanager_metadata = read_micromanager_metadata(self._fh)
if self.is_lsm:
self._fix_lsm_strip_offsets()
self._fix_lsm_strip_byte_counts()
def _fix_lsm_strip_offsets(self):
"""Unwrap strip offsets for LSM files greater than 4 GB."""
for series in self.series:
wrap = 0
previous_offset = 0
for page in series.pages:
strip_offsets = []
for current_offset in page.strip_offsets:
if current_offset < previous_offset:
wrap += 2**32
strip_offsets.append(current_offset + wrap)
previous_offset = current_offset
page.strip_offsets = tuple(strip_offsets)
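# Illustrative unwrapping (hypothetical offsets): the raw 32-bit values
# [4294966000, 1200, 5000] become [4294966000, 1200 + 2**32, 5000 + 2**32],
# since a decrease in the running offset signals a wrap past the 4 GB boundary.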
def _fix_lsm_strip_byte_counts(self):
"""Set strip_byte_counts to size of compressed data.
The strip_byte_counts tag in LSM files contains the number of bytes
for the uncompressed data.
"""
if not self.pages:
return
strips = {}
for page in self.pages:
assert len(page.strip_offsets) == len(page.strip_byte_counts)
for offset, bytecount in zip(page.strip_offsets,
page.strip_byte_counts):
strips[offset] = bytecount
offsets = sorted(strips.keys())
offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size))
for i, offset in enumerate(offsets[:-1]):
strips[offset] = min(strips[offset], offsets[i+1] - offset)
for page in self.pages:
if page.compression:
page.strip_byte_counts = tuple(
strips[offset] for offset in page.strip_offsets)
@lazyattr
def series(self):
"""Return series of TiffPage with compatible shape and properties."""
if not self.pages:
return []
series = []
page0 = self.pages[0]
if self.is_ome:
series = self._omeseries()
elif self.is_fluoview:
dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
b'EVENT': 'V', b'EXPOSURE': 'L'}
mmhd = list(reversed(page0.mm_header.dimensions))
series = [Record(
axes=''.join(dims.get(i[0].strip().upper(), 'Q')
for i in mmhd if i[1] > 1),
shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
pages=self.pages, dtype=numpy.dtype(page0.dtype))]
elif self.is_lsm:
lsmi = page0.cz_lsm_info
axes = CZ_SCAN_TYPES[lsmi.scan_type]
if page0.is_rgb:
axes = axes.replace('C', '').replace('XY', 'XYC')
axes = axes[::-1]
shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes)
pages = [p for p in self.pages if not p.is_reduced]
series = [Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype))]
if len(pages) != len(self.pages): # reduced RGB pages
pages = [p for p in self.pages if p.is_reduced]
cp = 1
i = 0
while cp < len(pages) and i < len(shape)-2:
cp *= shape[i]
i += 1
shape = shape[:i] + pages[0].shape
axes = axes[:i] + 'CYX'
series.append(Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype)))
elif self.is_imagej:
shape = []
axes = []
ij = page0.imagej_tags
if 'frames' in ij:
shape.append(ij['frames'])
axes.append('T')
if 'slices' in ij:
shape.append(ij['slices'])
axes.append('Z')
if 'channels' in ij and not self.is_rgb:
shape.append(ij['channels'])
axes.append('C')
remain = len(self.pages) // (product(shape) if shape else 1)
if remain > 1:
shape.append(remain)
axes.append('I')
shape.extend(page0.shape)
axes.extend(page0.axes)
axes = ''.join(axes)
series = [Record(pages=self.pages, shape=tuple(shape), axes=axes,
dtype=numpy.dtype(page0.dtype))]
elif self.is_nih:
if len(self.pages) == 1:
shape = page0.shape
axes = page0.axes
else:
shape = (len(self.pages),) + page0.shape
axes = 'I' + page0.axes
series = [Record(pages=self.pages, shape=shape, axes=axes,
dtype=numpy.dtype(page0.dtype))]
elif page0.is_shaped:
# TODO: shaped files can contain multiple series
shape = page0.tags['image_description'].value[7:-1]
shape = tuple(int(i) for i in shape.split(b','))
series = [Record(pages=self.pages, shape=shape,
axes='Q' * len(shape),
dtype=numpy.dtype(page0.dtype))]
# generic detection of series
if not series:
shapes = []
pages = {}
for page in self.pages:
if not page.shape:
continue
shape = page.shape + (page.axes,
page.compression in TIFF_DECOMPESSORS)
if shape not in pages:
shapes.append(shape)
pages[shape] = [page]
else:
pages[shape].append(page)
series = [Record(pages=pages[s],
axes=(('I' + s[-2])
if len(pages[s]) > 1 else s[-2]),
dtype=numpy.dtype(pages[s][0].dtype),
shape=((len(pages[s]), ) + s[:-2]
if len(pages[s]) > 1 else s[:-2]))
for s in shapes]
# remove empty series, e.g. in MD Gel files
series = [s for s in series if sum(s.shape) > 0]
return series
def asarray(self, key=None, series=None, memmap=False):
"""Return image data from multiple TIFF pages as numpy array.
By default the first image series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages to return as array.
memmap : bool
If True, return an array stored in a binary file on disk
if possible.
"""
if key is None and series is None:
series = 0
if series is not None:
pages = self.series[series].pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, int):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError("key must be an int, slice, or sequence")
if not len(pages):
raise ValueError("no pages selected")
if self.is_nih:
if pages[0].is_palette:
result = stack_pages(pages, colormapped=False, squeeze=False)
result = numpy.take(pages[0].color_map, result, axis=1)
result = numpy.swapaxes(result, 0, 1)
else:
result = stack_pages(pages, memmap=memmap,
colormapped=False, squeeze=False)
elif len(pages) == 1:
return pages[0].asarray(memmap=memmap)
elif self.is_ome:
assert not self.is_palette, "color mapping disabled for ome-tiff"
if any(p is None for p in pages):
# zero out missing pages
firstpage = next(p for p in pages if p)
nopage = numpy.zeros_like(
firstpage.asarray(memmap=False))
s = self.series[series]
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=s.dtype, shape=s.shape)
result = result.reshape(-1)
else:
result = numpy.empty(s.shape, s.dtype).reshape(-1)
index = 0
class KeepOpen:
# keep Tiff files open between consecutive pages
def __init__(self, parent, close):
self.master = parent
self.parent = parent
self._close = close
def open(self, page):
if self._close and page and page.parent != self.parent:
if self.parent != self.master:
self.parent.filehandle.close()
self.parent = page.parent
self.parent.filehandle.open()
def close(self):
if self._close and self.parent != self.master:
self.parent.filehandle.close()
keep = KeepOpen(self, self._multifile_close)
for page in pages:
keep.open(page)
if page:
a = page.asarray(memmap=False, colormapped=False,
reopen=False)
else:
a = nopage
try:
result[index:index + a.size] = a.reshape(-1)
except ValueError as e:
warnings.warn("ome-tiff: %s" % e)
break
index += a.size
keep.close()
else:
result = stack_pages(pages, memmap=memmap)
if key is None:
try:
result.shape = self.series[series].shape
except ValueError:
try:
warnings.warn("failed to reshape %s to %s" % (
result.shape, self.series[series].shape))
# try series of expected shapes
result.shape = (-1,) + self.series[series].shape
except ValueError:
# revert to generic shape
result.shape = (-1,) + pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result
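# Usage note (illustrative): tif.asarray(key=slice(0, 10), series=0) stacks the
# first ten pages of the first series; memmap=True additionally requests a
# disk-backed array where possible instead of loading everything into memory.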
def _omeseries(self):
"""Return image series in OME-TIFF file(s)."""
root = etree.fromstring(self.pages[0].tags['image_description'].value)
uuid = root.attrib.get('UUID', None)
self._files = {uuid: self}
dirname = self._fh.dirname
modulo = {}
result = []
for element in root:
if element.tag.endswith('BinaryOnly'):
warnings.warn("ome-xml: not an ome-tiff master file")
break
if element.tag.endswith('StructuredAnnotations'):
for annot in element:
if not annot.attrib.get('Namespace',
'').endswith('modulo'):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith('Along'):
continue
axis = along.tag[-1]
newaxis = along.attrib.get('Type', 'other')
newaxis = AXES_LABELS[newaxis]
if 'Start' in along.attrib:
labels = range(
int(along.attrib['Start']),
int(along.attrib['End']) + 1,
int(along.attrib.get('Step', 1)))
else:
labels = [label.text for label in along
if label.tag.endswith('Label')]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith('Image'):
continue
for pixels in element:
if not pixels.tag.endswith('Pixels'):
continue
atr = pixels.attrib
dtype = atr.get('Type', None)
axes = ''.join(reversed(atr['DimensionOrder']))
shape = list(int(atr['Size'+ax]) for ax in axes)
size = product(shape[:-2])
ifds = [None] * size
for data in pixels:
if not data.tag.endswith('TiffData'):
continue
atr = data.attrib
ifd = int(atr.get('IFD', 0))
num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
num = int(atr.get('PlaneCount', num))
idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
try:
idx = numpy.ravel_multi_index(idx, shape[:-2])
except ValueError:
# ImageJ produces invalid ome-xml when cropping
warnings.warn("ome-xml: invalid TiffData index")
continue
for uuid in data:
if not uuid.tag.endswith('UUID'):
continue
if uuid.text not in self._files:
if not self._multifile:
# abort reading multifile OME series
# and fall back to generic series
return []
fname = uuid.attrib['FileName']
try:
tif = TiffFile(os.path.join(dirname, fname))
except (IOError, ValueError):
tif.close()
warnings.warn(
"ome-xml: failed to read '%s'" % fname)
break
self._files[uuid.text] = tif
if self._multifile_close:
tif.close()
pages = self._files[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
# only process first uuid
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
if all(i is None for i in ifds):
# skip images without data
continue
dtype = next(i for i in ifds if i).dtype
result.append(Record(axes=axes, shape=shape, pages=ifds,
dtype=numpy.dtype(dtype)))
for record in result:
for axis, (newaxis, labels) in modulo.items():
i = record.axes.index(axis)
size = len(labels)
if record.shape[i] == size:
record.axes = record.axes.replace(axis, newaxis, 1)
else:
record.shape[i] //= size
record.shape.insert(i+1, size)
record.axes = record.axes.replace(axis, axis+newaxis, 1)
record.shape = tuple(record.shape)
# squeeze dimensions
for record in result:
record.shape, record.axes = squeeze_axes(record.shape, record.axes)
return result
def __len__(self):
"""Return number of image pages in file."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified page."""
return self.pages[key]
def __iter__(self):
"""Return iterator over pages."""
return iter(self.pages)
def __str__(self):
"""Return string containing information about file."""
result = [
self._fh.name.capitalize(),
format_size(self._fh.size),
{'<': 'little endian', '>': 'big endian'}[self.byteorder]]
if self.is_bigtiff:
result.append("bigtiff")
if len(self.pages) > 1:
result.append("%i pages" % len(self.pages))
if len(self.series) > 1:
result.append("%i series" % len(self.series))
if len(self._files) > 1:
result.append("%i files" % (len(self._files)))
return ", ".join(result)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@lazyattr
def fstat(self):
try:
return os.fstat(self._fh.fileno())
except Exception: # io.UnsupportedOperation
return None
@lazyattr
def is_bigtiff(self):
return self.offset_size != 4
@lazyattr
def is_rgb(self):
return all(p.is_rgb for p in self.pages)
@lazyattr
def is_palette(self):
return all(p.is_palette for p in self.pages)
@lazyattr
def is_mdgel(self):
return any(p.is_mdgel for p in self.pages)
@lazyattr
def is_mediacy(self):
return any(p.is_mediacy for p in self.pages)
@lazyattr
def is_stk(self):
return all(p.is_stk for p in self.pages)
@lazyattr
def is_lsm(self):
return self.pages[0].is_lsm
@lazyattr
def is_imagej(self):
return self.pages[0].is_imagej
@lazyattr
def is_micromanager(self):
return self.pages[0].is_micromanager
@lazyattr
def is_nih(self):
return self.pages[0].is_nih
@lazyattr
def is_fluoview(self):
return self.pages[0].is_fluoview
@lazyattr
def is_ome(self):
return self.pages[0].is_ome
class TiffPage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, colormapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
colormapped and with one alpha channel if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime,
'L' exposure, 'V' event, 'Q' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
color_map : numpy array
Color look up table, if exists.
cz_lsm_scan_info: Record(dict)
LSM scan info attributes, if exists.
imagej_tags: Record(dict)
Consolidated ImageJ description and metadata tags, if exists.
uic_tags: Record(dict)
Consolidated MetaMorph STK/UIC tags, if exists.
All attributes are read-only.
Notes
-----
The internal, normalized '_shape' attribute is 6 dimensional:
0. number planes (stk)
1. planar samples_per_pixel
2. image_depth Z (sgi)
3. image_length Y
4. image_width X
5. contig samples_per_pixel
"""
def __init__(self, parent):
"""Initialize instance from file."""
self.parent = parent
self.index = len(parent.pages)
self.shape = self._shape = ()
self.dtype = self._dtype = None
self.axes = ""
self.tags = TiffTags()
self._fromfile()
self._process_tags()
def _fromfile(self):
"""Read TIFF IFD structure and its tags from file.
File cursor must be at storage position of IFD offset and is left at
offset to next IFD.
Raises StopIteration if offset (first bytes read) is 0.
"""
fh = self.parent.filehandle
byteorder = self.parent.byteorder
offset_size = self.parent.offset_size
fmt = {4: 'I', 8: 'Q'}[offset_size]
offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
if not offset:
raise StopIteration()
# read standard tags
tags = self.tags
fh.seek(offset)
fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
try:
numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
except Exception:
warnings.warn("corrupted page list")
raise StopIteration()
tagcode = 0
for _ in range(numtags):
try:
tag = TiffTag(self.parent)
# print(tag)
except TiffTag.Error as e:
warnings.warn(str(e))
continue
if tagcode > tag.code:
# expected for early LSM and tifffile versions
warnings.warn("tags are not ordered by code")
tagcode = tag.code
if tag.name not in tags:
tags[tag.name] = tag
else:
# some files contain multiple IFD with same code
# e.g. MicroManager files contain two image_description
i = 1
while True:
name = "%s_%i" % (tag.name, i)
if name not in tags:
tags[name] = tag
break
pos = fh.tell()
if self.is_lsm or (self.index and self.parent.is_lsm):
# correct non standard LSM bitspersample tags
self.tags['bits_per_sample']._correct_lsm_bitspersample(self)
if self.is_lsm:
# read LSM info subrecords
for name, reader in CZ_LSM_INFO_READERS.items():
try:
offset = self.cz_lsm_info['offset_'+name]
except KeyError:
continue
if offset < 8:
# older LSM revision
continue
fh.seek(offset)
try:
setattr(self, 'cz_lsm_'+name, reader(fh))
except ValueError:
pass
elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value:
# read uic1tag now that plane count is known
uic1tag = tags['uic1tag']
fh.seek(uic1tag.value_offset)
tags['uic1tag'].value = Record(
read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count,
tags['uic2tag'].count))
fh.seek(pos)
def _process_tags(self):
"""Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported.
"""
tags = self.tags
for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
if not (name in tags or default is None):
tags[name] = TiffTag(code, dtype=dtype, count=count,
value=default, name=name)
if name in tags and validate:
try:
if tags[name].count == 1:
setattr(self, name, validate[tags[name].value])
else:
setattr(self, name, tuple(
validate[value] for value in tags[name].value))
except KeyError:
raise ValueError("%s.value (%s) not supported" %
(name, tags[name].value))
tag = tags['bits_per_sample']
if tag.count == 1:
self.bits_per_sample = tag.value
else:
# LSM might list more items than samples_per_pixel
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.bits_per_sample = value
else:
self.bits_per_sample = value[0]
tag = tags['sample_format']
if tag.count == 1:
self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
else:
self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
if 'photometric' not in tags:
self.photometric = None
if 'image_depth' not in tags:
self.image_depth = 1
if 'image_length' in tags:
self.strips_per_image = int(math.floor(
float(self.image_length + self.rows_per_strip - 1) /
self.rows_per_strip))
else:
self.strips_per_image = 0
key = (self.sample_format, self.bits_per_sample)
self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
if 'image_length' not in self.tags or 'image_width' not in self.tags:
# some GEL file pages are missing image data
self.image_length = 0
self.image_width = 0
self.image_depth = 0
self.strip_offsets = 0
self._shape = ()
self.shape = ()
self.axes = ''
if self.is_palette:
self.dtype = self.tags['color_map'].dtype[1]
self.color_map = numpy.array(self.color_map, self.dtype)
dmax = self.color_map.max()
if dmax < 256:
self.dtype = numpy.uint8
self.color_map = self.color_map.astype(self.dtype)
#else:
# self.dtype = numpy.uint8
# self.color_map >>= 8
# self.color_map = self.color_map.astype(self.dtype)
self.color_map.shape = (3, -1)
# determine shape of data
image_length = self.image_length
image_width = self.image_width
image_depth = self.image_depth
samples_per_pixel = self.samples_per_pixel
if self.is_stk:
assert self.image_depth == 1
planes = self.tags['uic2tag'].count
if self.is_contig:
self._shape = (planes, 1, 1, image_length, image_width,
samples_per_pixel)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, image_length, image_width,
samples_per_pixel)
self.axes = 'YXS'
else:
self._shape = (planes, samples_per_pixel, 1, image_length,
image_width, 1)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, samples_per_pixel, image_length,
image_width)
self.axes = 'SYX'
# detect type of series
if planes == 1:
self.shape = self.shape[1:]
elif numpy.all(self.uic2tag.z_distance != 0):
self.axes = 'Z' + self.axes
elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0):
self.axes = 'T' + self.axes
else:
self.axes = 'I' + self.axes
# DISABLED
if self.is_palette:
assert False, "color mapping disabled for stk"
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, planes, image_length, image_width)
else:
self.shape = (3, planes, image_depth, image_length,
image_width)
self.axes = 'C' + self.axes
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
elif self.is_palette:
samples = 1
if 'extra_samples' in self.tags:
samples += len(self.extra_samples)
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples)
else:
self._shape = (1, samples, image_depth, image_length,
image_width, 1)
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, image_length, image_width)
self.axes = 'CYX'
else:
self.shape = (3, image_depth, image_length, image_width)
self.axes = 'CZYX'
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
elif self.is_rgb or samples_per_pixel > 1:
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples_per_pixel)
if image_depth == 1:
self.shape = (image_length, image_width, samples_per_pixel)
self.axes = 'YXS'
else:
self.shape = (image_depth, image_length, image_width,
samples_per_pixel)
self.axes = 'ZYXS'
else:
self._shape = (1, samples_per_pixel, image_depth,
image_length, image_width, 1)
if image_depth == 1:
self.shape = (samples_per_pixel, image_length, image_width)
self.axes = 'SYX'
else:
self.shape = (samples_per_pixel, image_depth,
image_length, image_width)
self.axes = 'SZYX'
if False and self.is_rgb and 'extra_samples' in self.tags:
# DISABLED: only use RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for exs in extra_samples:
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
self.shape = self.shape[:-1] + (4,)
else:
self.shape = (4,) + self.shape[1:]
break
else:
self._shape = (1, 1, image_depth, image_length, image_width, 1)
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
if not self.compression and 'strip_byte_counts' not in tags:
self.strip_byte_counts = (
product(self.shape) * (self.bits_per_sample // 8), )
assert len(self.shape) == len(self.axes)
def asarray(self, squeeze=True, colormapped=True, rgbonly=False,
scale_mdgel=False, memmap=False, reopen=True):
"""Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
If any of 'squeeze', 'colormapped', or 'rgbonly' are not the default,
the shape of the returned array might be different from the page shape.
Parameters
----------
squeeze : bool
If True, all length-1 dimensions (except X and Y) are
squeezed out from result.
colormapped : bool
If True, color mapping is applied for palette-indexed images.
rgbonly : bool
If True, return RGB(A) image without additional extra samples.
memmap : bool
If True, use numpy.memmap to read arrays from file if possible.
For use on 64-bit systems and files with large contiguous blocks of data.
reopen : bool
If True and the parent file handle is closed, the file is
temporarily re-opened (and closed if no exception occurs).
scale_mdgel : bool
If True, MD Gel data will be scaled according to the private
metadata in the second TIFF page. The dtype will be float32.
"""
if not self._shape:
return
if self.dtype is None:
raise ValueError("data type not supported: %s%i" % (
self.sample_format, self.bits_per_sample))
if self.compression not in TIFF_DECOMPESSORS:
raise ValueError("cannot decompress %s" % self.compression)
tag = self.tags['sample_format']
if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
raise ValueError("sample formats don't match %s" % str(tag.value))
fh = self.parent.filehandle
closed = fh.closed
if closed:
if reopen:
fh.open()
else:
raise IOError("file handle is closed")
dtype = self._dtype
shape = self._shape
image_width = self.image_width
image_length = self.image_length
image_depth = self.image_depth
typecode = self.parent.byteorder + dtype
bits_per_sample = self.bits_per_sample
if self.is_tiled:
if 'tile_offsets' in self.tags:
byte_counts = self.tile_byte_counts
offsets = self.tile_offsets
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
tile_width = self.tile_width
tile_length = self.tile_length
tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1
tw = (image_width + tile_width - 1) // tile_width
tl = (image_length + tile_length - 1) // tile_length
td = (image_depth + tile_depth - 1) // tile_depth
shape = (shape[0], shape[1],
td*tile_depth, tl*tile_length, tw*tile_width, shape[-1])
tile_shape = (tile_depth, tile_length, tile_width, shape[-1])
runlen = tile_width
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
runlen = image_width
if any(o < 2 for o in offsets):
raise ValueError("corrupted page")
if memmap and self._is_memmappable(rgbonly, colormapped):
result = fh.memmap_array(typecode, shape, offset=offsets[0])
elif self.is_contiguous:
fh.seek(offsets[0])
result = fh.read_array(typecode, product(shape))
result = result.astype('=' + dtype)
else:
if self.is_contig:
runlen *= self.samples_per_pixel
if bits_per_sample in (8, 16, 32, 64, 128):
if (bits_per_sample * runlen) % 8:
raise ValueError("data and sample size mismatch")
def unpack(x):
try:
return numpy.fromstring(x, typecode)
except ValueError as e:
# strips may be missing EOI
warnings.warn("unpack: %s" % e)
xlen = ((len(x) // (bits_per_sample // 8))
* (bits_per_sample // 8))
return numpy.fromstring(x[:xlen], typecode)
elif isinstance(bits_per_sample, tuple):
def unpack(x):
return unpackrgb(x, typecode, bits_per_sample)
else:
def unpack(x):
return unpackints(x, typecode, bits_per_sample, runlen)
decompress = TIFF_DECOMPESSORS[self.compression]
if self.compression == 'jpeg':
table = self.jpeg_tables if 'jpeg_tables' in self.tags else b''
decompress = lambda x: decodejpg(x, table, self.photometric)
if self.is_tiled:
result = numpy.empty(shape, dtype)
tw, tl, td, pl = 0, 0, 0, 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
tile = unpack(decompress(fh.read(bytecount)))
tile.shape = tile_shape
if self.predictor == 'horizontal':
numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
result[0, pl, td:td+tile_depth,
tl:tl+tile_length, tw:tw+tile_width, :] = tile
del tile
tw += tile_width
if tw >= shape[4]:
tw, tl = 0, tl + tile_length
if tl >= shape[3]:
tl, td = 0, td + tile_depth
if td >= shape[2]:
td, pl = 0, pl + 1
result = result[...,
:image_depth, :image_length, :image_width, :]
else:
strip_size = (self.rows_per_strip * self.image_width *
self.samples_per_pixel)
result = numpy.empty(shape, dtype).reshape(-1)
index = 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
strip = fh.read(bytecount)
strip = decompress(strip)
strip = unpack(strip)
size = min(result.size, strip.size, strip_size,
result.size - index)
result[index:index+size] = strip[:size]
del strip
index += size
result.shape = self._shape
if self.predictor == 'horizontal' and not (self.is_tiled and not
self.is_contiguous):
# work around bug in LSM510 software
if not (self.parent.is_lsm and not self.compression):
numpy.cumsum(result, axis=-2, dtype=dtype, out=result)
if colormapped and self.is_palette:
if self.color_map.shape[1] >= 2**bits_per_sample:
# FluoView and LSM might fail here
result = numpy.take(self.color_map,
result[:, 0, :, :, :, 0], axis=1)
elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
# return only RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for i, exs in enumerate(extra_samples):
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
result = result[..., [0, 1, 2, 3+i]]
else:
result = result[:, [0, 1, 2, 3+i]]
break
else:
if self.is_contig:
result = result[..., :3]
else:
result = result[:, :3]
if squeeze:
try:
result.shape = self.shape
except ValueError:
warnings.warn("failed to reshape from %s to %s" % (
str(result.shape), str(self.shape)))
if scale_mdgel and self.parent.is_mdgel:
# MD Gel stores private metadata in the second page
tags = self.parent.pages[1]
if tags.md_file_tag in (2, 128):
scale = tags.md_scale_pixel
scale = scale[0] / scale[1] # rational
result = result.astype('float32')
if tags.md_file_tag == 2:
result **= 2 # square root data format
result *= scale
if closed:
# TODO: file remains open if an exception occurred above
fh.close()
return result
def _is_memmappable(self, rgbonly, colormapped):
"""Return if image data in file can be memory mapped."""
if not self.parent.filehandle.is_file or not self.is_contiguous:
return False
return not (self.predictor or
(rgbonly and 'extra_samples' in self.tags) or
(colormapped and self.is_palette) or
({'big': '>', 'little': '<'}[sys.byteorder] !=
self.parent.byteorder))
@lazyattr
def is_contiguous(self):
"""Return offset and size of contiguous data, else None.
Excludes prediction and colormapping.
"""
if self.compression or self.bits_per_sample not in (8, 16, 32, 64):
return
if self.is_tiled:
if (self.image_width != self.tile_width or
self.image_length % self.tile_length or
self.tile_width % 16 or self.tile_length % 16):
return
if ('image_depth' in self.tags and 'tile_depth' in self.tags and
(self.image_length != self.tile_length or
self.image_depth % self.tile_depth)):
return
offsets = self.tile_offsets
byte_counts = self.tile_byte_counts
else:
offsets = self.strip_offsets
byte_counts = self.strip_byte_counts
if len(offsets) == 1:
return offsets[0], byte_counts[0]
if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i+1]
or byte_counts[i+1] == 0 # no data/ignore offset
for i in range(len(offsets)-1)):
return offsets[0], sum(byte_counts)
def __str__(self):
"""Return string containing information about page."""
s = ', '.join(s for s in (
' x '.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
'%s bit' % str(self.bits_per_sample),
self.photometric if 'photometric' in self.tags else '',
self.compression if self.compression else 'raw',
'|'.join(t[3:] for t in (
'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej',
'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy',
'is_sgi', 'is_reduced', 'is_tiled',
'is_contiguous') if getattr(self, t))) if s)
return "Page %i: %s" % (self.index, s)
def __getattr__(self, name):
"""Return tag value."""
if name in self.tags:
value = self.tags[name].value
setattr(self, name, value)
return value
raise AttributeError(name)
@lazyattr
def uic_tags(self):
"""Consolidate UIC tags."""
if not self.is_stk:
raise AttributeError("uic_tags")
tags = self.tags
result = Record()
result.number_planes = tags['uic2tag'].count
if 'image_description' in tags:
result.plane_descriptions = self.image_description.split(b'\x00')
if 'uic1tag' in tags:
result.update(tags['uic1tag'].value)
if 'uic3tag' in tags:
result.update(tags['uic3tag'].value) # wavelengths
if 'uic4tag' in tags:
result.update(tags['uic4tag'].value) # override uic1 tags
uic2tag = tags['uic2tag'].value
result.z_distance = uic2tag.z_distance
result.time_created = uic2tag.time_created
result.time_modified = uic2tag.time_modified
try:
result.datetime_created = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_created, uic2tag.time_created)]
result.datetime_modified = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_modified, uic2tag.time_modified)]
except ValueError as e:
warnings.warn("uic_tags: %s" % e)
return result
@lazyattr
def imagej_tags(self):
"""Consolidate ImageJ metadata."""
if not self.is_imagej:
raise AttributeError("imagej_tags")
tags = self.tags
if 'image_description_1' in tags:
# MicroManager
result = imagej_description(tags['image_description_1'].value)
else:
result = imagej_description(tags['image_description'].value)
if 'imagej_metadata' in tags:
try:
result.update(imagej_metadata(
tags['imagej_metadata'].value,
tags['imagej_byte_counts'].value,
self.parent.byteorder))
except Exception as e:
warnings.warn(str(e))
return Record(result)
@lazyattr
def is_rgb(self):
"""True if page contains a RGB image."""
return ('photometric' in self.tags and
self.tags['photometric'].value == 2)
@lazyattr
def is_contig(self):
"""True if page contains a contiguous image."""
return ('planar_configuration' in self.tags and
self.tags['planar_configuration'].value == 1)
@lazyattr
def is_palette(self):
"""True if page contains a palette-colored image and not OME or STK."""
try:
# turn off color mapping for OME-TIFF and STK
if self.is_stk or self.is_ome or self.parent.is_ome:
return False
except IndexError:
pass # OME-XML not found in first page
return ('photometric' in self.tags and
self.tags['photometric'].value == 3)
@lazyattr
def is_tiled(self):
"""True if page contains tiled image."""
return 'tile_width' in self.tags
@lazyattr
def is_reduced(self):
"""True if page is a reduced image of another image."""
return bool(self.tags['new_subfile_type'].value & 1)
@lazyattr
def is_mdgel(self):
"""True if page contains md_file_tag tag."""
return 'md_file_tag' in self.tags
@lazyattr
def is_mediacy(self):
"""True if page contains Media Cybernetics Id tag."""
return ('mc_id' in self.tags and
self.tags['mc_id'].value.startswith(b'MC TIFF'))
@lazyattr
def is_stk(self):
"""True if page contains UIC2Tag tag."""
return 'uic2tag' in self.tags
@lazyattr
def is_lsm(self):
"""True if page contains LSM CZ_LSM_INFO tag."""
return 'cz_lsm_info' in self.tags
@lazyattr
def is_fluoview(self):
"""True if page contains FluoView MM_STAMP tag."""
return 'mm_stamp' in self.tags
@lazyattr
def is_nih(self):
"""True if page contains NIH image header."""
return 'nih_image_header' in self.tags
@lazyattr
def is_sgi(self):
"""True if page contains SGI image and tile depth tags."""
return 'image_depth' in self.tags and 'tile_depth' in self.tags
@lazyattr
def is_ome(self):
"""True if page contains OME-XML in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'<?xml version='))
@lazyattr
def is_shaped(self):
"""True if page contains shape in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'shape=('))
@lazyattr
def is_imagej(self):
"""True if page contains ImageJ description."""
return (
('image_description' in self.tags and
self.tags['image_description'].value.startswith(b'ImageJ=')) or
('image_description_1' in self.tags and # Micromanager
self.tags['image_description_1'].value.startswith(b'ImageJ=')))
@lazyattr
def is_micromanager(self):
"""True if page contains Micro-Manager metadata."""
return 'micromanager_metadata' in self.tags
class TiffTag(object):
"""A TIFF tag structure.
Attributes
----------
name : string
Attribute name of tag.
code : int
Decimal code of tag.
dtype : str
Datatype of tag data. One of TIFF_DATA_TYPES.
count : int
Number of values.
value : various types
Tag data as Python object.
value_offset : int
Location of value in file, if any.
All attributes are read-only.
"""
__slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
'_offset', '_value', '_type')
class Error(Exception):
pass
def __init__(self, arg, **kwargs):
"""Initialize instance from file or arguments."""
self._offset = None
if hasattr(arg, '_fh'):
self._fromfile(arg, **kwargs)
else:
self._fromdata(arg, **kwargs)
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value
self._value = value
self._type = dtype
def _fromfile(self, parent):
"""Read tag structure from open file. Advance file cursor."""
fh = parent.filehandle
byteorder = parent.byteorder
self._offset = fh.tell()
self.value_offset = self._offset + parent.offset_size + 4
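        # a classic TIFF IFD entry is 12 bytes (2-byte tag code, 2-byte data
        # type, 4-byte count, 4-byte value/offset); a BigTIFF entry is
        # 20 bytes with 8-byte count and value/offset fields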
fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
data = fh.read(size)
code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
count, value = struct.unpack(byteorder + fmt[2:], data[4:])
self._value = value
self._type = dtype
if code in TIFF_TAGS:
name = TIFF_TAGS[code][0]
elif code in CUSTOM_TAGS:
name = CUSTOM_TAGS[code][0]
else:
name = str(code)
try:
dtype = TIFF_DATA_TYPES[self._type]
except KeyError:
raise TiffTag.Error("unknown tag data type %i" % self._type)
fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
size = struct.calcsize(fmt)
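        # value does not fit into the IFD entry; read it from the value offset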
if size > parent.offset_size or code in CUSTOM_TAGS:
pos = fh.tell()
tof = {4: 'I', 8: 'Q'}[parent.offset_size]
self.value_offset = offset = struct.unpack(byteorder+tof, value)[0]
if offset < 0 or offset > parent.filehandle.size:
raise TiffTag.Error("corrupt file - invalid tag value offset")
elif offset < 4:
raise TiffTag.Error("corrupt value offset for tag %i" % code)
fh.seek(offset)
if code in CUSTOM_TAGS:
readfunc = CUSTOM_TAGS[code][1]
value = readfunc(fh, byteorder, dtype, count)
if isinstance(value, dict): # numpy.core.records.record
value = Record(value)
elif code in TIFF_TAGS or dtype[-1] == 's':
value = struct.unpack(fmt, fh.read(size))
else:
value = read_numpy(fh, byteorder, dtype, count)
fh.seek(pos)
else:
value = struct.unpack(fmt, value[:size])
if code not in CUSTOM_TAGS and code not in (273, 279, 324, 325):
# scalar value if not strip/tile offsets/byte_counts
if len(value) == 1:
value = value[0]
if (dtype.endswith('s') and isinstance(value, bytes)
and self._type != 7):
# TIFF ASCII fields can contain multiple strings,
# each terminated with a NUL
value = stripascii(value)
self.code = code
self.name = name
self.dtype = dtype
self.count = count
self.value = value
def _correct_lsm_bitspersample(self, parent):
"""Correct LSM bitspersample tag.
Old LSM writers may use a separate region for two 16-bit values,
        although they fit into the value field of the tag.
"""
if self.code == 258 and self.count == 2:
# TODO: test this. Need example file.
warnings.warn("correcting LSM bitspersample tag")
fh = parent.filehandle
tof = {4: '<I', 8: '<Q'}[parent.offset_size]
self.value_offset = struct.unpack(tof, self._value)[0]
fh.seek(self.value_offset)
self.value = struct.unpack("<HH", fh.read(4))
def as_str(self):
"""Return value as human readable string."""
return ((str(self.value).split('\n', 1)[0]) if (self._type != 7)
else '<undefined>')
def __str__(self):
"""Return string containing information about tag."""
return ' '.join(str(getattr(self, s)) for s in self.__slots__)
class TiffSequence(object):
"""Sequence of image files.
The data shape and dtype of all files must match.
Properties
----------
files : list
List of file names.
shape : tuple
Shape of image sequence.
axes : str
Labels of axes in shape.
Examples
--------
>>> tifs = TiffSequence("test.oif.files/*.tif")
>>> tifs.shape, tifs.axes
((2, 100), 'CT')
>>> data = tifs.asarray()
>>> data.shape
(2, 100, 256, 256)
"""
_patterns = {
'axes': r"""
# matches Olympus OIF and Leica TIFF series
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
"""}
class ParseError(Exception):
pass
def __init__(self, files, imread=TiffFile, pattern='axes',
*args, **kwargs):
"""Initialize instance from multiple files.
Parameters
----------
files : str, or sequence of str
Glob pattern or sequence of file names.
imread : function or class
Image read function or class with asarray function returning numpy
array from single file.
pattern : str
Regular expression pattern that matches axes names and sequence
indices in file names.
By default this matches Olympus OIF and Leica TIFF series.
"""
if isinstance(files, basestring):
files = natural_sorted(glob.glob(files))
files = list(files)
if not files:
raise ValueError("no files found")
#if not os.path.isfile(files[0]):
# raise ValueError("file not found")
self.files = files
if hasattr(imread, 'asarray'):
# redefine imread
_imread = imread
def imread(fname, *args, **kwargs):
with _imread(fname) as im:
return im.asarray(*args, **kwargs)
self.imread = imread
self.pattern = self._patterns.get(pattern, pattern)
try:
self._parse()
if not self.axes:
self.axes = 'I'
except self.ParseError:
self.axes = 'I'
self.shape = (len(files),)
self._start_index = (0,)
self._indices = tuple((i,) for i in range(len(files)))
def __str__(self):
"""Return string with information about image sequence."""
return "\n".join([
self.files[0],
'* files: %i' % len(self.files),
'* axes: %s' % self.axes,
'* shape: %s' % str(self.shape)])
def __len__(self):
return len(self.files)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
pass
def asarray(self, memmap=False, *args, **kwargs):
"""Read image data from all files and return as single numpy array.
If memmap is True, return an array stored in a binary file on disk.
The args and kwargs parameters are passed to the imread function.
Raise IndexError or ValueError if image shapes don't match.
"""
im = self.imread(self.files[0], *args, **kwargs)
shape = self.shape + im.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=im.dtype, shape=shape)
else:
result = numpy.zeros(shape, dtype=im.dtype)
result = result.reshape(-1, *im.shape)
for index, fname in zip(self._indices, self.files):
index = [i-j for i, j in zip(index, self._start_index)]
index = numpy.ravel_multi_index(index, self.shape)
im = self.imread(fname, *args, **kwargs)
result[index] = im
result.shape = shape
return result
def _parse(self):
"""Get axes and shape from file names."""
if not self.pattern:
raise self.ParseError("invalid pattern")
pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
matches = pattern.findall(self.files[0])
if not matches:
raise self.ParseError("pattern doesn't match file names")
matches = matches[-1]
if len(matches) % 2:
raise self.ParseError("pattern doesn't match axis name and index")
axes = ''.join(m for m in matches[::2] if m)
if not axes:
raise self.ParseError("pattern doesn't match file names")
indices = []
for fname in self.files:
matches = pattern.findall(fname)[-1]
if axes != ''.join(m for m in matches[::2] if m):
raise ValueError("axes don't match within the image sequence")
indices.append([int(m) for m in matches[1::2] if m])
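        # derive axis lengths from the range of indices found in the file names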
shape = tuple(numpy.max(indices, axis=0))
start_index = tuple(numpy.min(indices, axis=0))
shape = tuple(i-j+1 for i, j in zip(shape, start_index))
if product(shape) != len(self.files):
warnings.warn("files are missing. Missing data are zeroed")
self.axes = axes.upper()
self.shape = shape
self._indices = indices
self._start_index = start_index
class Record(dict):
"""Dictionary with attribute access.
Can also be initialized with numpy.core.records.record.
"""
__slots__ = ()
def __init__(self, arg=None, **kwargs):
if kwargs:
arg = kwargs
elif arg is None:
arg = {}
try:
dict.__init__(self, arg)
except (TypeError, ValueError):
for i, name in enumerate(arg.dtype.names):
v = arg[i]
self[name] = v if v.dtype.char != 'S' else stripnull(v)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __str__(self):
"""Pretty print Record."""
s = []
lists = []
for k in sorted(self):
try:
if k.startswith('_'): # does not work with byte
continue
except AttributeError:
pass
v = self[k]
if isinstance(v, (list, tuple)) and len(v):
if isinstance(v[0], Record):
lists.append((k, v))
continue
elif isinstance(v[0], TiffPage):
v = [i.index for i in v if i]
s.append(
("* %s: %s" % (k, str(v))).split("\n", 1)[0]
[:PRINT_LINE_LEN].rstrip())
for k, v in lists:
l = []
for i, w in enumerate(v):
l.append("* %s[%i]\n %s" % (k, i,
str(w).replace("\n", "\n ")))
s.append('\n'.join(l))
return '\n'.join(s)
class TiffTags(Record):
"""Dictionary of TiffTag with attribute access."""
def __str__(self):
"""Return string with information about all tags."""
s = []
for tag in sorted(self.values(), key=lambda x: x.code):
typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
line = "* %i %s (%s) %s" % (
tag.code, tag.name, typecode, tag.as_str())
s.append(line[:PRINT_LINE_LEN].lstrip())
return '\n'.join(s)
class FileHandle(object):
"""Binary file handle.
* Handle embedded files (for CZI within CZI files).
    * Allow re-opening closed files (for multi-file formats such as OME-TIFF).
    * Read numpy arrays and records from file-like objects.
    Only binary read, seek, tell, and close are supported on embedded files.
    When initialized from another file handle, do not use the other handle
    until this FileHandle is closed.
Attributes
----------
name : str
Name of the file.
path : str
Absolute path to file.
size : int
Size of file in bytes.
is_file : bool
        If True, file has a fileno and can be memory mapped.
All attributes are read-only.
"""
__slots__ = ('_fh', '_arg', '_mode', '_name', '_dir',
'_offset', '_size', '_close', 'is_file')
def __init__(self, arg, mode='rb', name=None, offset=None, size=None):
"""Initialize file handle from file name or another file handle.
Parameters
----------
arg : str, File, or FileHandle
File name or open file handle.
mode : str
File open mode in case 'arg' is a file name.
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
"""
self._fh = None
self._arg = arg
self._mode = mode
self._name = name
self._dir = ''
self._offset = offset
self._size = size
self._close = True
self.is_file = False
self.open()
def open(self):
"""Open or re-open file."""
if self._fh:
return # file is open
if isinstance(self._arg, basestring):
# file name
self._arg = os.path.abspath(self._arg)
self._dir, self._name = os.path.split(self._arg)
self._fh = open(self._arg, self._mode)
self._close = True
if self._offset is None:
self._offset = 0
elif isinstance(self._arg, FileHandle):
# FileHandle
self._fh = self._arg._fh
if self._offset is None:
self._offset = 0
self._offset += self._arg._offset
self._close = False
if not self._name:
if self._offset:
name, ext = os.path.splitext(self._arg._name)
self._name = "%s@%i%s" % (name, self._offset, ext)
else:
self._name = self._arg._name
self._dir = self._arg._dir
else:
# open file object
self._fh = self._arg
if self._offset is None:
self._offset = self._arg.tell()
self._close = False
if not self._name:
try:
self._dir, self._name = os.path.split(self._fh.name)
except AttributeError:
self._name = "Unnamed stream"
if self._offset:
self._fh.seek(self._offset)
if self._size is None:
pos = self._fh.tell()
self._fh.seek(self._offset, 2)
self._size = self._fh.tell()
self._fh.seek(pos)
try:
self._fh.fileno()
self.is_file = True
except Exception:
self.is_file = False
def read(self, size=-1):
"""Read 'size' bytes from file, or until EOF is reached."""
if size < 0 and self._offset:
size = self._size
return self._fh.read(size)
def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'):
"""Return numpy.memmap of data stored in file."""
if not self.is_file:
raise ValueError("Can not memory map file without fileno.")
return numpy.memmap(self._fh, dtype=dtype, mode=mode,
offset=self._offset + offset,
shape=shape, order=order)
def read_array(self, dtype, count=-1, sep=""):
"""Return numpy array from file.
Work around numpy issue #2230, "numpy.fromfile does not accept
StringIO object" https://github.com/numpy/numpy/issues/2230.
"""
try:
return numpy.fromfile(self._fh, dtype, count, sep)
except IOError:
if count < 0:
size = self._size
else:
size = count * numpy.dtype(dtype).itemsize
data = self._fh.read(size)
return numpy.fromstring(data, dtype, count, sep)
def read_record(self, dtype, shape=1, byteorder=None):
"""Return numpy record from file."""
try:
rec = numpy.rec.fromfile(self._fh, dtype, shape,
byteorder=byteorder)
except Exception:
dtype = numpy.dtype(dtype)
if shape is None:
shape = self._size // dtype.itemsize
size = product(sequence(shape)) * dtype.itemsize
data = self._fh.read(size)
return numpy.rec.fromstring(data, dtype, shape,
byteorder=byteorder)
return rec[0] if shape == 1 else rec
def tell(self):
"""Return file's current position."""
return self._fh.tell() - self._offset
def seek(self, offset, whence=0):
"""Set file's current position."""
if self._offset:
if whence == 0:
self._fh.seek(self._offset + offset, whence)
return
elif whence == 2:
self._fh.seek(self._offset + self._size + offset, 0)
return
self._fh.seek(offset, whence)
def close(self):
"""Close file."""
if self._close and self._fh:
self._fh.close()
self._fh = None
self.is_file = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getattr__(self, name):
"""Return attribute from underlying file object."""
if self._offset:
warnings.warn(
"FileHandle: '%s' not implemented for embedded files" % name)
return getattr(self._fh, name)
@property
def name(self):
return self._name
@property
def dirname(self):
return self._dir
@property
def path(self):
return os.path.join(self._dir, self._name)
@property
def size(self):
return self._size
@property
def closed(self):
return self._fh is None
def read_bytes(fh, byteorder, dtype, count):
"""Read tag data from file and return as byte string."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count).tostring()
def read_numpy(fh, byteorder, dtype, count):
"""Read tag data from file and return as numpy array."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count)
def read_json(fh, byteorder, dtype, count):
"""Read JSON tag data from file and return as object."""
data = fh.read(count)
try:
return json.loads(unicode(stripnull(data), 'utf-8'))
except ValueError:
warnings.warn("invalid JSON `%s`" % data)
def read_mm_header(fh, byteorder, dtype, count):
"""Read MM_HEADER tag from file and return as numpy.rec.array."""
return fh.read_record(MM_HEADER, byteorder=byteorder)
def read_mm_stamp(fh, byteorder, dtype, count):
"""Read MM_STAMP tag from file and return as numpy.array."""
return fh.read_array(byteorder+'f8', 8)
def read_uic1tag(fh, byteorder, dtype, count, plane_count=None):
"""Read MetaMorph STK UIC1Tag from file and return as dictionary.
Return empty dictionary if plane_count is unknown.
"""
assert dtype in ('2I', '1I') and byteorder == '<'
result = {}
if dtype == '2I':
# pre MetaMorph 2.5 (not tested)
values = fh.read_array('<u4', 2*count).reshape(count, 2)
result = {'z_distance': values[:, 0] / values[:, 1]}
elif plane_count:
for i in range(count):
tagid = struct.unpack('<I', fh.read(4))[0]
if tagid in (28, 29, 37, 40, 41):
# silently skip unexpected tags
fh.read(4)
continue
name, value = read_uic_tag(fh, tagid, plane_count, offset=True)
result[name] = value
return result
def read_uic2tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC2Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 6*plane_count).reshape(plane_count, 6)
return {
'z_distance': values[:, 0] / values[:, 1],
'date_created': values[:, 2], # julian days
'time_created': values[:, 3], # milliseconds
'date_modified': values[:, 4], # julian days
'time_modified': values[:, 5], # milliseconds
}
def read_uic3tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC3Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 2*plane_count).reshape(plane_count, 2)
return {'wavelengths': values[:, 0] / values[:, 1]}
def read_uic4tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC4Tag from file and return as dictionary."""
assert dtype == '1I' and byteorder == '<'
result = {}
while True:
tagid = struct.unpack('<H', fh.read(2))[0]
if tagid == 0:
break
name, value = read_uic_tag(fh, tagid, plane_count, offset=False)
result[name] = value
return result
def read_uic_tag(fh, tagid, plane_count, offset):
"""Read a single UIC tag value from file and return tag name and value.
UIC1Tags use an offset.
"""
def read_int(count=1):
value = struct.unpack('<%iI' % count, fh.read(4*count))
return value[0] if count == 1 else value
try:
name, dtype = UIC_TAGS[tagid]
except KeyError:
# unknown tag
return '_tagid_%i' % tagid, read_int()
if offset:
pos = fh.tell()
if dtype not in (int, None):
off = read_int()
if off < 8:
warnings.warn("invalid offset for uic tag '%s': %i"
% (name, off))
return name, off
fh.seek(off)
if dtype is None:
# skip
name = '_' + name
value = read_int()
elif dtype is int:
# int
value = read_int()
elif dtype is Fraction:
# fraction
value = read_int(2)
value = value[0] / value[1]
elif dtype is julian_datetime:
# datetime
value = julian_datetime(*read_int(2))
elif dtype is read_uic_image_property:
# ImagePropertyEx
value = read_uic_image_property(fh)
elif dtype is str:
# pascal string
size = read_int()
if 0 <= size < 2**10:
value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
value = stripnull(value)
elif offset:
value = ''
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
elif dtype == '%ip':
# sequence of pascal strings
value = []
for i in range(plane_count):
size = read_int()
if 0 <= size < 2**10:
string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
string = stripnull(string)
value.append(string)
elif offset:
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
else:
# struct or numpy type
dtype = '<' + dtype
if '%i' in dtype:
dtype = dtype % plane_count
if '(' in dtype:
# numpy type
value = fh.read_array(dtype, 1)[0]
if value.shape[-1] == 2:
# assume fractions
value = value[..., 0] / value[..., 1]
else:
# struct format
value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
if len(value) == 1:
value = value[0]
if offset:
fh.seek(pos + 4)
return name, value
def read_uic_image_property(fh):
"""Read UIC ImagePropertyEx tag from file and return as dict."""
# TODO: test this
size = struct.unpack('B', fh.read(1))[0]
name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
flags, prop = struct.unpack('<IB', fh.read(5))
if prop == 1:
value = struct.unpack('II', fh.read(8))
value = value[0] / value[1]
else:
size = struct.unpack('B', fh.read(1))[0]
value = struct.unpack('%is' % size, fh.read(size))[0]
return dict(name=name, flags=flags, value=value)
def read_cz_lsm_info(fh, byteorder, dtype, count):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
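    # 50350412 == 0x0300494C, 67127628 == 0x0400494C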
if magic_number not in (50350412, 67127628):
raise ValueError("not a valid CS_LSM_INFO structure")
fh.seek(-8, 1)
if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize:
# adjust structure according to structure_size
cz_lsm_info = []
size = 0
for name, dtype in CZ_LSM_INFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
cz_lsm_info.append((name, dtype))
else:
cz_lsm_info = CZ_LSM_INFO
return fh.read_record(cz_lsm_info, byteorder=byteorder)
def read_cz_lsm_floatpairs(fh):
"""Read LSM sequence of float pairs from file and return as list."""
size = struct.unpack('<i', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_positions(fh):
"""Read LSM positions from file and return as list."""
size = struct.unpack('<I', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_time_stamps(fh):
"""Read LSM time stamps from file and return as list."""
size, count = struct.unpack('<ii', fh.read(8))
if size != (8 + 8 * count):
raise ValueError("lsm_time_stamps block is too short")
# return struct.unpack('<%dd' % count, fh.read(8*count))
return fh.read_array('<f8', count=count)
def read_cz_lsm_event_list(fh):
"""Read LSM events from file and return as list of (time, type, text)."""
count = struct.unpack('<II', fh.read(8))[1]
events = []
while count > 0:
esize, etime, etype = struct.unpack('<IdI', fh.read(16))
etext = stripnull(fh.read(esize - 16))
events.append((etime, etype, etext))
count -= 1
return events
def read_cz_lsm_scan_info(fh):
"""Read LSM scan information from file and return as Record."""
block = Record()
blocks = [block]
unpack = struct.unpack
if 0x10000000 != struct.unpack('<I', fh.read(4))[0]:
# not a Recording sub block
raise ValueError("not a lsm_scan_info structure")
fh.read(8)
while True:
entry, dtype, size = unpack('<III', fh.read(12))
if dtype == 2:
# ascii
value = stripnull(fh.read(size))
elif dtype == 4:
# long
value = unpack('<i', fh.read(4))[0]
elif dtype == 5:
# rational
value = unpack('<d', fh.read(8))[0]
else:
value = 0
if entry in CZ_LSM_SCAN_INFO_ARRAYS:
blocks.append(block)
name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
newobj = []
setattr(block, name, newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
blocks.append(block)
newobj = Record()
block.append(newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
setattr(block, name, value)
elif entry == 0xffffffff:
# end sub block
block = blocks.pop()
else:
# unknown entry
setattr(block, "entry_0x%x" % entry, value)
if not blocks:
break
return block
def read_nih_image_header(fh, byteorder, dtype, count):
"""Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
a = fh.read_record(NIH_IMAGE_HEADER, byteorder=byteorder)
a = a.newbyteorder(byteorder)
a.xunit = a.xunit[:a._xunit_len]
a.um = a.um[:a._um_len]
return a
def read_micromanager_metadata(fh):
"""Read MicroManager non-TIFF settings from open file and return as dict.
The settings can be used to read image data without parsing the TIFF file.
Raise ValueError if file does not contain valid MicroManager metadata.
"""
fh.seek(0)
try:
byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
except IndexError:
raise ValueError("not a MicroManager TIFF file")
results = {}
fh.seek(8)
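    # eight uint32 values after the TIFF header locate the index map,
    # display settings, comments and summary metadata blocks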
(index_header, index_offset, display_header, display_offset,
comments_header, comments_offset, summary_header, summary_length
) = struct.unpack(byteorder + "IIIIIIII", fh.read(32))
if summary_header != 2355492:
raise ValueError("invalid MicroManager summary_header")
results['summary'] = read_json(fh, byteorder, None, summary_length)
if index_header != 54773648:
raise ValueError("invalid MicroManager index_header")
fh.seek(index_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 3453623:
raise ValueError("invalid MicroManager index_header")
data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count))
results['index_map'] = {
'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5],
'position': data[3::5], 'offset': data[4::5]}
if display_header != 483765892:
raise ValueError("invalid MicroManager display_header")
fh.seek(display_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 347834724:
raise ValueError("invalid MicroManager display_header")
results['display_settings'] = read_json(fh, byteorder, None, count)
if comments_header != 99384722:
raise ValueError("invalid MicroManager comments_header")
fh.seek(comments_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 84720485:
raise ValueError("invalid MicroManager comments_header")
results['comments'] = read_json(fh, byteorder, None, count)
return results
def imagej_metadata(data, bytecounts, byteorder):
"""Return dict from ImageJ metadata tag value."""
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
def read_string(data, byteorder):
return _str(stripnull(data[0 if byteorder == '<' else 1::2]))
def read_double(data, byteorder):
return struct.unpack(byteorder+('d' * (len(data) // 8)), data)
def read_bytes(data, byteorder):
#return struct.unpack('b' * len(data), data)
return numpy.fromstring(data, 'uint8')
metadata_types = { # big endian
b'info': ('info', read_string),
b'labl': ('labels', read_string),
b'rang': ('ranges', read_double),
b'luts': ('luts', read_bytes),
b'roi ': ('roi', read_bytes),
b'over': ('overlays', read_bytes)}
metadata_types.update( # little endian
dict((k[::-1], v) for k, v in metadata_types.items()))
if not bytecounts:
raise ValueError("no ImageJ metadata")
    if data[:4] not in (b'IJIJ', b'JIJI'):
raise ValueError("invalid ImageJ metadata")
header_size = bytecounts[0]
if header_size < 12 or header_size > 804:
raise ValueError("invalid ImageJ metadata header size")
ntypes = (header_size - 4) // 8
header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
pos = 4 + ntypes * 8
counter = 0
result = {}
for mtype, count in zip(header[::2], header[1::2]):
values = []
name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
for _ in range(count):
counter += 1
pos1 = pos + bytecounts[counter]
values.append(func(data[pos:pos1], byteorder))
pos = pos1
result[name.strip()] = values[0] if count == 1 else values
return result
def imagej_description(description):
"""Return dict from ImageJ image_description tag."""
def _bool(val):
return {b'true': True, b'false': False}[val.lower()]
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
result = {}
for line in description.splitlines():
try:
key, val = line.split(b'=')
except Exception:
continue
key = key.strip()
val = val.strip()
for dtype in (int, float, _bool, _str):
try:
val = dtype(val)
break
except Exception:
pass
result[_str(key)] = val
return result
def _replace_by(module_function, package=None, warn=False):
"""Try replace decorated function by module.function."""
try:
from importlib import import_module
except ImportError:
warnings.warn('could not import module importlib')
return lambda func: func
def decorate(func, module_function=module_function, warn=warn):
try:
module, function = module_function.split('.')
if not package:
module = import_module(module)
else:
module = import_module('.' + module, package=package)
func, oldfunc = getattr(module, function), func
globals()['__old_' + func.__name__] = oldfunc
except Exception:
if warn:
warnings.warn("failed to import %s" % module_function)
return func
return decorate
def decodejpg(encoded, tables=b'', photometric=None,
ycbcr_subsampling=None, ycbcr_positioning=None):
"""Decode JPEG encoded byte string (using _czifile extension module)."""
import _czifile
image = _czifile.decodejpg(encoded, tables)
if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning:
# TODO: convert YCbCr to RGB
pass
return image.tostring()
@_replace_by('_tifffile.decodepackbits')
def decodepackbits(encoded):
"""Decompress PackBits encoded byte string.
PackBits is a simple byte-oriented run-length compression scheme.
"""
func = ord if sys.version[0] == '2' else lambda x: x
result = []
result_extend = result.extend
i = 0
try:
while True:
n = func(encoded[i]) + 1
i += 1
if n < 129:
result_extend(encoded[i:i+n])
i += n
elif n > 129:
result_extend(encoded[i:i+1] * (258-n))
i += 1
except IndexError:
pass
return b''.join(result) if sys.version[0] == '2' else bytes(result)
@_replace_by('_tifffile.decodelzw')
def decodelzw(encoded):
"""Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
The strip must begin with a CLEAR code and end with an EOI code.
This is an implementation of the LZW decoding algorithm described in (1).
It is not compatible with old style LZW compressed files like quad-lzw.tif.
"""
len_encoded = len(encoded)
bitcount_max = len_encoded * 8
unpack = struct.unpack
if sys.version[0] == '2':
newtable = [chr(i) for i in range(256)]
else:
newtable = [bytes([i]) for i in range(256)]
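    # two placeholder entries for the CLEAR (256) and EOI (257) control codes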
newtable.extend((0, 0))
def next_code():
"""Return integer of `bitw` bits at `bitcount` position in encoded."""
start = bitcount // 8
s = encoded[start:start+4]
try:
code = unpack('>I', s)[0]
except Exception:
code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
code <<= bitcount % 8
code &= mask
return code >> shr
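    # the code width grows from 9 to 12 bits as the string table fills;
    # shr/mask extract the top `bitw` bits of the 32-bit window read in next_code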
switchbitch = { # code: bit-width, shr-bits, bit-mask
255: (9, 23, int(9*'1'+'0'*23, 2)),
511: (10, 22, int(10*'1'+'0'*22, 2)),
1023: (11, 21, int(11*'1'+'0'*21, 2)),
2047: (12, 20, int(12*'1'+'0'*20, 2)), }
bitw, shr, mask = switchbitch[255]
bitcount = 0
if len_encoded < 4:
raise ValueError("strip must be at least 4 characters long")
if next_code() != 256:
raise ValueError("strip must begin with CLEAR code")
code = 0
oldcode = 0
result = []
result_append = result.append
while True:
code = next_code() # ~5% faster when inlining this function
bitcount += bitw
if code == 257 or bitcount >= bitcount_max: # EOI
break
if code == 256: # CLEAR
table = newtable[:]
table_append = table.append
lentable = 258
bitw, shr, mask = switchbitch[255]
code = next_code()
bitcount += bitw
if code == 257: # EOI
break
result_append(table[code])
else:
if code < lentable:
decoded = table[code]
newcode = table[oldcode] + decoded[:1]
else:
newcode = table[oldcode]
newcode += newcode[:1]
decoded = newcode
result_append(decoded)
table_append(newcode)
lentable += 1
oldcode = code
if lentable in switchbitch:
bitw, shr, mask = switchbitch[lentable]
if code != 257:
warnings.warn("unexpected end of lzw stream (code %i)" % code)
return b''.join(result)
@_replace_by('_tifffile.unpackints')
def unpackints(data, dtype, itemsize, runlen=0):
"""Decompress byte string to array of integers of any bit size <= 32.
Parameters
----------
data : byte str
Data to decompress.
dtype : numpy.dtype or str
A numpy boolean or integer type.
itemsize : int
Number of bits per integer.
runlen : int
Number of consecutive integers, after which to start at next byte.
"""
if itemsize == 1: # bitarray
data = numpy.fromstring(data, '|B')
data = numpy.unpackbits(data)
if runlen % 8:
data = data.reshape(-1, runlen + (8 - runlen % 8))
data = data[:, :runlen].reshape(-1)
return data.astype(dtype)
dtype = numpy.dtype(dtype)
if itemsize in (8, 16, 32, 64):
return numpy.fromstring(data, dtype)
if itemsize < 1 or itemsize > 32:
raise ValueError("itemsize out of range: %i" % itemsize)
if dtype.kind not in "biu":
raise ValueError("invalid dtype")
itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
if itembytes != dtype.itemsize:
raise ValueError("dtype.itemsize too small")
if runlen == 0:
runlen = len(data) // itembytes
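    # skipbits: padding to the next byte boundary after each run;
    # shrbits/bitmask: shift and mask extracting the top `itemsize` bits
    # of each big-endian word read from the data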
skipbits = runlen*itemsize % 8
if skipbits:
skipbits = 8 - skipbits
shrbits = itembytes*8 - itemsize
bitmask = int(itemsize*'1'+'0'*shrbits, 2)
dtypestr = '>' + dtype.char # dtype always big endian?
unpack = struct.unpack
l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
result = numpy.empty((l, ), dtype)
bitcount = 0
for i in range(len(result)):
start = bitcount // 8
s = data[start:start+itembytes]
try:
code = unpack(dtypestr, s)[0]
except Exception:
code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
code <<= bitcount % 8
code &= bitmask
result[i] = code >> shrbits
bitcount += itemsize
if (i+1) % runlen == 0:
bitcount += skipbits
return result
def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
"""Return array from byte string containing packed samples.
Use to unpack RGB565 or RGB555 to RGB888 format.
Parameters
----------
data : byte str
The data to be decoded. Samples in each pixel are stored consecutively.
Pixels are aligned to 8, 16, or 32 bit boundaries.
dtype : numpy.dtype
The sample data type. The byteorder applies also to the data stream.
bitspersample : tuple
Number of bits for each sample in a pixel.
rescale : bool
Upscale samples to the number of bits in dtype.
Returns
-------
result : ndarray
Flattened array of unpacked samples of native dtype.
Examples
--------
>>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
>>> print(unpackrgb(data, '<B', (5, 6, 5), False))
[ 1 1 1 31 63 31]
>>> print(unpackrgb(data, '<B', (5, 6, 5)))
[ 8 4 8 255 255 255]
>>> print(unpackrgb(data, '<B', (5, 5, 5)))
[ 16 8 8 255 255 255]
"""
dtype = numpy.dtype(dtype)
bits = int(numpy.sum(bitspersample))
if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
raise ValueError("sample size not supported %s" % str(bitspersample))
dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
data = numpy.fromstring(data, dtype.byteorder+dt)
result = numpy.empty((data.size, len(bitspersample)), dtype.char)
for i, bps in enumerate(bitspersample):
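        # shift the sample to the least significant bits and mask it out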
t = data >> int(numpy.sum(bitspersample[i+1:]))
t &= int('0b'+'1'*bps, 2)
if rescale:
o = ((dtype.itemsize * 8) // bps + 1) * bps
if o > data.dtype.itemsize * 8:
t = t.astype('I')
t *= (2**o - 1) // (2**bps - 1)
t //= 2**(o - (dtype.itemsize * 8))
result[:, i] = t
return result.reshape(-1)
def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy array
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
def squeeze_axes(shape, axes, skip='XY'):
"""Return shape and axes with single-dimensional entries removed.
Remove unused dimensions unless their axes are listed in 'skip'.
>>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
((5, 2, 1), 'TYX')
"""
if len(shape) != len(axes):
raise ValueError("dimensions of axes and shape don't match")
shape, axes = zip(*(i for i in zip(shape, axes)
if i[0] > 1 or i[1] in skip))
return shape, ''.join(axes)
def transpose_axes(data, axes, asaxes='CTZYX'):
"""Return data with its axes permuted to match specified axes.
A view is returned if possible.
>>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape
(5, 2, 1, 3, 4)
"""
for ax in axes:
if ax not in asaxes:
raise ValueError("unknown axis %s" % ax)
# add missing axes to data
shape = data.shape
for ax in reversed(asaxes):
if ax not in axes:
axes = ax + axes
shape = (1,) + shape
data = data.reshape(shape)
# transpose axes
data = data.transpose([axes.index(ax) for ax in asaxes])
return data
def stack_pages(pages, memmap=False, *args, **kwargs):
"""Read data from sequence of TiffPage and stack them vertically.
If memmap is True, return an array stored in a binary file on disk.
    Additional parameters are passed to the page asarray function.
"""
if len(pages) == 0:
raise ValueError("no pages")
if len(pages) == 1:
return pages[0].asarray(memmap=memmap, *args, **kwargs)
result = pages[0].asarray(*args, **kwargs)
shape = (len(pages),) + result.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=result.dtype, shape=shape)
else:
result = numpy.empty(shape, dtype=result.dtype)
for i, page in enumerate(pages):
result[i] = page.asarray(*args, **kwargs)
return result
def stripnull(string):
"""Return string truncated at first null character.
Clean NULL terminated C strings.
>>> stripnull(b'string\\x00')
b'string'
"""
i = string.find(b'\x00')
return string if (i < 0) else string[:i]
def stripascii(string):
"""Return string truncated at last byte that is 7bit ASCII.
Clean NULL separated and terminated TIFF strings.
>>> stripascii(b'string\\x00string\\n\\x01\\x00')
b'string\\x00string\\n'
>>> stripascii(b'\\x00')
b''
"""
# TODO: pythonize this
ord_ = ord if sys.version_info[0] < 3 else lambda x: x
i = len(string)
while i:
i -= 1
if 8 < ord_(string[i]) < 127:
break
else:
i = -1
return string[:i+1]
def format_size(size):
"""Return file size as string from byte size."""
for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
if size < 2048:
return "%.f %s" % (size, unit)
size /= 1024.0
def sequence(value):
"""Return tuple containing value if value is not a sequence.
>>> sequence(1)
(1,)
>>> sequence([1])
[1]
"""
try:
len(value)
return value
except TypeError:
return (value, )
def product(iterable):
"""Return product of sequence of numbers.
Equivalent of functools.reduce(operator.mul, iterable, 1).
>>> product([2**8, 2**30])
274877906944
>>> product([])
1
"""
prod = 1
for i in iterable:
prod *= i
return prod
def natural_sorted(iterable):
"""Return human sorted list of strings.
E.g. for sorting file names.
>>> natural_sorted(['f1', 'f2', 'f10'])
['f1', 'f2', 'f10']
"""
def sortkey(x):
return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)]
numbers = re.compile(r'(\d+)')
return sorted(iterable, key=sortkey)
def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)):
"""Return datetime object from timestamp in Excel serial format.
Convert LSM time stamps.
>>> excel_datetime(40237.029999999795)
datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
"""
return epoch + datetime.timedelta(timestamp)
def julian_datetime(julianday, millisecond=0):
"""Return datetime from days since 1/1/4713 BC and ms since midnight.
Convert Julian dates according to MetaMorph.
>>> julian_datetime(2451576, 54362783)
datetime.datetime(2000, 2, 2, 15, 6, 2, 783)
"""
if julianday <= 1721423:
# no datetime before year 1
return None
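    # standard Julian day number to calendar date conversion; alpha applies
    # the Gregorian calendar correction for dates after the 1582 reform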
a = julianday + 1
if a > 2299160:
alpha = math.trunc((a - 1867216.25) / 36524.25)
a += 1 + alpha - alpha // 4
b = a + (1524 if a > 1721423 else 1158)
c = math.trunc((b - 122.1) / 365.25)
d = math.trunc(365.25 * c)
e = math.trunc((b - d) / 30.6001)
day = b - d - math.trunc(30.6001 * e)
month = e - (1 if e < 13.5 else 13)
year = c - (4716 if month > 2.5 else 4715)
    hour, millisecond = divmod(millisecond, 1000 * 60 * 60)
    minute, millisecond = divmod(millisecond, 1000 * 60)
    second, millisecond = divmod(millisecond, 1000)
    return datetime.datetime(year, month, day,
                             hour, minute, second, millisecond)
def test_tifffile(directory='testimages', verbose=True):
"""Read all images in directory.
Print error message on failure.
>>> test_tifffile(verbose=False)
"""
successful = 0
failed = 0
start = time.time()
for f in glob.glob(os.path.join(directory, '*.*')):
if verbose:
print("\n%s>\n" % f.lower(), end='')
t0 = time.time()
try:
tif = TiffFile(f, multifile=True)
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
try:
img = tif.asarray()
except ValueError:
try:
img = tif[0].asarray()
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
finally:
tif.close()
successful += 1
if verbose:
print("%s, %s %s, %s, %.0f ms" % (
str(tif), str(img.shape), img.dtype, tif[0].compression,
(time.time()-t0) * 1e3))
if verbose:
print("\nSuccessfully read %i of %i files in %.3f s\n" % (
successful, successful+failed, time.time()-start))
class TIFF_SUBFILE_TYPES(object):
def __getitem__(self, key):
result = []
if key & 1:
result.append('reduced_image')
if key & 2:
result.append('page')
if key & 4:
result.append('mask')
return tuple(result)
TIFF_PHOTOMETRICS = {
0: 'miniswhite',
1: 'minisblack',
2: 'rgb',
3: 'palette',
4: 'mask',
5: 'separated', # CMYK
6: 'ycbcr',
8: 'cielab',
9: 'icclab',
10: 'itulab',
32803: 'cfa', # Color Filter Array
32844: 'logl',
32845: 'logluv',
34892: 'linear_raw'
}
TIFF_COMPESSIONS = {
1: None,
2: 'ccittrle',
3: 'ccittfax3',
4: 'ccittfax4',
5: 'lzw',
6: 'ojpeg',
7: 'jpeg',
8: 'adobe_deflate',
9: 't85',
10: 't43',
32766: 'next',
32771: 'ccittrlew',
32773: 'packbits',
32809: 'thunderscan',
32895: 'it8ctpad',
32896: 'it8lw',
32897: 'it8mp',
32898: 'it8bl',
32908: 'pixarfilm',
32909: 'pixarlog',
32946: 'deflate',
32947: 'dcs',
34661: 'jbig',
34676: 'sgilog',
34677: 'sgilog24',
34712: 'jp2000',
34713: 'nef',
}
TIFF_DECOMPESSORS = {
None: lambda x: x,
'adobe_deflate': zlib.decompress,
'deflate': zlib.decompress,
'packbits': decodepackbits,
'lzw': decodelzw,
# 'jpeg': decodejpg
}
TIFF_DATA_TYPES = {
1: '1B', # BYTE 8-bit unsigned integer.
2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
# the last byte must be NULL (binary zero).
3: '1H', # SHORT 16-bit (2-byte) unsigned integer
4: '1I', # LONG 32-bit (4-byte) unsigned integer.
5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
# a fraction; the second, the denominator.
6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
7: '1s', # UNDEFINED An 8-bit byte that may contain anything,
# depending on the definition of the field.
8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
# of a fraction, the second the denominator.
11: '1f', # FLOAT Single precision (4-byte) IEEE format.
12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
13: '1I', # IFD unsigned 4 byte IFD offset.
#14: '', # UNICODE
#15: '', # COMPLEX
16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
TIFF_SAMPLE_FORMATS = {
1: 'uint',
2: 'int',
3: 'float',
#4: 'void',
#5: 'complex_int',
6: 'complex',
}
TIFF_SAMPLE_DTYPES = {
('uint', 1): '?', # bitmap
('uint', 2): 'B',
('uint', 3): 'B',
('uint', 4): 'B',
('uint', 5): 'B',
('uint', 6): 'B',
('uint', 7): 'B',
('uint', 8): 'B',
('uint', 9): 'H',
('uint', 10): 'H',
('uint', 11): 'H',
('uint', 12): 'H',
('uint', 13): 'H',
('uint', 14): 'H',
('uint', 15): 'H',
('uint', 16): 'H',
('uint', 17): 'I',
('uint', 18): 'I',
('uint', 19): 'I',
('uint', 20): 'I',
('uint', 21): 'I',
('uint', 22): 'I',
('uint', 23): 'I',
('uint', 24): 'I',
('uint', 25): 'I',
('uint', 26): 'I',
('uint', 27): 'I',
('uint', 28): 'I',
('uint', 29): 'I',
('uint', 30): 'I',
('uint', 31): 'I',
('uint', 32): 'I',
('uint', 64): 'Q',
('int', 8): 'b',
('int', 16): 'h',
('int', 32): 'i',
('int', 64): 'q',
('float', 16): 'e',
('float', 32): 'f',
('float', 64): 'd',
('complex', 64): 'F',
('complex', 128): 'D',
('uint', (5, 6, 5)): 'B',
}
TIFF_ORIENTATIONS = {
1: 'top_left',
2: 'top_right',
3: 'bottom_right',
4: 'bottom_left',
5: 'left_top',
6: 'right_top',
7: 'right_bottom',
8: 'left_bottom',
}
# TODO: is there a standard for character axes labels?
AXES_LABELS = {
'X': 'width',
'Y': 'height',
'Z': 'depth',
'S': 'sample', # rgb(a)
'I': 'series', # general sequence, plane, page, IFD
'T': 'time',
'C': 'channel', # color, emission wavelength
'A': 'angle',
'P': 'phase', # formerly F # P is Position in LSM!
'R': 'tile', # region, point, mosaic
'H': 'lifetime', # histogram
'E': 'lambda', # excitation wavelength
'L': 'exposure', # lux
'V': 'event',
'Q': 'other',
#'M': 'mosaic', # LSM 6
}
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
# Map OME pixel types to numpy dtype
OME_PIXEL_TYPES = {
'int8': 'i1',
'int16': 'i2',
'int32': 'i4',
'uint8': 'u1',
'uint16': 'u2',
'uint32': 'u4',
'float': 'f4',
# 'bit': 'bit',
'double': 'f8',
'complex': 'c8',
'double-complex': 'c16',
}
# NIH Image PicHeader v1.63
NIH_IMAGE_HEADER = [
('fileid', 'a8'),
('nlines', 'i2'),
('pixelsperline', 'i2'),
('version', 'i2'),
('oldlutmode', 'i2'),
('oldncolors', 'i2'),
('colors', 'u1', (3, 32)),
('oldcolorstart', 'i2'),
('colorwidth', 'i2'),
('extracolors', 'u2', (6, 3)),
('nextracolors', 'i2'),
('foregroundindex', 'i2'),
('backgroundindex', 'i2'),
('xscale', 'f8'),
('_x0', 'i2'),
('_x1', 'i2'),
('units_t', 'i2'), # NIH_UNITS_TYPE
('p1', [('x', 'i2'), ('y', 'i2')]),
('p2', [('x', 'i2'), ('y', 'i2')]),
('curvefit_t', 'i2'), # NIH_CURVEFIT_TYPE
('ncoefficients', 'i2'),
('coeff', 'f8', 6),
('_um_len', 'u1'),
('um', 'a15'),
('_x2', 'u1'),
('binarypic', 'b1'),
('slicestart', 'i2'),
('sliceend', 'i2'),
('scalemagnification', 'f4'),
('nslices', 'i2'),
('slicespacing', 'f4'),
('currentslice', 'i2'),
('frameinterval', 'f4'),
('pixelaspectratio', 'f4'),
('colorstart', 'i2'),
('colorend', 'i2'),
('ncolors', 'i2'),
('fill1', '3u2'),
('fill2', '3u2'),
('colortable_t', 'u1'), # NIH_COLORTABLE_TYPE
('lutmode_t', 'u1'), # NIH_LUTMODE_TYPE
('invertedtable', 'b1'),
('zeroclip', 'b1'),
('_xunit_len', 'u1'),
('xunit', 'a11'),
('stacktype_t', 'i2'), # NIH_STACKTYPE_TYPE
]
NIH_COLORTABLE_TYPE = (
'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
NIH_LUTMODE_TYPE = (
'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
'ColorLut', 'CustomGrayscale')
NIH_CURVEFIT_TYPE = (
'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
'UncalibratedOD')
NIH_UNITS_TYPE = (
'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
NIH_STACKTYPE_TYPE = (
'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# Map Universal Imaging Corporation MetaMorph internal tag ids to name and type
UIC_TAGS = {
0: ('auto_scale', int),
1: ('min_scale', int),
2: ('max_scale', int),
3: ('spatial_calibration', int),
4: ('x_calibration', Fraction),
5: ('y_calibration', Fraction),
6: ('calibration_units', str),
7: ('name', str),
8: ('thresh_state', int),
9: ('thresh_state_red', int),
10: ('tagid_10', None), # undefined
11: ('thresh_state_green', int),
12: ('thresh_state_blue', int),
13: ('thresh_state_lo', int),
14: ('thresh_state_hi', int),
15: ('zoom', int),
16: ('create_time', julian_datetime),
17: ('last_saved_time', julian_datetime),
18: ('current_buffer', int),
19: ('gray_fit', None),
20: ('gray_point_count', None),
21: ('gray_x', Fraction),
22: ('gray_y', Fraction),
23: ('gray_min', Fraction),
24: ('gray_max', Fraction),
25: ('gray_unit_name', str),
26: ('standard_lut', int),
27: ('wavelength', int),
28: ('stage_position', '(%i,2,2)u4'), # N xy positions as fractions
29: ('camera_chip_offset', '(%i,2,2)u4'), # N xy offsets as fractions
30: ('overlay_mask', None),
31: ('overlay_compress', None),
32: ('overlay', None),
33: ('special_overlay_mask', None),
34: ('special_overlay_compress', None),
35: ('special_overlay', None),
36: ('image_property', read_uic_image_property),
37: ('stage_label', '%ip'), # N str
38: ('autoscale_lo_info', Fraction),
39: ('autoscale_hi_info', Fraction),
40: ('absolute_z', '(%i,2)u4'), # N fractions
41: ('absolute_z_valid', '(%i,)u4'), # N long
42: ('gamma', int),
43: ('gamma_red', int),
44: ('gamma_green', int),
45: ('gamma_blue', int),
46: ('camera_bin', int),
47: ('new_lut', int),
48: ('image_property_ex', None),
49: ('plane_property', int),
50: ('user_lut_table', '(256,3)u1'),
51: ('red_autoscale_info', int),
52: ('red_autoscale_lo_info', Fraction),
53: ('red_autoscale_hi_info', Fraction),
54: ('red_minscale_info', int),
55: ('red_maxscale_info', int),
56: ('green_autoscale_info', int),
57: ('green_autoscale_lo_info', Fraction),
58: ('green_autoscale_hi_info', Fraction),
59: ('green_minscale_info', int),
60: ('green_maxscale_info', int),
61: ('blue_autoscale_info', int),
62: ('blue_autoscale_lo_info', Fraction),
63: ('blue_autoscale_hi_info', Fraction),
64: ('blue_min_scale_info', int),
65: ('blue_max_scale_info', int),
#66: ('overlay_plane_color', read_uic_overlay_plane_color),
}
# Olympus FluoView
MM_DIMENSION = [
('name', 'a16'),
('size', 'i4'),
('origin', 'f8'),
('resolution', 'f8'),
('unit', 'a64'),
]
MM_HEADER = [
('header_flag', 'i2'),
('image_type', 'u1'),
('image_name', 'a257'),
('offset_data', 'u4'),
('palette_size', 'i4'),
('offset_palette0', 'u4'),
('offset_palette1', 'u4'),
('comment_size', 'i4'),
('offset_comment', 'u4'),
('dimensions', MM_DIMENSION, 10),
('offset_position', 'u4'),
('map_type', 'i2'),
('map_min', 'f8'),
('map_max', 'f8'),
('min_value', 'f8'),
('max_value', 'f8'),
('offset_map', 'u4'),
('gamma', 'f8'),
('offset', 'f8'),
('gray_channel', MM_DIMENSION),
('offset_thumbnail', 'u4'),
('voice_field', 'i4'),
('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM
CZ_LSM_INFO = [
('magic_number', 'u4'),
('structure_size', 'i4'),
('dimension_x', 'i4'),
('dimension_y', 'i4'),
('dimension_z', 'i4'),
('dimension_channels', 'i4'),
('dimension_time', 'i4'),
('data_type', 'i4'), # CZ_DATA_TYPES
('thumbnail_x', 'i4'),
('thumbnail_y', 'i4'),
('voxel_size_x', 'f8'),
('voxel_size_y', 'f8'),
('voxel_size_z', 'f8'),
('origin_x', 'f8'),
('origin_y', 'f8'),
('origin_z', 'f8'),
('scan_type', 'u2'),
('spectral_scan', 'u2'),
('type_of_data', 'u4'), # CZ_TYPE_OF_DATA
('offset_vector_overlay', 'u4'),
('offset_input_lut', 'u4'),
('offset_output_lut', 'u4'),
('offset_channel_colors', 'u4'),
('time_interval', 'f8'),
('offset_channel_data_types', 'u4'),
('offset_scan_info', 'u4'), # CZ_LSM_SCAN_INFO
('offset_ks_data', 'u4'),
('offset_time_stamps', 'u4'),
('offset_event_list', 'u4'),
('offset_roi', 'u4'),
('offset_bleach_roi', 'u4'),
('offset_next_recording', 'u4'),
# LSM 2.0 ends here
('display_aspect_x', 'f8'),
('display_aspect_y', 'f8'),
('display_aspect_z', 'f8'),
('display_aspect_time', 'f8'),
('offset_mean_of_roi_overlay', 'u4'),
('offset_topo_isoline_overlay', 'u4'),
('offset_topo_profile_overlay', 'u4'),
('offset_linescan_overlay', 'u4'),
('offset_toolbar_flags', 'u4'),
('offset_channel_wavelength', 'u4'),
('offset_channel_factors', 'u4'),
('objective_sphere_correction', 'f8'),
('offset_unmix_parameters', 'u4'),
# LSM 3.2, 4.0 end here
('offset_acquisition_parameters', 'u4'),
('offset_characteristics', 'u4'),
('offset_palette', 'u4'),
('time_difference_x', 'f8'),
('time_difference_y', 'f8'),
('time_difference_z', 'f8'),
('internal_use_1', 'u4'),
('dimension_p', 'i4'),
('dimension_m', 'i4'),
('dimensions_reserved', '16i4'),
('offset_tile_positions', 'u4'),
('reserved_1', '9u4'),
('offset_positions', 'u4'),
('reserved_2', '21u4'), # must be 0
]
# Import functions for LSM_INFO sub-records
CZ_LSM_INFO_READERS = {
'scan_info': read_cz_lsm_scan_info,
'time_stamps': read_cz_lsm_time_stamps,
'event_list': read_cz_lsm_event_list,
'channel_colors': read_cz_lsm_floatpairs,
'positions': read_cz_lsm_floatpairs,
'tile_positions': read_cz_lsm_floatpairs,
}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
0: 'XYZCT', # x-y-z scan
1: 'XYZCT', # z scan (x-z plane)
2: 'XYZCT', # line scan
3: 'XYTCZ', # time series x-y
4: 'XYZTC', # time series x-z
5: 'XYTCZ', # time series 'Mean of ROIs'
6: 'XYZTC', # time series x-y-z
7: 'XYCTZ', # spline scan
8: 'XYCZT', # spline scan x-z
9: 'XYTCZ', # time series spline plane x-z
10: 'XYZCT', # point mode
}
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
'X': 'dimension_x',
'Y': 'dimension_y',
'Z': 'dimension_z',
'C': 'dimension_channels',
'T': 'dimension_time',
}
# Description of cz_lsm_info.data_type
CZ_DATA_TYPES = {
0: 'varying data types',
1: '8 bit unsigned integer',
2: '12 bit unsigned integer',
5: '32 bit float',
}
# Description of cz_lsm_info.type_of_data
CZ_TYPE_OF_DATA = {
0: 'Original scan data',
1: 'Calculated data',
2: '3D reconstruction',
3: 'Topography height map',
}
CZ_LSM_SCAN_INFO_ARRAYS = {
0x20000000: "tracks",
0x30000000: "lasers",
0x60000000: "detection_channels",
0x80000000: "illumination_channels",
0xa0000000: "beam_splitters",
0xc0000000: "data_channels",
0x11000000: "timers",
0x13000000: "markers",
}
CZ_LSM_SCAN_INFO_STRUCTS = {
# 0x10000000: "recording",
0x40000000: "track",
0x50000000: "laser",
0x70000000: "detection_channel",
0x90000000: "illumination_channel",
0xb0000000: "beam_splitter",
0xd0000000: "data_channel",
0x12000000: "timer",
0x14000000: "marker",
}
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
# recording
0x10000001: "name",
0x10000002: "description",
0x10000003: "notes",
0x10000004: "objective",
0x10000005: "processing_summary",
0x10000006: "special_scan_mode",
0x10000007: "scan_type",
0x10000008: "scan_mode",
0x10000009: "number_of_stacks",
0x1000000a: "lines_per_plane",
0x1000000b: "samples_per_line",
0x1000000c: "planes_per_volume",
0x1000000d: "images_width",
0x1000000e: "images_height",
0x1000000f: "images_number_planes",
0x10000010: "images_number_stacks",
0x10000011: "images_number_channels",
0x10000012: "linscan_xy_size",
0x10000013: "scan_direction",
0x10000014: "time_series",
0x10000015: "original_scan_data",
0x10000016: "zoom_x",
0x10000017: "zoom_y",
0x10000018: "zoom_z",
0x10000019: "sample_0x",
0x1000001a: "sample_0y",
0x1000001b: "sample_0z",
0x1000001c: "sample_spacing",
0x1000001d: "line_spacing",
0x1000001e: "plane_spacing",
0x1000001f: "plane_width",
0x10000020: "plane_height",
0x10000021: "volume_depth",
0x10000023: "nutation",
0x10000034: "rotation",
0x10000035: "precession",
0x10000036: "sample_0time",
0x10000037: "start_scan_trigger_in",
0x10000038: "start_scan_trigger_out",
0x10000039: "start_scan_event",
0x10000040: "start_scan_time",
0x10000041: "stop_scan_trigger_in",
0x10000042: "stop_scan_trigger_out",
0x10000043: "stop_scan_event",
0x10000044: "stop_scan_time",
0x10000045: "use_rois",
0x10000046: "use_reduced_memory_rois",
0x10000047: "user",
0x10000048: "use_bc_correction",
0x10000049: "position_bc_correction1",
0x10000050: "position_bc_correction2",
0x10000051: "interpolation_y",
0x10000052: "camera_binning",
0x10000053: "camera_supersampling",
0x10000054: "camera_frame_width",
0x10000055: "camera_frame_height",
0x10000056: "camera_offset_x",
0x10000057: "camera_offset_y",
0x10000059: "rt_binning",
0x1000005a: "rt_frame_width",
0x1000005b: "rt_frame_height",
0x1000005c: "rt_region_width",
0x1000005d: "rt_region_height",
0x1000005e: "rt_offset_x",
0x1000005f: "rt_offset_y",
0x10000060: "rt_zoom",
0x10000061: "rt_line_period",
0x10000062: "prescan",
0x10000063: "scan_direction_z",
# track
0x40000001: "multiplex_type", # 0 after line; 1 after frame
0x40000002: "multiplex_order",
0x40000003: "sampling_mode", # 0 sample; 1 line average; 2 frame average
0x40000004: "sampling_method", # 1 mean; 2 sum
0x40000005: "sampling_number",
0x40000006: "acquire",
0x40000007: "sample_observation_time",
0x4000000b: "time_between_stacks",
0x4000000c: "name",
0x4000000d: "collimator1_name",
0x4000000e: "collimator1_position",
0x4000000f: "collimator2_name",
0x40000010: "collimator2_position",
0x40000011: "is_bleach_track",
0x40000012: "is_bleach_after_scan_number",
0x40000013: "bleach_scan_number",
0x40000014: "trigger_in",
0x40000015: "trigger_out",
0x40000016: "is_ratio_track",
0x40000017: "bleach_count",
0x40000018: "spi_center_wavelength",
0x40000019: "pixel_time",
0x40000021: "condensor_frontlens",
0x40000023: "field_stop_value",
0x40000024: "id_condensor_aperture",
0x40000025: "condensor_aperture",
0x40000026: "id_condensor_revolver",
0x40000027: "condensor_filter",
0x40000028: "id_transmission_filter1",
0x40000029: "id_transmission1",
0x40000030: "id_transmission_filter2",
0x40000031: "id_transmission2",
0x40000032: "repeat_bleach",
0x40000033: "enable_spot_bleach_pos",
0x40000034: "spot_bleach_posx",
0x40000035: "spot_bleach_posy",
0x40000036: "spot_bleach_posz",
0x40000037: "id_tubelens",
0x40000038: "id_tubelens_position",
0x40000039: "transmitted_light",
0x4000003a: "reflected_light",
0x4000003b: "simultan_grab_and_bleach",
0x4000003c: "bleach_pixel_time",
# laser
0x50000001: "name",
0x50000002: "acquire",
0x50000003: "power",
# detection_channel
0x70000001: "integration_mode",
0x70000002: "special_mode",
0x70000003: "detector_gain_first",
0x70000004: "detector_gain_last",
0x70000005: "amplifier_gain_first",
0x70000006: "amplifier_gain_last",
0x70000007: "amplifier_offs_first",
0x70000008: "amplifier_offs_last",
0x70000009: "pinhole_diameter",
0x7000000a: "counting_trigger",
0x7000000b: "acquire",
0x7000000c: "point_detector_name",
0x7000000d: "amplifier_name",
0x7000000e: "pinhole_name",
0x7000000f: "filter_set_name",
0x70000010: "filter_name",
0x70000013: "integrator_name",
0x70000014: "channel_name",
0x70000015: "detector_gain_bc1",
0x70000016: "detector_gain_bc2",
0x70000017: "amplifier_gain_bc1",
0x70000018: "amplifier_gain_bc2",
0x70000019: "amplifier_offset_bc1",
0x70000020: "amplifier_offset_bc2",
0x70000021: "spectral_scan_channels",
0x70000022: "spi_wavelength_start",
0x70000023: "spi_wavelength_stop",
0x70000026: "dye_name",
0x70000027: "dye_folder",
# illumination_channel
0x90000001: "name",
0x90000002: "power",
0x90000003: "wavelength",
0x90000004: "aquire",
0x90000005: "detchannel_name",
0x90000006: "power_bc1",
0x90000007: "power_bc2",
# beam_splitter
0xb0000001: "filter_set",
0xb0000002: "filter",
0xb0000003: "name",
# data_channel
0xd0000001: "name",
0xd0000003: "acquire",
0xd0000004: "color",
0xd0000005: "sample_type",
0xd0000006: "bits_per_sample",
0xd0000007: "ratio_type",
0xd0000008: "ratio_track1",
0xd0000009: "ratio_track2",
0xd000000a: "ratio_channel1",
0xd000000b: "ratio_channel2",
0xd000000c: "ratio_const1",
0xd000000d: "ratio_const2",
0xd000000e: "ratio_const3",
0xd000000f: "ratio_const4",
0xd0000010: "ratio_const5",
0xd0000011: "ratio_const6",
0xd0000012: "ratio_first_images1",
0xd0000013: "ratio_first_images2",
0xd0000014: "dye_name",
0xd0000015: "dye_folder",
0xd0000016: "spectrum",
0xd0000017: "acquire",
# timer
0x12000001: "name",
0x12000002: "description",
0x12000003: "interval",
0x12000004: "trigger_in",
0x12000005: "trigger_out",
0x12000006: "activation_time",
0x12000007: "activation_number",
# marker
0x14000001: "name",
0x14000002: "description",
0x14000003: "trigger_in",
0x14000004: "trigger_out",
}
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
255: ('subfile_type', None, 3, 1,
{0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
256: ('image_width', None, 4, 1, None),
257: ('image_length', None, 4, 1, None),
258: ('bits_per_sample', 1, 3, 1, None),
259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
269: ('document_name', None, 2, None, None),
270: ('image_description', None, 2, None, None),
271: ('make', None, 2, None, None),
272: ('model', None, 2, None, None),
273: ('strip_offsets', None, 4, None, None),
274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
277: ('samples_per_pixel', 1, 3, 1, None),
278: ('rows_per_strip', 2**32-1, 4, 1, None),
279: ('strip_byte_counts', None, 4, None, None),
280: ('min_sample_value', None, 3, None, None),
281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
282: ('x_resolution', None, 5, 1, None),
283: ('y_resolution', None, 5, 1, None),
284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
285: ('page_name', None, 2, None, None),
286: ('x_position', None, 5, 1, None),
287: ('y_position', None, 5, 1, None),
296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
297: ('page_number', None, 3, 2, None),
305: ('software', None, 2, None, None),
306: ('datetime', None, 2, None, None),
315: ('artist', None, 2, None, None),
316: ('host_computer', None, 2, None, None),
317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}),
318: ('white_point', None, 5, 2, None),
319: ('primary_chromaticities', None, 5, 6, None),
320: ('color_map', None, 3, None, None),
322: ('tile_width', None, 4, 1, None),
323: ('tile_length', None, 4, 1, None),
324: ('tile_offsets', None, 4, None, None),
325: ('tile_byte_counts', None, 4, None, None),
338: ('extra_samples', None, 3, None,
{0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
340: ('smin_sample_value', None, None, None, None),
341: ('smax_sample_value', None, None, None, None),
347: ('jpeg_tables', None, 7, None, None),
530: ('ycbcr_subsampling', 1, 3, 2, None),
531: ('ycbcr_positioning', 1, 3, 1, None),
32995: ('sgi_matteing', None, None, 1, None), # use extra_samples
32996: ('sgi_datatype', None, None, 1, None), # use sample_format
32997: ('image_depth', None, 4, 1, None),
32998: ('tile_depth', None, 4, 1, None),
33432: ('copyright', None, 1, None, None),
33445: ('md_file_tag', None, 4, 1, None),
33446: ('md_scale_pixel', None, 5, 1, None),
33447: ('md_color_table', None, 3, None, None),
33448: ('md_lab_name', None, 2, None, None),
33449: ('md_sample_info', None, 2, None, None),
33450: ('md_prep_date', None, 2, None, None),
33451: ('md_prep_time', None, 2, None, None),
33452: ('md_file_units', None, 2, None, None),
33550: ('model_pixel_scale', None, 12, 3, None),
33922: ('model_tie_point', None, 12, None, None),
34665: ('exif_ifd', None, None, 1, None),
34735: ('geo_key_directory', None, 3, None, None),
34736: ('geo_double_params', None, 12, None, None),
34737: ('geo_ascii_params', None, 2, None, None),
34853: ('gps_ifd', None, None, 1, None),
37510: ('user_comment', None, None, None, None),
42112: ('gdal_metadata', None, 2, None, None),
42113: ('gdal_nodata', None, 2, None, None),
50289: ('mc_xy_position', None, 12, 2, None),
50290: ('mc_z_position', None, 12, 1, None),
50291: ('mc_xy_calibration', None, 12, 3, None),
50292: ('mc_lens_lem_na_n', None, 12, 3, None),
50293: ('mc_channel_name', None, 1, None, None),
50294: ('mc_ex_wavelength', None, 12, 1, None),
50295: ('mc_time_stamp', None, 12, 1, None),
50838: ('imagej_byte_counts', None, None, None, None),
65200: ('flex_xml', None, 2, None, None),
# code: (attribute name, default value, type, count, validator)
}
# Map custom TIFF tag codes to attribute names and import functions
CUSTOM_TAGS = {
700: ('xmp', read_bytes),
34377: ('photoshop', read_numpy),
33723: ('iptc', read_bytes),
34675: ('icc_profile', read_bytes),
33628: ('uic1tag', read_uic1tag), # Universal Imaging Corporation STK
33629: ('uic2tag', read_uic2tag),
33630: ('uic3tag', read_uic3tag),
33631: ('uic4tag', read_uic4tag),
34361: ('mm_header', read_mm_header), # Olympus FluoView
34362: ('mm_stamp', read_mm_stamp),
34386: ('mm_user_block', read_bytes),
34412: ('cz_lsm_info', read_cz_lsm_info), # Carl Zeiss LSM
43314: ('nih_image_header', read_nih_image_header),
# 40001: ('mc_ipwinscal', read_bytes),
40100: ('mc_id_old', read_bytes),
50288: ('mc_id', read_bytes),
50296: ('mc_frame_properties', read_bytes),
50839: ('imagej_metadata', read_bytes),
51123: ('micromanager_metadata', read_json),
}
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
bitspersample=None, photometric='rgb', interpolation='nearest',
dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return the figure, subplot and image (AxesImage) objects.
Requires pyplot already imported ``from matplotlib import pyplot``.
Parameters
----------
bitspersample : int or None
Number of bits per channel in integer RGB images.
photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
The color space of the image data.
title : str
Window and subplot title.
figure : matplotlib.figure.Figure, optional
Matplotlib figure to use for plotting.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
Maximum image size in any dimension.
kwargs : optional
Arguments for matplotlib.pyplot.imshow.
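Examples
--------
A minimal, illustrative call (``data`` is assumed to be a numpy image
array obtained elsewhere, e.g. from ``TiffFile.asarray()``; pyplot must
already be imported as noted above)::

    fig, subplot, image = imshow(data, photometric='minisblack',
                                 title='example')
    pyplot.show()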
"""
#if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
# raise ValueError("Can't handle %s photometrics" % photometric)
# TODO: handle photometric == 'separated' (CMYK)
isrgb = photometric in ('rgb', 'palette')
data = numpy.atleast_2d(data.squeeze())
data = data[(slice(0, maxdim), ) * len(data.shape)]
dims = data.ndim
if dims < 2:
raise ValueError("not an image")
elif dims == 2:
dims = 0
isrgb = False
else:
if isrgb and data.shape[-3] in (3, 4):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif not isrgb and (data.shape[-1] < data.shape[-2] // 16 and
data.shape[-1] < data.shape[-3] // 16 and
data.shape[-1] < 5):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if photometric == 'palette' and isrgb:
datamax = data.max()
if datamax > 255:
data >>= 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, int):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data <<= 8 - bitspersample
elif bitspersample > 8:
data >>= bitspersample - 8 # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
elif data.dtype.kind == 'b':
datamax = 1
elif data.dtype.kind == 'c':
raise NotImplementedError("complex type") # TODO: handle complex types
if not isrgb:
if vmax is None:
vmax = datamax
if vmin is None:
if data.dtype.kind == 'i':
dtmin = numpy.iinfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
vmin = numpy.min(data[data > dtmin])
elif data.dtype.kind == 'f':
dtmin = numpy.finfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
vmin = numpy.min(data[data > dtmin])
else:
vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
subplot = pyplot.subplot(subplot)
if title:
try:
title = unicode(title, 'Windows-1252')
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.kind in 'ubf' or vmin == 0:
cmap = 'cubehelix'
else:
cmap = 'coolwarm'
if photometric == 'miniswhite':
cmap += '_r'
image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
current, x, y)
else:
return "%s @ [%4i, %4i]" % (data[y, x], x, y)
except IndexError:
return ""
pyplot.gca().format_coord = format_coord
if dims:
current = list((0, ) * dims)
cur_ax_dat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
cur_ax_dat[1] = data[tuple(current)].squeeze()
image.set_data(cur_ax_dat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
cur_ax_dat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = cur_ax_dat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def main(argv=None):
"""Command line usage main function."""
if float(sys.version[0:3]) < 2.6:
print("This script requires Python version 2.6 or better.")
print("This is Python version %s" % sys.version)
return 0
if argv is None:
argv = sys.argv
import optparse
parser = optparse.OptionParser(
usage="usage: %prog [options] path",
description="Display image data in TIFF files.",
version="%%prog %s" % __version__)
opt = parser.add_option
opt('-p', '--page', dest='page', type='int', default=-1,
help="display single page")
opt('-s', '--series', dest='series', type='int', default=-1,
help="display series of pages of same shape")
opt('--nomultifile', dest='nomultifile', action='store_true',
default=False, help="don't read OME series from multiple files")
opt('--noplot', dest='noplot', action='store_true', default=False,
help="don't display images")
opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
help="image interpolation method")
opt('--dpi', dest='dpi', type='int', default=96,
help="set plot resolution")
opt('--debug', dest='debug', action='store_true', default=False,
help="raise exception on failures")
opt('--test', dest='test', action='store_true', default=False,
help="try read all images in path")
opt('--doctest', dest='doctest', action='store_true', default=False,
help="runs the docstring examples")
opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
opt('-q', '--quiet', dest='verbose', action='store_false')
settings, path = parser.parse_args()
path = ' '.join(path)
if settings.doctest:
import doctest
doctest.testmod()
return 0
if not path:
parser.error("No file specified")
if settings.test:
test_tifffile(path, settings.verbose)
return 0
if any(i in path for i in '?*'):
path = glob.glob(path)
if not path:
print('no files match the pattern')
return 0
# TODO: handle image sequences
#if len(path) == 1:
path = path[0]
print("Reading file structure...", end=' ')
start = time.time()
try:
tif = TiffFile(path, multifile=not settings.nomultifile)
except Exception as e:
if settings.debug:
raise
else:
print("\n", e)
sys.exit(0)
print("%.3f ms" % ((time.time()-start) * 1e3))
if tif.is_ome:
settings.norgb = True
images = [(None, tif[0 if settings.page < 0 else settings.page])]
if not settings.noplot:
print("Reading image data... ", end=' ')
def notnone(x):
return next(i for i in x if i is not None)
start = time.time()
try:
if settings.page >= 0:
images = [(tif.asarray(key=settings.page),
tif[settings.page])]
elif settings.series >= 0:
images = [(tif.asarray(series=settings.series),
notnone(tif.series[settings.series].pages))]
else:
images = []
for i, s in enumerate(tif.series):
try:
images.append(
(tif.asarray(series=i), notnone(s.pages)))
except ValueError as e:
images.append((None, notnone(s.pages)))
if settings.debug:
raise
else:
print("\n* series %i failed: %s... " % (i, e),
end='')
print("%.3f ms" % ((time.time()-start) * 1e3))
except Exception as e:
if settings.debug:
raise
else:
print(e)
tif.close()
print("\nTIFF file:", tif)
print()
for i, s in enumerate(tif.series):
print ("Series %i" % i)
print(s)
print()
for i, page in images:
print(page)
print(page.tags)
if page.is_palette:
print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags',
'mm_header', 'imagej_tags', 'micromanager_metadata',
'nih_image_header'):
if hasattr(page, attr):
print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
print()
if page.is_micromanager:
print('MICROMANAGER_FILE_METADATA')
print(Record(tif.micromanager_metadata))
if images and not settings.noplot:
try:
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot
except ImportError as e:
warnings.warn("failed to import matplotlib.\n%s" % e)
else:
for img, page in images:
if img is None:
continue
vmin, vmax = None, None
if 'gdal_nodata' in page.tags:
try:
vmin = numpy.min(img[img > float(page.gdal_nodata)])
except ValueError:
pass
if page.is_stk:
try:
vmin = page.uic_tags['min_scale']
vmax = page.uic_tags['max_scale']
except KeyError:
pass
else:
if vmax <= vmin:
vmin, vmax = None, None
title = "%s\n %s" % (str(tif), str(page))
imshow(img, title=title, vmin=vmin, vmax=vmax,
bitspersample=page.bits_per_sample,
photometric=page.photometric,
interpolation=settings.interpol,
dpi=settings.dpi)
pyplot.show()
TIFFfile = TiffFile # backwards compatibility
if sys.version_info[0] > 2:
basestring = str, bytes
unicode = str
if __name__ == "__main__":
sys.exit(main()) | apache-2.0 |
nathania/networkx | examples/multigraph/chess_masters.py | 54 | 5146 | #!/usr/bin/env python
"""
An example of the MultiDiGraph class.
The function chess_pgn_graph reads a collection of chess
matches stored in the specified PGN file
(PGN ="Portable Game Notation")
Here the (compressed) default file ---
chess_masters_WCC.pgn.bz2 ---
contains all 685 World Chess Championship matches
from 1886 - 1985.
(data from http://chessproblem.my-free-games.com/chess/games/Download-PGN.php)
The chess_pgn_graph() function returns a MultiDiGraph
with multiple edges. Each node is
the last name of a chess master. Each edge is directed
from white to black and contains selected game info.
The key statement in chess_pgn_graph below is
G.add_edge(white, black, **game_info)
where game_info is a dict describing each game.
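For illustration only (the player names and tag values below are
hypothetical, but the keys match the ``game_details`` list defined in
this module), one such call might look like
G.add_edge('White Player', 'Black Player',
Event='World Championship', Date='1886.01.11',
Result='1-0', ECO='C45', Site='New York USA')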
"""
# Copyright (C) 2006-2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
# tag names specifying what game info should be
# stored in the dict on each digraph edge
game_details=["Event",
"Date",
"Result",
"ECO",
"Site"]
def chess_pgn_graph(pgn_file="chess_masters_WCC.pgn.bz2"):
"""Read chess games in pgn format in pgn_file.
Filenames ending in .gz or .bz2 will be uncompressed.
Return the MultiDiGraph of players connected by a chess game.
Edges contain game data in a dict.
"""
import bz2
G=nx.MultiDiGraph()
game={}
datafile = bz2.BZ2File(pgn_file)
lines = (line.decode().rstrip('\r\n') for line in datafile)
for line in lines:
if line.startswith('['):
tag,value=line[1:-1].split(' ',1)
game[str(tag)]=value.strip('"')
else:
# empty line after tag set indicates
# we finished reading game info
if game:
white=game.pop('White')
black=game.pop('Black')
G.add_edge(white, black, **game)
game={}
return G
if __name__ == '__main__':
import networkx as nx
G=chess_pgn_graph()
ngames=G.number_of_edges()
nplayers=G.number_of_nodes()
print("Loaded %d chess games between %d players\n"\
% (ngames,nplayers))
# identify connected components
# of the undirected version
Gcc=list(nx.connected_component_subgraphs(G.to_undirected()))
if len(Gcc)>1:
print("Note the disconnected component consisting of:")
print(Gcc[1].nodes())
# find all games with B97 opening (as described in ECO)
openings=set([game_info['ECO']
for (white,black,game_info) in G.edges(data=True)])
print("\nFrom a total of %d different openings,"%len(openings))
print('the following games used the Sicilian opening')
print('with the Najdorff 7...Qb6 "Poisoned Pawn" variation.\n')
for (white,black,game_info) in G.edges(data=True):
if game_info['ECO']=='B97':
print(white,"vs",black)
for k,v in game_info.items():
print(" ",k,": ",v)
print("\n")
try:
import matplotlib.pyplot as plt
except ImportError:
import sys
print("Matplotlib needed for drawing. Skipping")
sys.exit(0)
# make new undirected graph H without multi-edges
H=nx.Graph(G)
# edge width is proportional number of games played
edgewidth=[]
for (u,v,d) in H.edges(data=True):
edgewidth.append(len(G.get_edge_data(u,v)))
# node size is proportional to number of games won
wins=dict.fromkeys(G.nodes(),0.0)
for (u,v,d) in G.edges(data=True):
r=d['Result'].split('-')
if r[0]=='1':
wins[u]+=1.0
elif r[0]=='1/2':
wins[u]+=0.5
wins[v]+=0.5
else:
wins[v]+=1.0
try:
pos=nx.graphviz_layout(H)
except:
pos=nx.spring_layout(H,iterations=20)
plt.rcParams['text.usetex'] = False
plt.figure(figsize=(8,8))
nx.draw_networkx_edges(H,pos,alpha=0.3,width=edgewidth, edge_color='m')
nodesize=[wins[v]*50 for v in H]
nx.draw_networkx_nodes(H,pos,node_size=nodesize,node_color='w',alpha=0.4)
nx.draw_networkx_edges(H,pos,alpha=0.4,node_size=0,width=1,edge_color='k')
nx.draw_networkx_labels(H,pos,font_size=14)
font = {'fontname' : 'Helvetica',
'color' : 'k',
'fontweight' : 'bold',
'fontsize' : 14}
plt.title("World Chess Championship Games: 1886 - 1985", font)
# change font and write text (using data coordinates)
font = {'fontname' : 'Helvetica',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 14}
plt.text(0.5, 0.97, "edge width = # games played",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.text(0.5, 0.94, "node size = # games won",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.axis('off')
plt.savefig("chess_masters.png",dpi=75)
print("Wrote chess_masters.png")
plt.show() # display
| bsd-3-clause |
murali-munna/scikit-learn | sklearn/decomposition/__init__.py | 147 | 1421 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
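For example, an illustrative (non-exhaustive) use of PCA to project a
small random matrix onto two components::

    import numpy as np
    from sklearn.decomposition import PCA

    X = np.random.RandomState(0).rand(10, 5)
    X_2d = PCA(n_components=2).fit_transform(X)   # shape (10, 2)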
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
k8si/691CL_project | Baseline - files.py | 1 | 5438 | # -*- coding: utf-8 -*-
"""
Created on Sat May 03 16:40:23 2014
@author: Helene
"""
import glob, os, re, nltk, random, time, sklearn
import numpy as np
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics.pairwise import linear_kernel
stop = stopwords.words('english')
### Methods ###
def bagOfWords():
path = "C:/Users/Helene/Documents/GitHub/691CL_project"
documents = []
names = []
for subTopic in ['SPORTS_TXT', 'US_TXT']:
subPath = path+'/'+subTopic
for filename in glob.glob(os.path.join(subPath, '*.txt')):
f = open(filename, 'r')
documents.append(f.read())
names.append((subTopic, re.findall(r'\\[\w\-\.]+',filename)[0][1:]))
f.close()
return names, documents
def predArg():
documents = []
names = []
path = "C:/Users/Helene/Documents/GitHub/691CL_project"
for subTopic in ['SPORTS_CNLP','US_CNLP']:
subPath = path+'/'+subTopic
#print subPath
for filename in glob.glob(os.path.join(subPath,"*.cnlp")):
#print 'NEW FILE:',filename
sents = []
f = open(filename, 'r')
sent = []
for line in f.readlines():
if len(line) > 1:
sent.append(line)
else:
sents.append(sent)
sent = []
documents.append(sents)
names.append((subTopic, re.findall(r'\\[\w\-\.]+',filename)[0][1:]))
f.close()
strDocs = []
for doc in documents:
allTokens = [line.split('\t')[1]+"-"+line.split('\t')[7][:-1] for sent in doc for line in sent if re.match(r'\d', line.split('\t')[7])]
strDocs.append(' '.join(allTokens))
return names, strDocs
def setup(train, test, binaryOpt = False):
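# Fit a CountVectorizer vocabulary on the training documents and transform the
# test documents into a term matrix; return raw binary presence counts when
# binaryOpt is True, otherwise an L2-normalised tf-idf matrix.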
count_vectorizer = CountVectorizer(binary = binaryOpt)
count_vectorizer.fit_transform(train)
freq_term_matrix = count_vectorizer.transform(test)
if binaryOpt:
return freq_term_matrix
tfidf = TfidfTransformer(norm="l2")
tfidf.fit(freq_term_matrix)
tf_idf_matrix = tfidf.transform(freq_term_matrix)
return tf_idf_matrix
def rankMatchesToTarget(matrix, compareMatrix, targetDocIndex, k, names):
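# Score every row of compareMatrix against the target row of matrix with a
# linear kernel (cosine similarity for the L2-normalised tf-idf vectors),
# skipping the target itself when both matrices are identical, and return the
# k highest-scoring (label pair, index, score) tuples.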
#print 'curr doc:',targetDocIndex
cosineSim = []
for i in range(compareMatrix.shape[0]):
if not all(matrix.data == compareMatrix.data) or (all(matrix.data == compareMatrix.data) and i != targetDocIndex):
cos = sklearn.metrics.pairwise.linear_kernel(matrix[targetDocIndex],compareMatrix[i])
cosineSim.append(( (names[targetDocIndex][0], names[i][0]), i, cos ))
sortedDocs = sorted(cosineSim, key=lambda tup: tup[-1], reverse = True)
return sortedDocs[slice(k)]
def precisionAndRecall(allSimilarityLists):
#(correct_Label, matched_label)
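# Note: despite the name, only precision is returned: for each query document,
# the fraction of its 10 retrieved matches whose label equals the query label,
# averaged over all queries.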
precision = []
for doc in allSimilarityLists:
tp = 0.0
for tup in doc:
if len(set(tup[0])) == 1:
tp += 1.0
precision.append(tp/10.0)
return np.mean(precision)
def shuffle (a, b):
combined = zip(a, b)
random.shuffle(combined)
a[:], b[:] = zip(*combined)
return a,b
def setupMatricies(start, end):
originals = [(onlyPaNames, setup(onlyPaDocs[start:end], onlyPaDocs[start:end], True), "predArg - binary:"),
(onlyPaNames, setup(onlyPaDocs[start:end], onlyPaDocs[start:end], False),"predArg - tfidf:"),
(bowNames, setup(bowDocs[start:end], bowDocs[start:end], True),"bagOfWords - binary:"),
(bowNames, setup(bowDocs[start:end], bowDocs[start:end], False), "bagOfWords - tfidf:"),
(bothNames, setup(bothDocs[start:end], bothDocs[start:end], True), "both BOW & PA - binary:"),
(bothNames, setup(bothDocs[start:end], bothDocs[start:end], False),"both BOW & PA - tfidf:")]
test = [setup(onlyPaDocs[start:end], onlyPaDocs[end:], True),
setup(onlyPaDocs[start:end], onlyPaDocs[end:], False),
setup(bowDocs[start:end], bowDocs[end:], True),
setup(bowDocs[start:end], bowDocs[end:], False),
setup(bothDocs[start:end], bothDocs[end:], True),
setup(bothDocs[start:end], bothDocs[end:], False)]
return originals,test
### "Main" ###
start = time.clock()
bowNames, bowDocs = shuffle(*bagOfWords())
onlyPaNames, onlyPaDocs = shuffle(*predArg())
bNames, bDocs = [], []
for title in bowNames:
if title[1]+'.cnlp' in [x[1] for x in onlyPaNames]:
paIndex = [x[1] for x in onlyPaNames].index(title[1]+'.cnlp')
bowIndex = [x[1] for x in bowNames].index(title[1])
#print paIndex, bowIndex
bNames.append((title[0],title[1]+'.cnlp'))
bDocs.append(bowDocs[bowIndex]+onlyPaDocs[paIndex])
bothNames, bothDocs = shuffle(bNames, bDocs)
print 'finished setting up the docs', time.clock() - start
everything, test = setupMatricies(0, 300)
print 'finished making all the vectors', time.clock() - start
for i in range(len(everything)):
names, matrixOrig, string = everything[i]
matrixCompare = test[i]
mostSimilarDocs = [rankMatchesToTarget(matrixOrig, matrixCompare, n, 10, names) for n in range(matrixCompare.shape[0])]
print string,precisionAndRecall(mostSimilarDocs)
print 'DONE!',(time.clock() - start)/60
| mit |
thareUSGS/GDAL_scripts | gdal_baseline_slope/python2/slope_histogram_cumulative_graph.py | 2 | 4103 | #!/usr/bin/env python
#/******************************************************************************
# * $Id$
# *
# * Project: GDAL Make Histogram and Cumulative graph from Tab delimited tab as
# generated by gdal_hist.py
# * Purpose: Take a gdal_hist.py output and create a histogram plot using matplotlib
# * Author: Trent Hare, [email protected]
# *
# ******************************************************************************
# * Public domain licenes (unlicense)
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# * DEALINGS IN THE SOFTWARE.
# ****************************************************************************/
import sys
import os
import math
import numpy as np
import pandas as pd
from pandas.tools.plotting import table
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def usage():
print 'Usage: slope_histogram_cumulative_graph.py -name "InSight E1" slope_histogram_table.tab outfile.png'
print " This program is geared to run on a table as generated by gdal_hist.py"
print 'slope_histogram_cumulative_graph.py -name "E_Marg_CE 01" DEM_1m_E_Marg_CE_adir_1m_hist.xls DEM_1m_E_Marg_CE_adir_1m_hist.png'
sys.exit(0)
#set None for commandline options
name = ""
infile = None
outfile = None
# =============================================================================
# Parse command line arguments.
# =============================================================================
i = 1
while i < len(sys.argv):
arg = sys.argv[i]
if arg == '-name':
i = i + 1
name = sys.argv[i]
elif infile is None:
infile = arg
elif outfile is None:
outfile = arg
else:
Usage()
i = i + 1
if infile is None:
usage()
if not(os.path.isfile(infile)):
input = sys.argv[1]
print "filename %s does not exist." % (infile)
sys.exit(1)
#load table
df = pd.DataFrame.from_csv(infile, sep='\t', header=1)
#initialize figure
fig, ax1 = plt.subplots()
#calculate unscaled values
#df.value = (df.value * 5) - 0.2
#df.ix[df.value < 0] = 0; df
#not to reverse histogram before calculating 'approx' stats
#min = round(df.value.min(),2)
#max = round(df.value.max(),2)
#mean = round(df.value.mean(),2)
#stddev = round(df.value.std(),2)
#rms = round(math.sqrt((mean * mean) + (stddev * stddev)),2)
#statsDict = {'Min':min,'Max':max,'Mean':mean \
#,'StdDev':stddev,'RMS':rms}
#statsSeries = pd.Series(statsDict,name='stats')
#statsSeries.sort()
#t = table(ax1, statsSeries, \
#loc='lower right', colWidths=[0.1] * 2)
#t.set_fontsize(18)
#props = t.properties()
#cells = props['child_artists']
#for c in cells:
#c.set_height(0.05)
#Plot frequency histogram from input table
ax1.fill(df.value,df['count'],'gray')
#df.plot(ax1=ax1, kind='area', color='gray', legend=True)
ax1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax1.get_yaxis().set_tick_params(direction='out')
#get min and max as found by pandas for plotting 'arrow' at X=15
#minY = round(df['count'].min(),0)
#maxY = round(df['count'].max(),0)
#grab existing ax1 axes
#ax = plt.axes()
#ax.arrow(15, minY, 0, maxY, head_width=0, head_length=0, fc='k', ec='k')
ax1.axvline(x=15, color='black', alpha=0.5)
#add cumulative plot on 'Y2' axis using the same X axis
ax2 = ax1.twinx()
ax2.plot(df.value,df['cumulative'],'blue')
#df.plot(ax2=ax2, df.value,df['cumulative'],'blue')
ax2.get_yaxis().set_tick_params(direction='out')
#define labels
ax1.set_xlabel('Slope (degrees)')
ax1.set_ylabel('Count')
ax2.set_ylabel('Cumulative')
plt.suptitle(name + ' Slope Histogram and Cumulative Plot')
#save out PNG
plt.savefig(outfile)
print "Graph exported to %s" % (outfile)
| unlicense |
sarunya-w/CS402-PROJECT | Project/feature_extraction/feature_extraction_parallel/fftengine.py | 1 | 3973 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 17:31:34 2015
@author: Sarunya
"""
import sys
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
import scipy.ndimage
#from scipy.ndimage import filters
#import time
sys.setrecursionlimit(10000)
bs = 200
wd = 8 # theta_range=wd*wd*2
clmax = 11 #clmax is the number of classes
def normFFT(images_file):
# apply to array
img = np.array(images_file)
#converte image to frequency domain
#f=np.log(np.abs(np.fft.fftshift(np.fft.fft2(im))))
f = np.log(np.abs(np.fft.fft2(img)))
#scaling
s=(100./f.shape[0],100./f.shape[1])
#normalized frequency domain
return scipy.ndimage.zoom(f,s,order = 2)
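# Gaussian weighting kernel centred at mu with width s; only referenced by the
# radial-binning code that is currently commented out inside getValue().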
def G(x,mu,s):
return 1.0/ np.sqrt(2.0*np.pi)*np.exp(((x-mu)**2)/(-2.0*s**2))
def getValue(images):
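# Build the feature vector for one image patch: take the 100x100 normalised
# log-FFT from normFFT(), stack its two low-frequency corner blocks (wd x wd
# each) into a (2*wd, wd) array and return it flattened.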
f = normFFT(images) #f=[100,100]
rmax,cmax = f.shape
sg = np.zeros((2*wd,wd)) #sg has shape (2*wd, wd)
sg[0:wd,:]=np.log(np.abs(f[rmax-wd:rmax,0:wd])) #bottom-left low-frequency corner of f
sg[wd:2*wd,:]=np.log(np.abs(f[0:wd,0:wd])) #top-left low-frequency corner of f
#filters.gaussian_filter(sg, (3,3), (0,0), sg)
# fsg=np.zeros(wd)
# for b in xrange(wd):
# for r in xrange(wd):
# for c in xrange(wd):
# rad=np.sqrt(r**2+c**2)
# fsg[b]=fsg[b]+sg[wd+r,c]*G(rad,float(b),0.2)+sg[wd-r,c]*G(rad,float(b),0.2)
# fsg[b]=fsg[b]/(np.pi*float(b+1.0))
# fsg=fsg/np.linalg.norm(fsg)
# fsg.astype(np.float32)
return sg.reshape(-1)
def getVector(images_files,class_files,samples,isTrain):
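# For every input image, crop `samples` random bs x bs patches (kept bs//2
# pixels away from the border) and compute their getValue() features; when
# isTrain is True, also read the class label at each patch centre from the
# matching label image and remap the value 255 to clmax - 1.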
#ts=time.time()
sub_img = []
sub_cs = []
bb = bs//2
for f in xrange(len(images_files)):
img = Image.open(images_files[f]).convert('L')
w , h = img.size
pixels=[]
for i in xrange(samples):
r = np.random.randint(bb, h-bb)
c = np.random.randint(bb, w-bb)
pixels.append((c,r))
box = (c-bb, r-bb, c + bb, r + bb)
output_img = img.crop(box)
sub_img.append(getValue(output_img))
if isTrain:
cimg = Image.open(class_files[f]).convert('L')
for p in pixels:
sub_cs.append(cimg.getpixel(p))
if isTrain:
sub_img=np.array(sub_img,dtype=np.float32)
sub_cs=np.array(sub_cs,dtype=np.uint32)
sub_cs[sub_cs==255]= clmax - 1
else:
#sub_img=np.array(sub_img,dtype=np.float32)
sub_cs=None
#ts=timestamp(ts)
return (sub_img ,sub_cs)
"""
if __name__ == '__main__':
dsetname = './train'
images_files = []
class_files = []
for root, dirs, files in os.walk(dsetname):
for f in files:
if f.endswith('jpg') or f.endswith('JPG') or f.endswith('png') or f.endswith('PNG'):
# read image to array (PIL)
images_files.append(os.path.join(root,f))
img_name = os.path.basename(os.path.join(root,f))
file_name = img_name.split(".")
# check image don't have file type 'bmp'
if os.path.isfile(os.path.join(root , 'bmp/' + file_name[0] + '.bmp')) == False:
print "plese label" , root , img_name
cross = 1
else:
class_files.append(os.path.join(root , 'bmp/' + file_name[0] + '.bmp'))
vs ,cs = getVector(images_files,class_files,20,isTrain=True)
vs=np.array(dview.gather('vs'))
cs=np.array(dview.gather('cs'))
k = 0
if cs[0] is None:
cs = None
if not os.path.exists(ddesname):
os.makedirs(ddesname)
rfile = ddesname +'/'+ 'dataset%02d.pic'%(k)
pickleFile = open(rfile, 'wb')
theta_range = vs.shape[1]
size = vs.shape[0]
samples = cs
I = vs
pickle.dump((clmax,theta_dim,theta_range,size,samples,I), pickleFile, pickle.HIGHEST_PROTOCOL)
pickleFile.close()
k = k+1
""" | mit |
oesteban/seaborn | doc/sphinxext/plot_directive.py | 38 | 27578 | """
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool or str
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files. If the ``:context: reset`` option is
specified, the context is reset for this and future plots, and
previous figures are closed prior to running the code.
``:context: close-figs`` keeps the context but closes previous figures
before running the code.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_html_show_source_link
Whether to show a link to the source in HTML.
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory to which ``plot::`` file names are relative.
(If None or empty, file names are relative to the
directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behavior
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing restructured text.
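For example, a minimal Sphinx ``conf.py`` setup for this directive could
look like the following (illustrative values; the extension name depends
on where this module lives in your project)::

    extensions = ['plot_directive']
    plot_include_source = True
    plot_html_show_source_link = False
    plot_formats = [('png', 80), ('hires.png', 200), ('pdf', 200)]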
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import sys, os, shutil, io, re, textwrap
from os.path import relpath
import traceback
if not six.PY3:
import cStringIO
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
for x in sphinx_version[:2]])
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
__version__ = 2
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_context(arg):
if arg in [None, 'reset', 'close-figs']:
return arg
raise ValueError("argument should be None or 'reset' or 'close-figs'")
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
def mark_plot_labels(app, document):
"""
To make plots referenceable, we need to move the reference from
the "htmlonly" (or "latexonly") node to the actual figure node
itself.
"""
for name, explicit in six.iteritems(document.nametypes):
if not explicit:
continue
labelid = document.nameids[name]
if labelid is None:
continue
node = document.ids[labelid]
if node.tagname in ('html_only', 'latex_only'):
for n in node:
if n.tagname == 'figure':
sectname = name
for c in n:
if c.tagname == 'caption':
sectname = c.astext()
break
node['ids'].remove(labelid)
node['names'].remove(name)
n['ids'].append(labelid)
n['names'].append(name)
document.settings.env.labels[name] = \
document.settings.env.docname, labelid, sectname
break
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
'context': _option_context,
'nofigs': directives.flag,
'encoding': directives.encoding
}
app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
app.add_config_value('plot_pre_code', None, True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_html_show_source_link', True, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_config_value('plot_rcparams', {}, True)
app.add_config_value('plot_apply_rcparams', False, True)
app.add_config_value('plot_working_directory', None, True)
app.add_config_value('plot_template', None, True)
app.connect(str('doctree-read'), mark_plot_labels)
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
def remove_coding(text):
"""
Remove the coding comment, which six.exec_ doesn't like.
"""
sub_re = re.compile("^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE)
return sub_re.sub("", text)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived) or
(os.path.exists(original) and
os.stat(derived).st_mtime < os.stat(original).st_mtime))
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None, function_name=None):
"""
Import a Python module from a path, and run the function given by
name, if function_name is not None.
"""
# Change the working directory to the directory of the example, so
# it can get at its data files, if any. Add its path to sys.path
# so it can import any helper modules sitting beside it.
if six.PY2:
pwd = os.getcwdu()
else:
pwd = os.getcwd()
old_sys_path = list(sys.path)
if setup.config.plot_working_directory is not None:
try:
os.chdir(setup.config.plot_working_directory)
except OSError as err:
raise OSError(str(err) + '\n`plot_working_directory` option in'
'Sphinx configuration file must be a valid '
'directory path')
except TypeError as err:
raise TypeError(str(err) + '\n`plot_working_directory` option in '
'Sphinx configuration file must be a string or '
'None')
sys.path.insert(0, setup.config.plot_working_directory)
elif code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
# Redirect stdout
stdout = sys.stdout
if six.PY3:
sys.stdout = io.StringIO()
else:
sys.stdout = cStringIO.StringIO()
# Assign a do-nothing print function to the namespace. There
# doesn't seem to be any other way to provide a way to (not) print
# that works correctly across Python 2 and 3.
def _dummy_print(*arg, **kwarg):
pass
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
if setup.config.plot_pre_code is None:
six.exec_(six.text_type("import numpy as np\n" +
"from matplotlib import pyplot as plt\n"), ns)
else:
six.exec_(six.text_type(setup.config.plot_pre_code), ns)
ns['print'] = _dummy_print
if "__main__" in code:
six.exec_("__name__ = '__main__'", ns)
code = remove_coding(code)
six.exec_(code, ns)
if function_name is not None:
six.exec_(function_name + "()", ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
def clear_state(plot_rcparams, close=True):
if close:
plt.close('all')
matplotlib.rc_file_defaults()
matplotlib.rcParams.update(plot_rcparams)
def render_figures(code, code_path, output_dir, output_base, context,
function_name, config, context_reset=False,
close_figs=False):
"""
Run a pyplot script and save the low and high res PNGs and a PDF
in *output_dir*.
Save the images under *output_dir* with file names derived from
*output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
formats = []
plot_formats = config.plot_formats
if isinstance(plot_formats, six.string_types):
plot_formats = eval(plot_formats)
for fmt in plot_formats:
if isinstance(fmt, six.string_types):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in xrange(1000):
if len(code_pieces) > 1:
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
else:
img = ImageFile('%s_%02d' % (output_base, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# We didn't find the files, so build them
results = []
if context:
ns = plot_context
else:
ns = {}
if context_reset:
clear_state(config.plot_rcparams)
plot_context.clear()
close_figs = not context or close_figs
for i, code_piece in enumerate(code_pieces):
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close_figs)
elif close_figs:
plt.close('all')
run_code(code_piece, code_path, ns, function_name)
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
elif len(code_pieces) == 1:
img = ImageFile("%s_%02d" % (output_base, j), output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format),
dpi=dpi,
bbox_inches="tight")
except Exception as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
results.append((code_piece, images))
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close=not context)
return results
def run(arguments, content, options, state_machine, state, lineno):
# The user may provide a filename *or* Python code content, but not both
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
nofigs = 'nofigs' in options
options.setdefault('include-source', config.plot_include_source)
keep_context = 'context' in options
context_opt = None if not keep_context else options['context']
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if len(arguments):
if not config.plot_basedir:
source_file_name = os.path.join(setup.app.builder.srcdir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
# If there is content, it will be passed as a caption.
caption = '\n'.join(content)
# If the optional function name is provided, use it
if len(arguments) == 2:
function_name = arguments[1]
else:
function_name = None
with io.open(source_file_name, 'r', encoding='utf-8') as fd:
code = fd.read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
function_name = None
caption = ''
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
# get rid of .. in paths, also changes pathsep
# see note in Python docs for warning about symbolic links on Windows.
# need to compare source and dest paths at end
build_dir = os.path.normpath(build_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir) # no problem here for me, but just use built-ins
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = render_figures(code,
source_file_name,
build_dir,
output_base,
keep_context,
function_name,
config,
context_reset=context_opt == 'reset',
close_figs=context_opt == 'close-figs')
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
source_file_name, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# Properly indent the caption
caption = '\n'.join(' ' + line.strip()
for line in caption.split('\n'))
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
if nofigs:
images = []
opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
only_texinfo = ".. only:: texinfo"
# Not-None src_link signals the need for a source link in the generated
# html
if j == 0 and config.plot_html_show_source_link:
src_link = source_link
else:
src_link = None
result = format_template(
config.plot_template or TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
only_texinfo=only_texinfo,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats and not nofigs,
caption=caption)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory, if necessary
if not os.path.exists(dest_dir):
cbook.mkdirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
destimg = os.path.join(dest_dir, os.path.basename(fn))
if fn != destimg:
shutil.copyfile(fn, destimg)
# copy script (if necessary)
target_name = os.path.join(dest_dir, output_base + source_ext)
with io.open(target_name, 'w', encoding="utf-8") as f:
if source_file_name == rst_file:
code_escaped = unescape_doctest(code)
else:
code_escaped = code
f.write(code_escaped)
return errors
| bsd-3-clause |
wdurhamh/statsmodels | statsmodels/graphics/tests/test_boxplots.py | 28 | 1257 | import numpy as np
from numpy.testing import dec
from statsmodels.graphics.boxplots import violinplot, beanplot
from statsmodels.datasets import anes96
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
@dec.skipif(not have_matplotlib)
def test_violinplot_beanplot():
# Test violinplot and beanplot with the same dataset.
data = anes96.load_pandas()
party_ID = np.arange(7)
labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
"Independent-Independent", "Independent-Republican",
"Weak Republican", "Strong Republican"]
age = [data.exog['age'][data.endog == id] for id in party_ID]
fig = plt.figure()
ax = fig.add_subplot(111)
violinplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plt.close(fig)
| bsd-3-clause |
iancze/PSOAP | scripts/make_fake_primary_data.py | 1 | 1680 | import matplotlib.pyplot as plt
import numpy as np
from scipy.linalg import cho_factor, cho_solve
from psoap import constants as C
from psoap.data import lkca14, redshift
from psoap import covariance
# Optimized parameters for this chunk. Not that relevant though, since we are just using the GP
# as an interpolator.
lkca14.sort_by_SN()
amp_f = 0.2
l_f = 5.58
order = 23
# Select the relevant wavelengths
wl = lkca14.wl[0, order, :]
wl0 = 5235
wl1 = 5285
ind = (wl > wl0) & (wl < wl1)
n_epochs = 3
# Optimize the GP using the first n_epochs epochs
wl = lkca14.wl[0:n_epochs, order, ind]
fl = lkca14.fl[0:n_epochs, order, ind]
sigma = lkca14.sigma[0:n_epochs, order, ind]
date = lkca14.date[0:n_epochs]
n_epochs, n_pix = wl.shape
# Apply some calibration optimization, although it is not strictly necessary
fl = covariance.cycle_calibration(wl, fl, sigma, amp_f, l_f, ncycles=3, limit_array=3)
print("finished cycling calibration")
# Optimize the GP parameters
amp_f, l_f = covariance.optimize_GP_f(wl.flatten(), fl.flatten(), sigma.flatten(), amp_f, l_f)
print("finished optimizing GP", amp_f, l_f)
# Use optimized fluxes, optimized GP parameters, and first epoch wavelength grid to predict a
# mean flux vector on to first epoch, which we take to be the master grid.
wl_predict = wl[0]
fl_predict, Sigma = covariance.predict_f(wl.flatten(), fl.flatten(), sigma.flatten(), wl_predict, amp_f, l_f)
# Plot all spectra up to see what it looks like
fig,ax = plt.subplots()
for i in range(n_epochs):
ax.plot(wl[i], fl[i])
ax.plot(wl_predict, fl_predict, "k", lw=1.2)
fig.savefig("fake/primary_spectra.png")
np.save("fake/primary_wl_fl.npy", np.array([wl_predict, fl_predict]))
| mit |
wesm/statsmodels | scikits/statsmodels/sandbox/distributions/examples/ex_mvelliptical.py | 1 | 5134 | # -*- coding: utf-8 -*-
"""examples for multivariate normal and t distributions
Created on Fri Jun 03 16:00:26 2011
@author: josef
for comparison I used R mvtnorm version 0.9-96
"""
import numpy as np
import scikits.statsmodels.sandbox.distributions.mv_normal as mvd
from numpy.testing import assert_array_almost_equal
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
#************** multivariate normal distribution ***************
mvn3 = mvd.MVNormal(mu, cov3)
#compare with random sample
x = mvn3.rvs(size=1000000)
xli = [[2., 1., 1.5],
[0., 2., 1.5],
[1.5, 1., 2.5],
[0., 1., 1.5]]
xliarr = np.asarray(xli).T[None,:, :]
#from R session
#pmvnorm(lower=-Inf,upper=(x[0,.]-mu)/sqrt(diag(cov3)),mean=rep(0,3),corr3)
r_cdf = [0.3222292, 0.3414643, 0.5450594, 0.3116296]
r_cdf_errors = [1.715116e-05, 1.590284e-05, 5.356471e-05, 3.567548e-05]
n_cdf = [mvn3.cdf(a) for a in xli]
assert_array_almost_equal(r_cdf, n_cdf, decimal=4)
print n_cdf
print
print (x<np.array(xli[0])).all(-1).mean(0)
print (x[...,None]<xliarr).all(1).mean(0)
print mvn3.expect_mc(lambda x: (x<xli[0]).all(-1), size=100000)
print mvn3.expect_mc(lambda x: (x[...,None]<xliarr).all(1), size=100000)
#other methods
mvn3n = mvn3.normalized()
assert_array_almost_equal(mvn3n.cov, mvn3n.corr, decimal=15)
assert_array_almost_equal(mvn3n.mean, np.zeros(3), decimal=15)
xn = mvn3.normalize(x)
xn_cov = np.cov(xn, rowvar=0)
assert_array_almost_equal(mvn3n.cov, xn_cov, decimal=2)
assert_array_almost_equal(np.zeros(3), xn.mean(0), decimal=2)
mvn3n2 = mvn3.normalized2()
assert_array_almost_equal(mvn3n.cov, mvn3n2.cov, decimal=2)
#mistake: "normalized2" standardizes - FIXED
#assert_array_almost_equal(np.eye(3), mvn3n2.cov, decimal=2)
xs = mvn3.standardize(x)
xs_cov = np.cov(xn, rowvar=0)
#another mixup xs is normalized
#assert_array_almost_equal(np.eye(3), xs_cov, decimal=2)
assert_array_almost_equal(mvn3.corr, xs_cov, decimal=2)
assert_array_almost_equal(np.zeros(3), xs.mean(0), decimal=2)
mv2m = mvn3.marginal(np.array([0,1]))
print mv2m.mean
print mv2m.cov
mv2c = mvn3.conditional(np.array([0,1]), [0])
print mv2c.mean
print mv2c.cov
mv2c = mvn3.conditional(np.array([0]), [0, 0])
print mv2c.mean
print mv2c.cov
import scikits.statsmodels.api as sm
mod = sm.OLS(x[:,0], sm.add_constant(x[:,1:], prepend=True))
res = mod.fit()
print res.model.predict(np.array([1,0,0]))
mv2c = mvn3.conditional(np.array([0]), [0, 0])
print mv2c.mean
mv2c = mvn3.conditional(np.array([0]), [1, 1])
print res.model.predict(np.array([1,1,1]))
print mv2c.mean
#the following wrong input doesn't raise an exception but produces wrong numbers
#mv2c = mvn3.conditional(np.array([0]), [[1, 1],[2,2]])
#************** multivariate t distribution ***************
mvt3 = mvd.MVT(mu, cov3, 4)
xt = mvt3.rvs(size=100000)
assert_array_almost_equal(mvt3.cov, np.cov(xt, rowvar=0), decimal=1)
mvt3s = mvt3.standardized()
mvt3n = mvt3.normalized()
#the following should be equal or correct up to numerical precision of float
assert_array_almost_equal(mvt3.corr, mvt3n.sigma, decimal=15)
assert_array_almost_equal(mvt3n.corr, mvt3n.sigma, decimal=15)
assert_array_almost_equal(np.eye(3), mvt3s.sigma, decimal=15)
xts = mvt3.standardize(xt)
xts_cov = np.cov(xts, rowvar=0)
xtn = mvt3.normalize(xt)
xtn_cov = np.cov(xtn, rowvar=0)
xtn_corr = np.corrcoef(xtn, rowvar=0)
assert_array_almost_equal(mvt3n.mean, xtn.mean(0), decimal=2)
#the following might fail sometimes (random test), add seed in tests
assert_array_almost_equal(mvt3n.corr, xtn_corr, decimal=1)
#watch out cov is not the same as sigma for t distribution, what's right here?
#normalize by sigma or by cov ? now normalized by sigma
assert_array_almost_equal(mvt3n.cov, xtn_cov, decimal=1)
assert_array_almost_equal(mvt3s.cov, xts_cov, decimal=1)
a = [0.0, 1.0, 1.5]
mvt3_cdf0 = mvt3.cdf(a)
print mvt3_cdf0
print (xt<np.array(a)).all(-1).mean(0)
print 'R', 0.3026741 # "error": 0.0004832187
print 'R', 0.3026855 # error 3.444375e-06 with smaller abseps
print 'diff', mvt3_cdf0 - 0.3026855
a = [0.0, 0.5, 1.0]
mvt3_cdf1 = mvt3.cdf(a)
print mvt3_cdf1
print (xt<np.array(a)).all(-1).mean(0)
print 'R', 0.1946621 # "error": 0.0002524817
print 'R', 0.1946217 # "error": 2.748699e-06 with smaller abseps
print 'diff', mvt3_cdf1 - 0.1946217
assert_array_almost_equal(mvt3_cdf0, 0.3026855, decimal=5)
assert_array_almost_equal(mvt3_cdf1, 0.1946217, decimal=5)
import scikits.statsmodels.sandbox.distributions.mixture_rvs as mix
mu2 = np.array([4, 2.0, 2.0])
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
md = mix.mv_mixture_rvs([0.4, 0.6], 5, [mvt3, mvt3n], 3)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
#rvs2 = rvs[:,:2]
import matplotlib.pyplot as plt
fig = plt.figure()
fig.add_subplot(2, 2, 1)
plt.plot(rvs[:,0], rvs[:,1], '.', alpha=0.25)
plt.title('1 versus 0')
fig.add_subplot(2, 2, 2)
plt.plot(rvs[:,0], rvs[:,2], '.', alpha=0.25)
plt.title('2 versus 0')
fig.add_subplot(2, 2, 3)
plt.plot(rvs[:,1], rvs[:,2], '.', alpha=0.25)
plt.title('2 versus 1')
plt.show()
| bsd-3-clause |
toobaz/pandas | pandas/tests/indexes/period/test_asfreq.py | 2 | 6254 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, PeriodIndex, Series, period_range
from pandas.util import testing as tm
class TestPeriodIndex:
def test_asfreq(self):
pi1 = period_range(freq="A", start="1/1/2001", end="1/1/2001")
pi2 = period_range(freq="Q", start="1/1/2001", end="1/1/2001")
pi3 = period_range(freq="M", start="1/1/2001", end="1/1/2001")
pi4 = period_range(freq="D", start="1/1/2001", end="1/1/2001")
pi5 = period_range(freq="H", start="1/1/2001", end="1/1/2001 00:00")
pi6 = period_range(freq="Min", start="1/1/2001", end="1/1/2001 00:00")
pi7 = period_range(freq="S", start="1/1/2001", end="1/1/2001 00:00:00")
assert pi1.asfreq("Q", "S") == pi2
assert pi1.asfreq("Q", "s") == pi2
assert pi1.asfreq("M", "start") == pi3
assert pi1.asfreq("D", "StarT") == pi4
assert pi1.asfreq("H", "beGIN") == pi5
assert pi1.asfreq("Min", "S") == pi6
assert pi1.asfreq("S", "S") == pi7
assert pi2.asfreq("A", "S") == pi1
assert pi2.asfreq("M", "S") == pi3
assert pi2.asfreq("D", "S") == pi4
assert pi2.asfreq("H", "S") == pi5
assert pi2.asfreq("Min", "S") == pi6
assert pi2.asfreq("S", "S") == pi7
assert pi3.asfreq("A", "S") == pi1
assert pi3.asfreq("Q", "S") == pi2
assert pi3.asfreq("D", "S") == pi4
assert pi3.asfreq("H", "S") == pi5
assert pi3.asfreq("Min", "S") == pi6
assert pi3.asfreq("S", "S") == pi7
assert pi4.asfreq("A", "S") == pi1
assert pi4.asfreq("Q", "S") == pi2
assert pi4.asfreq("M", "S") == pi3
assert pi4.asfreq("H", "S") == pi5
assert pi4.asfreq("Min", "S") == pi6
assert pi4.asfreq("S", "S") == pi7
assert pi5.asfreq("A", "S") == pi1
assert pi5.asfreq("Q", "S") == pi2
assert pi5.asfreq("M", "S") == pi3
assert pi5.asfreq("D", "S") == pi4
assert pi5.asfreq("Min", "S") == pi6
assert pi5.asfreq("S", "S") == pi7
assert pi6.asfreq("A", "S") == pi1
assert pi6.asfreq("Q", "S") == pi2
assert pi6.asfreq("M", "S") == pi3
assert pi6.asfreq("D", "S") == pi4
assert pi6.asfreq("H", "S") == pi5
assert pi6.asfreq("S", "S") == pi7
assert pi7.asfreq("A", "S") == pi1
assert pi7.asfreq("Q", "S") == pi2
assert pi7.asfreq("M", "S") == pi3
assert pi7.asfreq("D", "S") == pi4
assert pi7.asfreq("H", "S") == pi5
assert pi7.asfreq("Min", "S") == pi6
msg = "How must be one of S or E"
with pytest.raises(ValueError, match=msg):
pi7.asfreq("T", "foo")
result1 = pi1.asfreq("3M")
result2 = pi1.asfreq("M")
expected = period_range(freq="M", start="2001-12", end="2001-12")
tm.assert_numpy_array_equal(result1.asi8, expected.asi8)
assert result1.freqstr == "3M"
tm.assert_numpy_array_equal(result2.asi8, expected.asi8)
assert result2.freqstr == "M"
def test_asfreq_nat(self):
idx = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-04"], freq="M")
result = idx.asfreq(freq="Q")
expected = PeriodIndex(["2011Q1", "2011Q1", "NaT", "2011Q2"], freq="Q")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("freq", ["D", "3D"])
def test_asfreq_mult_pi(self, freq):
pi = PeriodIndex(["2001-01", "2001-02", "NaT", "2001-03"], freq="2M")
result = pi.asfreq(freq)
exp = PeriodIndex(["2001-02-28", "2001-03-31", "NaT", "2001-04-30"], freq=freq)
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
result = pi.asfreq(freq, how="S")
exp = PeriodIndex(["2001-01-01", "2001-02-01", "NaT", "2001-03-01"], freq=freq)
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
def test_asfreq_combined_pi(self):
pi = pd.PeriodIndex(["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq="H")
exp = PeriodIndex(["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq="25H")
for freq, how in zip(["1D1H", "1H1D"], ["S", "E"]):
result = pi.asfreq(freq, how=how)
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
for freq in ["1D1H", "1H1D"]:
pi = pd.PeriodIndex(
["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq=freq
)
result = pi.asfreq("H")
exp = PeriodIndex(["2001-01-02 00:00", "2001-01-03 02:00", "NaT"], freq="H")
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
pi = pd.PeriodIndex(
["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq=freq
)
result = pi.asfreq("H", how="S")
exp = PeriodIndex(["2001-01-01 00:00", "2001-01-02 02:00", "NaT"], freq="H")
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
def test_asfreq_ts(self):
index = period_range(freq="A", start="1/1/2001", end="12/31/2010")
ts = Series(np.random.randn(len(index)), index=index)
df = DataFrame(np.random.randn(len(index), 3), index=index)
result = ts.asfreq("D", how="end")
df_result = df.asfreq("D", how="end")
exp_index = index.asfreq("D", how="end")
assert len(result) == len(ts)
tm.assert_index_equal(result.index, exp_index)
tm.assert_index_equal(df_result.index, exp_index)
result = ts.asfreq("D", how="start")
assert len(result) == len(ts)
tm.assert_index_equal(result.index, index.asfreq("D", how="start"))
def test_astype_asfreq(self):
pi1 = PeriodIndex(["2011-01-01", "2011-02-01", "2011-03-01"], freq="D")
exp = PeriodIndex(["2011-01", "2011-02", "2011-03"], freq="M")
tm.assert_index_equal(pi1.asfreq("M"), exp)
tm.assert_index_equal(pi1.astype("period[M]"), exp)
exp = PeriodIndex(["2011-01", "2011-02", "2011-03"], freq="3M")
tm.assert_index_equal(pi1.asfreq("3M"), exp)
tm.assert_index_equal(pi1.astype("period[3M]"), exp)
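    def test_asfreq_minimal_sketch(self):
        # Illustrative sketch (added; not from the original suite): annual ->
        # monthly conversion anchored at the period start ("S") versus end
        # ("E"), a minimal form of the cases exercised above.
        pi = period_range(freq="A", start="2001", end="2002")
        exp_start = PeriodIndex(["2001-01", "2002-01"], freq="M")
        exp_end = PeriodIndex(["2001-12", "2002-12"], freq="M")
        tm.assert_index_equal(pi.asfreq("M", how="S"), exp_start)
        tm.assert_index_equal(pi.asfreq("M", how="E"), exp_end)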
| bsd-3-clause |
hsuantien/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 121 | 6117 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
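# Note (added for clarity): MyPassiveAggressive above is a compact reference
# implementation of the PA-I update (losses "hinge" / "epsilon_insensitive",
# step = min(C, loss / ||x||^2)) and the PA-II update (the "squared_*" losses,
# step = loss / (||x||^2 + 1/(2C))) from Crammer et al., "Online
# Passive-Aggressive Algorithms" (2006). The correctness tests below check that
# the scikit-learn estimators agree with it to two decimal places.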
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
giancarloescobar/project-kappa | code/utils/eda.py | 2 | 4040 | """ Script to run eda
"""
from convolve import *
from events2neural_fixed import *
from harmonic import *
from loading_data import *
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
import nibabel as nib
from dipy.segment.mask import median_otsu
from design_matrix import *
#I. Subject and Run
##############
subject = 'sub001'
run = 1
##############
#Loading Data and HDR
data = bold_data(subject, 1)
vol_shape, n_trs = data.shape[:-1], data.shape[-1]
TR = 2.5
tr_times = np.arange(0,30,TR)
all_tr_times = np.arange(n_trs) * TR
hrf_at_trs = hrf(tr_times)
X = np.ones((n_trs,14))
X_np = np.ones((n_trs,14))
mean_data = np.mean(data,axis=-1)
masked, mask = median_otsu(mean_data,2,1)
Y = data[mask].T
col = 0
pred = 0
#Adding onsets to design matrix
for i in list_cond_file(subject,run):
neural_prediction = events2neural_fixed(i, TR, n_trs)
convolved = convolve(neural_prediction, hrf_at_trs)
X[:,col] = convolved
X_np[:,pred] = neural_prediction
col = col + 1
pred = pred + 1
plt.plot(all_tr_times ,X_np[:,:8])
plt.savefig('block.png')
plt.close()
plt.plot(all_tr_times ,X_np[:,:8])
plt.plot(all_tr_times, X[:,:8])
plt.savefig('block_and_hdr.png')
plt.close()
#II. Design
#Masking Thresholds
#Run 1
data = bold_data(subject, 1)
mean_vol = np.mean(data, axis=-1)
plt.hist(np.ravel(mean_vol), bins=100)
plt.savefig('sub1_run1_mask.png')
plt.close()
#mean_data = np.mean(data,axis=-1)
#masked, mask = median_otsu(mean_data,2,1)
#Run 3
data3 = bold_data(subject, 3)
mean_vol2 = np.mean(data3, axis=-1)
plt.hist(np.ravel(mean_vol2), bins=100)
plt.savefig('sub1_run3_mask.png')
plt.close()
#mean_data2 = np.mean(data3,axis=-1)
#masked, mask2 = median_otsu(mean_data2,2,1)
#Run 5
data5 = bold_data(subject, 5)
mean_vol3 = np.mean(data5, axis=-1)
plt.hist(np.ravel(mean_vol3), bins=100)
plt.savefig('sub1_run5_mask.png')
plt.close()
#mean_data3 = np.mean(data5,axis=-1)
#masked, mask3 = median_otsu(mean_data3,2,1)
#Applied Mask to mean_data
mean_data[~mask]=np.nan
plt.imshow(mean_data[:,:,45],cmap='gray',alpha=0.5,interpolation='nearest')
#mean_data2[~mask2]=np.nan
#plt.imshow(mean_data2[:,:,45],cmap='gray',alpha=0.5,interpolation='nearest')
#mean_data3[~mask3]=np.nan
#plt.imshow(mean_data3[:,:,45],cmap='gray',alpha=0.5,interpolation='nearest')
#Design Matrix
plt.imshow(X[:,0:9], aspect = 0.1, interpolation = 'nearest', cmap = 'gray')
plt.colorbar()
plt.savefig('design_matrix.png')
plt.close()
X, Y, betas_vols, mask, U, Y_demeaned, mean_data, projection_vols = design_matrix(subject, run)
plt.imshow(X, aspect = 0.1, interpolation = 'nearest', cmap = 'gray')
plt.colorbar()
plt.savefig('design_matrix_dt_pca.png')
plt.close()
#Betas Values
betas_vols[~mask]=np.nan
plt.imshow(betas_vols[:,:,45,0], interpolation ='nearest')
plt.savefig('betas_vols_house.png')
plt.close()
plt.imshow(betas_vols[:,:,45,7], interpolation ='nearest')
plt.savefig('betas_vols_face.png')
plt.close()
plt.imshow(betas_vols[:,:,45,5], interpolation ='nearest')
plt.savefig('betas_vols_scissors.png')
plt.close()
#PCA
Y_demeaned = Y - np.mean(Y, axis=1).reshape([-1, 1])
Y_demeaned_2 = np.mean(Y_demeaned, axis = 0)
Y_demeaned_fix = Y_demeaned - Y_demeaned_2
u_cov = Y_demeaned_fix.dot(Y_demeaned_fix.T)
U, S, V = npl.svd(u_cov)
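# Note (added): because u_cov is symmetric positive semi-definite, the columns
# of U are its eigenvectors and S holds the corresponding eigenvalues in
# descending order, which is what the variance plot below shows.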
plt.plot(S)
plt.savefig('components_variance.png')
#PCA Projections
for i in range(10):
plt.imshow(projection_vols[:,:,45,i], cmap = 'gray')
plt.savefig('projection_' + str(i) + '.png')
# Some final checks that you wrote the files with their correct names
from os.path import exists
#assert exists('vol_std_values.txt')
#assert exists('vol_std_outliers.txt')
#assert exists('vol_std.png')
#assert exists('vol_rms_outliers.png')
#assert exists('extended_vol_rms_outliers.png')
#assert exists('extended_vol_rms_outliers.txt')
#assert exists('mean_mrss_vals.txt')
| bsd-3-clause |
jarryliu/queue-sim | measure/plot/process_log.py | 1 | 21095 | #!/usr/local/bin/python3
# support only python3
import numpy as np
import matplotlib.pyplot as plt
import re
from math import sqrt, floor, ceil
from process_theory import *
import scipy as sp
import scipy.stats
import matplotlib.patches as mpatches
from matplotlib.colors import colorConverter as cc
# get the all the latencies
def latencyFilter(data):
return data[:, 3:-1]
# get the histogram of distribution
def histogram(data, title, bins=1000, maxtype = "long"):
#print(data)
if maxtype == "short":
plt.hist(data, bins=bins, range = (min(data), max(data))) # arguments are passed to np.histogram
else:
plt.hist(data, bins=bins, range = (min(data), max(data)))
# plt.hist(data, bins=bins)
plt.title(title)
plt.show()
def mean_confidence_interval(a, k=1, confidence=0.99):
n = len(a)/k
m, se = np.mean(a), sp.stats.sem(a)
h = se * sp.stats.t._ppf((1+confidence)/2, n-1)
return m, m-h, m+h
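# Minimal usage sketch (added, illustrative only): the helper below is not part
# of the original measurement pipeline and just shows the return convention of
# mean_confidence_interval on synthetic samples.
def _demo_mean_confidence_interval():
    samples = np.random.normal(loc=5.0, scale=1.0, size=1000)
    m, lo, hi = mean_confidence_interval(samples)
    print("mean", m, "99% confidence interval", (lo, hi))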
# get the data of single log file and process the latencies
def processFile(path, f, newFile = False):
data = np.loadtxt(path+f)
(x,y)= data.shape
# 1. get arrival rate
# arrival = data[1:, 1]/1000/1000
arrival = data[floor(x/5):floor(4*x/5), 1]/1000/1000
#histogram(arrival, "arrival interval distribution")
mean_a = np.mean(arrival)
var_a = np.var(arrival)
# print("Mean Arrival interval is", mean_a, "variance is", var_a)
# 2. get end-to-end latency distribution
# latency = data[1:, 0]/1000/1000
latency = data[floor(x/5):, 11]/1000/1000
# print(f,latency)
#histogram(latency, "end-to-end latency distribution")
m, m_l, m_h = mean_confidence_interval(latency)
mList = np.mean(data[floor(x/5):floor(4*x/5), 3:8]/1000/1000, 0)
if newFile:
temp = np.mean(data[floor(x/5):floor(4*x/5), 3:11]/1000/1000, 0)
mList[0] = temp[0]+temp[1]
mList[1] = temp[2] + temp[3]
mList[2] = temp[4] + temp[5]
mList[3:] = temp[6:]
# print(f, m, m_l, m_h)
mean_s = [m, m_l, m_h, np.percentile(latency,5), np.percentile(latency, 99)]+list(mList)
var_s = np.var(latency)
# print("Average Latency is", mean_s, "variance is", var_s, "98 percentile", np.percentile(latency, 95))
return mean_a, var_a, mean_s, var_s
def processMFile(path, f):
data = np.loadtxt(path+f)
(x,y)= data.shape
# 1. get arrival rate
arrival = data[floor(x/5):floor(4*x/5), 1]/1000/1000
#histogram(arrival, "arrival interval distribution")
mean_a = np.mean(arrival)
var_a = np.var(arrival)
print("Mean Arrival interval is", mean_a, "variance is", var_a)
# 2. get end-to-end latency distribution
latency = data[floor(x/5):floor(4*x/5), 4:6]/1000/1000
latency = np.sum(latency, 1)
#histogram(latency, "end-to-end latency distribution")
mean_s = np.mean(latency)
var_s = np.var(latency)
print("Average Latency is", mean_s, "variance is", var_s)
return mean_a, var_a, mean_s, var_s
def printLatency(lam, var_a, mu, var_s):
lower, upper = getLatency(lam, var_a, mu, var_s)
print("Theoretical latency bounds are", lower, upper)
# get the first integer in a string
def getNum(string):
r = re.compile("([0-9]*)([a-zA-Z]*)([0-9]*)")
for s in r.match(string).groups():
if s.isdigit():
return int(s)
return 0
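# Examples (illustrative): getNum("batch100") -> 100, getNum("8core") -> 8,
# getNum("nopattern") -> 0.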
def readFileList(path, fList, newFile = False):
maList = []
varaList = []
msList = []
varsList = []
for f in fList:
mean_a, var_a, mean_s, var_s = processFile(path, f, newFile=newFile)
maList.append(mean_a)
varaList.append(var_a)
msList.append(mean_s)
varsList.append(var_s)
return np.array(maList), np.array(varaList), np.array(msList), np.array(varsList)
def burstLatency(bList, m):
mean = []
for b in bList:
mean.append(getLatencyDD1(b, 1/m))
return mean
def getBounds(mList, vList, mean, var):
upper = []
lower = []
for i in range(len(mList)):
l, u = getLatencyGG1(1/mList[i], vList[i], 1/mean, var)
lower.append(l)
upper.append(u)
return lower, upper
def plot_mean_and_CI(mean, lb, ub, color_mean=None, color_shading=None):
# plot the shaded range of the confidence intervals
plt.fill_between(range(mean.shape[0]), ub, lb,
color=color_shading, alpha=.5)
# plot the mean on top
plt.plot(mean, color_mean)
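# Minimal usage sketch (added, illustrative only): the helper below is not part
# of the original analysis; the data is synthetic and the colour choice is
# arbitrary.
def _demo_plot_mean_and_CI():
    x = np.arange(50)
    mean = np.sin(x / 5.0)
    plot_mean_and_CI(mean, mean - 0.2, mean + 0.2,
                     color_mean='b', color_shading='b')
    plt.show()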
def plotStage(mean_s, newFile = False):
#plt.plot(bList, mean_s[:,4], "*-", label= "99 percentile")
plt.plot(bList, mean_s[:, 5], "*-", label="stage 1")
plt.plot(bList, mean_s[:, 6], "*-", label="stage 2")
plt.plot(bList, mean_s[:, 7], "*-", label="stage 3")
plt.plot(bList, mean_s[:, 8], "*-", label="stage 4")
#plt.plot(bList, mean_s[:, 9], "*-", label="stage 5")
print("latency ", mean_s[:,0])
print("stage 1 ", mean_s[:,5])
print("stage 2 ", mean_s[:,6])
print("stage 3 ", mean_s[:,7])
print("stage 4 ", mean_s[:,8])
#print("stage 5 ", mean_s[:,9])
# type = {"arrival", "latency", "depart", "stage1" ... "stage5"}
def showIndividual(path, f):
data = np.loadtxt(path+f)
(x,y)= data.shape
# 1. get arrival rate
# arrival = data[1:, 1]/1000/1000
# arrdiff = np.
totalLatency = np.array(data[100:floor(x/5), 0])
totalLatency = totalLatency[1:]
intDiff = np.array(data[100:floor(x/5), 1])
intDiff = intDiff[1:].reshape(len(intDiff[1:]),1)
latency = np.array(data[100:floor(x/5), 3:8])
interval = latency[1:, :] - latency[:-1, :]
s = intDiff.T - interval[:, 0]
interval[:, 0] = intDiff.T
interval[:, 1:] = np.cumsum(interval[:, 1:], axis =1) + intDiff
# np.set_printoptions(threshold=np.inf)
result = np.concatenate((s.T, interval, latency[1:, :], totalLatency.reshape(len(totalLatency), 1)), axis = 1)
import pandas as pd
df = pd.DataFrame(result[:201, :])
with pd.option_context('display.max_rows', 200, 'display.max_columns', 12):
print(df)
#path = "/Users/junjieliu/Development/GitHub/RTM-test/2018-2-5/"
# path = "/Users/junjieliu/Development/GitHub/RTM-test/theory_validation/"
# #
# # showIndividual(path+"poisson_1core_batch/", "b200")
# showIndividual(path+"batch_1pub_1core/", "batch100")
# exit(0)
# draw the distribution,
# types: any of {"arrival", "latency", "interval0" ... "interval5", "stage1" ... "stage5"}
def drawDistribution(path, f, types=["arrival"]):
data = np.loadtxt(path+f)
(x,y)= data.shape
# 1. get arrival rate
# arrival = data[1:, 1]/1000/1000
# arrdiff = np.
intDiff = np.array(data[floor(x/5):floor(4*x/5), 1]/1000/1000)
intDiff[0] = 0
intDiff = np.cumsum(intDiff)
for t in types:
if t == "arrival":
arrival = data[floor(x/5):floor(4*x/5), 9]
arrival = np.array(arrival)
# arrival = (arrival[1:] - arrival[:-1])/1000/1000
histogram(arrival, "arrival")
elif t == "interval0":
latency = np.array(data[floor(x/5):floor(4*x/5), 3]/1000/1000)
interval = intDiff - latency
#print(intDiff[:10], interval[:10])
interval = interval[1:] - interval[:-1]
# print(interval[:100])
histogram(interval, 'interval0', maxtype ="short")
elif t == "interval1":
interval = data[floor(x/5):floor(4*x/5), 1]/1000/1000
histogram(interval, "interval1", maxtype ="short")
elif t == "interval2":
latency = np.array(data[floor(x/5):floor(4*x/5), 4]/1000/1000)
interval = intDiff + latency
#print(intDiff[:10], interval[:10])
interval = interval[1:] - interval[:-1]
histogram(interval, 'interval2', maxtype ="short")
elif t == "interval3":
latency = np.array(data[floor(x/5):floor(4*x/5), 4]/1000/1000)
latency += np.array(data[floor(x/5):floor(4*x/5), 5]/1000/1000)
interval = intDiff + latency
#print(intDiff[:10], interval[:10])
interval = interval[1:] - interval[:-1]
histogram(interval, 'interval3', maxtype ="short")
elif t == "interval4":
latency = np.array(data[floor(x/5):floor(4*x/5), 4]/1000/1000)
latency += np.array(data[floor(x/5):floor(4*x/5), 5]/1000/1000)
latency += np.array(data[floor(x/5):floor(4*x/5), 6]/1000/1000)
interval = intDiff + latency
#print(intDiff[:10], interval[:10])
interval = interval[1:] - interval[:-1]
histogram(interval, 'interval4', maxtype ="short")
elif t == "interval5":
latency = np.array(data[floor(x/5):floor(4*x/5), 4]/1000/1000)
latency += np.array(data[floor(x/5):floor(4*x/5), 5]/1000/1000)
latency += np.array(data[floor(x/5):floor(4*x/5), 6]/1000/1000)
latency += np.array(data[floor(x/5):floor(4*x/5), 7]/1000/1000)
interval = intDiff + latency
#print(intDiff[:10], interval[:10])
interval = interval[1:] - interval[:-1]
histogram(interval, 'interval5', maxtype ="short")
elif t == "latency":
latency = data[floor(x/5):floor(4*x/5), 0]/1000/1000
histogram(latency, "latency")
elif t == "stage1":
latency = data[floor(x/5):floor(4*x/5), 3]/1000/1000
histogram(latency, "stage1 latency")
elif t == "stage2":
latency = data[floor(x/5):floor(4*x/5), 4]/1000/1000
histogram(latency, "stage2 latency")
elif t == "stage3":
latency = data[floor(x/5):floor(4*x/5), 5]/1000/1000
histogram(latency, "stage3 latency")
elif t == "stage4":
latency = data[floor(x/5):floor(4*x/5), 6]/1000/1000
histogram(latency, "stage4 latency")
elif t == "stage5":
latency = data[floor(x/5):floor(4*x/5), 7]/1000/1000
histogram(latency, "stage5 latency")
def showLatency(path, bList, fList, directory, showStage = True, changeRate = False, newFile = False):
mean_a, var_a, mean_s, var_s = readFileList(path + directory+"/", fList, newFile = newFile)
# plot the shaded range of the confidence intervals
#plt.fill_between(bList, mean_s[:, 3], mean_s[:, 4], alpha=.2)
plt.fill_between(bList, mean_s[:, 1], mean_s[:, 2], alpha=.5)
# print(mean_s[:, 0])
#plt.plot(bList, mean_s[:, 0], "*-", label="measured"
plt.plot(bList, mean_s[:, 0], "*-", label=directory)
if showStage:
plotStage(mean_s)
# if not changeRate:
# # s, v, b = fitModelXMXG1B(bList, mean_s[:,0])
# # print("parameter fitting", s, v, b)
# # xlist = [i for i in range(int(bList[0]), int(bList[-1]))]
# # y = [modelXMXG1B(i, s, v, b) for i in xlist]
# # s, v = fitModelXMXG1(bList, mean_s[:,0])
# # print("parameter fitting", s, v)
# # xlist = [i for i in range(int(bList[0]), int(bList[-1]))]
# # y = [modelXMXG1(i, s, v) for i in xlist]
# # plt.plot(xlist, y, "--", label="MXD1 model")
#
# s, v = fitSetup(bList, mean_s[:,0])
# print("parameter fitting", s, v)
# xlist = [i for i in range(int(bList[0]), int(bList[-1]))]
# y = [modelSetup(i, s, v) for i in xlist]
# plt.plot(xlist, y, "--", label="Setup model")
#
# s, v = fitSimple(bList, mean_s[:, 4])
# print("simple parameter fitting", s, v)
# xlist = [i for i in range(int(bList[0]), int(bList[-1]))]
# y = [modelSimple(i, s, v) for i in xlist]
# plt.plot(xlist, y, "--", label="Simple model")
# else:
# # s, v, b = fitModelRateMXG1B(bList, mean_s[:,0]/1000)
# # print("parameter fitting", s, v, b)
# # xlist = [i for i in range(int(bList[0]), int(bList[-1]))]
# # y = [modelRateMXG1B(i, s, v, b) for i in xlist]
# s, v = fitModelRateMXG1(bList, mean_s[:,0])
# print("parameter fitting", s, v)
# print(bList, bList[0], bList[-1])
# xlist = [i for i in range(1, int(bList[-1]), 5)]
# y = [modelRateMXG1(i, s, v) for i in xlist]
# print(xlist, y)
# plt.plot(xlist, y, "--", label="setup model")
plt.ylabel("Latency (ms)")
if changeRate:
plt.xlabel("Message Rate (kmessage/s)")
else:
plt.xlabel("Batch Size")
plt.legend()
plt.show()
if __name__ == "__main__" :
path = "/Users/junjieliu/Development/GitHub/RTM-test/theory_validation/"
dirList = ["cores/", "burst/", "burst_1core/", "msgsize/"]
# drawDistribution(path+"burst_1core/", "1000pub", ["interval0", "interval1", "interval2", "interval3", "interval4", "interval5", "stage1", "stage2", "stage3", "stage4", "stage5"])
# drawDistribution(path+"burst/", "1000pub", ["interval0", "interval1", "interval2", "interval3", "interval4", "interval5", "stage1", "stage2", "stage3", "stage4", "stage5"])
# drawDistribution(path+"batch_1pub_1core/", "batch10", ["stage1", "stage2", "stage3", "stage4", "stage5"])
# drawDistribution(path+"batch_1pub_8core/", "batch1", ["latency"])
# drawDistribution(path+"poisson_1pub_1core_concurrency/", "p100", ["arrival"])
# drawDistribution(path+"poisson_1pub_1core_rate/", "nemda50", ["arrival"])
# exit(0)
# m = mean_s[0]
# print ("cores")
# # core
# cList = [1, 2, 4, 8]
# fList = [str(c)+"core" for c in cList]
# mean_a, var_a, mean_s, var_s = readFileList(path + dirList[0], fList)
# plt.plot(cList, mean_s, label="measured")
# plt.plot(cList, [mean_s[0]/c for c in cList], label="estimate")
# plt.legend()
# plt.show()
# bList = [1,2,4,8]
# fList = [str(b)+"core" for b in bList]
# directory = "cores"
# showLatency(path, bList, fList, directory)
# exit(0)
# bList = np.array([100+10*i for i in range(3, 9)] + [200])
# fList = [str(b) for b in bList]
# bList = 1000/bList
# directory = "rate"
# showLatency(path, bList, fList, directory, changeRate=True)
# exit(0)
# bList = np.array([10, 100, 200, 500, 1000, 5000, 10000])/1000
# fList = ["latency28", "latency29", "latency30", "latency35", "latency34", "latency33", "latency36"]
# directory = "multi-conn"
# showLatency(path, bList, fList, directory)
# exit(0)
bList = np.array([10, 100, 200, 500, 1000, 5000, 10000])/1000
fList = ["latency15", "latency14", "latency13", "latency12", "latency11", "latency26", "latency25"]
directory = "multi-conn"
showLatency(path, bList, fList, directory)
exit(0)
# bList = [1, 10, 100, 200, 500, 1000]
# fList = [str(b)+"pub" for b in bList]
# directory = "burst"
# showLatency(path, bList, fList, directory)
# exit(0)
# # # drawDistribution(path+"burst_1core/", "20pub", ["interval0", "interval1", "interval2", "interval3", "interval4", "interval5"])#stage2", "stage2", "stage3", "stage4", "stage5"])
# bList = [1, 20, 100, 200, 500, 1000]
# fList = [str(b)+"pub" for b in bList]
# directory = "burst_1core"
# showLatency(path, bList, fList, directory)
# exit(0)
# A, B = fitPub1(np.array(bList), mean_s[:, 0])
# print("model parameter a+b", A, "c", B)
# res = [modelPub1(i, A, B) for i in bList]
# plt.plot(bList, res, "*--", label="model pub_1core")
# # plotStage(mean_s)
# print("measured ", mean_s[:, 0])
#
# # expLatency = [ mean_s[i, 0]/(ceil(bList[i]/8)+1)*(bList[i]+1) for i in range(len(bList)) ]
# # print("exp", expLatency)
# # plt.plot(bList, expLatency, "*-", label="D(k)/D/M")
# sList = [128, 256, 512, 1024, 2048, 4096, 8192]
# fList = [str(s) for s in sList]
# directory = "msgsize"
# showLatency(path, bList, fList, directory)
# exit(0)
# drawDistribution(path+"batch_1pub_8core/", "batch100", ["interval0", "interval1", "interval2", "interval3", "interval4", "interval5", "stage1", "stage2", "stage2", "stage3", "stage4", "stage5"])
# bList = [1, 10, 100, 200, 500, 1000]
# fList = ["batch"+str(b) for b in bList]
# directory = "batch_1pub_8core"
# showLatency(path, bList, fList, directory)
# exit(0)
# # new batch_1pub_1core
# path = "/Users/junjieliu/Development/GitHub/RTM-test/theory_validation/"
# bList = [1, 10, 100, 500, 1000]
# fList = ["batch"+str(b) for b in bList]
# directory = "batch_1pub_1core_moreTimestamps"
# showLatency(path, bList, fList, directory, newFile = True)
# exit(0)
# drawDistribution(path+"batch_1pub_1core/", "batch500", ["interval0", "interval1", "interval2", "interval3", "interval4", "interval5", "stage1", "stage2", "stage3", "stage4", "stage5"])
#drawDistribution(path+"batch_1pub_1core/", "batch1000", ["interval0"])#, "interval1", "interval2", "interval3", "interval4", "interval5", "stage1", "stage2", "stage3", "stage4", "stage5"])
#drawDistribution(path+"burst_1core/", "1000pub", ["interval0"])
#drawDistribution(path+"poisson_1pub_1core_rate/", "nemda20", ["interval0", "interval1", "interval2", "interval3", "interval4", "interval5", "stage1", "stage2", "stage3", "stage4", "stage5"])
# bList = [1, 10, 100, 200, 500, 1000]
# fList = ["batch"+str(b) for b in bList]
# directory = "batch_1pub_1core"
# showLatency(path, bList, fList, directory)
# exit(0)
#
# res = [modelBatch1(i, A, B+C) for i in bList]
# plt.plot(bList, res, "*--", label="model batch_1core")
# # A, C = fitBatch1(np.array(bList), mean_s[:, 0])
# # print("model parameter a", A, "c", C)
# # res = [modelBatch1(i, A, C) for i in bList]
# # plt.plot(bList, res, "*--", label="model batch_1core")
# # plotStage(mean_s)
# print("measured ", mean_s)
# # Assuming D/D/1, not working...
# t = burstLatency(bList[1:], mean_s[0, 0])
# plt.plot(bList[1:], t, label=r"$D^{(k)}/D/1$")
# print(r"$D^{(k)}/D/1$: ", t)
#print([bPerVar(lam, k)
# rate 10 pub, 2 cores
# bList = np.array([200, 180, 170,160, 150, 140, 130 ,125, 120, 110])
# fList = [str(b) for b in bList]
# directory = "rate"
# showLatency(path, bList, fList, directory):
# exit(0)
# rate 1 pub 1 core
# bList = 1000/np.array([10000, 1000, 500, 100, 50, 20, 10])
# fList = ["p"+str(int(1000/b)) for b in bList]
# directory = "rate_1pub_1core"
# showLatency(path, bList, fList, directory, changeRate = True)
# exit(0)
# # Poisson rate 1 pub 1 core
# drawDistribution(path+"poisson_1pub_1core_rate/", "nemda50", ["interval0", "interval1", "interval2", "interval3", "interval4", "interval5", "stage1", "stage2", "stage3", "stage4", "stage5"])
# bList = 1000/np.array([1000, 50, 20, 10])
# fList = ["nemda"+str(int(1000/b)) for b in bList]
# # print(fList)
# directory = "poisson_1pub_1core_rate"
# showLatency(path, bList, fList, directory, changeRate = True)
# exit(0)
# poisson
# drawDistribution(path+"poisson_1core_batch/", "b1000", ["interval0", "interval1", "interval2", "interval3", "interval4", "interval5", "stage1", "stage2", "stage3", "stage4", "stage5"])
# bList = np.array([1, 10, 100, 200, 500, 1000])
# fList = ["b"+str(b) for b in bList]
# directory = "poisson_1core_batch"
# showLatency(path, bList, fList, directory)
# exit(0)
#
# poisson concurrent
# bList = np.array([100, 200, 1000])
# fList = ["p"+str(b) for b in bList]
# directory = "poisson_1pub_1core_concurrency"
# showLatency(path, bList, fList, directory)
# exit(0)
#
# # print("burst")
# # # burst
# bList = [1, 10, 100, 200, 500, 1000]
# fList = [str(b)+"pub" for b in bList]
# mean_a, var_a, mean_s, var_s = readFileList(path + "burst/", fList)
# # measured result
# #plt.fill_between(bList, mean_s[:, 3], mean_s[:, 4], alpha=.2)
# plt.fill_between(bList, mean_s[:, 1], mean_s[:, 2], alpha=.5)
# plt.plot(bList, mean_s[:, 0], "*-", label="pub_8core")
# #A, B = fitPub8(bList, mean_s[:,0])
# #print("model parameter a+b", A, "c", B)
# res = [modelPub8(i, A, B) for i in bList]
# plt.plot(bList, res, "*--", label="model pub_8core")
# #plotStage(mean_s)
# print("measured 8 cores", mean_s[:, 0])
# # Assuming D/D/1, not working...
# t = burstLatency(bList[1:], mean_s[0, 0])
# plt.plot(bList[1:], t, label="D/D/1")
# print("D/D/1: ", t)
# #print([bPerVar(lam, k)
# # Assuming D/D/1 with modified burst
# bListP = [getBurstByVar(mean_a[i], var_a[i]) for i in range(len(mean_a))]
# #print(bListP)
# t = burstLatency(bListP, mean_s[0])
# plt.plot(bList, t, label="D/D/1 modified")
# #Assuming G/G/1
# #plt.plot(bList, mean_a)
# lower, upper = getBounds(mean_a, var_a, mean_s[0], var_s[0])
# plt.plot(bList, lower, label="lower G/G/1")
# #plt.plot(bList, upper, label="upper G/G/1")
# print("G/G/1 lower bound: ", lower)
# #print("G/G/1 upper bound: ", upper)
# plt.ylabel("Latency (ms)")
# plt.xlabel("# of publishers")
# plt.legend()
# plt.ylim(0, 10)
# plt.show()
# exit(0)
| mit |
pypot/scikit-learn | sklearn/ensemble/weight_boosting.py | 30 | 40648 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
        Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _check_sample_weight(self):
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba <= 0] = 1e-5
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
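    Examples
    --------
    A minimal, illustrative usage sketch (outputs intentionally not shown):

    >>> from sklearn.datasets import load_iris
    >>> from sklearn.ensemble import AdaBoostClassifier
    >>> iris = load_iris()
    >>> clf = AdaBoostClassifier(n_estimators=50, random_state=0)
    >>> clf = clf.fit(iris.data, iris.target)
    >>> y_pred = clf.predict(iris.data)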
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
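        # For example (illustrative), with K == 3 and the true class at index 1,
        # the coding row for that sample is [-0.5, 1., -0.5].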
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
y_predict_proba[y_predict_proba <= 0] = 1e-5
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
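        # Illustrative note (editor's addition): for estimator_error == 0.25 and
        # n_classes == 3 this gives
        # learning_rate * (log(0.75 / 0.25) + log(2)) ~= 1.79 * learning_rate.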
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
        This method allows monitoring (i.e. determining the error on a test set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
    estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
           On-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
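        # Editor's note: the two lines above perform inverse-transform sampling,
        # so index i is drawn with probability sample_weight[i] / sample_weight.sum().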
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
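    # Editor's note on _get_median_predict: the weighted median is the first
    # sorted prediction whose cumulative estimator weight reaches half of the
    # total; e.g. weights [1, 1, 2] over sorted predictions [0.1, 0.5, 0.9]
    # give cumulative weights [1, 2, 4] and select 0.5.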
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
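# ---------------------------------------------------------------------------
# Editor's hedged usage sketch (not part of the original scikit-learn module):
# a minimal smoke test of the two estimators defined above on synthetic data.
# The sklearn.datasets helpers are assumptions about the installed
# environment, not dependencies of this file.
if __name__ == "__main__":
    from sklearn.datasets import make_classification, make_regression

    X_c, y_c = make_classification(n_samples=200, n_features=10,
                                   random_state=0)
    clf = AdaBoostClassifier(n_estimators=20, random_state=0).fit(X_c, y_c)
    print("AdaBoostClassifier training accuracy: %.3f"
          % (clf.predict(X_c) == y_c).mean())

    X_r, y_r = make_regression(n_samples=200, n_features=10, noise=1.0,
                               random_state=0)
    reg = AdaBoostRegressor(n_estimators=20, random_state=0).fit(X_r, y_r)
    print("AdaBoostRegressor mean absolute error: %.3f"
          % abs(reg.predict(X_r) - y_r).mean())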
| bsd-3-clause |
tdhopper/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
    density : array-like of floats (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
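# Example invocation (illustrative; all flags are defined by the parser below):
#   python bench_multilabel_metrics.py f1 accuracy --samples 2000 --classes 16 \
#       --plot classes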
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
galad-loth/DescHash | ProductQuant.py | 1 | 3595 | import numpy as npy
from LoadData import ReadFvecs,ReadIvecs
from sklearn.cluster import KMeans
from scipy.spatial.distance import pdist,squareform
from Utils import GetRetrivalMetric, GetKnnIdx
def PQTrain(data, lenSubVec,numSubCenter):
(dataSize, dataDim)=data.shape
if 0!=dataDim%lenSubVec:
print "Cannot partition the feature space with the given segment number"
return
numSubVec=dataDim/lenSubVec
centers=npy.zeros((numSubVec*numSubCenter,lenSubVec),dtype=npy.float32)
distOfCenters=npy.zeros((numSubCenter,numSubCenter,numSubVec),dtype=npy.float32)
objKmeans=KMeans(numSubCenter,'k-means++',3,100,0.001)
for ii in range(numSubVec):
print("PQ training. Processing "+str(ii)+"-th sub-vector")
objKmeans.fit(data[:,ii*lenSubVec:(ii+1)*lenSubVec])
centers[ii*numSubCenter:(ii+1)*numSubCenter,:]= objKmeans.cluster_centers_
distOfCenters[:,:,ii]=squareform(pdist(objKmeans.cluster_centers_,metric="euclidean"))
model={"centers":centers,"distOfCenters":distOfCenters}
return model
def PQEval(data,lenSubVec,numSubCenter,centersPQ):
(dataSize, dataDim)=data.shape
if 0!=dataDim%lenSubVec:
print "Cannot partition the feature space with the given segment number"
return
numSubVec=dataDim/lenSubVec
codePQ=-npy.ones((dataSize, numSubVec),dtype=npy.int32)
objKmeans=KMeans(numSubCenter)
if (centersPQ.shape[0]!=numSubVec*numSubCenter
or centersPQ.shape[1]!=lenSubVec):
print "PQ model dimension is not compatible with input data"
return
for ii in range(numSubVec):
objKmeans.cluster_centers_=centersPQ[ii*numSubCenter:(ii+1)*numSubCenter,:]
codePQ[:,ii]=objKmeans.predict(data[:,ii*lenSubVec:(ii+1)*lenSubVec])
return codePQ
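# Illustrative note (editor's addition): for 128-dimensional SIFT descriptors
# with lenSubVec=8 and numSubCenter=256, PQEval emits 128/8 = 16 sub-codes per
# descriptor, each an index in [0, 255].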
def PQQuery(queryCode, baseCode, numSubCenter, modelPQ, k=5):
if queryCode.shape[1]!=baseCode.shape[1]:
print "Quary and Base codes are not with the same length"
return
nQuery=queryCode.shape[0]
kRetr=npy.min((k,baseCode.shape[0]))
distOfCenters=modelPQ["distOfCenters"]
knnIdx=-npy.ones((nQuery,kRetr),dtype=npy.int32)
distCodePair=npy.zeros(baseCode.shape, dtype=npy.float32)
for ii in range(nQuery):
distCodePair=distCodePair*0
for jj in range(queryCode.shape[1]):
distCodePair[:,jj]=distOfCenters[queryCode[ii,jj],baseCode[:,jj],jj]
idxSort=npy.argsort(npy.sum(npy.square(distCodePair),axis=1))
knnIdx[ii,:]=idxSort[:kRetr]
return knnIdx
if __name__=="__main__":
dataPath="E:\\DevProj\\Datasets\\SIFT1M\\siftsmall"
trainData=ReadFvecs(dataPath,"siftsmall_learn.fvecs")
trainData=trainData.astype(npy.float32)
lenSubVec=8
numSubCenter=256
modelPQ=PQTrain(trainData,lenSubVec,numSubCenter)
queryData=ReadFvecs(dataPath,"siftsmall_query.fvecs")
baseData=ReadFvecs(dataPath,"siftsmall_base.fvecs")
idxGt=ReadIvecs(dataPath,"siftsmall_groundtruth.ivecs")
queryData=queryData.astype(npy.float32)
baseData=baseData.astype(npy.float32)
idxKnnGt=GetKnnIdx(queryData,baseData,100)
queryCode=PQEval(queryData,lenSubVec,numSubCenter,modelPQ["centers"])
baseCode=PQEval(baseData,lenSubVec,numSubCenter,modelPQ["centers"])
idxKnnPred=PQQuery(queryCode, baseCode, numSubCenter, modelPQ, 100)
retrivMetric=GetRetrivalMetric(idxKnnGt, idxKnnPred, 100, 1000010)
    print(retrivMetric)
| apache-2.0 |
geektoni/Influenza-Like-Illness-Predictor | data_analysis/get_model_statistics.py | 1 | 2940 | # -*- coding: utf-8 -*-
"""Script which can be used to compare the features obtained of two different influenza models
Usage:
get_model_statistics.py <model> [--country=<country_name>] [--no-future] [--basedir=<directory>] [--start-year=<start_year>] [--end-year=<end_year>] [--save] [--no-graph]
    <model>               Name of the model whose prediction results will be analysed
-h, --help Print this help message
"""
import pandas as pd
import numpy as np
from scipy import stats
from docopt import docopt
import os
import glob
from sklearn.metrics import mean_squared_error
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
def get_results_filename(basepath):
files = [f for f in glob.glob(basepath + "/*-prediction.csv", recursive=True)]
y = os.path.basename(files[0]).split("-")[0]
y2 = os.path.basename(files[0]).split("-")[1]
return "{}-{}".format(y, y2)
if __name__ == "__main__":
args = docopt(__doc__)
model = args["<model>"]
base_dir = args["--basedir"] if args["--basedir"] else "../complete_results"
country = args["--country"] if args["--country"] else "italy"
future = "no-future" if args["--no-future"] else "future"
# Read the baseline results and merge them
model_path = os.path.join(base_dir, args["<model>"], future, country)
season_years = get_results_filename(model_path)
model_file = os.path.join(model_path, "{}-prediction.csv".format(season_years))
# Load the data
data = pd.read_csv(model_file)
# Get only the weeks we care for
start_year = "2007-42" if not args["--start-year"] else args["--start-year"]
end_year = "2019-15" if not args["--end-year"] else args["--end-year"]
start_season = data["week"] >= start_year
end_season = data["week"] <= str(int(end_year.split("-")[0]) + 1) + "-" + end_year.split("-")[1]
total = start_season & end_season
data = data[total]
# Describe the data
print("")
print("[*] Describe the given dataset {}".format(model_file))
print(data.describe())
# Generate residuals
print("")
print("[*] Describe the residuals")
residuals = data["incidence"]-data["prediction"]
print(residuals.describe())
# Get some statistics
print("")
total_pearson = 0
for i in np.arange(0, len(data["prediction"]), 26):
total_pearson += stats.pearsonr(data["prediction"][i:i+26], data["incidence"][i:i+26])[0]
print("Pearson Correlation (value/p): ", total_pearson/(len(data["prediction"])/26))
print("")
print("Mean Squared Error: ", mean_squared_error(data["prediction"], data["incidence"]))
print("")
if not args["--no-graph"]:
ax = sns.distplot(residuals, label="Residual")
plt.figure()
ax = sns.distplot(data["incidence"], label="Incidence")
ax = sns.distplot(data["prediction"], label="Prediction")
plt.legend()
plt.show()
| mit |
jm-begon/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
lucidfrontier45/scikit-learn | examples/plot_feature_selection.py | 2 | 2800 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
pl.figure(1)
pl.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
pl.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
pl.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
pl.bar(X_indices[selector.get_support()] - .05, svm_weights_selected, width=.2,
label='SVM weights after selection', color='b')
pl.title("Comparing feature selection")
pl.xlabel('Feature number')
pl.yticks(())
pl.axis('tight')
pl.legend(loc='upper right')
pl.show()
| bsd-3-clause |
alivecor/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 37 | 3651 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=0.1)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
  # Convert the labels to a one-hot tensor of shape (number of samples, 3),
  # with an on-value of 1 for each one-hot vector of length 3.
onehot_labels = tf.one_hot(labels, 3, 1, 0)
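  # Illustrative note (editor's addition): with depth=3, on_value=1 and
  # off_value=0, labels [0, 2] map to [[1, 0, 0], [0, 0, 1]].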
# Compute loss.
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
toastedcornflakes/scikit-learn | examples/gaussian_process/plot_gpc_iris.py | 81 | 2231 | """
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for an isotropic
and anisotropic RBF kernel on a two-dimensional version for the iris-dataset.
The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = np.array(iris.target, dtype=int)
h = .02 # step size in the mesh
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
# Plot the predicted probabilities. For that, we will assign a color to
    # each point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 2, i + 1)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y])
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title("%s, LML: %.3f" %
(titles[i], clf.log_marginal_likelihood(clf.kernel_.theta)))
plt.tight_layout()
plt.show()
| bsd-3-clause |
juhaj/topics-python-in-research | codes/python/solve_heat_equation_with_flow.py | 1 | 1751 | # Julius B. Kirkegaard 11/01/17
# [email protected]
import numpy as np
import matplotlib.pyplot as plt
def to_vector(mat):
return np.ravel(mat)
def to_matrix(vec):
return np.reshape(vec, shape)
### Define grid
dx = 0.02
x = np.arange(0, 1 + dx, dx)
m = len(x)
X, Y = np.meshgrid(x, x)
shape = X.shape
# Transfer to vectors
x = to_vector(X)
y = to_vector(Y)
n = len(x)
# Laplacian
L = np.zeros((n, n))
for i in range(n):
L[i,i] = -4
j = np.argmin( (x[i] + dx - x)**2 + (y[i] - y)**2 )
if i!=j: L[i,j] = 1
j = np.argmin( (x[i] - dx - x)**2 + (y[i] - y)**2 )
if i!=j: L[i,j] = 1
j = np.argmin( (x[i] - x)**2 + (y[i] + dx - y)**2 )
if i!=j: L[i,j] = 1
j = np.argmin( (x[i] - x)**2 + (y[i] - dx - y)**2 )
if i!=j: L[i,j] = 1
L = L/dx**2
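# Editor's note: each row of L now encodes the 5-point stencil
# (u_E + u_W + u_N + u_S - 4*u_C) / dx**2, a second-order approximation of the
# Laplacian on this uniform grid (boundary rows of the assembled operator are
# overwritten below).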
# Flow
vx = 20 * (y - 0.5)
vy = -20 * (x - 0.5)
G = np.zeros((n, n))
for i in range(n):
# x-derivative
j = np.argmin( (x[i] + dx - x)**2 + (y[i] - y)**2 )
if i!=j: G[i, j] = vx[i]/(2*dx)
j = np.argmin( (x[i] - dx - x)**2 + (y[i] - y)**2 )
if i!=j: G[i, j] = -vx[i]/(2*dx)
# y-derivative
j = np.argmin( (x[i] - x)**2 + (y[i] + dx - y)**2 )
if i!=j: G[i, j] = vy[i]/(2*dx)
j = np.argmin( (x[i] - x)**2 + (y[i] - dx - y)**2 )
if i!=j: G[i, j] = -vy[i]/(2*dx)
# Form operator
A = L - G
# Boundary conditions
b = np.zeros(n)
for i in range(n):
if (x[i]==0 or x[i]==1 or y[i]==0 or y[i]==1):
A[i, :] = 0
A[i, i] = 1
if x[i] == 0:
b[i] = np.exp( -10*(y[i]-0.3)**2 )
# Solve
from scipy.linalg import solve
u = solve(A, b)
# Plot
U = to_matrix(u)
plt.imshow(U, extent=(min(x), max(x), max(y), min(y)))
plt.colorbar()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Temperature distribution of plate')
plt.show()
| gpl-3.0 |
ThomasMiconi/htmresearch | htmresearch/frameworks/capybara/supervised/plot.py | 7 | 3244 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
FONT_SIZE = 12
FIG_SIZE = (12, 10)
def resolve_plot_path(plot_dir, data_id, cell_type, nb_chunks, aggregation):
plot_name = '%s_%s_chunks=%s_agg=%s.png' % (data_id, cell_type, nb_chunks,
aggregation)
plot_path = os.path.join(plot_dir, plot_name)
return plot_path
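# Illustrative example (editor's addition), assuming a POSIX path separator:
#   resolve_plot_path('plots', 'mnist', 'tm', 10, 'mean')
#   -> 'plots/mnist_tm_chunks=10_agg=mean.png'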
def make_sup_title(data_id, cell_type, nb_chunks, aggregation):
return ('Data: %s | Cells: %s | Chunks: %s | Aggregation: %s'
% (data_id, cell_type.upper(), nb_chunks, aggregation))
def make_subplots(n_rows, n_cols, plot_dir, data_id, cell_type, nb_chunks,
agg):
plot_path = resolve_plot_path(plot_dir, data_id, cell_type, nb_chunks, agg)
sup_title = make_sup_title(data_id, cell_type, nb_chunks, agg)
fig, ax = plt.subplots(n_rows, n_cols, figsize=FIG_SIZE)
fig.suptitle(sup_title, fontsize=FONT_SIZE + 2, fontweight='bold')
fig.subplots_adjust(hspace=.5, wspace=.5)
return fig, ax, plot_path
def plot_matrix(embeddings_mat, title, fig, ax):
heatmap = ax.pcolor(embeddings_mat, cmap=plt.cm.Blues)
fig.colorbar(heatmap, ax=ax)
n_sequences = embeddings_mat.shape[0]
  ticks = range(1, n_sequences + 1, n_sequences // 5)
if n_sequences not in ticks: ticks.append(n_sequences)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xlabel('Sequence #')
ax.set_ylabel('Sequence #')
ax.set_title(title, fontsize=FONT_SIZE)
def plot_projections(embeddings_proj, labels, title, fig, ax):
# Colors
unique_labels = list(set(list(labels)))
nb_colors = len(unique_labels)
color_names = ['Class %s' % l for l in unique_labels]
colors = sns.color_palette('colorblind', nb_colors)
# Plot projections
ax.set_title(title, fontsize=FONT_SIZE)
ax.scatter(embeddings_proj[:, 0], embeddings_proj[:, 1],
c=[colors[unique_labels.index(l)] for l in labels])
# Add legend
patches = [mpatches.Patch(color=colors[i], label=color_names[i])
for i in range(nb_colors)]
ax.legend(handles=patches, loc='best')
def make_plot_title(plot_name, phase, accuracy):
return ('%s\n%s accuracy: %s / 100\n'
% (plot_name.capitalize(), phase.capitalize(), accuracy))
| agpl-3.0 |
elkingtonmcb/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibilty. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
AdaptiveApplications/carnegie | tarc_bus_locator_client/numpy-1.8.1/build/lib.linux-x86_64-2.7/numpy/lib/recfunctions.py | 17 | 35016 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for matplotlib.
They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = ['append_fields',
'drop_fields',
'find_duplicates',
'get_fieldstructure',
'join_by',
'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields',
'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
    Fills fields of output with fields from input,
    with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
    Returns the field names of the input datatype as a tuple. Nested structures
    are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
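# A minimal usage sketch for zip_descr, assuming two simple one-field arrays
# named 'a' and 'b'; only the resulting field layout is inspected because the
# exact type strings (e.g. '<i4' vs '<i8') depend on the platform.
def _zip_descr_sketch():
    x = np.array([(1,)], dtype=[('a', int)])
    y = np.array([(2.,)], dtype=[('b', float)])
    combined = zip_descr((x, y), flatten=True)
    # Expected layout: the integer field 'a' followed by the float field 'b'.
    return [name for (name, _) in combined]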
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields as keys and a list of parent fields as values.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
# if (lastparent[-1] != lastname):
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if hasattr(element, '__iter__') and not isinstance(element, basestring):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
    seqarrays : sequence of arrays
        Sequence of arrays.
    fill_value : {None, integer}
        Value used to pad shorter iterables.
    flatten : {True, False}, optional
        Whether to flatten the items from the structured arrays or keep
        them nested.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in zip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
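# A minimal sketch of izip_records on two structured arrays of different
# lengths; the shorter input is padded with ``fill_value``. This only
# illustrates the expected behaviour of the helper as merge_arrays drives it.
def _izip_records_sketch():
    a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
    b = np.array([(5,)], dtype=[('C', int)])
    # First item: (1, 10.0, 5); second item: (2, 20.0, -1) with the pad value.
    return list(izip_records((a, b), fill_value=-1, flatten=True))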
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays,
fill_value= -1, flatten=False, usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
    * Without a mask, the missing value will be filled with something,
      depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
            # Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the fields
to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
    asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype (`asrecarray=False`)
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
#
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
#
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
#
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append((newname,
_recursive_rename_fields(current, namemapper)))
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value= -1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else :
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
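# A minimal usage sketch for append_fields: a single new field 'C' is appended
# to a two-field base array; with usemask=False the result comes back as a
# plain flexible-type ndarray.
def _append_fields_sketch():
    base = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
    extra = np.array([100, 200])
    # The output exposes the fields ('A', 'B', 'C').
    return append_fields(base, 'C', extra, usemask=False)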
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
    Superposes arrays field by field
Parameters
----------
    arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" % \
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
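# A minimal sketch following the docstring example above: duplicates are looked
# up along the single field 'a', and the masked entry is ignored because
# ignoremask=True.
def _find_duplicates_sketch():
    ndtype = [('a', int)]
    a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
                    mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
    duplicates, index = find_duplicates(a, ignoremask=True, return_index=True)
    return duplicates, index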
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of string corresponding
to the fields used to join the array.
An exception is raised if the `key` field cannot be found in the two input
arrays.
Neither `r1` nor `r2` should have any duplicates along `key`: the presence
of duplicates will make the output quite unreliable. Note that duplicates
are not looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
        If 'outer', returns the common elements as well as the elements of r1
        not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1 not
in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present in r2
        but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present in r1
        but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for the
two arrays and concatenating the result. This array is then sorted, and
the common entries selected. The output is constructed by filling the fields
with the selected entries. Matching is not preserved if there are some
duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError("The 'jointype' argument should be in 'inner', "\
"'outer' or 'leftouter' (got '%s' instead)" % jointype)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
(nb1, nb2) = (len(r1), len(r2))
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names), set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
names.extend(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields : r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and not f in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
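# A minimal usage sketch for join_by: an inner join of two duplicate-free
# arrays on the key 'key'; only the records with keys 2 and 3 appear in the
# result, carrying both 'value1' and 'value2'.
def _join_by_sketch():
    r1 = np.array([(1, 10.), (2, 20.), (3, 30.)],
                  dtype=[('key', int), ('value1', float)])
    r2 = np.array([(2, 200.), (3, 300.), (4, 400.)],
                  dtype=[('key', int), ('value2', float)])
    return join_by('key', r1, r2, jointype='inner', usemask=False)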
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| mit |
ilyes14/scikit-learn | sklearn/decomposition/nmf.py | 35 | 39369 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Mathieu Blondel <[email protected]>
# Tom Dupre la Tour
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (Projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted, check_non_negative
from ..utils import deprecated
from ..utils import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
def _check_init(A, shape, whom):
A = check_array(A)
if np.shape(A) != shape:
raise ValueError('Array with wrong shape passed to %s. Expected %s, '
'but got %s ' % (whom, shape, np.shape(A)))
check_non_negative(A, whom)
if np.max(A) == 0:
raise ValueError('Array passed to %s is full of zeros.' % whom)
def _safe_compute_error(X, W, H):
"""Frobenius norm between X and WH, safe for sparse array"""
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(norm_X + norm_WH - 2. * cross_prod)
return error
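# A minimal sketch of _safe_compute_error on a dense, exactly factorized
# matrix: the Frobenius reconstruction error is then (numerically) zero, and
# the sparse branch computes the same quantity without densifying X.
def _safe_compute_error_sketch():
    rng = np.random.RandomState(0)
    W = np.abs(rng.randn(4, 2))
    H = np.abs(rng.randn(2, 3))
    X = np.dot(W, H)
    return _safe_compute_error(X, W, H)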
def _check_string_param(sparseness, solver):
allowed_sparseness = (None, 'data', 'components')
if sparseness not in allowed_sparseness:
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, allowed_sparseness))
allowed_solver = ('pg', 'cd')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : integer
The number of components desired in the approximation.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
    eps : float
        Truncate all values less than this in output to zero.
random_state : int seed, RandomState instance, or None (default)
Random number generator seed control, used in 'nndsvdar' and
'random' modes.
Returns
-------
W : array-like, shape (n_samples, n_components)
Initial guesses for solving X ~= WH
H : array-like, shape (n_components, n_features)
Initial guesses for solving X ~= WH
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if init is None:
if n_components < n_features:
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.randn(n_components, n_features)
W = avg * rng.randn(n_samples, n_components)
# we do not write np.abs(H, out=H) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(H, H)
np.abs(W, W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
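# A minimal sketch of the NNDSVD initialization on a small random non-negative
# matrix; W and H only provide a starting point, so np.dot(W, H) is a rough
# approximation of X rather than a converged factorization.
def _initialize_nmf_sketch():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 5))
    W, H = _initialize_nmf(X, n_components=2, init='nndsvd', random_state=0)
    # Shapes follow the X ~= WH convention: (6, 2) and (2, 5).
    return W.shape, H.shape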
def _nls_subproblem(V, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
V : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = fast_dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
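# A minimal sketch of the projected gradient subproblem: W is held fixed and H
# is fitted to a synthetic V = np.dot(W, H_true), starting from a random
# non-negative guess.
def _nls_subproblem_sketch():
    rng = np.random.RandomState(0)
    W = np.abs(rng.randn(6, 2))
    H_true = np.abs(rng.randn(2, 5))
    V = np.dot(W, H_true)
    H_init = np.abs(rng.randn(2, 5))
    H, _, n_iter = _nls_subproblem(V, W, H_init, tol=1e-4, max_iter=200)
    return H, n_iter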
def _update_projected_gradient_w(X, W, H, tolW, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = H.shape[0]
if sparseness is None:
Wt, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(beta) * np.ones((1,
n_components_))]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(eta) * np.eye(n_components_)]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return Wt.T, gradW.T, iterW
def _update_projected_gradient_h(X, W, H, tolH, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = W.shape[1]
if sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((n_components_, n_features))]),
safe_vstack([W,
np.sqrt(eta) * np.eye(n_components_)]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(beta)
* np.ones((1, n_components_))]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return H, gradH, iterH
def _fit_projected_gradient(X, W, H, tol, max_iter,
nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Compute Non-negative Matrix Factorization (NMF) with Projected Gradient
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with Sparseness Constraints.
Journal of Machine Learning Research 2004.
"""
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition
# as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
W, gradW, iterW = _update_projected_gradient_w(X, W, H, tolW,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _update_projected_gradient_h(X, W, H, tolH,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
W, _, _ = _update_projected_gradient_w(X, W, H, tol, nls_max_iter,
alpha, l1_ratio, sparseness,
beta, eta)
return W, H, n_iter
def _update_coordinate_descent(X, W, Ht, alpha, l1_ratio, shuffle,
random_state):
"""Helper function for _fit_coordinate_descent
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...)
"""
n_components = Ht.shape[1]
HHt = fast_dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
# L1 and L2 regularizations
l1_reg = 1. * l1_ratio * alpha
l2_reg = (1. - l1_ratio) * alpha
    # L2 regularization corresponds to increasing the diagonal of HHt
if l2_reg != 0.:
# adds l2_reg only on the diagonal
HHt.flat[::n_components + 1] += l2_reg
    # L1 regularization corresponds to decreasing each element of XHt
if l1_reg != 0.:
XHt -= l1_reg
seed = random_state.randint(np.iinfo(np.int32).max)
return _update_cdnmf_fast(W, HHt, XHt, shuffle, seed)
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, alpha=0.001,
l1_ratio=0., regularization=None, update_H=True,
verbose=0, shuffle=False, random_state=None):
"""Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
    alpha : double, default: 0.001
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If True, the samples will be taken in shuffled order during
coordinate descent.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order='C')
X = check_array(X, accept_sparse='csr')
alpha_H = 0.
alpha_W = 0.
if regularization in ('both', 'components'):
alpha_H = float(alpha)
if regularization in ('both', 'transformation'):
alpha_W = float(alpha)
rng = check_random_state(random_state)
for n_iter in range(max_iter):
violation = 0.
# Update W
violation += _update_coordinate_descent(X, W, Ht, alpha_W,
l1_ratio, shuffle, rng)
# Update H
if update_H:
violation += _update_coordinate_descent(X.T, Ht, W, alpha_H,
l1_ratio, shuffle, rng)
if n_iter == 0:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
def non_negative_factorization(X, W=None, H=None, n_components=None,
init='random', update_H=True, solver='cd',
tol=1e-4, max_iter=200, alpha=0., l1_ratio=0.,
regularization=None, random_state=None,
verbose=0, shuffle=False, nls_max_iter=2000,
sparseness=None, beta=1, eta=0.1):
"""Compute Non-negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
If update_H=False, it is used as a constant, to solve for W only.
n_components : integer
Number of components, if n_components is not set all features
are kept.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'custom': use custom matrices W and H
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a (deprecated) Projected Gradient solver.
'cd' is a Coordinate Descent solver.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean
If True, the samples will be taken in shuffled order during
coordinate descent.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
_check_string_param(sparseness, solver)
n_samples, n_features = X.shape
if n_components is None:
n_components = n_features
if not isinstance(n_components, six.integer_types) or n_components <= 0:
raise ValueError("Number of components must be positive;"
" got (n_components=%r)" % n_components)
if not isinstance(max_iter, numbers.Number) or max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % max_iter)
if not isinstance(tol, numbers.Number) or tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % tol)
# check W and H, or initialize them
if init == 'custom':
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=init,
random_state=random_state)
if solver == 'pg':
warnings.warn("'pg' solver will be removed in release 0.19."
" Use 'cd' solver instead.", DeprecationWarning)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(X, W, H, tol,
max_iter,
nls_max_iter,
alpha, l1_ratio,
sparseness,
beta, eta)
else: # transform
W, H, n_iter = _update_projected_gradient_w(X, W, H,
tol, nls_max_iter,
alpha, l1_ratio,
sparseness, beta,
eta)
elif solver == 'cd':
W, H, n_iter = _fit_coordinate_descent(X, W, H, tol,
max_iter,
alpha, l1_ratio,
regularization,
update_H=update_H,
verbose=verbose,
shuffle=shuffle,
random_state=random_state)
else:
raise ValueError("Invalid solver parameter '%s'." % solver)
if n_iter == max_iter:
warnings.warn("Maximum number of iteration %d reached. Increase it to"
" improve convergence." % max_iter, ConvergenceWarning)
return W, H, n_iter
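# A minimal usage sketch with the default coordinate descent solver, reusing
# the small data matrix from the NMF docstring below; W and H are non-negative
# and np.dot(W, H) approximates X.
def _non_negative_factorization_sketch():
    X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
    W, H, n_iter = non_negative_factorization(
        X, n_components=2, init='random', random_state=0)
    return W, H, n_iter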
class NMF(BaseEstimator, TransformerMixin):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'random': non-negative random matrices
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'custom': use custom matrices W and H, given in 'fit' method.
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a (deprecated) Projected Gradient solver.
'cd' is a Coordinate Descent solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
shuffle : boolean
If True, the samples will be taken in shuffled order during
coordinate descent.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, init=None, solver='cd',
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0, shuffle=False,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
self.verbose = verbose
self.shuffle = shuffle
if sparseness is not None:
warnings.warn("Controlling regularization through the sparseness,"
" beta and eta arguments is only available"
" for 'pg' solver, which will be removed"
" in release 0.19. Use another solver with L1 or L2"
" regularization instead.", DeprecationWarning)
self.nls_max_iter = nls_max_iter
self.sparseness = sparseness
self.beta = beta
self.eta = eta
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
W, H, n_iter_ = non_negative_factorization(
X=X, W=W, H=H, n_components=self.n_components,
init=self.init, update_H=True, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
if self.solver == 'pg':
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
self.reconstruction_err_ = _safe_compute_error(X, W, H)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Attributes
----------
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'n_components_')
W, _, n_iter_ = non_negative_factorization(
X=X, W=None, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
self.n_iter_ = n_iter_
return W
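# A minimal sketch of the estimator API: the model is fitted on training data
# and then used to project new samples onto the learnt components; the data
# matrices here are illustrative only.
def _nmf_transform_sketch():
    X_train = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
    X_new = np.array([[1.5, 1.0], [4.5, 0.9]])
    model = NMF(n_components=2, init='random', random_state=0)
    W_train = model.fit_transform(X_train)
    W_new = model.transform(X_new)
    # Both encodings have n_components = 2 columns.
    return W_train.shape, W_new.shape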
@deprecated("It will be removed in release 0.19. Use NMF instead."
"'pg' solver is still available until release 0.19.")
class ProjectedGradientNMF(NMF):
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
super(ProjectedGradientNMF, self).__init__(
n_components=n_components, init=init, solver='pg', tol=tol,
max_iter=max_iter, random_state=random_state, alpha=alpha,
l1_ratio=l1_ratio, verbose=verbose, nls_max_iter=nls_max_iter,
sparseness=sparseness, beta=beta, eta=eta)
| bsd-3-clause |
cvjena/libmaxdiv | experiments/hpw.py | 1 | 4781 | """ Runs the MDI algorithm on the Hurricane time-series. """
import sys
sys.path.append('..')
import numpy as np
import matplotlib.pylab as plt
import csv, datetime
from collections import OrderedDict
from maxdiv import maxdiv, preproc, baselines_noninterval, eval
HURRICANE_GT = { \
'Sandy' : (datetime.date(2012,10,22), datetime.date(2012,10,29)),
'Rafael' : (datetime.date(2012,10,12), datetime.date(2012,10,18)),
'Isaac' : (datetime.date(2012, 8,22), datetime.date(2012, 8,25))
}
def read_hpw_csv(csvFile):
""" Reads HPW data from a CSV file.
The CSV file must contain 4 fields per line:
date as 'yyyy-m-d-h', wind speed, air pressure and wave height
The first line is assumed to be field headings and will be ignored.
"""
ts = []
dates = []
mask = []
with open(csvFile) as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
continue
fields = [float(x) for x in line[1:]]
if np.isnan(fields).any():
ts.append([0] * len(fields))
mask.append(True)
else:
ts.append(fields)
mask.append(False)
dates.append(datetime.datetime(*[int(x) for i, x in enumerate(line[0].split('-'))]))
return np.ma.MaskedArray(np.array(ts).T, np.array(mask).reshape(1, len(mask)).repeat(3, axis = 0)), dates
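# A hedged usage sketch (assumes a CSV laid out as described above, e.g.
# 'HPW_2012_41046.csv'): the returned masked array has shape (3, T) with rows
# wind speed, air pressure and wave height, masked wherever a field was NaN:
#   data, dates = read_hpw_csv('HPW_2012_41046.csv')
#   data.shape   # -> (3, len(dates))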
def read_reduced_hpw_csv(csvFile):
""" Reads HPW data from a CSV file and takes 4-hourly mean values.
The CSV file must contain 4 fields per line:
date as 'yyyy-m-d-h', wind speed, air pressure and wave height
The first line is assumed to be field headings and will be ignored.
"""
# Read data from CSV file into ordered dict
data = OrderedDict()
with open(csvFile) as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
continue
date = tuple(int(x) if i < 3 else int(int(x) / 4) * 4 for i, x in enumerate(line[0].split('-')))
fields = [float(x) for x in line[1:]]
if not np.any(np.isnan(fields)):
if date not in data:
data[date] = []
data[date].append(fields)
# Take 4-hourly means and store them in a numpy array
ts = np.ndarray((3, len(data)))
for i, (date, values) in enumerate(data.items()):
ts[:,i] = np.array(values).mean(axis = 0).T
dates = [datetime.datetime(*date) for date in data.keys()]
return ts, dates
def datetime_diff(a, b):
""" Calculates the difference a - b between to dates in hours. """
if isinstance(a, datetime.date):
a = datetime.datetime.combine(a, datetime.datetime.min.time())
if isinstance(b, datetime.date):
b = datetime.datetime.combine(b, datetime.datetime.min.time())
return int((a-b).total_seconds()) / 3600
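# Worked example: datetime_diff(datetime.date(2012, 10, 23),
# datetime.date(2012, 10, 22)) returns 24, i.e. one day equals 24 hours.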
if __name__ == '__main__':
import sys
method = sys.argv[1] if len(sys.argv) > 1 else 'gaussian_cov_ts'
propmeth = sys.argv[2] if len(sys.argv) > 2 else 'dense'
# Load data
data, dates = read_hpw_csv('HPW_2012_41046.csv')
data = preproc.normalize_time_series(data)
# Detect
if method in ['hotellings_t', 'kde']:
if method == 'kde':
scores = baselines_noninterval.pointwiseKDE(preproc.td(data))
else:
scores = baselines_noninterval.hotellings_t(preproc.td(data))
regions = baselines_noninterval.pointwiseScoresToIntervals(scores, 24)
elif method == 'gaussian_cov_ts':
regions = maxdiv.maxdiv(data, 'gaussian_cov', mode = 'TS', td_dim = 3, td_lag = 1, proposals = propmeth,
extint_min_len = 24, extint_max_len = 72, num_intervals = 5)
else:
regions = maxdiv.maxdiv(data, method, mode = 'I_OMEGA', td_dim = 3, td_lag = 1, proposals = propmeth,
extint_min_len = 24, extint_max_len = 72, num_intervals = 5)
# Console output
print('-- Ground Truth --')
for name, (a, b) in HURRICANE_GT.items():
print('{:{}s}: {!s} - {!s}'.format(name, max(len(n) for n in HURRICANE_GT.keys()), a, b - datetime.timedelta(days = 1)))
print('\n-- Detected Intervals ({} with {} proposals) --'.format(method, propmeth))
for a, b, score in regions:
print('{!s} - {!s} (Score: {})'.format(dates[a], dates[b-1], score))
# Plot
ygt = [(datetime_diff(a, dates[0]), datetime_diff(b, dates[0])) for a, b in HURRICANE_GT.values()]
eval.plotDetections(data, regions, ygt,
ticks = { datetime_diff(d, dates[0]) : d.strftime('%b %Y') for d in (datetime.date(2012,mon,1) for mon in range(6, 12)) }) | lgpl-3.0 |
henridwyer/scikit-learn | sklearn/decomposition/nmf.py | 15 | 19103 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
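# Note: norm(x) is the Euclidean norm sqrt(sum(x_i**2)) computed through a
# single dot product on the raveled array, which is typically faster than
# np.linalg.norm for large contiguous arrays (see the linked blog post).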
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
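# Hoyer's sparseness of a length-n vector x is
#   (sqrt(n) - ||x||_1 / ||x||_2) / (sqrt(n) - 1)
# and lies in [0, 1]: a vector with a single non-zero entry scores 1.0, while
# a constant non-zero vector scores 0.0.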
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
random_state = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
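# A hedged usage sketch (shapes follow from randomized_svd): for X of shape
# (n_samples, n_features),
#   W, H = _initialize_nmf(X, n_components=5, variant='ar', random_state=0)
# gives non-negative W of shape (n_samples, 5) and H of shape (5, n_features).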
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
        Larger values make it possible to find a better step size but lead to
        a longer line search. 0.1 is a commonly used value in the optimization
        literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
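# A hedged usage sketch: with V of shape (n, m), a fixed W of shape (n, k) and
# an initial H of shape (k, m),
#   H, grad, n_iter = _nls_subproblem(V, W, H, tol=1e-4, max_iter=200)
# returns a non-negative H that approximately minimizes || W H - V ||_2.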
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvdar' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
random_state = self.random_state
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a')
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar')
elif init == "random":
rng = check_random_state(random_state)
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
mikeireland/pymfe | ghost_fit.py | 1 | 2187 | """A script to fit tramlines etc for Ghost data.
"""
from __future__ import division, print_function
import pymfe
import astropy.io.fits as pyfits
import numpy as np
import matplotlib.pyplot as plt
import pdb
import shutil
import matplotlib.cm as cm
#plt.ion()
#Define the files in use (NB xmod.txt and wavemod.txt should be correct)
#arc_file = "/home/jbento/code/pymfe/data/ghost/blue/std/arcstd_blue.fits"
flat_file = "/home/jbento/code/pymfe/data/ghost/red/high/flathigh_red.fits"
#instantiate the ghostsim arm
ghost_format = pymfe.ghost.Arm('red',mode='high')
#Create an initial model of the spectrograph.
xx, wave, blaze= ghost_format.spectral_format()
#Get the data and normalize by median
flat_data = pyfits.getdata(flat_file)
#arc_data = pyfits.getdata(arc_file)
nx = flat_data.shape[0]
ny = flat_data.shape[1]
x = flat_data.shape[0]
profilex = np.arange(x) - x // 2
# Now create a model of the slit profile
mod_slit = np.zeros(x)
if ghost_format.mode == 'high':
nfibers = 26
else:
nfibers = ghost_format.nl
for i in range(-nfibers // 2, nfibers // 2):
mod_slit += np.exp(-(profilex - i * ghost_format.fiber_separation)**2 /
2.0 / ghost_format.profile_sigma**2)
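# The modelled slit profile is a sum of equally spaced Gaussians, one per
# fiber: mod_slit(x) = sum_i exp(-(x - i*fiber_separation)**2 / (2*sigma**2)),
# where sigma is ghost_format.profile_sigma and i runs over the fiber indices.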
plt.plot(flat_data[:,1000])
x=np.arange(flat_data[:,0].shape[0])
plt.plot(x-1,mod_slit*400)
plt.show()
#Have a look at the default model and make small adjustments if needed.
flat_conv=ghost_format.slit_flat_convolve(flat_data)
ghost_format.adjust_model(flat_conv,convolve=False,percentage_variation=10)
#Re-fit
ghost_format.fit_x_to_image(flat_conv,decrease_dim=8,inspect=True)
#shutil.copyfile('xmod.txt', 'data/subaru/xmod.txt')
'''
#Now find the other lines, after first re-loading into the extractor.
ghost_extract = pymfe.Extractor(ghost_format, transpose_data=True)
ghost_extract.find_lines(arc_data.T, arcfile='data/subaru/neon.txt',flat_data=flat_data.T)
#cp arclines.txt data/subaru/
shutil.copyfile('data/subaru/arclines.txt','data/subaru/arclines.backup')
shutil.copyfile('arclines.txt', 'data/subaru/arclines.txt')
#Now finally do the wavelength fit!
ghost_format.read_lines_and_fit()
shutil.copyfile('wavemod.txt', 'data/subaru/wavemod.txt')
'''
| mit |
Juggerr/pykml | docs/sphinxext/matplotlib/ipython_directive.py | 7 | 15656 | import sys, os, shutil, imp, warnings, cStringIO, re
import IPython
from IPython.Shell import MatplotlibShell
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.parsers.rst import directives
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[a-z]', x)[0])
for x in sphinx_version[:2]])
COMMENT, INPUT, OUTPUT = range(3)
rgxin = re.compile('In \[(\d+)\]:\s?(.*)\s*')
rgxout = re.compile('Out\[(\d+)\]:\s?(.*)\s*')
fmtin = 'In [%d]:'
fmtout = 'Out[%d]:'
def block_parser(part):
"""
part is a string of ipython text, comprised of at most one
input, one ouput, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
#print 'PARSE', lines
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
#print 'OUTPUT', output
block.append((OUTPUT, output))
break
#print 'returning block', block
return block
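# A hedged example of the parser output: for a part consisting of the lines
#   In [1]: x = 1
#   # a comment
#   Out[1]: 1
# block_parser returns
#   [(INPUT, (None, 'x = 1', '')), (COMMENT, '# a comment'), (OUTPUT, '1')]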
import matplotlib
matplotlib.use('Agg')
class EmbeddedSphinxShell:
def __init__(self):
self.cout = cStringIO.StringIO()
IPython.Shell.Term.cout = self.cout
IPython.Shell.Term.cerr = self.cout
argv = ['-autocall', '0']
self.user_ns = {}
self.user_glocal_ns = {}
self.IP = IPython.ipmaker.make_IPython(
argv, self.user_ns, self.user_glocal_ns, embedded=True,
#shell_class=IPython.Shell.InteractiveShell,
shell_class=MatplotlibShell,
rc_override=dict(colors = 'NoColor'))
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
        # we need to bookmark the current dir first so we can save
# relative to it
self.process_input('bookmark ipy_basedir')
self.cout.seek(0)
self.cout.truncate(0)
def process_input(self, line):
'process the input, capturing stdout'
#print "input='%s'"%self.input
stdout = sys.stdout
sys.stdout = self.cout
#self.IP.resetbuffer()
self.IP.push(self.IP.prefilter(line, 0))
#self.IP.runlines(line)
sys.stdout = stdout
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
#print 'BLOCK', block
ret = []
output = None
input_lines = None
m = rgxin.match(str(self.IP.outputcache.prompt1).strip())
lineno = int(m.group(1))
input_prompt = fmtin%lineno
output_prompt = fmtout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
if not self.is_suppress:
ret.append(data)
elif token==INPUT:
decorator, input, rest = data
#print 'INPUT:', data
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and decorator.startswith('@savefig')
#print 'is_verbatim=%s, is_doctest=%s, is_suppress=%s, is_savefig=%s'%(is_verbatim, is_doctest, is_suppress, is_savefig)
input_lines = input.split('\n')
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
saveargs = decorator.split(' ')
filename = saveargs[1]
outfile = os.path.join('_static/%s'%filename)
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = outfile
image_directive = '\n'.join(imagerows)
# TODO: can we get "rest" from ipython
#self.process_input('\n'.join(input_lines))
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input('')
else:
# only submit the line in non-verbatim mode
self.process_input(line)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input(line)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress:
if len(rest.strip()):
if is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append("%s"%rest)
ret.append('')
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon and not is_verbatim:
ret.append(output)
self.cout.truncate(0)
elif token==OUTPUT:
#print 'token==OUTPUT is_verbatim=%s'%is_verbatim
if is_verbatim:
# construct a mock output prompt
output = '%s %s\n'%(fmtout%lineno, data)
ret.append(output)
#print 'token==OUTPUT', output
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
ind = found.find(output_prompt)
if ind<0:
raise RuntimeError('output prompt="%s" does not match out line=%s'%(output_prompt, found))
found = found[len(output_prompt):].strip()
if found!=submitted:
raise RuntimeError('doctest failure for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted))
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
if image_file is not None:
self.insure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command
self.process_input('bookmark ipy_thisdir')
self.process_input('cd -b ipy_basedir')
self.process_input(command)
self.process_input('cd -b ipy_thisdir')
self.cout.seek(0)
self.cout.truncate(0)
#print 'returning', ret, figure
return ret, image_directive
def insure_pyplot(self):
if self._pyplot_imported:
return
self.process_input('import matplotlib.pyplot as plt')
shell = EmbeddedSphinxShell()
def ipython_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine,
):
debug = ipython_directive.DEBUG
shell.is_suppress = options.has_key('suppress')
shell.is_doctest = options.has_key('doctest')
shell.is_verbatim = options.has_key('verbatim')
#print 'ipy', shell.is_suppress, options
parts = '\n'.join(content).split('\n\n')
lines = ['.. sourcecode:: ipython', '']
figures = []
for part in parts:
block = block_parser(part)
if len(block):
rows, figure = shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print '\n'.join(lines)
else:
#print 'INSERTING %d lines'%len(lines)
state_machine.insert_input(
lines, state_machine.input_lines.source(0))
return []
ipython_directive.DEBUG = False
def setup(app):
setup.app = app
options = {
'suppress': directives.flag,
'doctest': directives.flag,
'verbatim': directives.flag,
}
app.add_directive('ipython', ipython_directive, True, (0, 2, 0), **options)
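# A hedged usage note: listing this module in a Sphinx project's conf.py
# ``extensions`` makes Sphinx call setup(app) above, which registers the
# ``ipython`` directive together with its ``suppress``, ``doctest`` and
# ``verbatim`` flags.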
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
--------> print(url.split('&'))
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: np.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
--------> print(x)
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
r"""
In [239]: 1/2
@verbatim
Out[239]: 0
In [240]: 1.0/2.0
Out[240]: 0.5
""",
r"""
@verbatim
In [6]: pwd
Out[6]: '/home/jdhunter/mypy'
""",
r"""
@verbatim
In [151]: myfile.upper?
Type: builtin_function_or_method
Base Class: <type 'builtin_function_or_method'>
String Form: <built-in method upper of str object at 0x980e2f0>
Namespace: Interactive
Docstring:
S.upper() -> string
Return a copy of the string S converted to uppercase.
"""
]
ipython_directive.DEBUG = True
#options = dict(suppress=True)
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
if __name__=='__main__':
test()
| bsd-3-clause |
mwalton/artificial-olfaction | evoVisualization/cloud.py | 1 | 1382 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import argparse
from sklearn.preprocessing import StandardScaler
from sklearn import decomposition
from sklearn import datasets
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--file", required = True,
help = "path to the evolution run file")
ap.add_argument("-l", "--label", required=True,
help = "label file")
ap.add_argument("-u", "--x0", required=True, type=int,
help = "first component")
ap.add_argument("-v", "--x1", required=True, type=int,
help = "second component")
ap.add_argument("-w", "--x2", required=True, type=int,
help = "third component")
ap.add_argument("-Y", "--Y", required=True, type=int,
help = "accuracy")
args = vars(ap.parse_args())
raw = np.genfromtxt(args["file"], delimiter=',', dtype=float)
labels = np.genfromtxt(args["label"], delimiter=',', dtype=None)
xIdx = [args["x0"], args["x1"], args["x2"]]
n = np.shape(raw)[0]
X = raw[:,xIdx]
y = raw[:,args["Y"]]
fig = plt.figure(1, figsize=(8, 6))
plt.clf()
ax = Axes3D(fig, elev=48, azim=134)
plt.cla()
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
ax.set_title(labels[args["Y"]] + " n=" + str(n))
ax.set_xlabel(labels[xIdx[0]])
ax.set_ylabel(labels[xIdx[1]])
ax.set_zlabel(labels[xIdx[2]])
plt.show() | mit |
poffuomo/spark | python/pyspark/sql/tests.py | 1 | 113287 | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import py4j
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
_have_pandas = False
try:
import pandas
_have_pandas = True
except:
# No Pandas, but that's okay, we'll skip those tests
pass
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type
from pyspark.tests import ReusedPySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
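# Example: UTCOffsetTimezone(offset=2).utcoffset(None) equals
# datetime.timedelta(hours=2), so naive datetimes can be tagged with a fixed
# UTC offset in the tests below.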
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
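# The UDT round-trips its Python object through the Catalyst sqlType(): e.g.
# ExamplePointUDT().serialize(ExamplePoint(1.0, 2.0)) == [1.0, 2.0] and
# ExamplePointUDT().deserialize([1.0, 2.0]) == ExamplePoint(1.0, 2.0).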
class PythonOnlyUDT(UserDefinedType):
"""
    User-defined type (UDT) for PythonOnlyPoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
    An example class to demonstrate UDT in Python only
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
class SQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.spark = SparkSession(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
def tearDown(self):
super(SQLTests, self).tearDown()
# tear down test_bucketed_write state
self.spark.sql("DROP TABLE IF EXISTS pyspark_bucket")
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_chained_udf(self):
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_single_udf_with_repeated_argument(self):
# regression test for SPARK-20685
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
row = self.spark.sql("SELECT add(1, 1)").first()
self.assertEqual(tuple(row), (2, ))
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(a=1)])
df = left.join(right, on='a', how='left_outer')
df = df.withColumn('b', udf(lambda x: 'x')(df.a))
self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
# regression test for SPARK-18589
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(b=1)])
f = udf(lambda a, b: a == b, BooleanType())
df = left.crossJoin(right).filter(f("a", "b"))
self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a < 2, BooleanType())
sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
from pyspark.sql.functions import udf
my_copy = udf(lambda x: x, IntegerType())
df = self.spark.range(10).orderBy("id")
res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
res.explain(True)
self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
df = self.spark.range(10)
add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
self.assertListEqual(
df.selectExpr("add_three(id) AS plus_three").collect(),
df.select(add_three("id").alias("plus_three")).collect()
)
def test_multiLine_json(self):
people1 = self.spark.read.json("python/test_support/sql/people.json")
people_array = self.spark.read.json("python/test_support/sql/people_array.json",
multiLine=True)
self.assertEqual(people1.collect(), people_array.collect())
def test_multiline_csv(self):
ages_newlines = self.spark.read.csv(
"python/test_support/sql/ages_newlines.csv", multiLine=True)
expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
tmpPath,
ignoreLeadingWhiteSpace=False,
ignoreTrailingWhiteSpace=False)
expected = [Row(value=u' a,b , c ')]
readback = self.spark.read.text(tmpPath)
self.assertEqual(readback.collect(), expected)
shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
"python/test_support/sql/orc_partitioned/b=1/c=1"])
self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
sourceFile = udf(lambda path: path, StringType())
filePath = "python/test_support/sql/people1.json"
row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
def filename(path):
return path
sameText = udf(filename, StringType())
rdd = self.sc.textFile('python/test_support/sql/people.json')
df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
row = df.select(sameText(df['file'])).first()
self.assertTrue(row[0].find("people.json") != -1)
rdd2 = self.sc.newAPIHadoopFile(
'python/test_support/sql/people.json',
'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.Text')
df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
row2 = df2.select(sameText(df2['file'])).first()
self.assertTrue(row2[0].find("people.json") != -1)
def test_udf_defers_judf_initalization(self):
# This is separate of UDFInitializationTests
# to avoid context initialization
# when udf is called
from pyspark.sql.functions import UserDefinedFunction
f = UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
f._judf_placeholder,
"judf should not be initialized before the first call."
)
self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
self.assertIsNotNone(
f._judf_placeholder,
"judf should be initialized after UDF has been called."
)
def test_udf_with_string_return_type(self):
from pyspark.sql.functions import UserDefinedFunction
add_one = UserDefinedFunction(lambda x: x + 1, "integer")
make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
make_array = UserDefinedFunction(
lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
actual = (self.spark.range(1, 2).toDF("x")
.select(add_one("x"), make_pair("x"), make_array("x"))
.first())
self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
from pyspark.sql.functions import UserDefinedFunction
from pyspark.sql.types import StringType
non_callable = None
self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
from pyspark.sql.functions import lit, udf
from pyspark.sql.types import IntegerType, DoubleType
@udf(IntegerType())
def add_one(x):
if x is not None:
return x + 1
@udf(returnType=DoubleType())
def add_two(x):
if x is not None:
return float(x + 2)
@udf
def to_upper(x):
if x is not None:
return x.upper()
@udf()
def to_lower(x):
if x is not None:
return x.lower()
@udf
def substr(x, start, end):
if x is not None:
return x[start:end]
@udf("long")
def trunc(x):
return int(x)
@udf(returnType="double")
def as_double(x):
return float(x)
df = (
self.spark
.createDataFrame(
[(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
.select(
add_one("one"), add_two("one"),
to_upper("Foo"), to_lower("Foo"),
substr("foobar", lit(0), lit(3)),
trunc("float"), as_double("one")))
self.assertListEqual(
[tpe for _, tpe in df.dtypes],
["int", "double", "string", "string", "string", "bigint", "double"]
)
self.assertListEqual(
list(df.first()),
[2, 3.0, "FOO", "foo", "foo", 3, 1.0]
)
def test_udf_wrapper(self):
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
def f(x):
"""Identity"""
return x
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
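    # Applying an explicit StructType should cover primitive, date/time, map, array,
    # nested-struct and nullable fields, through both the DataFrame API and SQL.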
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
from pyspark.sql.types import _parse_schema_abstract, _infer_schema_type
rdd = self.sc.parallelize([(127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3])])
abstract = "byte1 short1 float1 time1 map1{} struct1(b) list1[]"
schema = _parse_schema_abstract(abstract)
typedSchema = _infer_schema_type(rdd.first(), schema)
df = self.spark.createDataFrame(rdd, typedSchema)
r = (127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1), {"a": 1}, Row(b=2), [1, 2, 3])
self.assertEqual(r, tuple(df.first()))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
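    # UDT datatypes should round-trip through pickle and the JVM JSON parser, and
    # type inference/verification should work for Scala-backed and Python-only UDTs.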
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _verify_type
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_verify_type(ExamplePoint(1.0, 2.0), ExamplePointUDT())
self.assertRaises(ValueError, lambda: _verify_type([1.0, 2.0], ExamplePointUDT()))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_verify_type(PythonOnlyPoint(1.0, 2.0), PythonOnlyUDT())
self.assertRaises(ValueError, lambda: _verify_type([1.0, 2.0], PythonOnlyUDT()))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.show()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
gd.select(udf(*gd)).collect()
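    # A UDF that returns None for some inputs should produce null UDT values.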
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
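    # Arithmetic, comparison, boolean and string operators applied to Columns should
    # build Column expressions instead of evaluating eagerly.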
def test_column_operators(self):
ci = self.df.key
cs = self.df.value
c = ci == cs
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
self.assertRaisesRegexp(ValueError,
"Cannot apply 'in' operator against a column",
lambda: 1 in cs)
def test_column_getitem(self):
from pyspark.sql.functions import col
self.assertIsInstance(col("foo")[1:3], Column)
self.assertIsInstance(col("foo")[0], Column)
self.assertIsInstance(col("foo")["bar"], Column)
self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
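    # The second argument of first()/last() toggles ignorenulls, which determines
    # whether null rows are skipped when picking the first/last value.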
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
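    # approxQuantile should accept a single column name, a list or a tuple of
    # columns, returning a list of float quantiles (or a list of such lists).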
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
aq = df.stat.approxQuantile("a", [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", "b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile(("a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr("a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov("a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab("a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
            self.assertEqual(row[1], 1)
            self.assertEqual(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
            assert sum(diff) == len(a)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, 1).alias('b')).collect()
        # The value argument is implicitly cast to the element type of the array.
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_struct_type(self):
from pyspark.sql.types import StructType, StringType, StructField
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
with self.assertRaises(ValueError):
struct1 = StructType().add("name")
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
with self.assertRaises(KeyError):
not_a_field = struct1["f9"]
with self.assertRaises(IndexError):
not_a_field = struct1[9]
with self.assertRaises(TypeError):
not_a_field = struct1[9.9]
def test_metadata_null(self):
from pyspark.sql.types import StructType, StringType, StructField
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.save(format="json", mode="overwrite", path=tmpPath,
noUse="this options will not be used in save.")
actual = self.spark.read.load(format="json", path=tmpPath,
noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
csvpath = os.path.join(tempfile.mkdtemp(), 'data')
df.write.option('quote', None).format('csv').save(csvpath)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
.option("noUse", "this option will not be used in save.")\
.format("json").save(path=tmpPath)
actual =\
self.spark.read.format("json")\
.load(path=tmpPath, noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_stream_trigger(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
# Should take at least one arg
try:
df.writeStream.trigger()
except ValueError:
pass
# Should not take multiple args
try:
df.writeStream.trigger(once=True, processingTime='5 seconds')
except ValueError:
pass
# Should take only keyword args
try:
df.writeStream.trigger('5 seconds')
self.fail("Should have thrown an exception")
except TypeError:
pass
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream\
.format('text')\
.option('path', 'python/test_support/sql/streaming')\
.schema(schema)\
.load()
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
.schema(bad_schema)\
.load(path='python/test_support/sql/streaming', schema=schema, format='text')
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
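    # Options set on the writeStream builder should take effect; the test checks the
    # query name and that output and checkpoint files are actually written.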
def test_stream_save_options(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
.withColumn('id', lit(1))
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
.format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
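    # Keyword arguments passed to start() should override options and settings
    # configured earlier on the writeStream builder.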
def test_stream_save_options_overwrite(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
fake1 = os.path.join(tmpPath, 'fake1')
fake2 = os.path.join(tmpPath, 'fake2')
q = df.writeStream.option('checkpointLocation', fake1)\
.format('memory').option('path', fake2) \
.queryName('fake_query').outputMode('append') \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
def func(x):
time.sleep(1)
return x
from pyspark.sql.functions import col, udf
sleep_udf = udf(func)
# Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
# were no updates.
q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
# "lastProgress" will return None in most cases. However, as it may be flaky when
# Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
# may throw error with a high chance and make this test flaky, so we should still be
# able to detect broken codes.
q.lastProgress
q.processAllAvailable()
lastProgress = q.lastProgress
recentProgress = q.recentProgress
status = q.status
self.assertEqual(lastProgress['name'], q.name)
self.assertEqual(lastProgress['id'], q.id)
self.assertTrue(any(p == lastProgress for p in recentProgress))
self.assertTrue(
"message" in status and
"isDataAvailable" in status and
"isTriggerActive" in status)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_exception(self):
sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
sq = sdf.writeStream.format('memory').queryName('query_explain').start()
try:
sq.processAllAvailable()
self.assertEqual(sq.exception(), None)
finally:
sq.stop()
from pyspark.sql.functions import col, udf
from pyspark.sql.utils import StreamingQueryException
bad_udf = udf(lambda x: 1 / 0)
sq = sdf.select(bad_udf(col("value")))\
.writeStream\
.format('memory')\
.queryName('this_query')\
.start()
try:
# Process some data to fail the query
sq.processAllAvailable()
self.fail("bad udf should fail the query")
except StreamingQueryException as e:
# This is expected
self.assertTrue("ZeroDivisionError" in e.desc)
finally:
sq.stop()
self.assertTrue(type(sq.exception()) is StreamingQueryException)
self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
if sys.version >= '3':
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
else:
columnName = unicode("数量", "utf-8")
self.assertTrue(isinstance(columnName, unicode))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
        # Saving this as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
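    # DecimalType values should keep their precision through column arithmetic and
    # a Parquet round trip.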
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# should fail if subset is not list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different length
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
        # should fail when an unexpected type is received
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
        # should fail if mixed-type replacements are provided
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
try:
df.select(sha2(df.a, 1024)).collect()
except IllegalArgumentException as e:
self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
self.assertRegexpMatches(e.stackTrace,
"org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
    # Regression test for invalid join methods when on is None, SPARK-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
def test_list_tables(self):
from pyspark.sql.catalog import Table
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
self.assertEquals(tables, tablesDefault)
self.assertEquals(len(tables), 2)
self.assertEquals(len(tablesSomeDb), 2)
self.assertEquals(tables[0], Table(
name="tab1",
database="default",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tables[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertEquals(tablesSomeDb[0], Table(
name="tab2",
database="some_db",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tablesSomeDb[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
from pyspark.sql.catalog import Function
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
functions = dict((f.name, f) for f in spark.catalog.listFunctions())
functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
self.assertTrue(len(functions) > 200)
self.assertTrue("+" in functions)
self.assertTrue("like" in functions)
self.assertTrue("month" in functions)
self.assertTrue("to_date" in functions)
self.assertTrue("to_timestamp" in functions)
self.assertTrue("to_unix_timestamp" in functions)
self.assertTrue("current_database" in functions)
self.assertEquals(functions["+"], Function(
name="+",
description=None,
className="org.apache.spark.sql.catalyst.expressions.Add",
isTemporary=True))
self.assertEquals(functions, functionsDefault)
spark.catalog.registerFunction("temp_func", lambda x: str(x))
spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
self.assertTrue(set(functions).issubset(set(newFunctions)))
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
self.assertTrue("temp_func" in newFunctions)
self.assertTrue("func1" in newFunctions)
self.assertTrue("func2" not in newFunctions)
self.assertTrue("temp_func" in newFunctionsSomeDb)
self.assertTrue("func1" not in newFunctionsSomeDb)
self.assertTrue("func2" in newFunctionsSomeDb)
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
from pyspark.sql.catalog import Column
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)
self.assertEquals(len(columns), 2)
self.assertEquals(columns[0], Column(
name="age",
description=None,
dataType="int",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns[1], Column(
name="name",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
self.assertEquals(len(columns2), 2)
self.assertEquals(columns2[0], Column(
name="nickname",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns2[1], Column(
name="tolerance",
description=None,
dataType="float",
nullable=True,
isPartition=False,
isBucket=False))
self.assertRaisesRegexp(
AnalysisException,
"tab2",
lambda: spark.catalog.listColumns("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listColumns("does_not_exist"))
def test_cache(self):
spark = self.spark
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
        # Pyrolite version <= 4.9 could not serialize BinaryType with Python 3 (SPARK-17808)
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
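    # bucketBy()/sortBy() accept both varargs and lists of column names; the catalog
    # is queried to confirm which columns were actually used for bucketing.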
def test_bucketed_write(self):
data = [
(1, "foo", 3.0), (2, "foo", 5.0),
(3, "bar", -1.0), (4, "bar", 6.0),
]
df = self.spark.createDataFrame(data, ["x", "y", "z"])
def count_bucketed_cols(names, table="pyspark_bucket"):
"""Given a sequence of column names and a table name
query the catalog and return number o columns which are
used for bucketing
"""
cols = self.spark.catalog.listColumns(table)
num = len([c for c in cols if c.name in names and c.isBucket])
return num
# Test write with one bucketing column
df.write.bucketBy(3, "x").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
        # Test write with two bucketing columns
df.write.bucketBy(3, "x", "y").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort
df.write.bucketBy(2, "x").sortBy("z").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with a list of columns
df.write.bucketBy(3, ["x", "y"]).mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with a list of columns
(df.write.bucketBy(2, "x")
.sortBy(["y", "z"])
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with multiple columns
(df.write.bucketBy(2, "x")
.sortBy("y", "z")
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
@unittest.skipIf(not _have_pandas, "Pandas not installed")
def test_to_pandas(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())
data = [
(1, "foo", True, 3.0), (2, "foo", True, 5.0),
(3, "bar", False, -1.0), (4, "bar", False, 6.0),
]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
class HiveSparkSubmitTests(SparkSubmitTests):
def test_hivecontext(self):
# This test checks that HiveContext is using Hive metastore (SPARK-16224).
# It sets a metastore url and checks if there is a derby dir created by
# Hive metastore. If this derby dir exists, HiveContext is using
# Hive metastore.
metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
hive_site_dir = os.path.join(self.programDir, "conf")
hive_site_file = self.createTempFile("hive-site.xml", ("""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>%s</value>
| </property>
|</configuration>
""" % metastore_URL).lstrip(), "conf")
script = self.createTempFile("test.py", """
|import os
|
|from pyspark.conf import SparkConf
|from pyspark.context import SparkContext
|from pyspark.sql import HiveContext
|
|conf = SparkConf()
|sc = SparkContext(conf=conf)
|hive_context = HiveContext(sc)
|print(hive_context.sql("show databases").collect())
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]",
"--driver-class-path", hive_site_dir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("default", out.decode('utf-8'))
self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
# We can't include this test in SQLTests because we would stop the class's SparkContext and
# cause other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
class UDFInitializationTests(unittest.TestCase):
def tearDown(self):
if SparkSession._instantiatedSession is not None:
SparkSession._instantiatedSession.stop()
if SparkContext._active_spark_context is not None:
SparkContext._active_spark_context.stop()
def test_udf_init_shouldnt_initalize_context(self):
from pyspark.sql.functions import UserDefinedFunction
UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
SparkContext._active_spark_context,
"SparkContext shouldn't be initialized when UserDefinedFunction is created."
)
self.assertIsNone(
SparkSession._instantiatedSession,
"SparkSession shouldn't be initialized when UserDefinedFunction is created."
)
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
except TypeError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
os.unlink(cls.tempdir.name)
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
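# Each expected tuple is (value, key, max, min, count, row_number, rank, dense_rank, ntile),
# matching the column order of the select above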
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
# Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date, datetime
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])
@unittest.skipIf(sys.version_info < (3, 3), "Unittest < 3.3 doesn't support mocking")
def test_unbounded_frames(self):
from unittest.mock import patch
from pyspark.sql import functions as F
from pyspark.sql import window
import importlib
df = self.spark.range(0, 3)
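# The helpers below check the generated column name to confirm that +/-sys.maxsize
# boundaries are translated into UNBOUNDED PRECEDING/FOLLOWING frame specifications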
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
with patch("sys.maxsize", 2 ** 31 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 63 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 127 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
importlib.reload(window)
if __name__ == "__main__":
from pyspark.sql.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
| apache-2.0 |
batuhaniskr/Social-Network-Tracking-And-Analysis | analysis.py | 1 | 4391 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import getopt
import json
import os.path
import sqlite3
import sys
from collections import Counter
import matplotlib.pyplot as pl
import numpy as np
from flask import Flask, render_template
from geopy.geocoders import Nominatim
from termcolor import colored
import settings
ROOT_DIR = os.path.dirname(os.pardir)
db_path = os.path.join(ROOT_DIR, "TweetAnalysis.db")
app = Flask(__name__)
def main(argv):
if len(argv) == 1 and argv[0] == '-h':
print("""
[Analysis]
[--location] for location analysis
[--hashtag] for hashtag analysis
[--user] for user analysis
""")
return
try:
opts, args = getopt.getopt(argv, "", ("hashtag", "user", "location", "h"))
for opt, arg in opts:
if opt == '--location':
port = settings.PORT
app.run('127.0.0.1', port=port)
elif opt == '--user':
analysis_user()
elif opt == '--hashtag':
analysis_hashtag()
else:
print('Invalid selection.')
    except getopt.GetoptError:
        print('Invalid parameters. Use "-h" for help.')
@app.route('/locations')
def map():
location = location_analysis()
api_key = settings.GOOGLE_MAP_API_KEY
url = 'https://maps.googleapis.com/maps/api/js?key=' + api_key + '&libraries=visualization&callback=initMap'
return render_template('locations.html', location=location, url=url)
def analysis_user():
with sqlite3.connect(db_path) as db:
conn = db
c = conn.cursor()
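# Top 10 most active accounts: group tweets by username and order by tweet count (descending)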
c.execute("SELECT username, count(*) as tekrar FROM Tweet group by username order by tekrar desc LIMIT 10")
data = c.fetchall()
ilk = []
y = []
i = 0
for row in data:
ilk.append(row[0])
y.append(row[1])
i = i + 1
pl.figure(1)
x = range(i)
pl.bar(x, y, align='center')
pl.xticks(x, ilk)
# pl.plot(x, y, "-")
pl.title('User - Tweet Count')
pl.xlabel('Username')
pl.ylabel('Tweet Count')
print(colored("[INFO] Showing graph of user analysis", "green"))
pl.show()
def analysis_hashtag():
with sqlite3.connect(db_path) as db:
conn = db
c = conn.cursor()
c.execute("SELECT hashtag from Tweet")
hashtag_list = []
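# A row may store several space-separated hashtags; split those so every hashtag is counted individually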
for row in c.fetchall():
if (row != ('',)):
if " " in ''.join(row):
for m in ''.join(row).split(' '):
hashtag_list.append(m)
else:
single_item = ''.join(row)
hashtag_list.append(single_item)
counter = Counter(hashtag_list).most_common(10)
pl.rcdefaults()
keys = []
performance = []
for i in counter:
performance.append(i[1])
keys.append(i[0])
pl.rcdefaults()
y_pos = np.arange(len(keys))
error = np.random.rand(len(keys))
pl.barh(y_pos, performance, xerr=error, align='center', alpha=0.4, )
pl.yticks(y_pos, keys)
pl.xlabel('quantity')
pl.title('hashtags')
print(colored("[INFO] Showing graph of hashtag analysis", "green"))
pl.show()
def location_analysis():
with sqlite3.connect(db_path) as db:
conn = db
c = conn.cursor()
locxy = []
c.execute("Select place from location")
loc_array = c.fetchall()
# mapping
geo_data = {
"features": []
}
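# Geocode each stored place name into a lat/lng feature consumed by the Google Maps
# heatmap rendered through the /locations route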
for x in range(len(loc_array)):
    # fetchall() returns 1-tuples, so unpack the place string before geocoding it
    place = loc_array[x][0]
    if place != '':
        geolocator = Nominatim()
        location = geolocator.geocode(place)
locxy.append(location.latitude)
locxy.append(location.longitude)
geo_json_feature = {
"lat": location.latitude,
"lng": location.longitude
}
geo_data['features'].append(geo_json_feature)
locxy.clear()
json_location = json.dumps(geo_data)
print(colored("[INFO] Showing graph of location analysis", "green"))
return json_location
if __name__ == '__main__':
main(sys.argv[1:])
| mit |
HWNi/DATA515-Project | uberTaxi/script/split_data.py | 1 | 1590 | import pandas as pd
import os
def split_uber_data(file_name):
"""
Split the Date/Time column of an Uber data set into separate date/time fields.
Parameter: Uber data file name
Return: True or False for unittest purposes
True, the output file was saved
False, the output file failed to save
"""
df = pd.read_csv(file_name, sep=',', header=0)
# process the Data/Time column
time = pd.DatetimeIndex(df['Date/Time'])
df['year'] = time.year
df['month'] = time.month
df['day'] = time.day
df['dayofweek'] = time.dayofweek
df['hour'] = time.hour
df['minute'] = time.minute
df['time'] = time.time
# converts the data frame to csv file
df.to_csv("split_" + file_name, index=False)
# return True or False for unittest purpose
if os.path.exists("split_" + file_name):
return True
else:
return False
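# Example usage (hypothetical file name):
#   split_uber_data("uber-raw-data-apr14.csv")
# writes "split_uber-raw-data-apr14.csv" with the added year/month/day/dayofweek/hour/minute/time columns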
def split_taxi_data(file_name):
"""
Split the Date/Time column of a taxi data set into separate date/time fields.
Parameter: taxi data file name
Return: True or False for unittest purposes
"""
df = pd.read_csv(file_name, sep=',', header=0)
# process the Data/Time column
time = pd.DatetimeIndex(df.iloc[:, 0])  # .icol() was removed from pandas; use positional .iloc instead
df['year'] = time.year
df['month'] = time.month
df['day'] = time.day
df['dayofweek'] = time.dayofweek
df['hour'] = time.hour
df['minute'] = time.minute
df['time'] = time.time
# converts the data frame to csv file
df.to_csv("split_" + file_name, index=False)
# return True or False for unittest purpose
if os.path.exists("split_" + file_name):
return True
else:
return False
| mit |
mayblue9/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: since they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
michigraber/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of the NearestCentroid classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
Nelca/buildMLSystem | ch06/03_clean.py | 6 | 5976 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script tries to improve the classifier by cleaning the tweets a bit
#
import time
start_time = time.time()
import re
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from sklearn.pipeline import Pipeline
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from utils import log_false_positives
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from utils import load_sent_word_net
sent_word_net = load_sent_word_net()
phase = "03"
emo_repl = {
# positive emoticons
"<3": " good ",
":d": " good ", # :D in lower case
":dd": " good ", # :DD in lower case
"8)": " good ",
":-)": " good ",
":)": " good ",
";)": " good ",
"(-:": " good ",
"(:": " good ",
# negative emoticons:
":/": " bad ",
":>": " sad ",
":')": " sad ",
":-(": " bad ",
":(": " bad ",
":S": " bad ",
":-S": " bad ",
}
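# Replace longer emoticons first (e.g. ":dd" before ":d") by sorting the keys by decreasing length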
emo_repl_order = [k for (k_len, k) in reversed(
sorted([(len(k), k) for k in emo_repl.keys()]))]
re_repl = {
r"\br\b": "are",
r"\bu\b": "you",
r"\bhaha\b": "ha",
r"\bhahaha\b": "ha",
r"\bdon't\b": "do not",
r"\bdoesn't\b": "does not",
r"\bdidn't\b": "did not",
r"\bhasn't\b": "has not",
r"\bhaven't\b": "have not",
r"\bhadn't\b": "had not",
r"\bwon't\b": "will not",
r"\bwouldn't\b": "would not",
r"\bcan't\b": "can not",
r"\bcannot\b": "can not",
}
def create_ngram_model(params=None):
def preprocessor(tweet):
global emoticons_replaced
tweet = tweet.lower()
for k in emo_repl_order:
tweet = tweet.replace(k, emo_repl[k])
for r, repl in re_repl.iteritems():
tweet = re.sub(r, repl, tweet)
return tweet
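# Word-level TF-IDF over the normalized tweets, fed to a multinomial Naive Bayes classifier via a Pipeline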
tfidf_ngrams = TfidfVectorizer(preprocessor=preprocessor,
analyzer="word")
clf = MultinomialNB()
pipeline = Pipeline([('tfidf', tfidf_ngrams), ('clf', clf)])
if params:
pipeline.set_params(**params)
return pipeline
def train_model(clf, X, Y, name="NB ngram", plot=False):
# create it again for plotting
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, indices=True, random_state=0)
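# 10 random 70/30 train/test splits; the reported scores are averaged over them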
train_errors = []
test_errors = []
scores = []
pr_scores = []
precisions, recalls, thresholds = [], [], []
clfs = [] # just to later get the median
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, 1])
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
if plot:
scores_to_sort = pr_scores
median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]
plot_pr(pr_scores[median], name, phase, precisions[median],
recalls[median], label=name)
log_false_positives(clfs[median], X_test, y_test, name)
summary = (np.mean(scores), np.std(scores),
np.mean(pr_scores), np.std(pr_scores))
print "%.3f\t%.3f\t%.3f\t%.3f\t" % summary
return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
Y_hat = clf.predict(X)
wrong_idx = Y_hat != Y
X_wrong = X[wrong_idx]
Y_wrong = Y[wrong_idx]
Y_hat_wrong = Y_hat[wrong_idx]
for idx in xrange(len(X_wrong)):
print "clf.predict('%s')=%i instead of %i" %\
(X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx])
def get_best_model():
best_params = dict(tfidf__ngram_range=(1, 2),
tfidf__min_df=1,
tfidf__stop_words=None,
tfidf__smooth_idf=False,
tfidf__use_idf=False,
tfidf__sublinear_tf=True,
tfidf__binary=False,
clf__alpha=0.01,
)
best_clf = create_ngram_model(best_params)
return best_clf
if __name__ == "__main__":
X_orig, Y_orig = load_sanders_data()
classes = np.unique(Y_orig)
for c in classes:
print "#%s: %i" % (c, sum(Y_orig == c))
print "== Pos vs. neg =="
pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
X = X_orig[pos_neg]
Y = Y_orig[pos_neg]
Y = tweak_labels(Y, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print "== Pos/neg vs. irrelevant/neutral =="
X = X_orig
Y = tweak_labels(Y_orig, ["positive", "negative"])
# best_clf = grid_search_model(create_union_model, X, Y, name="sent vs
# rest", plot=True)
train_model(get_best_model(), X, Y, name="pos+neg vs rest", plot=True)
print "== Pos vs. rest =="
X = X_orig
Y = tweak_labels(Y_orig, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs rest",
plot=True)
print "== Neg vs. rest =="
X = X_orig
Y = tweak_labels(Y_orig, ["negative"])
train_model(get_best_model(), X, Y, name="neg vs rest",
plot=True)
print "time spent:", time.time() - start_time
| mit |
Ademan/NumPy-GSoC | numpy/lib/npyio.py | 4 | 56059 | __all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import itertools
import warnings
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
from _compiled_base import packbits, unpackbits
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
_string_like = _is_string_like
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
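# Older gzip.GzipFile objects lack usable seek/tell, so forward-only versions are
# patched onto the instance; np.load needs them to back up after probing the magic prefix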
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError, "Illegal argument"
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = gzip.GzipFile(f)
if sys.version_info[0] >= 3:
import types
f.seek = types.MethodType(seek, f)
f.tell = types.MethodType(tell, f)
else:
import new
f.seek = new.instancemethod(seek, f)
f.tell = new.instancemethod(tell, f)
return f
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
self._obj = obj
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError, key
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ".npy" extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ".npy" extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ".npy" extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.npyio.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
_zip = zipfile.ZipFile(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.read(key)
if bytes.startswith(format.MAGIC_PREFIX):
value = BytesIO(bytes)
return format.read_array(value)
else:
return bytes
else:
raise KeyError, "%s is not a file in the archive" % key
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load a pickled, ``.npy``, or ``.npz`` binary file.
Parameters
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap`). The mode has no effect for pickled or
zipped files.
A memory-mapped array is stored on disk, and not directly loaded
into memory. However, it can be accessed and sliced like any
ndarray. Memory mapping is especially useful for accessing
small fragments of large files without reading the entire file
into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever is stored in the
pickle is returned.
- If the file is a ``.npy`` file, then an array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
if isinstance(file, basestring):
fid = open(file, "rb")
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
else:
fid = file
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz)
return NpzFile(fid)
elif magic == format.MAGIC_PREFIX: # .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else: # Try a pickle
try:
return _cload(fid)
except:
raise IOError, \
"Failed to interpret file %s as a pickle" % repr(file)
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` compressed archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
else:
fid = file
arr = np.asanyarray(arr)
format.write_array(fid, arr)
def savez(file, *args, **kwds):
"""
Save several arrays into a single, compressed file in ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
are given, the corresponding variable names, in the ``.npz`` file will
match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
\\*args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
\\*\\*kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. Each file contains one variable in ``.npy``
format. For a description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\*\\*kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError, "Cannot use un-named variables and keyword %s" % key
namedict[key] = val
zip = zipfile.ZipFile(file, mode="w")
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.iteritems():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is ``.gz`` or
``.bz2``, the file is first decompressed.
dtype : dtype, optional
Data type of the resulting array. If this is a record data-type,
the resulting array will be 1-dimensional, and each row will be
interpreted as an element of the array. In this case, the number
of columns used must match the number of fields in the data-type.
comments : str, optional
The character used to indicate the start of a comment.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data:
``converters = {3: lambda s: float(s or 0)}``.
skiprows : int, optional
Skip the first `skiprows` lines.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. Default is False.
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads Matlab(R) data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
if delimiter is not None:
delimiter = asbytes(delimiter)
user_converters = converters
if usecols is not None:
usecols = list(usecols)
isstring = False
if _is_string_like(fname):
isstring = True
if fname.endswith('.gz'):
import gzip
fh = seek_gzip_factory(fname)
elif fname.endswith('.bz2'):
import bz2
fh = bz2.BZ2File(fname)
else:
fh = open(fname, 'U')
elif hasattr(fname, 'readline'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
return [dt.base] * int(np.prod(dt.shape))
else:
types = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt = flatten_dtype(tp)
types.extend(flat_dt)
return types
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip()
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in xrange(skiprows):
fh.readline()
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
while not first_vals:
first_line = fh.readline()
if not first_line: # EOF reached
raise IOError('End-of-file reached before encountering data.')
first_vals = split_line(first_line)
N = len(usecols or first_vals)
dtype_types = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in xrange(N)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).iteritems():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
X.append(tuple([conv(val) for (conv, val) in zip(converters, vals)]))
finally:
if isstring:
fh.close()
if len(dtype_types) > 1:
# We're dealing with a structured array, with a dtype such as
# [('x', int), ('y', [('s', int), ('t', float)])]
#
# First, create the array using a flattened dtype:
# [('x', int), ('s', int), ('t', float)]
#
# Then, view the array using the specified dtype.
try:
X = np.array(X, dtype=np.dtype([('', t) for t in dtype_types]))
X = X.view(dtype)
except TypeError:
# In the case we have an object dtype
X = np.array(X, dtype=dtype)
else:
X = np.array(X, dtype)
X = np.squeeze(X)
if unpack:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n'):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str
Character separating columns.
newline : str
.. versionadded:: 2.0
Character separating lines.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces the result to be preceded by + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (e.g. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
if _is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
# `fmt` can be a string with multiple insertion points or a list of formats.
# E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif type(fmt) is str:
if fmt.count('%') == 1:
fmt = [fmt, ]*ncol
format = delimiter.join(fmt)
elif fmt.count('%') != ncol:
raise AttributeError('fmt has wrong number of %% formats. %s'
% fmt)
else:
format = fmt
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
import re
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
if not hasattr(file, "read"):
file = open(file, 'rb')
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skiprows` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is `.gz` or
`.bz2`, the file is first decompressed.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skip_header : int, optional
The numbers of lines to skip at the beginning of the file.
skip_footer : int, optional
The numbers of lines to skip at the end of the file
converters : variable or None, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable or None, optional
The set of strings corresponding to missing data.
filling_values : variable or None, optional
The set of values to be used as default when the data are missing.
usecols : sequence or None, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skiprows` lines.
If `names` is a sequence or a single string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used to replace white spaces in the variable names.
By default, '_' is used.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
* When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (otherwise a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
Examples
--------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
errmsg = "The input argument 'converter' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
if isinstance(fname, basestring):
fhd = np.lib._datasource.open(fname, 'U')
elif not hasattr(fname, 'read'):
raise TypeError("The input should be a string or a filehandle. "\
"(got %s instead)" % type(fname))
else:
fhd = fname
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn("The use of `skiprows` is deprecated.\n"\
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
fhd.readline()
# Keep on until we find the first valid values
first_values = None
while not first_values:
first_line = fhd.readline()
if not first_line:
raise IOError('End-of-file reached before encountering data.')
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
names = dtype.names
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn("The use of `missing` is deprecated.\n"\
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i].update(conv, locked=True,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = map(itemgetter(i), rows)
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = itertools.imap(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
if len(invalid) > 0:
nbrows = len(rows)
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbrows -= skip_footer
errmsg = [template % (i + skip_header + 1, nb)
for (i, nb) in invalid if i < nbrows]
else:
errmsg = [template % (i + skip_header + 1, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
else:
rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = zip(names, column_types)
mdtype = zip(names, [np.bool] * len(column_types))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
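# Illustrative sketch (added comment, not part of the original source): how the
# `missing_values` / `filling_values` machinery above behaves for a simple,
# hypothetical input.  Values shown are approximate reprs.
#
#     >>> from StringIO import StringIO
#     >>> s = StringIO("1,N/A,3\n4,5,N/A")
#     >>> np.genfromtxt(s, delimiter=",", missing_values="N/A",
#     ...               filling_values=-1)
#
# Every "N/A" token is recognised as missing and replaced by the filling value,
# giving roughly array([[ 1., -1.,  3.], [ 4.,  5., -1.]]).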
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
kwargs.update(dtype=kwargs.get('dtype', None),  # default dtype from the 'dtype' kwarg (matching recfromtxt), not 'update'
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/pandas/core/groupby.py | 7 | 147947 |
import types
from functools import wraps
import numpy as np
import datetime
import collections
import warnings
import copy
from pandas.compat import(
zip, range, long, lzip,
callable, map
)
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.compat.numpy import _np_version_under1p8
from pandas.types.common import (_DATELIKE_DTYPES,
is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype,
is_datetime_or_timedelta_dtype,
is_bool, is_integer_dtype,
is_complex_dtype,
is_bool_dtype,
is_scalar,
is_list_like,
_ensure_float64,
_ensure_platform_int,
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_float)
from pandas.types.cast import _possibly_downcast_to_dtype
from pandas.types.missing import isnull, notnull, _maybe_fill
from pandas.core.common import _values_from_object, AbstractMethodError
from pandas.core.base import (PandasObject, SelectionMixin, GroupByError,
DataError, SpecificationError)
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import (Index, MultiIndex, CategoricalIndex,
_ensure_index)
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import (cache_readonly, Substitution, Appender,
make_signature, deprecate_kwarg)
from pandas.formats.printing import pprint_thing
from pandas.util.validators import validate_kwargs
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.config import option_context
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_doc_template = """
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
pandas.Panel.%(name)s
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile',
'fillna',
'mad',
'any', 'all',
'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'unique'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift'])
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
_local_template = "Compute %(f)s of group values"
@Substitution(name='groupby', f=name)
@Appender(_doc_template)
@Appender(_local_template)
def f(self):
self._set_group_selection()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result._convert(datetime=True)
return result
f.__name__ = name
return f
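# Added note (not part of the original source): _groupby_function is a small
# factory.  Later in this file it is used as, for example,
#     sum = _groupby_function('sum', 'add', np.sum)
# which first tries the Cython aggregation under the alias 'add' and, if that
# raises, falls back to a Python-level aggregate built from npfunc.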
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target
object
This specification will select a column via the key parameter, or if the
level and/or axis parameters are given, a level of the index of the target
object.
These are local specifications and will override 'global' settings,
that is the parameters axis and level which are passed to the groupby
itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
of available frequencies, please see `here
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`_.
axis : number/name of the axis, defaults to 0
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
Syntactic sugar for ``df.groupby('A')``
>>> df.groupby(Grouper(key='A'))
Specify a resample operation on the column 'date'
>>> df.groupby(Grouper(key='date', freq='60s'))
Specify a resample operation on the level 'date' on the columns axis
with a frequency of 60s
>>> df.groupby(Grouper(level='date', freq='60s', axis=1))
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=0, sort=False):
self.key = key
self.level = level
self.freq = freq
self.axis = axis
self.sort = sort
self.grouper = None
self.obj = None
self.indexer = None
self.binner = None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
self.grouper, exclusions, self.obj = _get_grouper(self.obj, [self.key],
axis=self.axis,
level=self.level,
sort=self.sort)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, setup the internal grouper
for this particular specification
Parameters
----------
obj : the subject object
sort : bool, default False
whether the resulting grouper should be sorted
"""
if self.key is not None and self.level is not None:
raise ValueError(
"The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
ax = Index(ax.get_level_values(
level), name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError(
"The level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
# use stable sort to support first, last, nth
indexer = self.indexer = ax.argsort(kind='mergesort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis,
convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
""" default to the standard binner here """
group_axis = obj._get_axis(self.axis)
return Grouping(group_axis, None, obj=obj, name=self.key,
level=self.level, sort=self.sort, in_axis=False)
@property
def groups(self):
return self.grouper.groups
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = 'plot'
return self._groupby.apply(f)
def __getattr__(self, name):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
class _GroupBy(PandasObject, SelectionMixin):
_group_selection = None
_apply_whitelist = frozenset([])
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False, **kwargs):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.mutated = kwargs.pop('mutated', False)
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys,
axis=axis,
level=level,
sort=sort,
mutated=self.mutated)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
# we accept no other args
validate_kwargs('group', kwargs, {})
def __len__(self):
return len(self.groups)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
we create the grouper on instantiation
sub-classes may have a different policy
"""
pass
@property
def groups(self):
""" dict {group name -> group labels} """
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
safe get multiple indices, translate keys for
datelike to underlying repr
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp, datetime.datetime)):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = ("must supply a tuple to get_group with multiple"
" grouping keys")
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError:
# turns out it wasn't a tuple
msg = ("must supply a a same-length tuple to get_group"
" with multiple grouping keys")
raise ValueError(msg)
converters = [get_converter(s) for s in index_sample]
names = [tuple([f(n) for f, n in zip(converters, name)])
for name in names]
else:
converter = get_converter(index_sample)
names = [converter(name) for name in names]
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection. Used for methods needing to return info on
each group regardless of whether a group selection was previously set.
"""
if self._group_selection is not None:
self._group_selection = None
# GH12839 clear cached selection too when changing group selection
self._reset_cache('_selected_obj')
def _set_group_selection(self):
"""
Create group based selection. Used when selection is not passed
directly but instead via a grouper.
"""
grp = self.grouper
if self.as_index and getattr(grp, 'groupings', None) is not None and \
self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings
if g.level is None and g.in_axis]
if len(groupers):
self._group_selection = ax.difference(Index(groupers)).tolist()
# GH12839 clear selected obj cache when group selection changes
self._reset_cache('_selected_obj')
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(
self._get_indices(self.grouper.result_index)))
result.set_axis(self.axis, index)
result = result.sort_index(axis=self.axis)
result.set_axis(self.axis, self.obj._get_axis(self.axis))
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
plot = property(GroupByPlot)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to setup the selection
# as are not passed directly but in the grouper
self._set_group_selection()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis or \
kwargs_with_axis['axis'] is None:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so need to raise
# ValueError
# if we don't have this method to indicated to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name,
*args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj.take(inds, axis=self.axis, convert=False)
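# Illustrative sketch (added comment, not part of the original source), using
# hypothetical data:
#
#     >>> df = DataFrame({'A': [1, 1, 2], 'B': [10, 20, 30]})
#     >>> df.groupby('A').get_group(1)
#
# returns the two rows whose 'A' value is 1 (B equal to 10 and 20) as a
# DataFrame, preserving their original index labels.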
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
@Substitution(name='groupby')
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function (f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform"""
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their callable functions prior, this
# wouldn't be needed
if args or kwargs:
if callable(func):
@wraps(func)
def f(g):
with np.errstate(all='ignore'):
return func(g, *args, **kwargs)
else:
raise ValueError('func must be a callable if args or '
'kwargs are supplied')
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment', None):
return self._python_apply_general(f)
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(
keys,
values,
not_indexed_same=mutated or self.mutated)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
def _cumcount_array(self, ascending=True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Note
----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = _get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].astype(np.int64, copy=False)
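# Worked example (added comment, not part of the original source) of the
# run-length trick above, assuming already-sorted group ids:
#     ids  = [0, 0, 0, 1, 1]
#     run  = [T, F, F, T, F]            # True marks the start of each group
#     rep  = [3, 2]                     # group lengths
#     out  = (~run).cumsum()            # [0, 1, 2, 2, 3]
#     out -= repeat(out[run], rep)      # [0, 1, 2, 0, 1] -> 0..len-1 per group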
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
we may have roundtripped thru object in the mean-time
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not is_scalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_transform(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.transform(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_transformed_output(output, names)
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = _ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise AbstractMethodError(self)
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in values:
if v is not None:
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
# this is a very unfortunate situation
# we have a multi-index that is NOT lexsorted
# and we have a result which is duplicated
# we can't reindex, so we resort to this
# GH 14776
if isinstance(ax, MultiIndex) and not ax.is_unique:
result = result.take(result.index.get_indexer_for(
ax.values).unique(), axis=self.axis)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
values = reset_identity(values)
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
values = reset_identity(values)
result = concat(values, axis=self.axis)
if (isinstance(result, Series) and
getattr(self, 'name', None) is not None):
result.name = self.name
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype='int64')
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
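# Illustrative sketch (added comment, not part of the original source):
# _apply_filter backs the groupby .filter() method.  With dropna=False the rows
# of groups failing the predicate are kept but masked to NaN.  Hypothetical data:
#
#     >>> df = DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 10]})
#     >>> df.groupby('key').filter(lambda g: g['val'].sum() > 5, dropna=False)
#
# Group 'a' (sum 3) fails the predicate, so its rows come back as NaN, while
# group 'b' (sum 10) is returned unchanged.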
class GroupBy(_GroupBy):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
def irow(self, i):
"""
DEPRECATED. Use ``.nth(i)`` instead
"""
# 10177
warnings.warn("irow(i) is deprecated. Please use .nth(i)",
FutureWarning, stacklevel=2)
return self.nth(i)
@Substitution(name='groupby')
@Appender(_doc_template)
def count(self):
"""Compute count of group, excluding missing values"""
# defined here for API doc
raise NotImplementedError
@Substitution(name='groupby')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
nv.validate_groupby_func('mean', args, kwargs)
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_group_selection()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_group_selection()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def std(self, ddof=1, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
# TODO: implement at Cython level?
nv.validate_groupby_func('std', args, kwargs)
return np.sqrt(self.var(ddof=ddof))
@Substitution(name='groupby')
@Appender(_doc_template)
def var(self, ddof=1, *args, **kwargs):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_groupby_func('var', args, kwargs)
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_group_selection()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
return self.std(ddof=ddof) / np.sqrt(self.count())
@Substitution(name='groupby')
@Appender(_doc_template)
def size(self):
"""Compute group sizes"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
@Substitution(name='groupby')
@Appender(_doc_template)
def ohlc(self):
"""
Compute sum of values, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
@Substitution(name='groupby')
@Appender(_doc_template)
def resample(self, rule, *args, **kwargs):
"""
Provide resampling when using a TimeGrouper
Return a new grouper with our resampler appended
"""
from pandas.tseries.resample import get_resampler_for_grouping
return get_resampler_for_grouping(self, rule, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling
functionality per group
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def expanding(self, *args, **kwargs):
"""
Return an expanding grouper, providing expanding
functionality per group
"""
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(self, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def pad(self, limit=None):
"""
Forward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self.apply(lambda x: x.ffill(limit=limit))
ffill = pad
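# Illustrative sketch (added comment, not part of the original source): forward
# filling happens within each group only, so values never leak across group
# boundaries.  Hypothetical data:
#
#     >>> df = DataFrame({'key': ['a', 'a', 'b', 'b'],
#     ...                 'val': [1.0, np.nan, np.nan, 4.0]})
#     >>> df.groupby('key')['val'].ffill()
#
# yields roughly [1.0, 1.0, NaN, 4.0]: the NaN in group 'b' has no earlier
# value in its own group to copy from.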
@Substitution(name='groupby')
@Appender(_doc_template)
def backfill(self, limit=None):
"""
Backward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self.apply(lambda x: x.bfill(limit=limit))
bfill = backfill
@Substitution(name='groupby')
@Appender(_doc_template)
def nth(self, n, dropna=None):
"""
Take the nth row from each group if n is an int, or a subset of rows
if n is a list of ints.
If dropna is set, the nth non-null row is taken; dropna is either
truthy (for a Series) or 'all'/'any' (for a DataFrame).
This is equivalent to calling dropna(how=dropna) before the
groupby.
Parameters
----------
n : int or list of ints
a single nth value for the row or a list of nth values
dropna : None or str, optional
apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
B
A
1 NaN
2 3.0
>>> g.nth(1)
B
A
1 2.0
2 5.0
>>> g.nth(-1)
B
A
1 4.0
2 5.0
>>> g.nth([0, 1])
B
A
1 NaN
1 2.0
2 3.0
2 5.0
Specifying ``dropna`` allows counting while ignoring NaN
>>> g.nth(0, dropna='any')
B
A
1 2.0
2 3.0
NaN values indicate that a group is exhausted when using dropna
>>> g.nth(3, dropna='any')
B
A
1 NaN
2 NaN
Specifying ``as_index=False`` in ``groupby`` keeps the original index.
>>> df.groupby('A', as_index=False).nth(1)
A B
1 1 2.0
4 2 5.0
"""
if isinstance(n, int):
nth_values = [n]
elif isinstance(n, (set, list, tuple)):
nth_values = list(set(n))
if dropna is not None:
raise ValueError(
"dropna option with a list of nth values is not supported")
else:
raise TypeError("n needs to be an int or a list/set/tuple of ints")
nth_values = np.array(nth_values, dtype=np.intp)
self._set_group_selection()
if not dropna:
mask = np.in1d(self._cumcount_array(), nth_values) | \
np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)
out = self._selected_obj[mask]
if not self.as_index:
return out
ids, _, _ = self.grouper.group_info
out.index = self.grouper.result_index[ids[mask]]
return out.sort_index() if self.sort else out
if isinstance(self._selected_obj, DataFrame) and \
dropna not in ['any', 'all']:
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available
# (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
else:
# create a grouper with the original parameters, but on the dropped
# object
grouper, _, _ = _get_grouper(dropped, key=self.keys,
axis=self.axis, level=self.level,
sort=self.sort,
mutated=self.mutated)
grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
sizes, result = grb.size(), grb.nth(n)
mask = (sizes < max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or \
len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
@Substitution(name='groupby')
@Appender(_doc_template)
def cumcount(self, ascending=True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_group_selection()
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
@Substitution(name='groupby')
@Appender(_doc_template)
def cumprod(self, axis=0, *args, **kwargs):
"""Cumulative product for each group"""
nv.validate_groupby_func('cumprod', args, kwargs)
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis))
return self._cython_transform('cumprod')
@Substitution(name='groupby')
@Appender(_doc_template)
def cumsum(self, axis=0, *args, **kwargs):
"""Cumulative sum for each group"""
nv.validate_groupby_func('cumsum', args, kwargs)
if axis != 0:
return self.apply(lambda x: x.cumsum(axis=axis))
return self._cython_transform('cumsum')
@Substitution(name='groupby')
@Appender(_doc_template)
def shift(self, periods=1, freq=None, axis=0):
"""
Shift each group by periods observations
Parameters
----------
periods : integer, default 1
number of periods to shift
freq : frequency string
axis : axis to shift, default 0
"""
if freq is not None or axis != 0:
return self.apply(lambda x: x.shift(periods, freq, axis))
labels, _, ngroups = self.grouper.group_info
# filled in by Cython
indexer = np.zeros_like(labels)
_algos.group_shift_indexer(indexer, labels, ngroups, periods)
output = {}
for name, obj in self._iterate_slices():
output[name] = algos.take_nd(obj.values, indexer)
return self._wrap_transformed_output(output)
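# Illustrative sketch (added comment, not part of the original source): shift
# operates within each group, so every group starts with NaN.  Hypothetical data:
#
#     >>> df = DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 3]})
#     >>> df.groupby('key')['val'].shift(1)
#
# yields roughly [NaN, 1.0, NaN]; the first row of each group has nothing to
# shift from.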
@Substitution(name='groupby')
@Appender(_doc_template)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
self._reset_group_selection()
mask = self._cumcount_array() < n
return self._selected_obj[mask]
@Substitution(name='groupby')
@Appender(_doc_template)
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').head(1)
A B
0 a 1
2 b 1
"""
self._reset_group_selection()
mask = self._cumcount_array(ascending=False) < n
return self._selected_obj[mask]
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds
the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True,
mutated=False):
self._filter_empty_groups = self.compressed = len(groupings) != 1
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.mutated = mutated
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = self.mutated
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except Exception:
# let the slow path below surface this error to the caller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index)
for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
ids, _, ngroup = self.group_info
ids = _ensure_platform_int(ids)
out = np.bincount(ids[ids != -1], minlength=ngroup or None)
return Series(out, index=self.result_index, dtype='int64')
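# Illustrative sketch (assumption): the np.bincount call above counts how many
# rows carry each group id, skipping -1 (rows with null keys), e.g.
#
#   >>> ids = np.array([0, 1, 0, -1, 1, 1])
#   >>> np.bincount(ids[ids != -1], minlength=2)
#   array([2, 3])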
@cache_readonly
def _max_groupsize(self):
"""
Compute size of largest group
"""
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby)
@cache_readonly
def is_monotonic(self):
# return if my group orderings are monotonic
return Index(self.group_info[0]).is_monotonic
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = _ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape,
sort=True, xnull=True)
return _compress_group_index(group_index, sort=self.sort)
ping = self.groupings[0]
return ping.labels, np.arange(len(ping.group_index))
@cache_readonly
def ngroups(self):
return len(self.result_index)
@property
def recons_labels(self):
comp_ids, obs_ids, _ = self.group_info
labels = (ping.labels for ping in self.groupings)
return decons_obs_group_ids(comp_ids,
obs_ids, self.shape, labels, xnull=True)
@cache_readonly
def result_index(self):
if not self.compressed and len(self.groupings) == 1:
return self.groupings[0].group_index.rename(self.names[0])
return MultiIndex(levels=[ping.group_index for ping in self.groupings],
labels=self.recons_labels,
verify_integrity=False,
names=self.names)
def get_group_levels(self):
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
name_list = []
for ping, labels in zip(self.groupings, self.recons_labels):
labels = _ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
# ------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'aggregate': {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'ohlc': 'group_ohlc',
},
'transform': {
'cumprod': 'group_cumprod',
'cumsum': 'group_cumsum',
}
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
def _get_cython_function(self, kind, how, values, is_numeric):
dtype_str = values.dtype.name
def get_func(fname):
# see if there is a fused-type version of function
# only valid for numeric
f = getattr(_algos, fname, None)
if f is not None and is_numeric:
return f
# otherwise find dtype-specific version, falling back to object
for dt in [dtype_str, 'object']:
f = getattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
ftype = self._cython_functions[kind][how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this"
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def _cython_operation(self, kind, values, how, axis):
assert kind in ['transform', 'aggregate']
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError("arity of more than 1 is not "
"supported for the 'how' argument")
out_shape = (self.ngroups,) + values.shape[1:]
is_numeric = is_numeric_dtype(values.dtype)
if is_datetime_or_timedelta_dtype(values.dtype):
values = values.view('int64')
is_numeric = True
elif is_bool_dtype(values.dtype):
values = _ensure_float64(values)
elif is_integer_dtype(values):
values = values.astype('int64', copy=False)
elif is_numeric and not is_complex_dtype(values):
values = _ensure_float64(values)
else:
values = values.astype(object)
try:
func, dtype_str = self._get_cython_function(
kind, how, values, is_numeric)
except NotImplementedError:
if is_numeric:
values = _ensure_float64(values)
func, dtype_str = self._get_cython_function(
kind, how, values, is_numeric)
else:
raise
if is_numeric:
out_dtype = '%s%d' % (values.dtype.kind, values.dtype.itemsize)
else:
out_dtype = 'object'
labels, _, _ = self.group_info
if kind == 'aggregate':
result = _maybe_fill(np.empty(out_shape, dtype=out_dtype),
fill_value=np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(
result, counts, values, labels, func, is_numeric)
elif kind == 'transform':
result = _maybe_fill(np.empty_like(values, dtype=out_dtype),
fill_value=np.nan)
# temporary storage for running-total type transforms
accum = np.empty(out_shape, dtype=out_dtype)
result = self._transform(
result, accum, values, labels, func, is_numeric)
if is_integer_dtype(result):
if len(result[result == tslib.iNaT]) > 0:
result = result.astype('float64')
result[result == tslib.iNaT] = np.nan
if kind == 'aggregate' and \
self._filter_empty_groups and not counts.all():
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
_ensure_object(result),
(counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def aggregate(self, values, how, axis=0):
return self._cython_operation('aggregate', values, how, axis)
def transform(self, values, how, axis=0):
return self._cython_operation('transform', values, how, axis)
def _aggregate(self, result, counts, values, comp_ids, agg_func,
is_numeric):
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently "
"limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def _transform(self, result, accum, values, comp_ids, transform_func,
is_numeric):
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently "
"limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
transform_func(result[:, :, i], chunk,
comp_ids, accum)
else:
transform_func(result, values, comp_ids, accum)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = self._is_builtin_func(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _get_group_index_sorter(group_index, ngroups)
obj = obj.take(indexer, convert=False)
group_index = algos.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
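# Illustrative sketch (assumption): a worked example of the linear scan above.
# Each returned offset is the position just past the last value that falls in
# the corresponding bin.
#
#   >>> values = np.array([1, 2, 4, 5, 7])
#   >>> binner = np.array([0, 3, 6, 8])
#   >>> generate_bins_generic(values, binner, closed='left')
#   array([2, 4, 5])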
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False, mutated=False):
self.bins = _ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
self.mutated = mutated
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start, edge: data._slice(
slice(start, edge), axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start, edge: data[slice(start, edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start, edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start, None)
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def group_info(self):
ngroups = self.ngroups
obs_group_ids = np.arange(ngroups)
rep = np.diff(np.r_[0, self.bins])
rep = _ensure_platform_int(rep)
if ngroups == len(self.bins):
comp_ids = np.repeat(np.arange(ngroups), rep)
else:
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
return comp_ids.astype('int64', copy=False), \
obs_group_ids.astype('int64', copy=False), ngroups
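# Illustrative sketch (assumption): how comp_ids are expanded from bin edges.
# With bins = [2, 5] over five rows, rows 0-1 get group id 0 and rows 2-4 get
# group id 1.
#
#   >>> bins = np.array([2, 5])
#   >>> rep = np.diff(np.r_[0, bins])            # array([2, 3])
#   >>> np.repeat(np.arange(len(bins)), rep)     # array([0, 0, 1, 1, 1])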
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
if len(self.binlabels) != 0 and isnull(self.binlabels[0]):
return self.binlabels[1:]
return self.binlabels
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
return [Grouping(lvl, lvl, in_axis=False, level=None, name=name)
for lvl, name in zip(self.levels, self.names)]
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
# ----------------------------------------------------------------------
# cython aggregation
_cython_functions = copy.deepcopy(BaseGrouper._cython_functions)
_cython_functions['aggregate'].pop('median')
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
in_axis : if the Grouping is a column in self.obj and hence among
Groupby.exclusions list
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True, in_axis=False):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
self.in_axis = in_axis
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._should_compress = True
# we have a single grouper which may be a myriad of things,
# some of which are dependent on the passing in level
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
if self.name is None:
self.name = index.names[level]
self.grouper, self._labels, self._group_index = \
index._get_grouper_for_level(self.grouper, level)
else:
if self.grouper is None and self.name is not None:
self.grouper = self.obj[self.name]
elif isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif is_categorical_dtype(self.grouper):
# must have an ordered categorical
if self.sort:
if not self.grouper.ordered:
# technically we cannot group on an unordered
# Categorical
# but this is a user convenience to do so; the ordering
# is preserved and if it's a reduction it doesn't make
# any difference
pass
# fix bug #GH8868 sort=False being ignored in categorical
# groupby
else:
cat = self.grouper.unique()
self.grouper = self.grouper.reorder_categories(
cat.categories)
# we make a CategoricalIndex out of the cat grouper
# preserving the categories / ordered attributes
self._labels = self.grouper.codes
c = self.grouper.categories
self._group_index = CategoricalIndex(
Categorical.from_codes(np.arange(len(c)),
categories=c,
ordered=self.grouper.ordered))
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# we are done
if isinstance(self.grouper, Grouping):
self.grouper = self.grouper.grouper
# no level passed
elif not isinstance(self.grouper,
(Series, Index, Categorical, np.ndarray)):
if getattr(self.grouper, 'ndim', 1) != 1:
t = self.name or str(type(self.grouper))
raise ValueError("Grouper for '%s' not 1-dimensional" % t)
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have
# Timestamp-like values
if getattr(self.grouper, 'dtype', None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping({0})'.format(self.name)
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
values = _ensure_categorical(self.grouper)
return values._reverse_indexer()
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._labels is None or self._group_index is None:
labels, uniques = algos.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
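# Illustrative sketch (assumption): _make_labels leans on factorization, which
# maps each key to an integer label and returns the unique keys, e.g.
#
#   >>> labels, uniques = algos.factorize(np.array([3, 1, 3, 2]), sort=True)
#   >>> labels
#   array([2, 0, 2, 1])
#   >>> uniques
#   array([1, 2, 3])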
@cache_readonly
def groups(self):
return self.index.groupby(Categorical.from_codes(self.labels,
self.group_index))
def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
mutated=False):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis, level, and sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
# allow level to be a length-one list-like object
# (e.g., level=[0])
# GH 13901
if is_list_like(level):
nlevels = len(level)
if nlevels == 1:
level = level[0]
elif nlevels == 0:
raise ValueError('No group keys passed!')
else:
raise ValueError('multiple levels only valid with '
'MultiIndex')
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0 or level < -1:
raise ValueError('level > 0 or level < -1 only valid with '
' MultiIndex')
level = None
key = group_axis
# a passed-in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
match_axis_length = False
else:
keys = key
match_axis_length = len(keys) == len(group_axis)
# what are we after, exactly?
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_groupers = any(isinstance(g, Grouper) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if not any_callable and not all_in_columns and \
not any_arraylike and not any_groupers and \
match_axis_length and level is None:
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
# if the actual grouper should be obj[key]
def is_in_axis(key):
if not _is_label_like(key):
try:
obj._data.items.get_loc(key)
except Exception:
return False
return True
# if the grouper is obj[name]
def is_in_obj(gpr):
try:
return id(gpr) == id(obj[gpr.name])
except Exception:
return False
for i, (gpr, level) in enumerate(zip(keys, levels)):
if is_in_obj(gpr): # df.groupby(df['name'])
in_axis, name = True, gpr.name
exclusions.append(name)
elif is_in_axis(gpr): # df.groupby('name')
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
elif isinstance(gpr, Grouper) and gpr.key is not None:
# Add key to exclusions
exclusions.append(gpr.key)
in_axis, name = False, None
else:
in_axis, name = False, None
if is_categorical_dtype(gpr) and len(gpr) != len(obj):
raise ValueError("Categorical dtype grouper must "
"have len(grouper) == len(data)")
# create the Grouping
# allow passing the actual Grouping as the gpr
ping = Grouping(group_axis,
gpr,
obj=obj,
name=name,
level=level,
sort=sort,
in_axis=in_axis) \
if not isinstance(gpr, Grouping) else gpr
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)
return grouper, exclusions, obj
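# Illustrative sketch (assumption): the resolution above makes the following
# spellings produce equivalent groupers for a frame with a column 'key':
#
#   df.groupby('key')                # resolved via is_in_axis -> obj['key']
#   df.groupby(df['key'])            # resolved via is_in_obj
#   df.groupby(Grouper(key='key'))   # a Grouper carrying its own key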
def _is_label_like(val):
return (isinstance(val, compat.string_types) or
(val is not None and is_scalar(val)))
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper._values
else:
return grouper.reindex(axis)._values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
def _whitelist_method_generator(klass, whitelist):
"""
Yields all GroupBy member defs for DataFrame/Series names in _whitelist.
Parameters
----------
klass - class where members are defined. Should be Series or DataFrame
whitelist - list of names of klass methods to be constructed
Returns
-------
The generator yields a sequence of strings, each suitable for exec'ing,
that define implementations of the named methods for DataFrameGroupBy
or SeriesGroupBy.
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
method_wrapper_template = \
"""def %(name)s(%(sig)s) :
\"""
%(doc)s
\"""
f = %(self)s.__getattr__('%(name)s')
return f(%(args)s)"""
property_wrapper_template = \
"""@property
def %(name)s(self) :
\"""
%(doc)s
\"""
return self.__getattr__('%(name)s')"""
for name in whitelist:
# don't override anything that was explicitly defined
# in the base class
if hasattr(GroupBy, name):
continue
# ugly, but we need the name string itself in the method.
f = getattr(klass, name)
doc = f.__doc__
doc = doc if type(doc) == str else ''
if isinstance(f, types.MethodType):
wrapper_template = method_wrapper_template
decl, args = make_signature(f)
# pass args by name to f because otherwise
# GroupBy._make_wrapper won't know whether
# we passed in an axis parameter.
args_by_name = ['{0}={0}'.format(arg) for arg in args[1:]]
params = {'name': name,
'doc': doc,
'sig': ','.join(decl),
'self': args[0],
'args': ','.join(args_by_name)}
else:
wrapper_template = property_wrapper_template
params = {'name': name, 'doc': doc}
yield wrapper_template % params
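# Illustrative sketch (assumption): for a whitelisted Series method such as
# 'fillna', the generator yields source roughly shaped like the following,
# which the class bodies below exec() to install a thin wrapper deferring to
# GroupBy.__getattr__:
#
#   def fillna(self, value=None, ...):
#       """..."""
#       f = self.__getattr__('fillna')
#       return f(value=value, ...)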
class SeriesGroupBy(GroupBy):
#
# Make class defs of attributes on SeriesGroupBy whitelist
_apply_whitelist = _series_apply_whitelist
for _def_str in _whitelist_method_generator(Series,
_series_apply_whitelist):
exec(_def_str)
@property
def name(self):
"""
since we are a series, we by definition only have
a single name, but it may be the result of a selection or
the name of our object
"""
if self._selection is None:
return self.obj.name
else:
return self._selection
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply an aggregation function or functions to the groups, most likely
yielding a Series, but in some cases a DataFrame, depending on the output
of the aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce DataFrame with column names
determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> series
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mapper = lambda x: x[0] # first letter
>>> grouped = series.groupby(mapper)
>>> grouped.aggregate(np.sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.sum, np.mean, np.std])
mean std sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
... 'total' : np.sum})
result total
b 2.121 3
q 4.95 7
See also
--------
apply, transform
Returns
-------
Series or DataFrame
"""
_level = kwargs.pop('_level', None)
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs,
(_level or 0) + 1)
else:
cyfunc = self._is_cython_func(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
# _level is handled at a higher level
if not _level and isinstance(ret, dict):
from pandas import concat
ret = concat(ret, axis=1)
return ret
agg = aggregate
def _aggregate_multiple_funcs(self, arg, _level):
if isinstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
obj = self
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
# reset the cache so that we
# only include the named selection
if name in self._selected_obj:
obj = copy.copy(obj)
obj._reset_cache()
obj._selection = name
results[name] = obj.aggregate(func)
if isinstance(list(compat.itervalues(results))[0],
DataFrame):
# let higher level handle
if _level:
return results
return list(compat.itervalues(results))[0]
return DataFrame(results, columns=columns)
def _wrap_output(self, output, index, names=None):
""" common agg/transform wrapping logic """
output = output[self.name]
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_aggregated_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.obj.index,
names=names)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self.name, index=keys)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
result = DataFrame(values, index=index).stack()
result.name = self.name
return result
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
func = self._is_cython_func(func) or func
# if string function
if isinstance(func, compat.string_types):
if func in _cython_transforms:
# cythonized transform
return getattr(self, func)(*args, **kwargs)
else:
# cythonized aggregation and merge
return self._transform_fast(
lambda: getattr(self, func)(*args, **kwargs))
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to astype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.astype(common_type)
except:
pass
indexer = self._get_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self, func)
ids, _, ngroup = self.grouper.group_info
cast = (self.size().fillna(0) > 0).any()
out = algos.take_1d(func().values, ids)
if cast:
out = self._try_cast(out, self.obj)
return Series(out, index=self.obj.index, name=self.obj.name)
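# Illustrative sketch (assumption): the take_1d call above broadcasts one
# aggregated value per group back to the original row positions, e.g.
#
#   >>> ids = np.array([0, 1, 0, 1])         # group id of each row
#   >>> per_group = np.array([10., 20.])     # e.g. per-group means
#   >>> algos.take_1d(per_group, ids)
#   array([ 10.,  20.,  10.,  20.])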
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) for name, group in self
if true_and_notnull(group)]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna=True):
""" Returns number of unique elements in the group """
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
assert val.dtype == object, \
'val.dtype must be object, got %s' % val.dtype
val, _ = algos.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
_isnull = lambda a: a == -1
else:
_isnull = isnull
ids, val = ids[sorter], val[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, val[1:] != val[:-1]]
# 1st item of each group is a new unique observation
mask = _isnull(val)
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype('int64', copy=False)
if len(ids):
res = out if ids[0] != -1 else out[1:]
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids] = out
return Series(res,
index=ri,
name=self.name)
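# Illustrative sketch (assumption): a worked example of the counting scheme
# above, with group ids [0, 0, 1, 1] and already-sorted values [5, 5, 7, 8].
#
#   >>> ids = np.array([0, 0, 1, 1]); val = np.array([5, 5, 7, 8])
#   >>> idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]   # [0, 2]
#   >>> inc = np.r_[1, val[1:] != val[:-1]]                      # [1, 0, 1, 1]
#   >>> np.add.reduceat(inc, idx)   # one unique in group 0, two in group 1
#   array([1, 2])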
@deprecate_kwarg('take_last', 'keep',
mapping={True: 'last', False: 'first'})
@Appender(Series.nlargest.__doc__)
def nlargest(self, n=5, keep='first'):
# TODO: when we remove deprecate_kwarg, we can remove these methods
# and add nlargest and nsmallest to _series_apply_whitelist
return self.apply(lambda x: x.nlargest(n=n, keep=keep))
@deprecate_kwarg('take_last', 'keep',
mapping={True: 'last', False: 'first'})
@Appender(Series.nsmallest.__doc__)
def nsmallest(self, n=5, keep='first'):
return self.apply(lambda x: x.nsmallest(n=n, keep=keep))
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
from functools import partial
from pandas.tools.tile import cut
from pandas.tools.merge import _get_join_indexers
if bins is not None and not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return self.apply(Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins)
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algos.factorize(val, sort=True)
else:
cat, bins = cut(val, bins, retbins=True)
# bins[:-1] for backward compat;
# o.w. cat.categories could be better
lab, lev, dropna = cat.codes, bins[:-1], False
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
# new values are where sorted labels change
inc = np.r_[True, lab[1:] != lab[:-1]]
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
labels = list(map(rep, self.grouper.recons_labels)) + [lab[inc]]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self.name]
if dropna:
mask = labels[-1] != -1
if mask.all():
dropna = False
else:
out, labels = out[mask], [label[mask] for label in labels]
if normalize:
out = out.astype('float')
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
if _np_version_under1p8:
mi, ml = algos.factorize(m)
d[ml] = d[ml] - np.bincount(mi)
else:
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, labels[-1] = out[sorter], labels[-1][sorter]
if bins is None:
mi = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
if is_integer_dtype(out):
out = _ensure_int64(out)
return Series(out, index=mi, name=self.name)
# for compat. with algos.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype='bool')
for lab in labels[:-1]:
diff |= np.r_[True, lab[1:] != lab[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin),
np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, labels[-1]]
_, idx = _get_join_indexers(left, right, sort=False, how='left')
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
labels = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
labels.append(left[-1])
mi = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
if is_integer_dtype(out):
out = _ensure_int64(out)
return Series(out, index=mi, name=self.name)
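# Illustrative sketch (assumption): typical shape of the result of the method
# above for a small Series grouped by its index.
#
#   >>> s = Series([1, 1, 2], index=['a', 'a', 'b'])
#   >>> s.groupby(level=0).value_counts()
#   a  1    2
#   b  2    1
#   dtype: int64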
def count(self):
""" Compute count of group, excluding missing values """
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()
mask = (ids != -1) & ~isnull(val)
ids = _ensure_platform_int(ids)
out = np.bincount(ids[mask], minlength=ngroups or None)
return Series(out,
index=self.grouper.result_index,
name=self.name,
dtype='int64')
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(
how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._get_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.get_numeric_data(copy=False)
for block in data.blocks:
result, _ = self.grouper.aggregate(
block.values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
def aggregate(self, arg, *args, **kwargs):
_level = kwargs.pop('_level', None)
result, how = self._aggregate(arg, _level=_level, *args, **kwargs)
if how is None:
return result
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs(
[arg], _level=_level)
result.columns = Index(
result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
return result._convert(datetime=True)
agg = aggregate
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise AbstractMethodError(self)
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors = None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors = e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
if len(keys) == 0:
return DataFrame(index=keys)
key_names = self.grouper.names
# GH12824.
def first_non_None_value(values):
try:
v = next(v for v in values if v is not None)
except StopIteration:
return None
return v
v = first_non_None_value(values)
if v is None:
# GH9684. If all values are None, then this will throw an error.
# We'd prefer it return an empty dataframe.
return DataFrame()
elif isinstance(v, DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
v = first_non_None_value(values)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single value
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.tools.merge import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(
keys, values, not_indexed_same=True,
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as it's faster than concat
# and if we have mi-columns
if isinstance(v.index,
MultiIndex) or key_index is None:
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values, index=key_index,
columns=index)
else:
# GH5788 instead of stacking; concat gets the
# dtypes correct
from pandas.tools.merge import concat
result = concat(values, keys=key_index,
names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values.T, index=v.index,
columns=key_index)
except (ValueError, AttributeError):
# GH1738: values is a list of arrays of unequal lengths;
# fall through to the outer else clause
return Series(values, index=key_index, name=self.name)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
so = self._selected_obj
if (so.ndim == 2 and so.dtypes.isin(_DATELIKE_DTYPES).any()):
result = result._convert(numeric=True)
date_cols = self._selected_obj.select_dtypes(
include=list(_DATELIKE_DTYPES)).columns
date_cols = date_cols.intersection(result.columns)
result[date_cols] = (result[date_cols]
._convert(datetime=True,
coerce=True))
else:
result = result._convert(datetime=True)
return self._reindex_output(result)
# values are not series or array-like but scalars
else:
# only coerce dates if we find at least 1 datetime
coerce = True if any([isinstance(x, Timestamp)
for x in values]) else False
# self.name not passed through to Series as the result
# should not take the name of original selection of columns
return (Series(values, index=key_index)
._convert(datetime=True,
coerce=coerce))
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
from pandas.tools.merge import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except ValueError:
msg = 'transform must return a scalar value for each group'
raise ValueError(msg)
else:
res = path(group)
if isinstance(res, Series):
# we need to broadcast across the
# other dimension; this will preserve dtypes
# GH14457
if not np.prod(group.shape):
continue
elif res.index.is_(obj.index):
r = concat([res] * len(group.columns), axis=1)
r.columns = group.columns
r.index = group.index
else:
r = DataFrame(
np.concatenate([res.values] * len(group.index)
).reshape(group.shape),
columns=group.columns, index=group.index)
applied.append(r)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
return self._set_result_index_ordered(concatenated)
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed DataFrame on each group and
return a DataFrame having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each subframe
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
"""
# optimized transforms
func = self._is_cython_func(func) or func
if isinstance(func, compat.string_types):
if func in _cython_transforms:
# cythonized transform
return getattr(self, func)(*args, **kwargs)
else:
# cythonized aggregation and merge
result = getattr(self, func)(*args, **kwargs)
else:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
obj = self._obj_with_exclusions
# nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
return self._transform_fast(result, obj)
def _transform_fast(self, result, obj):
"""
Fast transform path for aggregations
"""
# if there were groups with no observations (Categorical only?)
# try casting data to original dtype
cast = (self.size().fillna(0) > 0).any()
# for each col, reshape to the size of the original frame
# by take operation
ids, _, ngroup = self.grouper.group_info
output = []
for i, _ in enumerate(result.columns):
res = algos.take_1d(result.iloc[:, i].values, ids)
if cast:
res = self._try_cast(res, obj.iloc[:, i])
output.append(res)
return DataFrame._from_arrays(output, columns=result.columns,
index=obj.index)
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, compat.string_types):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we get the same results
if res.shape == res_fast.shape:
res_r = res.values.ravel()
res_fast_r = res_fast.values.ravel()
mask = notnull(res_r)
if (res_r[mask] == res_fast_r[mask]).all():
path = fast_path
except:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.append(i)
except Exception:
pass
if len(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return DataFrame(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
f : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, 'name', name)
res = func(group, *args, **kwargs)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if is_bool(res) or (is_scalar(res) and isnull(res)):
if res and notnull(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError("filter function returned a %s, "
"but expected a scalar bool" %
type(res).__name__)
return self._apply_filter(indices, dropna)
class DataFrameGroupBy(NDFrameGroupBy):
_apply_whitelist = _dataframe_apply_whitelist
#
# Make class defs of attributes on DataFrameGroupBy whitelist.
for _def_str in _whitelist_method_generator(DataFrame, _apply_whitelist):
exec(_def_str)
_block_agg_axis = 1
@Substitution(name='groupby')
@Appender(SelectionMixin._see_also_template)
@Appender(SelectionMixin._agg_doc)
def aggregate(self, arg, *args, **kwargs):
return super(DataFrameGroupBy, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes should define this to
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if ndim == 2:
if subset is None:
subset = self.obj
return DataFrameGroupBy(subset, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
elif ndim == 1:
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(subset, selection=key,
grouper=self.grouper)
raise AssertionError("invalid ndim for _gotitem")
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if self.axis == 0:
return DataFrame(result, index=obj.columns,
columns=result_index).T
else:
return DataFrame(result, index=obj.index,
columns=result_index)
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
def _insert_inaxis_grouper_inplace(self, result):
# zip in reverse so we can always insert at loc 0
izip = zip(* map(reversed, (
self.grouper.names,
self.grouper.get_group_levels(),
[grp.in_axis for grp in self.grouper.groupings])))
for name, lev, in_axis in izip:
if in_axis:
result.insert(0, name, lev)
def _wrap_aggregated_output(self, output, names=None):
agg_axis = 0 if self.axis == 1 else 1
agg_labels = self._obj_with_exclusions._get_axis(agg_axis)
output_keys = self._decide_output_index(output, agg_labels)
if not self.as_index:
result = DataFrame(output, columns=output_keys)
self._insert_inaxis_grouper_inplace(result)
result = result.consolidate()
else:
index = self.grouper.result_index
result = DataFrame(output, index=index, columns=output_keys)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _wrap_transformed_output(self, output, names=None):
return DataFrame(output, index=self.obj.index)
def _wrap_agged_blocks(self, items, blocks):
if not self.as_index:
index = np.arange(blocks[0].values.shape[1])
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result.consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _reindex_output(self, result):
"""
if we have categorical groupers, then we want to make sure that
we have a fully re-indexed output to the levels. Some levels may not
have participated in the groupings (e.g. they may have all been
nan groups)
This can re-expand the output space
"""
groupings = self.grouper.groupings
if groupings is None:
return result
elif len(groupings) == 1:
return result
elif not any([isinstance(ping.grouper, (Categorical, CategoricalIndex))
for ping in groupings]):
return result
levels_list = [ping.group_index for ping in groupings]
index, _ = MultiIndex.from_product(
levels_list, names=self.grouper.names).sortlevel()
if self.as_index:
d = {self.obj._get_axis_name(self.axis): index, 'copy': False}
return result.reindex(**d)
# GH 13204
# Here, the categorical in-axis groupers, which need to be fully
# expanded, are columns in `result`. An idea is to do:
# result = result.set_index(self.grouper.names)
# .reindex(index).reset_index()
# but special care has to be taken because of possible not-in-axis
# groupers.
# So, we manually select and drop the in-axis grouper columns,
# reindex `result`, and then reset the in-axis grouper columns.
# Select in-axis groupers
in_axis_grps = [(i, ping.name) for (i, ping)
in enumerate(groupings) if ping.in_axis]
g_nums, g_names = zip(*in_axis_grps)
result = result.drop(labels=list(g_names), axis=1)
# Set a temp index and reindex (possibly expanding)
result = result.set_index(self.grouper.result_index
).reindex(index, copy=False)
# Reset in-axis grouper columns
# (using level numbers `g_nums` because level names may not be unique)
result = result.reset_index(level=g_nums)
return result.reset_index(drop=True)
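    # Illustrative sketch (added for clarity; the frame below is hypothetical):
    # the re-expansion only kicks in with more than one grouper, at least one
    # of them categorical; unobserved level combinations then appear as NaN
    # rows in the result:
    #
    #   >>> df = DataFrame({'cat': Categorical(['a', 'a'],
    #   ...                                    categories=['a', 'b']),
    #   ...                 'num': [1, 1],
    #   ...                 'val': [10, 20]})
    #   >>> df.groupby(['cat', 'num']).sum()
    #   # -> one row for ('a', 1) with val 30, plus the unobserved ('b', 1)
    #   #    combination filled with NaN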
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions)
def _apply_to_column_groupbys(self, func):
from pandas.tools.merge import concat
return concat(
(func(col_groupby) for _, col_groupby
in self._iterate_column_groupbys()),
keys=self._selected_obj.columns, axis=1)
def count(self):
""" Compute count of group, excluding missing values """
from functools import partial
from pandas.lib import count_level_2d
from pandas.types.missing import _isnull_ndarraylike as isnull
data, _ = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
val = ((mask & ~isnull(blk.get_values())) for blk in data.blocks)
loc = (blk.mgr_locs for blk in data.blocks)
counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1)
blk = map(make_block, map(counter, val), loc)
return self._wrap_agged_blocks(data.items, list(blk))
from pandas.tools.plotting import boxplot_frame_groupby # noqa
DataFrameGroupBy.boxplot = boxplot_frame_groupby
class PanelGroupBy(NDFrameGroupBy):
@Substitution(name='groupby')
@Appender(SelectionMixin._see_also_template)
@Appender(SelectionMixin._agg_doc)
def aggregate(self, arg, *args, **kwargs):
return super(PanelGroupBy, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self._selected_obj.items
else:
slice_axis = self._selection_list
slicer = lambda x: self._selected_obj[x]
else:
raise NotImplementedError("axis other than 0 is not supported")
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def aggregate(self, arg, *args, **kwargs):
"""
Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a Panel or when passed to Panel.apply. If
pass a dict, the keys must be DataFrame column names
Returns
-------
aggregated : Panel
"""
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
return self._aggregate_generic(arg, *args, **kwargs)
def _wrap_generic_output(self, result, obj):
if self.axis == 0:
new_axes = list(obj.axes)
new_axes[0] = self.grouper.result_index
elif self.axis == 1:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, z, x]
else:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, y, x]
result = Panel._from_axes(result, new_axes)
if self.axis == 1:
result = result.swapaxes(0, 1).swapaxes(0, 2)
elif self.axis == 2:
result = result.swapaxes(0, 2)
return result
def _aggregate_item_by_item(self, func, *args, **kwargs):
obj = self._obj_with_exclusions
result = {}
if self.axis > 0:
for item in obj:
try:
itemg = DataFrameGroupBy(obj[item],
axis=self.axis - 1,
grouper=self.grouper)
result[item] = itemg.aggregate(func, *args, **kwargs)
except (ValueError, TypeError):
raise
new_axes = list(obj.axes)
new_axes[self.axis] = self.grouper.result_index
return Panel._from_axes(result, new_axes)
else:
raise ValueError("axis value must be greater than 0")
def _wrap_aggregated_output(self, output, names=None):
raise AbstractMethodError(self)
class NDArrayGroupBy(GroupBy):
pass
# ----------------------------------------------------------------------
# Splitting / application
class DataSplitter(object):
def __init__(self, data, labels, ngroups, axis=0):
self.data = data
self.labels = _ensure_int64(labels)
self.ngroups = ngroups
self.axis = axis
@cache_readonly
def slabels(self):
# Sorted labels
return algos.take_nd(self.labels, self.sort_idx, allow_fill=False)
@cache_readonly
def sort_idx(self):
# Counting sort indexer
return _get_group_index_sorter(self.labels, self.ngroups)
def __iter__(self):
sdata = self._get_sorted_data()
if self.ngroups == 0:
raise StopIteration
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
for i, (start, end) in enumerate(zip(starts, ends)):
# Since I'm now compressing the group ids, it's now not "possible"
# to produce empty slices because such groups would not be observed
# in the data
# if start >= end:
# raise AssertionError('Start %s must be less than end %s'
# % (str(start), str(end)))
yield i, self._chop(sdata, slice(start, end))
def _get_sorted_data(self):
return self.data.take(self.sort_idx, axis=self.axis, convert=False)
def _chop(self, sdata, slice_obj):
return sdata.iloc[slice_obj]
def apply(self, f):
raise AbstractMethodError(self)
class ArraySplitter(DataSplitter):
pass
class SeriesSplitter(DataSplitter):
def _chop(self, sdata, slice_obj):
return sdata._get_values(slice_obj).to_dense()
class FrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(FrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
def fast_apply(self, f, names):
# must return keys::list, values::list, mutated::bool
try:
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
except:
# fails when all -1
return [], True
sdata = self._get_sorted_data()
results, mutated = lib.apply_frame_axis0(sdata, f, names, starts, ends)
return results, mutated
def _chop(self, sdata, slice_obj):
if self.axis == 0:
return sdata.iloc[slice_obj]
else:
return sdata._slice(slice_obj, axis=1) # ix[:, slice_obj]
class NDFrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(NDFrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
self.factory = data._constructor
def _get_sorted_data(self):
# this is the BlockManager
data = self.data._data
# this is sort of wasteful but...
sorted_axis = data.axes[self.axis].take(self.sort_idx)
sorted_data = data.reindex_axis(sorted_axis, axis=self.axis)
return sorted_data
def _chop(self, sdata, slice_obj):
return self.factory(sdata.get_slice(slice_obj, axis=self.axis))
def get_splitter(data, *args, **kwargs):
if isinstance(data, Series):
klass = SeriesSplitter
elif isinstance(data, DataFrame):
klass = FrameSplitter
else:
klass = NDFrameSplitter
return klass(data, *args, **kwargs)
# ----------------------------------------------------------------------
# Misc utilities
def get_group_index(labels, shape, sort, xnull):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
    - If `sort`, the ranks of the returned ids preserve the lexical ranks of
      the labels, i.e. the returned ids can be used to do a lexical sort on
      the labels;
    - If `xnull`, nulls (-1 labels) are passed through.
Parameters
----------
labels: sequence of arrays
Integers identifying levels at each location
shape: sequence of ints same length as labels
Number of unique levels at each location
sort: boolean
If the ranks of returned ids should match lexical ranks of labels
    xnull: boolean
        If true, nulls (-1 values in the labels) do not form groups of their
        own and are passed through as -1 in the output
Returns
-------
    An array of type int64 where two elements are equal if their corresponding
    labels are equal at all locations.
"""
def _int64_cut_off(shape):
acc = long(1)
for i, mul in enumerate(shape):
acc *= long(mul)
if not acc < _INT64_MAX:
return i
return len(shape)
def loop(labels, shape):
# how many levels can be done without overflow:
nlev = _int64_cut_off(shape)
# compute flat ids for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
out = stride * labels[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
if shape[i] == 0:
stride = 0
else:
stride //= shape[i]
out += labels[i] * stride
if xnull: # exclude nulls
mask = labels[0] == -1
for lab in labels[1:nlev]:
mask |= lab == -1
out[mask] = -1
if nlev == len(shape): # all levels done!
return out
# compress what has been done so far in order to avoid overflow
# to retain lexical ranks, obs_ids should be sorted
comp_ids, obs_ids = _compress_group_index(out, sort=sort)
labels = [comp_ids] + labels[nlev:]
shape = [len(obs_ids)] + shape[nlev:]
return loop(labels, shape)
    def maybe_lift(lab, size):  # promote nan values
return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
labels = map(_ensure_int64, labels)
if not xnull:
labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))
return loop(list(labels), list(shape))
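# Illustrative sketch (added, not part of the upstream module): with two label
# arrays drawn from 2 and 3 levels respectively, the flat ids are simply
# labels[0] * 3 + labels[1]:
#
#   >>> labels = [np.array([0, 1, 1, 0]), np.array([0, 0, 1, 2])]
#   >>> get_group_index(labels, shape=[2, 3], sort=True, xnull=True)
#   array([0, 3, 4, 2])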
_INT64_MAX = np.iinfo(np.int64).max
def _int64_overflow_possible(shape):
the_prod = long(1)
for x in shape:
the_prod *= long(x)
return the_prod >= _INT64_MAX
def decons_group_index(comp_labels, shape):
# reconstruct labels
if _int64_overflow_possible(shape):
# at some point group indices are factorized,
# and may not be deconstructed here! wrong path!
raise ValueError('cannot deconstruct factorized group indices!')
label_list = []
factor = 1
y = 0
x = comp_labels
for i in reversed(range(len(shape))):
labels = (x - y) % (factor * shape[i]) // factor
np.putmask(labels, comp_labels < 0, -1)
label_list.append(labels)
y = labels * factor
factor *= shape[i]
return label_list[::-1]
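# Illustrative sketch (added): for ids that were not factorized this is the
# exact inverse of get_group_index, e.g. continuing the example above:
#
#   >>> decons_group_index(np.array([0, 3, 4, 2]), shape=[2, 3])
#   [array([0, 1, 1, 0]), array([0, 0, 1, 2])]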
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull):
"""
reconstruct labels from observed group ids
Parameters
----------
    xnull: boolean
        if true, nulls (-1 labels) were excluded from grouping and are
        passed through unchanged
"""
from pandas.hashtable import unique_label_indices
if not xnull:
lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8')
shape = np.asarray(shape, dtype='i8') + lift
if not _int64_overflow_possible(shape):
# obs ids are deconstructable! take the fast route!
out = decons_group_index(obs_ids, shape)
return out if xnull or not lift.any() \
else [x - y for x, y in zip(out, lift)]
i = unique_label_indices(comp_ids)
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
return [i8copy(lab[i]) for lab in labels]
def _indexer_from_factorized(labels, shape, compress=True):
ids = get_group_index(labels, shape, sort=True, xnull=False)
if not compress:
ngroups = (ids.size and ids.max()) + 1
else:
ids, obs = _compress_group_index(ids, sort=True)
ngroups = len(obs)
return _get_group_index_sorter(ids, ngroups)
def _lexsort_indexer(keys, orders=None, na_position='last'):
labels = []
shape = []
if isinstance(orders, bool):
orders = [orders] * len(keys)
elif orders is None:
orders = [True] * len(keys)
for key, order in zip(keys, orders):
# we are already a Categorical
if is_categorical_dtype(key):
c = key
# create the Categorical
else:
c = Categorical(key, ordered=True)
if na_position not in ['last', 'first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
n = len(c.categories)
codes = c.codes.copy()
mask = (c.codes == -1)
if order: # ascending
if na_position == 'last':
codes = np.where(mask, n, codes)
elif na_position == 'first':
codes += 1
else: # not order means descending
if na_position == 'last':
codes = np.where(mask, n, n - codes - 1)
elif na_position == 'first':
codes = np.where(mask, 0, n - codes)
if mask.any():
n += 1
shape.append(n)
labels.append(codes)
return _indexer_from_factorized(labels, shape)
def _nargsort(items, kind='quicksort', ascending=True, na_position='last'):
"""
This is intended to be a drop-in replacement for np.argsort which
handles NaNs. It adds ascending and na_position parameters.
GH #6399, #5231
"""
# specially handle Categorical
if is_categorical_dtype(items):
return items.argsort(ascending=ascending)
items = np.asanyarray(items)
idx = np.arange(len(items))
mask = isnull(items)
non_nans = items[~mask]
non_nan_idx = idx[~mask]
nan_idx = np.nonzero(mask)[0]
if not ascending:
non_nans = non_nans[::-1]
non_nan_idx = non_nan_idx[::-1]
indexer = non_nan_idx[non_nans.argsort(kind=kind)]
if not ascending:
indexer = indexer[::-1]
# Finally, place the NaNs at the end or the beginning according to
# na_position
if na_position == 'last':
indexer = np.concatenate([indexer, nan_idx])
elif na_position == 'first':
indexer = np.concatenate([nan_idx, indexer])
else:
raise ValueError('invalid na_position: {!r}'.format(na_position))
return indexer
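# Illustrative sketch (added): non-NaN values are argsorted and the NaN
# positions are appended (or prepended) according to na_position:
#
#   >>> _nargsort(np.array([3.0, np.nan, 1.0]), na_position='last')
#   array([2, 0, 1])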
class _KeyMapper(object):
"""
Ease my suffering. Map compressed group id -> key tuple
"""
def __init__(self, comp_ids, ngroups, labels, levels):
self.levels = levels
self.labels = labels
self.comp_ids = comp_ids.astype(np.int64)
self.k = len(labels)
self.tables = [_hash.Int64HashTable(ngroups) for _ in range(self.k)]
self._populate_tables()
def _populate_tables(self):
for labs, table in zip(self.labels, self.tables):
table.map(self.comp_ids, labs.astype(np.int64))
def get_key(self, comp_id):
return tuple(level[table.get_item(comp_id)]
for table, level in zip(self.tables, self.levels))
def _get_indices_dict(label_list, keys):
shape = list(map(len, keys))
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
ngroups = ((group_index.size and group_index.max()) + 1) \
if _int64_overflow_possible(shape) \
else np.prod(shape, dtype='i8')
sorter = _get_group_index_sorter(group_index, ngroups)
sorted_labels = [lab.take(sorter) for lab in label_list]
group_index = group_index.take(sorter)
return lib.indices_fast(sorter, group_index, keys, sorted_labels)
# ----------------------------------------------------------------------
# sorting levels...cleverly?
def _get_group_index_sorter(group_index, ngroups):
"""
_algos.groupsort_indexer implements `counting sort` and it is at least
O(ngroups), where
ngroups = prod(shape)
shape = map(len, keys)
that is, linear in the number of combinations (cartesian product) of unique
values of groupby keys. This can be huge when doing multi-key groupby.
np.argsort(kind='mergesort') is O(count x log(count)) where count is the
length of the data-frame;
    Both algorithms are `stable` sorts, which is necessary for the correctness
    of groupby operations. e.g. consider:
df.groupby(key)[col].transform('first')
"""
count = len(group_index)
alpha = 0.0 # taking complexities literally; there may be
beta = 1.0 # some room for fine-tuning these parameters
do_groupsort = (count > 0 and ((alpha + beta * ngroups) <
(count * np.log(count))))
if do_groupsort:
sorter, _ = _algos.groupsort_indexer(_ensure_int64(group_index),
ngroups)
return _ensure_platform_int(sorter)
else:
return group_index.argsort(kind='mergesort')
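# Illustrative sketch (added): either branch yields a stable permutation that
# gathers equal group ids together, e.g.:
#
#   >>> _get_group_index_sorter(np.array([1, 0, 1, 0, 2]), ngroups=3)
#   array([1, 3, 0, 2, 4])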
def _compress_group_index(group_index, sort=True):
"""
    Group_index is offsets into the cartesian product of all possible labels.
    This space can be huge, so this function compresses it by computing
    offsets (comp_ids) into the list of unique labels (obs_group_ids).
"""
size_hint = min(len(group_index), _hash._SIZE_HINT_LIMIT)
table = _hash.Int64HashTable(size_hint)
group_index = _ensure_int64(group_index)
# note, group labels come out ascending (ie, 1,2,3 etc)
comp_ids, obs_group_ids = table.get_labels_groupby(group_index)
if sort and len(obs_group_ids) > 0:
obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)
return comp_ids, obs_group_ids
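# Illustrative sketch (added): sparse flat ids are mapped onto dense labels,
# together with the (sorted) observed ids, e.g.:
#
#   >>> _compress_group_index(np.array([5, 3, 5, 7]))
#   (array([1, 0, 1, 2]), array([3, 5, 7]))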
def _reorder_by_uniques(uniques, labels):
# sorter is index where elements ought to go
sorter = uniques.argsort()
# reverse_indexer is where elements came from
reverse_indexer = np.empty(len(sorter), dtype=np.int64)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = labels < 0
# move labels to right locations (ie, unsort ascending labels)
labels = algos.take_nd(reverse_indexer, labels, allow_fill=False)
np.putmask(labels, mask, -1)
# sort observed ids
uniques = algos.take_nd(uniques, sorter, allow_fill=False)
return uniques, labels
def numpy_groupby(data, labels, axis=0):
s = np.argsort(labels)
keys, inv = np.unique(labels, return_inverse=True)
i = inv.take(s)
groups_at = np.where(i != np.concatenate(([-1], i[:-1])))[0]
ordered_data = data.take(s, axis=axis)
group_sums = np.add.reduceat(ordered_data, groups_at, axis=axis)
return group_sums
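# Illustrative sketch (added): rows sharing a label are summed with
# np.add.reduceat after sorting the data by label, e.g.:
#
#   >>> numpy_groupby(np.array([[1, 2], [3, 4], [5, 6]]),
#   ...               labels=np.array([1, 0, 1]))
#   array([[3, 4],
#          [6, 8]])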
| mit |
qbilius/streams | streams/utils.py | 1 | 13227 | import functools
import numpy as np
import scipy.stats
import pandas
import matplotlib.pyplot as plt
import seaborn as sns
def splithalf(data, aggfunc=np.nanmean, rng=None):
data = np.array(data)
if rng is None:
rng = np.random.RandomState(None)
inds = list(range(data.shape[0]))
rng.shuffle(inds)
half = len(inds) // 2
split1 = aggfunc(data[inds[:half]], axis=0)
split2 = aggfunc(data[inds[half:2*half]], axis=0)
return split1, split2
def pearsonr_matrix(data1, data2, axis=1):
rs = []
for i in range(data1.shape[axis]):
d1 = np.take(data1, i, axis=axis)
d2 = np.take(data2, i, axis=axis)
r, p = scipy.stats.pearsonr(d1, d2)
rs.append(r)
return np.array(rs)
def spearman_brown_correct(pearsonr, n=2):
pearsonr = np.array(pearsonr)
return n * pearsonr / (1 + (n-1) * pearsonr)
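# Illustrative example (added): for the usual split-half case (n=2), a
# half-test correlation of 0.5 is corrected to 2 * 0.5 / (1 + 0.5), i.e.
# roughly 0.667:
#
#   >>> spearman_brown_correct(0.5, n=2)   # -> ~0.667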
def resample(data, rng=None):
data = np.array(data)
if rng is None:
rng = np.random.RandomState(None)
inds = rng.choice(range(data.shape[0]), size=data.shape[0], replace=True)
return data[inds]
def bootstrap_resample(data, func=np.mean, niter=100, ci=95, rng=None):
df = [func(resample(data, rng=rng)) for i in range(niter)]
if ci is not None:
return np.percentile(df, 50-ci/2.), np.percentile(df, 50+ci/2.)
else:
return df
def _timeplot_bootstrap(x, estimator=np.mean, ci=95, n_boot=100):
ci = bootstrap_resample(x, func=estimator, ci=ci, niter=n_boot)
return pandas.Series({'emin': ci[0], 'emax': ci[1]})
def timeplot(data=None, x=None, y=None, hue=None,
estimator=np.mean, ci=95, n_boot=100,
col=None, row=None, sharex=None, sharey=None,
legend_loc='lower right', **fig_kwargs):
if hue is None:
hues = ['']
else:
hues = data[hue].unique()
if data[hue].dtype.name == 'category': hues = hues.sort_values()
# plt.figure()
if row is None:
row_orig = None
tmp = 'row_{}'
i = 0
row = tmp.format(i)
while row in data:
i += 1
row = tmp.format(i)
data[row] = 'row'
else:
row_orig = row
if col is None:
col_orig = None
tmp = 'col_{}'
i = 0
col = tmp.format(i)
while col in data:
i += 1
col = tmp.format(i)
data[col] = 'col'
else:
col_orig = col
if row is not None:
rows = data[row].unique()
if data[row].dtype.name == 'category': rows = rows.sort_values()
else:
rows = [(None, None)]
if col is not None:
cols = data[col].unique()
if data[col].dtype.name == 'category': cols = cols.sort_values()
else:
cols = [(None, None)]
fig, axes = plt.subplots(nrows=len(rows), ncols=len(cols), **fig_kwargs)
if hasattr(axes, 'shape'):
axes = axes.reshape([len(rows), len(cols)])
else:
axes = np.array([[axes]])
xlim = data.groupby([row, col])[x].apply(lambda x: {'amin': x.min(), 'amax': x.max()}).unstack()
ylim = data.groupby([row, col])[y].apply(lambda x: {'amin': x.min(), 'amax': x.max()}).unstack()
if sharex == 'row':
for r in rows:
xlim.loc[r, 'amin'] = xlim.loc[r, 'amin'].min()
xlim.loc[r, 'amax'] = xlim.loc[r, 'amax'].max()
elif sharex == 'col':
for c in cols:
xlim.loc[(slice(None), c), 'amin'] = xlim.loc[(slice(None), c), 'amin'].min()
xlim.loc[(slice(None), c), 'amax'] = xlim.loc[(slice(None), c), 'amax'].max()
elif sharex == 'both':
xlim.loc[:, 'amin'] = xlim.loc[:, 'amin'].min()
xlim.loc[:, 'amax'] = xlim.loc[:, 'amax'].min()
elif isinstance(sharex, (tuple, list)):
xlim.loc[:, 'amin'] = sharex[0]
xlim.loc[:, 'amax'] = sharex[1]
if sharey == 'row':
for r in rows:
ylim.loc[r, 'amin'] = ylim.loc[r, 'amin'].min()
ylim.loc[r, 'amax'] = ylim.loc[r, 'amax'].max()
elif sharey == 'col':
for c in cols:
ylim.loc[(slice(None), c), 'amin'] = ylim.loc[(slice(None), c), 'amin'].min()
ylim.loc[(slice(None), c), 'amax'] = ylim.loc[(slice(None), c), 'amax'].max()
elif sharey == 'both':
ylim.loc[:, 'amin'] = ylim.loc[:, 'amin'].min()
ylim.loc[:, 'amax'] = ylim.loc[:, 'amax'].min()
elif isinstance(sharey, (tuple, list)):
ylim.loc[:, 'amin'] = sharey[0]
ylim.loc[:, 'amax'] = sharey[1]
for rno, r in enumerate(rows):
for cno, c in enumerate(cols):
ax = axes[rno,cno]
for h, color in zip(hues, sns.color_palette(n_colors=len(hues))):
if hue is None:
d = data
else:
d = data[data[hue] == h]
sel_col = d[col] == c if col is not None else True
sel_row = d[row] == r if row is not None else True
if not (col is None and row is None):
d = d[sel_row & sel_col]
# if c == 'hvm_test': import ipdb; ipdb.set_trace()
if len(d) > 0:
mn = d.groupby(x)[y].apply(estimator)
def bootstrap(x):
try:
y = _timeplot_bootstrap(x[x.notnull()], estimator, ci, n_boot)
except:
y = _timeplot_bootstrap(x, estimator, ci, n_boot)
return y
if n_boot > 0:
ebars = d.groupby(x)[y].apply(bootstrap).unstack()
ax.fill_between(mn.index, ebars.emin, ebars.emax, alpha=.5, color=color)
ax.plot(mn.index, mn, linewidth=2, color=color, label=h)
else:
ax.set_visible(False)
try:
ax.set_xlim([xlim.loc[(r, c), 'amin'], xlim.loc[(r, c), 'amax']])
except:
pass
try:
ax.set_ylim([ylim.loc[(r, c), 'amin'], ylim.loc[(r, c), 'amax']])
except:
pass
if ax.is_last_row():
ax.set_xlabel(x)
if ax.is_first_col():
ax.set_ylabel(y)
if row_orig is None:
if col_orig is None:
ax.set_title('')
else:
ax.set_title('{} = {}'.format(col_orig, c))
else:
if col_orig is None:
ax.set_title('{} = {}'.format(row_orig, r))
else:
ax.set_title('{} = {} | {} = {}'.format(row_orig, r, col_orig, c))
if hue is not None:
plt.legend(loc=legend_loc, framealpha=.25)
plt.tight_layout()
return axes
def clean_data(df, std_thres=3, stim_dur_thres=1000./120):
"""
Remove outliers from behavioral data
What is removed:
    - If response time is more than `std_thres` standard deviations away
      from the mean response time to all stimuli (default: 3)
    - If the recorded stimulus duration differs by more than `stim_dur_thres`
      from the requested stimulus duration (default: half a frame at 60 Hz)
:Args:
df - pandas.DataFrame
:Kwargs:
- std_thres (float, default: 3)
- stim_dur_thres (float, default: 1000./120)
:Returns:
pandas.DataFrame that has the outliers removed (not nanned)
"""
    fast_rts = np.abs(df.rt - df.rt.mean()) < std_thres * df.rt.std()
good_present_time = np.abs(df.actual_stim_dur - df.stim_dur) < stim_dur_thres # half a frame
print('Response too slow: {} out of {}'.format(len(df) - fast_rts.sum(), len(df)))
print('Stimulus presentation too slow: {} out of {}'.format(len(df) - good_present_time.sum(), len(df)))
df = df[fast_rts & good_present_time]
return df
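# Usage sketch (added; assumes a DataFrame with the 'rt', 'actual_stim_dur'
# and 'stim_dur' columns used above -- the file name is hypothetical):
#
#   >>> trials = pandas.read_csv('behavior.csv')
#   >>> trials = clean_data(trials, std_thres=3)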
def lazy_property(function):
"""
From: https://danijar.com/structuring-your-tensorflow-models/
"""
attribute = '_cache_' + function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
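# Usage sketch (added): the wrapped method runs once on first access and the
# result is cached on the instance; `Model` and `weights` are made-up names.
#
#   >>> class Model(object):
#   ...     @lazy_property
#   ...     def weights(self):
#   ...         print('computing...')
#   ...         return np.zeros(10)
#   >>> m = Model()
#   >>> w = m.weights      # prints 'computing...' and caches the array
#   >>> w = m.weights      # served from the cache, nothing is recomputed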
# def hitrate_to_dprime(df, cap=5):
# # df = pandas.DataFrame(hitrate, index=labels, columns=order)
# out = np.zeros_like(df)
# for (i,j), hit_rate in np.ndenumerate(df.values):
# target = df.index[i]
# distr = df.columns[j]
# if target == distr:
# dprime = np.nan
# else:
# miss_rate = df.loc[df.index == target, distr].mean()
# hit = hit_rate / (hit_rate + miss_rate)
# fa_rate = df.loc[df.index == distr, target].mean()
# rej_rate = df.loc[df.index == distr, distr].mean()
# fa = fa_rate / (fa_rate + rej_rate)
# dprime = scipy.stats.norm.ppf(hit) - scipy.stats.norm.ppf(fa)
# if dprime > cap: dprime = cap
# out[i,j] = dprime
# return out
def hitrate_to_dprime_o1(df, cap=20):
# df = pandas.DataFrame(hitrate, index=labels, columns=order)
targets = df.index.unique()
# distrs = df.columns.unique()
# out = pandas.DataFrame(np.zeros([len(targets), len(distrs)]), index=targets, columns=distrs)
out = pandas.Series(np.zeros(len(targets)), index=targets)
for target in targets:
# if target == 'lo_poly_animal_RHINO_2': import ipdb; ipdb.set_trace()
hit_rate = np.nanmean(df.loc[df.index == target])
# miss_rate = 1 - np.nanmean(df.loc[df.index == target])
fa_rate = np.nanmean(1 - df.loc[df.index != target, target])
dprime = scipy.stats.norm.ppf(hit_rate) - scipy.stats.norm.ppf(fa_rate)
dprime = np.clip(dprime, -cap, cap)
out[target] = dprime
return out
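# Illustrative numbers (added): d' is the difference between the z-transformed
# hit rate and false-alarm rate. For example, a hit rate of .84 and a
# false-alarm rate of .16 give norm.ppf(.84) - norm.ppf(.16), roughly
# 0.99 - (-0.99), i.e. a d' of about 2.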
# for distr in distrs:
# # for (i,j), hit_rate in np.ndenumerate(df.values):
# if target == distr:
# dprime = np.nan
# else:
# hit_rate = df.loc[df.index == target].mean()
# miss_rate = df.loc[df.index == target, distr].mean()
# hit = hit_rate / (hit_rate + miss_rate)
# fa_rate = df.loc[df.index == distr, target].mean()
# rej_rate = df.loc[df.index == distr, distr].mean()
# fa = fa_rate / (fa_rate + rej_rate)
# dprime = scipy.stats.norm.ppf(hit) - scipy.stats.norm.ppf(fa)
# if dprime > cap: dprime = cap
# out[target, distr] = dprime
# return out
def hitrate_to_dprime_i1n(df, cap=20, normalize=True):
out = pandas.Series(np.zeros(len(df)),
index=df.set_index(['obj', 'id']).index)
for (target, idd), row in df.iterrows():
hit_rate = row.acc
# miss_rate = 1 - np.nanmean(df.loc[df.index == target])
rej = df.loc[df.obj != target, target]
fa_rate = 1 - np.nanmean(rej)
dprime = scipy.stats.norm.ppf(hit_rate) - scipy.stats.norm.ppf(fa_rate)
dprime = np.clip(dprime, -cap, cap)
out.loc[(target, idd)] = dprime
if normalize:
out.acc -= out.groupby('obj').acc.transform(lambda x: x.mean())
return out
def hitrate_to_dprime_i2n(df, cap=20):
# df = pandas.DataFrame(hitrate, index=labels, columns=order)
# targets = df.index.unique()
# distrs = df.columns.unique()
# out = pandas.DataFrame(np.zeros([len(targets), len(distrs)]), index=targets, columns=distrs)
# df = df.set_index(['obj', 'id', 'distr'])
# out = pandas.DataFrame(np.zeros(len(df), len(df.distr.unique()), index=df.index, columns=df.columns)
out = df.set_index(['obj', 'id', 'distr']).copy()
for (target, idd, distr), hit_rate in out.iterrows():
if target == distr:
out.loc[(target, idd, distr)] = np.nan
else:
# if target == 'lo_poly_animal_RHINO_2': import ipdb; ipdb.set_trace()
# hit_rate = acc
# miss_rate = 1 - np.nanmean(df.loc[df.index == target])
rej = df.loc[(df.obj == distr) & (df.distr == target), 'acc']
# import ipdb; ipdb.set_trace()
fa_rate = 1 - np.nanmean(rej)
dprime = scipy.stats.norm.ppf(hit_rate) - scipy.stats.norm.ppf(fa_rate)
# if target == 'lo_poly_animal_RHINO_2' and distr == 'MB30758' and idd == 'e387f6375d1d01a92f02394ea0c2c89de4ec4f61':
# import ipdb; ipdb.set_trace()
# hit_rate_norm = np.nanmean(df.loc[(df.obj == target) & (df.distr == distr), 'acc'])
# dprime_norm = scipy.stats.norm.ppf(hit_rate_norm) - scipy.stats.norm.ppf(fa_rate)
# dprime -= dprime_norm
out.loc[(target, idd, distr)] = dprime
# def ff(x):
# import ipdb; ipdb.set_trace()
# return x.mean()
out = out.reset_index()
out.acc -= out.groupby(['obj', 'distr']).acc.transform(lambda x: x.mean())
out.acc = np.clip(out.acc, -cap, cap)
# for (target, idd, distr), dprime in out.iterrows():
# out.loc[(target, idd, distr)] = dprime
# dprime = np.clip(dprime, -cap, cap)
return out | gpl-3.0 |
DamCB/tyssue | tyssue/dynamics/planar_gradients.py | 2 | 1567 | import pandas as pd
from ..utils.utils import _to_2d
def area_grad(sheet):
coords = sheet.coords
inv_area = sheet.edge_df.eval("1 / (4 * sub_area)")
face_pos = sheet.edge_df[["f" + c for c in coords]]
srce_pos = sheet.edge_df[["s" + c for c in coords]]
trgt_pos = sheet.edge_df[["t" + c for c in coords]]
r_ak = srce_pos - face_pos.values
r_aj = trgt_pos - face_pos.values
grad_a_srce = pd.DataFrame(index=sheet.edge_df.index, columns=["gx", "gy"])
grad_a_srce["gx"] = r_aj["ty"] * sheet.edge_df["nz"]
grad_a_srce["gy"] = -r_aj["tx"] * sheet.edge_df["nz"]
grad_a_trgt = pd.DataFrame(index=sheet.edge_df.index, columns=["gx", "gy"])
grad_a_trgt["gx"] = -r_ak["sy"] * sheet.edge_df["nz"]
grad_a_trgt["gy"] = r_ak["sx"] * sheet.edge_df["nz"]
grad_a_srce = _to_2d(inv_area) * grad_a_srce
grad_a_trgt = _to_2d(inv_area) * grad_a_trgt
return grad_a_srce, grad_a_trgt
def lumen_area_grad(eptm):
srce_pos = eptm.edge_df[eptm.scoords]
trgt_pos = eptm.edge_df[eptm.tcoords]
grad_srce = srce_pos.copy()
grad_srce.columns = ["g" + c for c in eptm.coords]
grad_trgt = grad_srce.copy()
grad_srce["gx"] = trgt_pos["ty"]
grad_srce["gy"] = -trgt_pos["tx"]
grad_trgt["gx"] = -srce_pos["sy"]
grad_trgt["gy"] = srce_pos["sx"]
lumen_side = eptm.settings.get("lumen_side", "basal")
grad_srce[eptm.edge_df.segment != lumen_side] = 0
grad_trgt[eptm.edge_df.segment != lumen_side] = 0
# minus sign due to the backward orientation
return -grad_srce, -grad_trgt
| gpl-3.0 |
jzt5132/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods is strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
ElDeveloper/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
pierreberthet/local-scripts | compare.py | 1 | 5832 | import numpy as np
import json
import matplotlib
#matplotlib.use('Agg')
import pylab as pl
import sys
import pprint as pp
import difflib
#from difflib_data import *
# Plot the different figures for the merged spikes and voltages recordings.
# This file, like MergeSpikefiles.py, should be located one level above Test/..., the output of a simulation.
import scipy.stats as stats
def get_weights(folder):
fparam = folder+'Test/Parameters/simulation_parameters.json'
f = open(fparam, 'r')
params = json.load(f)
params['multi_n']+=1
source_d1 = folder+params['weights_d1_multi_fn']+'_'
source_d2 = folder+params['weights_d2_multi_fn']+'_'
source_rew= folder+params['rewards_multi_fn']+'_'
source_rp = folder+params['weights_rp_multi_fn']+'_'
lend1= len(np.loadtxt(source_d1+'0'))
lend2= len(np.loadtxt(source_d2+'0'))
lenrp= len(np.loadtxt(source_rp+'0'))
if not lend1 == lend2:
        print 'INCONSISTENCY: D1 and D2 lengths differ (number of recordings)'
#params['multi_n'] = params['multi_n'] -1
wd1 = np.zeros((params['multi_n'], lend1, params['n_actions']))
wd1_m = np.zeros((lend1, params['n_actions']))
wd1_std = np.zeros((lend1, params['n_actions']))
wd2 = np.zeros((params['multi_n'], lend2,params['n_actions']))
wd2_m = np.zeros((lend2, params['n_actions']))
wd2_std = np.zeros((lend2, params['n_actions']))
wrp = np.zeros((params['multi_n'], lenrp,params['n_actions'] * params['n_states']))
wrp_m = np.zeros((lenrp, params['n_actions'] * params['n_states']))
wrp_std = np.zeros((lenrp, params['n_actions'] * params['n_states']))
rewards = np.zeros((params['multi_n'], params['n_iterations']))
rewards_m = np.zeros(params['n_iterations'])
rewards_std = np.zeros(params['n_iterations'])
for i in xrange(params['multi_n']):
wd1[i] = np.loadtxt(source_d1+str(i))
wd2[i] = np.loadtxt(source_d2+str(i))
wrp[i] = np.loadtxt(source_rp+str(i))
rewards[i] = np.loadtxt(source_rew+str(i))
#for i in xrange(lend1):
# for j in xrange(params['n_actions']):
# wd1_m[i,j] = np.mean(wd1[:,i,j])
# wd1_std[i,j] = np.std(wd1[:,i,j])
# wd2_m[i,j] = np.mean(wd2[:,i,j])
# wd2_std[i,j] = np.std(wd2[:,i,j])
#for i in xrange(lenrp):
# for j in xrange(params['n_actions']*params['n_states']):
# wrp_m[i,j] = np.mean(wrp[:,i,j])
# wrp_std[i,j] = np.std(wrp[:,i,j])
return wd1, wd2, wrp, rewards
######################################
######################################
if len(sys.argv) < 3:
    print "Need 2 folders for comparison"
    sys.exit(1)
file1 = sys.argv[1]+'/'
file2 = sys.argv[2]+'/'
fparam1 = file1+'Test/Parameters/simulation_parameters.json'
f1 = open(fparam1, 'r')
params1 = json.load(f1)
fparam2 = file2+'Test/Parameters/simulation_parameters.json'
f2 = open(fparam2, 'r')
params2 = json.load(f2)
print 'Do the simulations match? ', params2['n_recordings']==params1['n_recordings']
#diff = difflib.ndiff(open(fparam1,'r').readlines(), open(fparam2,'r').readlines())
#print ''.join(diff)
wd1a, wd2a, wrpa, rewa = get_weights(file1)
wd1b, wd2b, wrpb, rewb = get_weights(file2)
start = 10
shift = start*params1['block_len']*params1['t_iteration']/params1['resolution']
shift_rew = start*params1['block_len']
a1 = np.zeros(params1['n_recordings']-shift)
b1 = np.zeros(params1['n_recordings']-shift)
c1 = np.zeros(params1['n_recordings']-shift)
a2 = np.zeros(params2['n_recordings']-shift)
b2 = np.zeros(params2['n_recordings']-shift)
c2 = np.zeros(params2['n_recordings']-shift)
#r1 = np.zeros(params1['n_iterations']-shift_rew)
#r2 = np.zeros(params2['n_iterations']-shift_rew)
r1 = np.zeros(params1['multi_n'])
r2 = np.zeros(params2['multi_n'])
j=0
for i in xrange(int(shift),int(params1['n_recordings'])):
#for multi in xrange(params['multi_n']):
#a1[j] =sum(sum(abs(wd1a[:, i,:])))
#b1[j] =sum(sum(abs(wd2a[:, i,:])))
#c1[j] =sum(sum(abs(wrpa[:, i,:])))
#a2[j] =sum(sum(abs(wd1b[:, i,:])))
#b2[j] =sum(sum(abs(wd2b[:, i,:])))
#c2[j] =sum(sum(abs(wrpb[:, i,:])))
for q in xrange(params1['multi_n']):
for k in xrange(params1['n_actions']):
a1[j] += abs(wd1a[q,i,k] - wd1a[q,i-1,k])
b1[j] += abs(wd2a[q,i,k] - wd2a[q,i-1,k])
c1[j] += abs(wrpa[q,i,k] - wrpa[q,i-1,k])
a2[j] += abs(wd1b[q,i,k] - wd1b[q,i-1,k])
b2[j] += abs(wd2b[q,i,k] - wd2b[q,i-1,k])
c2[j] += abs(wrpb[q,i,k] - wrpb[q,i-1,k])
j+=1
j=0
#for i in xrange(shift_rew, params1['n_iterations']):
# r1[j]=sum(rewa[:,i])
# r2[j]=sum(rewb[:,i])
# j+=1
#for i in xrange(start, params1['n_blocks']):
for i in xrange(params1['multi_n']):
for q in xrange(start, params1['n_blocks']):
r1[j]+=sum(rewa[i,q*params1['block_len']-6:q*params1['block_len']-1])
r2[j]+=sum(rewb[i,q*params1['block_len']-6:q*params2['block_len']-1])
j+=1
#r1 = r1/params1['multi_n']
#r2 = r2/params2['multi_n']
r1 = r1/((params1['n_blocks']-start)*5.)
r2 = r2/((params2['n_blocks']-start)*5.)
print 'D1'
print 'mean A', np.mean(a1), 'standard deviation A', np.std(a1)
print 'mean B', np.mean(a2), 'standard deviation B', np.std(a2)
print stats.ttest_ind(a1,a2)
print 'D2'
print 'mean A', np.mean(b1), 'standard deviation A', np.std(b1)
print 'mean B', np.mean(b2), 'standard deviation B', np.std(b2)
print stats.ttest_ind(b1,b2)
print 'RP'
print 'mean A', np.mean(c1), 'standard deviation A', np.std(c1)
print 'mean B', np.mean(c2), 'standard deviation B', np.std(c2)
print stats.ttest_ind(c1,c2)
print 'PERF'
print 'mean A', np.mean(r1), 'standard deviation A', np.std(r1)
print 'mean B', np.mean(r2), 'standard deviation B', np.std(r2)
print stats.ttest_ind(r1,r2)
| gpl-2.0 |
js0701/chromium-crosswalk | chrome/test/data/nacl/gdb_rsp.py | 42 | 2542 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is based on gdb_rsp.py file from NaCl repository.
import re
import socket
import time
def RspChecksum(data):
checksum = 0
for char in data:
checksum = (checksum + ord(char)) % 0x100
return checksum
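# Illustrative example (added): RSP packets are framed as '$<payload>#<sum>',
# where <sum> is the modulo-256 sum of the payload bytes written as two hex
# digits. For the 'read general registers' payload 'g', ord('g') == 103 ==
# 0x67, so the packet sent on the wire is '$g#67'.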
class EofOnReplyException(Exception):
pass
class GdbRspConnection(object):
def __init__(self, addr):
self._socket = self._Connect(addr)
def _Connect(self, addr):
# We have to poll because we do not know when sel_ldr has
# successfully done bind() on the TCP port. This is inherently
# unreliable.
# TODO(mseaborn): Add a more reliable connection mechanism to
# sel_ldr's debug stub.
timeout_in_seconds = 10
poll_time_in_seconds = 0.1
for i in xrange(int(timeout_in_seconds / poll_time_in_seconds)):
# On Mac OS X, we have to create a new socket FD for each retry.
sock = socket.socket()
try:
sock.connect(addr)
except socket.error:
# Retry after a delay.
time.sleep(poll_time_in_seconds)
else:
return sock
raise Exception('Could not connect to sel_ldr\'s debug stub in %i seconds'
% timeout_in_seconds)
def _GetReply(self):
reply = ''
while True:
data = self._socket.recv(1024)
if len(data) == 0:
if reply == '+':
raise EofOnReplyException()
raise AssertionError('EOF on socket reached with '
'incomplete reply message: %r' % reply)
reply += data
if '#' in data:
break
match = re.match('\+\$([^#]*)#([0-9a-fA-F]{2})$', reply)
if match is None:
raise AssertionError('Unexpected reply message: %r' % reply)
reply_body = match.group(1)
checksum = match.group(2)
expected_checksum = '%02x' % RspChecksum(reply_body)
if checksum != expected_checksum:
raise AssertionError('Bad RSP checksum: %r != %r' %
(checksum, expected_checksum))
# Send acknowledgement.
self._socket.send('+')
return reply_body
# Send an rsp message, but don't wait for or expect a reply.
def RspSendOnly(self, data):
msg = '$%s#%02x' % (data, RspChecksum(data))
return self._socket.send(msg)
def RspRequest(self, data):
self.RspSendOnly(data)
return self._GetReply()
def RspInterrupt(self):
self._socket.send('\x03')
return self._GetReply()
| bsd-3-clause |
kaichogami/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters
approaches the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each value
possible value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
vybstat/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 176 | 2169 | from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
    # The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
    # The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
| bsd-3-clause |
walterreade/scikit-learn | examples/linear_model/plot_robust_fit.py | 147 | 3050 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The mean squared error on non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
  a break point above which it performs worse than OLS.
- The scores of HuberRegressor may not be compared directly to both TheilSen
  and RANSAC because it does not attempt to completely filter the outliers
  but only to lessen their effect.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn.linear_model import (
LinearRegression, TheilSenRegressor, RANSACRegressor, HuberRegressor)
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)),
('HuberRegressor', HuberRegressor())]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen', 'HuberRegressor': 'black'}
linestyle = {'OLS': '-', 'Theil-Sen': '-.', 'RANSAC': '--', 'HuberRegressor': '--'}
lw = 3
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling Errors Only', X, y),
('Corrupt X, Small Deviants', X_errors, y),
('Corrupt y, Small Deviants', X, y_errors),
('Corrupt X, Large Deviants', X_errors_large, y),
('Corrupt y, Large Deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'b+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot, color=colors[name], linestyle=linestyle[name],
linewidth=lw, label='%s: error = %.3f' % (name, mse))
    legend_title = 'Mean Squared Error\non Non-corrupt Data'
legend = plt.legend(loc='upper right', frameon=False, title=legend_title,
prop=dict(size='x-small'))
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
meduz/scikit-learn | doc/sphinxext/sphinx_gallery/gen_rst.py | 14 | 23291 | # -*- coding: utf-8 -*-
# Author: Óscar Nájera
# License: 3-clause BSD
"""
==================
RST file generator
==================
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
# Don't use unicode_literals here (be explicit with u"..." instead) otherwise
# tricky errors come up with exec(code_blocks, ...) calls
from __future__ import division, print_function, absolute_import
from time import time
import ast
import codecs
import hashlib
import os
import re
import shutil
import subprocess
import sys
import traceback
import warnings
from .downloads import CODE_DOWNLOAD
# Try Python 2 first, otherwise load from Python 3
from textwrap import dedent
try:
# textwrap indent only exists in python 3
from textwrap import indent
except ImportError:
def indent(text, prefix, predicate=None):
"""Adds 'prefix' to the beginning of selected lines in 'text'.
If 'predicate' is provided, 'prefix' will only be added to the lines
where 'predicate(line)' is True. If 'predicate' is not provided,
it will default to adding 'prefix' to all non-empty lines that do not
consist solely of whitespace characters.
"""
if predicate is None:
def predicate(line):
return line.strip()
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if predicate(line) else line)
return ''.join(prefixed_lines())
from io import StringIO
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except ImportError:
# this script can be imported by nosetest to find tests to run: we should
# not impose the matplotlib requirement in that case.
pass
from . import glr_path_static
from .backreferences import write_backreferences, _thumbnail_div
from .notebook import Notebook
try:
basestring
except NameError:
basestring = str
unicode = str
###############################################################################
class Tee(object):
"""A tee object to redirect streams to multiple outputs"""
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
# When called from a local terminal seaborn needs it in Python3
def isatty(self):
        return self.file1.isatty()
class MixedEncodingStringIO(StringIO):
"""Helper when both ASCII and unicode strings will be written"""
def write(self, data):
if not isinstance(data, unicode):
data = data.decode('utf-8')
StringIO.write(self, data)
###############################################################################
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: sphx-glr-horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: /%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: /%s
:align: center
"""
# This one could contain unicode
CODE_OUTPUT = u""".. rst-class:: sphx-glr-script-out
Out::
{0}\n"""
SPHX_GLR_SIG = """\n.. rst-class:: sphx-glr-signature
`Generated by Sphinx-Gallery <http://sphinx-gallery.readthedocs.io>`_\n"""
def get_docstring_and_rest(filename):
"""Separate `filename` content between docstring and the rest
Strongly inspired from ast.get_docstring.
Returns
-------
docstring: str
docstring of `filename`
rest: str
`filename` content without the docstring
"""
# can't use codecs.open(filename, 'r', 'utf-8') here b/c ast doesn't
# seem to work with unicode strings in Python2.7
# "SyntaxError: encoding declaration in Unicode string"
with open(filename, 'rb') as f:
content = f.read()
node = ast.parse(content)
if not isinstance(node, ast.Module):
raise TypeError("This function only supports modules. "
"You provided {0}".format(node.__class__.__name__))
if node.body and isinstance(node.body[0], ast.Expr) and \
isinstance(node.body[0].value, ast.Str):
docstring_node = node.body[0]
docstring = docstring_node.value.s
if hasattr(docstring, 'decode'): # python2.7
docstring = docstring.decode('utf-8')
# This get the content of the file after the docstring last line
# Note: 'maxsplit' argument is not a keyword argument in python2
rest = content.decode('utf-8').split('\n', docstring_node.lineno)[-1]
return docstring, rest
else:
raise ValueError(('Could not find docstring in file "{0}". '
'A docstring is required by sphinx-gallery')
.format(filename))
def split_code_and_text_blocks(source_file):
"""Return list with source file separated into code and text blocks.
Returns
-------
blocks : list of (label, content)
List where each element is a tuple with the label ('text' or 'code'),
and content string of block.
"""
docstring, rest_of_content = get_docstring_and_rest(source_file)
blocks = [('text', docstring)]
pattern = re.compile(
r'(?P<header_line>^#{20,}.*)\s(?P<text_content>(?:^#.*\s)*)',
flags=re.M)
pos_so_far = 0
for match in re.finditer(pattern, rest_of_content):
match_start_pos, match_end_pos = match.span()
code_block_content = rest_of_content[pos_so_far:match_start_pos]
text_content = match.group('text_content')
sub_pat = re.compile('^#', flags=re.M)
text_block_content = dedent(re.sub(sub_pat, '', text_content)).lstrip()
if code_block_content.strip():
blocks.append(('code', code_block_content))
if text_block_content.strip():
blocks.append(('text', text_block_content))
pos_so_far = match_end_pos
remaining_content = rest_of_content[pos_so_far:]
if remaining_content.strip():
blocks.append(('code', remaining_content))
return blocks
def codestr2rst(codestr, lang='python'):
"""Return reStructuredText code block from code string"""
code_directive = "\n.. code-block:: {0}\n\n".format(lang)
indented_block = indent(codestr, ' ' * 4)
return code_directive + indented_block
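# Illustrative example (added, not part of the upstream file):
#
#   >>> codestr2rst("print('hi')")
#   "\n.. code-block:: python\n\n    print('hi')"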
def text2string(content):
"""Returns a string without the extra triple quotes"""
try:
return ast.literal_eval(content) + '\n'
except Exception:
return content + '\n'
def extract_thumbnail_number(text):
""" Pull out the thumbnail image number specified in the docstring. """
# check whether the user has specified a specific thumbnail image
pattr = re.compile(
r"^\s*#\s*sphinx_gallery_thumbnail_number\s*=\s*([0-9]+)\s*$", flags=re.MULTILINE)
match = pattr.search(text)
if match is None:
# by default, use the first figure created
thumbnail_number = 1
else:
thumbnail_number = int(match.groups()[0])
return thumbnail_number
def extract_intro(filename):
""" Extract the first paragraph of module-level docstring. max:95 char"""
docstring, _ = get_docstring_and_rest(filename)
# lstrip is just in case docstring has a '\n\n' at the beginning
paragraphs = docstring.lstrip().split('\n\n')
if len(paragraphs) > 1:
first_paragraph = re.sub('\n', ' ', paragraphs[1])
first_paragraph = (first_paragraph[:95] + '...'
if len(first_paragraph) > 95 else first_paragraph)
else:
raise ValueError(
"Example docstring should have a header for the example title "
"and at least a paragraph explaining what the example is about. "
"Please check the example file:\n {}\n".format(filename))
return first_paragraph
def get_md5sum(src_file):
"""Returns md5sum of file"""
with open(src_file, 'r') as src_data:
src_content = src_data.read()
# data needs to be encoded in python3 before hashing
if sys.version_info[0] == 3:
src_content = src_content.encode('utf-8')
src_md5 = hashlib.md5(src_content).hexdigest()
return src_md5
def md5sum_is_current(src_file):
"""Returns True if src_file has the same md5 hash as the one stored on disk"""
src_md5 = get_md5sum(src_file)
src_md5_file = src_file + '.md5'
if os.path.exists(src_md5_file):
with open(src_md5_file, 'r') as file_checksum:
ref_md5 = file_checksum.read()
return src_md5 == ref_md5
return False
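# Note: the reference checksum is expected in a sidecar file named
# "<example>.py.md5"; generate_file_rst below writes it after a successful
# build, so unchanged examples are skipped on subsequent runs.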
def save_figures(image_path, fig_count, gallery_conf):
"""Save all open matplotlib figures of the example code-block
Parameters
----------
image_path : str
Path where plots are saved (format string which accepts figure number)
fig_count : int
        Previous figure number count. Figures are numbered starting from this count.
gallery_conf : dict
Contains the configuration of Sphinx-Gallery
Returns
-------
figure_list : list of str
strings containing the full path to each figure
images_rst : str
rst code to embed the images in the document
"""
figure_list = []
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
current_fig = image_path.format(fig_count + fig_mngr.num)
fig.savefig(current_fig, **kwargs)
figure_list.append(current_fig)
if gallery_conf.get('find_mayavi_figures', False):
from mayavi import mlab
e = mlab.get_engine()
last_matplotlib_fig_num = fig_count + len(figure_list)
total_fig_num = last_matplotlib_fig_num + len(e.scenes)
mayavi_fig_nums = range(last_matplotlib_fig_num + 1, total_fig_num + 1)
for scene, mayavi_fig_num in zip(e.scenes, mayavi_fig_nums):
current_fig = image_path.format(mayavi_fig_num)
mlab.savefig(current_fig, figure=scene)
# make sure the image is not too large
scale_image(current_fig, current_fig, 850, 999)
figure_list.append(current_fig)
mlab.close(all=True)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
images_rst = ""
if len(figure_list) == 1:
figure_name = figure_list[0]
images_rst = SINGLE_IMAGE % figure_name.lstrip('/')
elif len(figure_list) > 1:
images_rst = HLIST_HEADER
for figure_name in figure_list:
images_rst += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
return figure_list, images_rst
def scale_image(in_fname, out_fname, max_width, max_height):
"""Scales an image with the same aspect ratio centered in an
image with a given max_width and max_height
if in_fname == out_fname the image can only be scaled down
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = max_width / float(width_in)
scale_h = max_height / float(height_in)
if height_in * scale_w <= max_height:
scale = scale_w
else:
scale = scale_h
if scale >= 1.0 and in_fname == out_fname:
return
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (max_width, max_height), (255, 255, 255))
pos_insert = ((max_width - width_sc) // 2, (max_height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the \
generated images')
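# Typical call (illustrative file names only):
#   scale_image('fig.png', 'thumb.png', 400, 280)
# fits the figure inside a 400x280 white canvas while keeping its aspect ratio.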
def save_thumbnail(image_path_template, src_file, gallery_conf):
"""Save the thumbnail image"""
# read specification of the figure to display as thumbnail from main text
_, content = get_docstring_and_rest(src_file)
thumbnail_number = extract_thumbnail_number(content)
thumbnail_image_path = image_path_template.format(thumbnail_number)
thumb_dir = os.path.join(os.path.dirname(thumbnail_image_path), 'thumb')
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
base_image_name = os.path.splitext(os.path.basename(src_file))[0]
thumb_file = os.path.join(thumb_dir,
'sphx_glr_%s_thumb.png' % base_image_name)
if src_file in gallery_conf['failing_examples']:
broken_img = os.path.join(glr_path_static(), 'broken_example.png')
scale_image(broken_img, thumb_file, 200, 140)
elif os.path.exists(thumbnail_image_path):
scale_image(thumbnail_image_path, thumb_file, 400, 280)
elif not os.path.exists(thumb_file):
# create something to replace the thumbnail
default_thumb_file = os.path.join(glr_path_static(), 'no_image.png')
default_thumb_file = gallery_conf.get("default_thumb_file",
default_thumb_file)
scale_image(default_thumb_file, thumb_file, 200, 140)
def generate_dir_rst(src_dir, target_dir, gallery_conf, seen_backrefs):
"""Generate the gallery reStructuredText for an example directory"""
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
print(80 * '_')
print('Example directory %s does not have a README.txt file' %
src_dir)
print('Skipping this directory')
print(80 * '_')
return "", [] # because string is an expected return type
fhindex = open(os.path.join(src_dir, 'README.txt')).read()
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = [fname for fname in sorted(os.listdir(src_dir))
if fname.endswith('.py')]
entries_text = []
computation_times = []
for fname in sorted_listdir:
amount_of_code, time_elapsed = \
generate_file_rst(fname, target_dir, src_dir, gallery_conf)
computation_times.append((time_elapsed, fname))
new_fname = os.path.join(src_dir, fname)
intro = extract_intro(new_fname)
write_backreferences(seen_backrefs, gallery_conf,
target_dir, fname, intro)
this_entry = _thumbnail_div(target_dir, fname, intro) + """
.. toctree::
:hidden:
/%s/%s\n""" % (target_dir, fname[:-3])
entries_text.append((amount_of_code, this_entry))
# sort to have the smallest entries in the beginning
entries_text.sort()
for _, entry_text in entries_text:
fhindex += entry_text
# clear at the end of the section
fhindex += """.. raw:: html\n
<div style='clear:both'></div>\n\n"""
return fhindex, computation_times
def execute_code_block(code_block, example_globals,
block_vars, gallery_conf):
"""Executes the code block of the example file"""
time_elapsed = 0
stdout = ''
# If example is not suitable to run, skip executing its blocks
if not block_vars['execute_script']:
return stdout, time_elapsed
plt.close('all')
cwd = os.getcwd()
    # Redirect stdout so that the example's printed output can be captured
orig_stdout = sys.stdout
src_file = block_vars['src_file']
try:
        # First cd into the original example dir, so that any file
        # created by the example gets created in this directory
os.chdir(os.path.dirname(src_file))
my_buffer = MixedEncodingStringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
t_start = time()
# don't use unicode_literals at the top of this file or you get
# nasty errors here on Py2.7
exec(code_block, example_globals)
time_elapsed = time() - t_start
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue().strip().expandtabs()
# raise RuntimeError
if my_stdout:
stdout = CODE_OUTPUT.format(indent(my_stdout, u' ' * 4))
os.chdir(cwd)
fig_list, images_rst = save_figures(
block_vars['image_path'], block_vars['fig_count'], gallery_conf)
fig_num = len(fig_list)
except Exception:
formatted_exception = traceback.format_exc()
fail_example_warning = 80 * '_' + '\n' + \
'%s failed to execute correctly:' % src_file + \
formatted_exception + 80 * '_' + '\n'
warnings.warn(fail_example_warning)
fig_num = 0
images_rst = codestr2rst(formatted_exception, lang='pytb')
# Breaks build on first example error
# XXX This check can break during testing e.g. if you uncomment the
# `raise RuntimeError` by the `my_stdout` call, maybe use `.get()`?
if gallery_conf['abort_on_example_error']:
raise
# Stores failing file
gallery_conf['failing_examples'][src_file] = formatted_exception
block_vars['execute_script'] = False
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
code_output = u"\n{0}\n\n{1}\n\n".format(images_rst, stdout)
block_vars['fig_count'] += fig_num
return code_output, time_elapsed
def clean_modules():
"""Remove "unload" seaborn from the name space
After a script is executed it can load a variety of setting that one
does not want to influence in other examples in the gallery."""
# Horrible code to 'unload' seaborn, so that it resets
# its default when is load
# Python does not support unloading of modules
# https://bugs.python.org/issue9072
for module in list(sys.modules.keys()):
if 'seaborn' in module:
del sys.modules[module]
# Reset Matplotlib to default
plt.rcdefaults()
def generate_file_rst(fname, target_dir, src_dir, gallery_conf):
"""Generate the rst file for a given example.
Returns
-------
amount_of_code : int
character count of the corresponding python script in file
time_elapsed : float
seconds required to run the script
"""
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
script_blocks = split_code_and_text_blocks(src_file)
amount_of_code = sum([len(bcontent)
for blabel, bcontent in script_blocks
if blabel == 'code'])
if md5sum_is_current(example_file):
return amount_of_code, 0
image_dir = os.path.join(target_dir, 'images')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
base_image_name = os.path.splitext(fname)[0]
image_fname = 'sphx_glr_' + base_image_name + '_{0:03}.png'
image_path_template = os.path.join(image_dir, image_fname)
ref_fname = example_file.replace(os.path.sep, '_')
example_rst = """\n\n.. _sphx_glr_{0}:\n\n""".format(ref_fname)
example_nb = Notebook(fname, target_dir)
filename_pattern = gallery_conf.get('filename_pattern')
execute_script = re.search(filename_pattern, src_file) and gallery_conf[
'plot_gallery']
example_globals = {
        # A lot of examples contain 'print(__doc__)', for example in
# scikit-learn so that running the example prints some useful
# information. Because the docstring has been separated from
# the code blocks in sphinx-gallery, __doc__ is actually
# __builtin__.__doc__ in the execution context and we do not
# want to print it
'__doc__': '',
        # Examples may contain "if __name__ == '__main__'" guards,
        # for example in scikit-learn, if the example uses multiprocessing
'__name__': '__main__',
}
# A simple example has two blocks: one for the
# example introduction/explanation and one for the code
is_example_notebook_like = len(script_blocks) > 2
time_elapsed = 0
block_vars = {'execute_script': execute_script, 'fig_count': 0,
'image_path': image_path_template, 'src_file': src_file}
print('Executing file %s' % src_file)
for blabel, bcontent in script_blocks:
if blabel == 'code':
code_output, rtime = execute_code_block(bcontent,
example_globals,
block_vars,
gallery_conf)
time_elapsed += rtime
example_nb.add_code_cell(bcontent)
if is_example_notebook_like:
example_rst += codestr2rst(bcontent) + '\n'
example_rst += code_output
else:
example_rst += code_output
if 'sphx-glr-script-out' in code_output:
# Add some vertical space after output
example_rst += "\n\n|\n\n"
example_rst += codestr2rst(bcontent) + '\n'
else:
example_rst += text2string(bcontent) + '\n'
example_nb.add_markdown_cell(text2string(bcontent))
clean_modules()
    # Write the md5 checksum if the example built correctly, i.e. it did not
    # fail and was meant to run (a no-plot run shall not cache the md5sum)
if block_vars['execute_script']:
with open(example_file + '.md5', 'w') as file_checksum:
file_checksum.write(get_md5sum(example_file))
save_thumbnail(image_path_template, src_file, gallery_conf)
time_m, time_s = divmod(time_elapsed, 60)
example_nb.save_file()
with codecs.open(os.path.join(target_dir, base_image_name + '.rst'),
mode='w', encoding='utf-8') as f:
example_rst += "**Total running time of the script:**" \
" ({0: .0f} minutes {1: .3f} seconds)\n\n".format(
time_m, time_s)
example_rst += CODE_DOWNLOAD.format(fname, example_nb.file_name)
example_rst += SPHX_GLR_SIG
f.write(example_rst)
print("{0} ran in : {1:.2g} seconds\n".format(src_file, time_elapsed))
return amount_of_code, time_elapsed
| bsd-3-clause |
arabenjamin/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semi-supervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mark the unlabeled points with -1 so the model treats them as unlabeled
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
mikewolfli/IE_MBom | src/eds_pane.py | 1 | 60893 | # coding=utf-8
'''
Created on 2017-01-24
@author: 10256603
'''
from global_list import *
global login_info
logger = logging.getLogger()
mat_heads = ['位置号', '物料号', '中文名称', '英文名称', '图号', '数量', '单位', '材料', '重量', '备注']
mat_keys = ['st_no', 'mat_no', 'mat_name_cn', 'mat_name_en', 'drawing_no',
'qty', 'mat_unit', 'mat_material', 'part_weight', 'comments']
mat_cols = ['col1', 'col2', 'col3', 'col4', 'col5',
'col6', 'col7', 'col8', 'col9', 'col10']
def tree_level(val):
l = len(val)
if l == 0:
return 0
r = 1
for i in range(l):
if int(val[i]) > 0:
return r
elif int(val[i]) == 0:
r += 1
return r
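# Examples: tree_level('') == 0, tree_level('1') == 1, tree_level('01') == 2,
# tree_level('001') == 3 -- i.e. the BOM level is one plus the number of
# leading zeros in the position code.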
def dict2list(dict):
li = []
for i in range(len(mat_heads)):
li.append(dict[mat_heads[i]])
return li
def cell2str(val):
if (val is None) or (val == 'N') or (val == '无'):
return ''
else:
return str(val).strip()
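# Examples: cell2str(None) == '', cell2str('N') == '', cell2str(' 12 ') == '12',
# cell2str(3.5) == '3.5' -- the empty markers None, 'N' and '无' all map to ''.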
class eds_pane(Frame):
'''
mat_list = {1:{'位置号':value,'物料号':value, ....,'标判断':value},.....,item:{......}}
    bom_tree : tree structure of the material BOM, stored with keys as nodes, for example:
0
├── 1
│ └── 3
└── 2
'''
hibe_mats = []
    mat_branch = []  # materials that have a lower-level BOM
    mat_list = {}  # rows read from the file, keyed by the integers 1, 2, ...
    bom_items = []  # treeview item ids of nodes that have a lower-level BOM
    mat_items = {}  # materials involved in the BOM (including lower levels), keyed by material number
    # the treeview widget is itself a tree, so no separate tree model is rebuilt
    #bom_tree = Tree()
    # bom_tree.create_node(0,0)
    mat_pos = 0  # running row counter matching mat_list
    mat_tops = {}  # shipping-level materials, keyed by material number; value holds struct code and revision
    nstd_mat_list = []  # non-standard material list
sap_thread = None
nstd_app_id = ''
def __init__(self, master=None):
Frame.__init__(self, master)
self.grid()
self.createWidgets()
def createWidgets(self):
'''
self.find_mode = StringVar()
self.find_combo = ttk.Combobox(self,textvariable = self.find_mode)
self.find_combo['values'] = ('列出物料BOM结构','查找物料的上层','查找物料关联项目','查找项目关联物料')
self.find_combo.current(0)
self.find_combo.grid(row =0,column=0, columnspan=2,sticky=EW)
'''
self.find_label = Label(self, text='请输入头物料号查找', anchor='w')
self.find_label.grid(row=0, column=0, columnspan=2, sticky=EW)
self.find_var = StringVar()
self.find_text = Entry(self, textvariable=self.find_var)
self.find_text.grid(row=1, column=0, columnspan=2, sticky=EW)
self.find_text.bind("<Return>", self.search)
self.version_label = Label(self, text='物料版本', anchor='w')
self.version_label.grid(row=0, column=2, columnspan=2, sticky=EW)
self.version_var = StringVar()
self.version_text = Entry(self, textvariable=self.version_var)
self.version_text.grid(row=1, column=2, columnspan=2, sticky=EW)
self.version_text.bind("<Return>", self.search)
self.st_body = Frame(self)
self.st_body.grid(row=0, column=4, rowspan=2,
columnspan=6, sticky=NSEW)
self.import_button = Button(self.st_body, text='文件读取')
self.import_button.grid(row=0, column=0, sticky=NSEW)
self.import_button['command'] = self.excel_import
self.generate_nstd_list = Button(self.st_body, text='生成非标物料申请表')
self.generate_nstd_list.grid(row=1, column=0, sticky=NSEW)
self.generate_nstd_list['command'] = self.generate_app
self.pdm_generate_button = Button(
self.st_body, text='PDM物料导入文件生成\n(物料清单和BOM清单)')
self.pdm_generate_button.grid(row=0, column=1, rowspan=2, sticky=NSEW)
self.pdm_generate_button['command'] = self.pdm_generate
self.del_bom_list = Button(self.st_body, text='BOM树删除(仅删除如下列表中的头层BOM)')
self.del_bom_list.grid(row=1, column=1, rowspan=2, sticky=NSEW)
self.del_bom_list['command'] = self.del_bom_tree
self.para_label = Label(self.st_body, text='搜索参数', anchor='w')
self.para_label.grid(row=0, column=2, columnspan=2, sticky=EW)
self.para_var = StringVar()
self.para_text = Entry(self.st_body, textvariable=self.para_var)
self.para_text.grid(row=1, column=2, columnspan=2, sticky=EW)
self.para_text.bind("<Return>", self.para_search)
self.ie_body = Frame(self)
self.ie_body.grid(row=0, column=10, rowspan=2,
columnspan=4, sticky=NSEW)
self.multi_search = Button(self.ie_body, text='多物料搜索')
self.multi_search.grid(row=0, column=0, sticky=NSEW)
self.multi_search['command'] = self.multi_find
self.import_bom_List = Button(self.ie_body, text='生成BOM导入表')
self.import_bom_List.grid(row=1, column=0, sticky=NSEW)
self.import_bom_List['command'] = self.import_bom_list_x
'''
        A flat list view is not intuitive and the pandastable grid is too slow, so only the tree view is used
list_pane = Frame(self)
model = TableModel(rows=0, columns=0)
for col in mat_heads:
model.addColumn(col)
model.addRow(1)
self.mat_table = Table(list_pane, model, editable=False)
self.mat_table.show()
'''
tree_pane = Frame(self)
self.head_label = Label(tree_pane)
self.head_label["text"] = "操作记录"
self.head_label.grid(row=0, column=0, sticky=W)
self.mat_tree = ttk.Treeview(
tree_pane, columns=mat_cols, selectmode='extended')
self.mat_tree.grid(row=1, column=0, rowspan=6,
columnspan=2, sticky='nsew')
style = ttk.Style()
style.configure("Treeview", font=('TkDefaultFont', 10))
style.configure("Treeview.Heading", font=('TkDefaultFont', 9))
self.mat_tree.heading('#0', text='')
for col in mat_cols:
i = mat_cols.index(col)
if i == 0:
self.mat_tree.heading(col, text="版本号/位置号")
else:
self.mat_tree.heading(col, text=mat_heads[i])
#('位置号','物料号','中文名称','英文名称','图号','数量','单位','材料','重量','备注')
self.mat_tree.column('#0', width=80)
self.mat_tree.column('col1', width=80, anchor='w')
self.mat_tree.column('col2', width=100, anchor='w')
self.mat_tree.column('col3', width=150, anchor='w')
self.mat_tree.column('col4', width=150, anchor='w')
self.mat_tree.column('col5', width=100, anchor='w')
self.mat_tree.column('col6', width=100, anchor='w')
self.mat_tree.column('col7', width=100, anchor='w')
self.mat_tree.column('col8', width=150, anchor='w')
self.mat_tree.column('col9', width=100, anchor='w')
self.mat_tree.column('col10', width=300, anchor='w')
ysb = ttk.Scrollbar(tree_pane, orient='vertical',
command=self.mat_tree.yview)
xsb = ttk.Scrollbar(tree_pane, orient='horizontal',
command=self.mat_tree.xview)
ysb.grid(row=1, column=2, rowspan=6, sticky='ns')
xsb.grid(row=7, column=0, columnspan=2, sticky='ew')
self.mat_tree.configure(yscroll=ysb.set, xscroll=xsb.set)
tree_pane.rowconfigure(3, weight=1)
tree_pane.columnconfigure(1, weight=1)
tree_pane.grid(row=2, column=0, rowspan=6, columnspan=12, sticky=NSEW)
log_pane = Frame(self)
self.log_label = Label(log_pane)
self.log_label["text"] = "操作记录"
self.log_label.grid(row=0, column=0, sticky=W)
self.log_text = scrolledtext.ScrolledText(log_pane, state='disabled',height=10)
self.log_text.config(font=('TkFixedFont', 10, 'normal'))
self.log_text.grid(row=1, column=0, columnspan=2, sticky=EW)
log_pane.rowconfigure(1, weight=1)
log_pane.columnconfigure(1, weight=1)
log_pane.grid(row=8, column=0, columnspan=12, sticky=NSEW)
# Create textLogger
text_handler = TextHandler(self.log_text)
# Add the handler to logger
logger.addHandler(text_handler)
logger.setLevel(logging.INFO)
self.rowconfigure(6, weight=1)
self.columnconfigure(11, weight=1)
if login_info['perm'][3] != '1' and login_info['perm'][3] != '9':
self.st_body.grid_forget()
if login_info['perm'][3] != '2' and login_info['perm'][3] != '9':
self.ie_body.grid_forget()
def pdm_generate(self):
if len(self.bom_items) == 0:
logger.warning('没有bom结构,请先搜索物料BOM')
return
if self.sap_thread is not None and self.sap_thread.is_alive():
messagebox.showinfo('提示', '正在后台检查SAP非标物料,请等待完成后再点击!')
return
if len(self.nstd_mat_list) == 0:
logger.warning('此物料BOM中包含未维护进SAP系统的物料,请等待其维护完成')
return
if len(self.nstd_app_id) == 0:
logger.warning('请先生成非标申请表,填入非标单号后生成此文件')
return
gen_dir = filedialog.askdirectory(title="请选择输出文件保存的文件夹!")
if not gen_dir or len(gen_dir) == 0:
return
temp_file = os.path.join(cur_dir(), 'PDMT1.xls')
rb = xlrd.open_workbook(temp_file, formatting_info=True)
wb = sheet_copy(rb)
ws = wb.get_sheet(0)
#now = datetime.datetime.now()
#s_now = now.strftime('%Y%m%d%H%M%S')
file_name = self.nstd_app_id + '物料清单.xls'
pdm_mats_str = os.path.join(gen_dir, file_name)
logger.info('正在生成导入物料清单文件:' + pdm_mats_str)
i = 2
for it in self.nstd_mat_list:
ws.write(i, 0, it)
value = self.mat_items[it]
ws.write(i, 1, value[mat_heads[4]])
ws.write(i, 3, value[mat_heads[2]])
ws.write(i, 4, value[mat_heads[3]])
ws.write(i, 6, value[mat_heads[7]])
ws.write(i, 8, value[mat_heads[6]])
ws.write(i, 9, value[mat_heads[9]])
ws.write(i, 11, 'EDS系统')
if it in self.mat_tops:
rp_box = self.mat_tops[it]['rp_box']
ws.write(i, 12, rp_box['2101'][0])
ws.write(i, 13, rp_box['2101'][1])
ws.write(i, 14, rp_box['2001'][0])
ws.write(i, 15, rp_box['2001'][1])
i += 1
wb.save(pdm_mats_str)
logger.info(pdm_mats_str + '保存完成!')
temp_file = os.path.join(cur_dir(), 'PDMT2.xlsx')
logger.info('正在根据模板文件:' + temp_file + '生成PDM BOM导入清单...')
wb = load_workbook(temp_file)
temp_ws = wb.get_sheet_by_name('template')
for it in self.bom_items:
p_mat = self.mat_tree.item(it, 'values')[1]
ws = wb.copy_worksheet(temp_ws)
ws.sheet_state = 'visible'
ws.title = p_mat
logger.info('正在构建物料' + p_mat + '的PDM BOM导入清单...')
p_name = self.mat_tree.item(it, 'values')[2]
p_drawing = self.mat_tree.item(it, 'values')[4]
ws.cell(row=43, column=18).value = p_mat
ws.cell(row=41, column=18).value = p_name
ws.cell(row=45, column=18).value = p_drawing
ws.cell(row=41, column=10).value = 'L' + p_mat
children = self.mat_tree.get_children(it)
i = 4
for child in children:
value = self.mat_tree.item(child, 'values')
ws.cell(row=i, column=2).value = value[1]
ws.cell(row=i, column=5).value = value[4]
ws.cell(row=i, column=10).value = value[6]
ws.cell(row=i, column=13).value = value[2]
ws.cell(row=i, column=16).value = value[7]
ws.cell(row=i, column=20).value = value[5]
ws.cell(row=i, column=23).value = value[9]
i += 1
wb.remove_sheet(temp_ws)
file_name = self.nstd_app_id + 'PDM BOM物料导入清单.xlsx'
pdm_bom_str = os.path.join(gen_dir, file_name)
if writer.excel.save_workbook(workbook=wb, filename=pdm_bom_str):
logger.info('生成PDM BOM导入清单:' + pdm_bom_str + ' 成功!')
else:
logger.info('文件保存失败!')
def del_bom_tree(self):
if len(self.bom_items) == 0:
logger.warning('没有bom结构,请先搜索物料BOM')
return
if messagebox.askyesno('确认删除', '由于物料BOM存在多层级,而下层物料BOM中可能同时也是其他物料的下层,故本操作仅删除如下结构中的头层物料的BOM. \n\t是否确认删除(YES/NO)?\n 注意: 如果已经到处非标物料申请表,请提供非标物料申请号通知物料组删除非标物料申请(否则无法更新非标申请)!') == NO:
return
children = self.mat_tree.get_children()
for child in children:
mat = self.mat_tree.item(child, 'values')[1]
del_qer = bom_header.delete().where((bom_header.mat_no == mat)
& (bom_header.is_active == True))
r = del_qer.execute()
if r > 0:
self.mat_tree.delete(child)
logger.info(
'物料:' + mat + ' BOM删除成功, 若已导出非标申请,请提供非标申请号予数据组删除非标申请!')
def import_bom_list_x(self):
if len(self.bom_items) == 0:
logger.warning('没有bom结构,请先搜索物料BOM')
return
if self.sap_thread is not None and self.sap_thread.is_alive():
messagebox.showinfo('提示', '正在后台检查SAP非标物料,请等待完成后再点击!')
return
if len(self.nstd_mat_list) != 0:
# logger.warning('此物料BOM中包含未维护进SAP系统的物料,请等待其维护完成')
if messagebox.askyesno('确认导出', '此物料BOM中包含未维护进SAP系统的物料,是否继续(YES/NO)?') == NO:
return
file_str = filedialog.asksaveasfilename(
title="导出文件", initialfile="temp", filetypes=[('excel file', '.xlsx')])
if not file_str:
return
if not file_str.endswith(".xlsx"):
file_str += ".xlsx"
temp_file = os.path.join(cur_dir(), 'bom.xlsx')
wb = load_workbook(temp_file)
ws = wb.get_sheet_by_name('view')
logger.info('正在生成文件' + file_str)
i = 5
for it in self.bom_items:
p_mat = self.mat_tree.item(it, 'values')[1]
logger.info('正在构建物料' + p_mat + '的BOM导入清单...')
p_name = self.mat_tree.item(it, 'values')[2]
children = self.mat_tree.get_children(it)
for child in children:
value = self.mat_tree.item(child, 'values')
c_mat = value[1]
c_name = value[2]
ws.cell(row=i, column=1).value = p_mat
ws.cell(row=i, column=2).value = p_name
ws.cell(row=i, column=6).value = c_mat
ws.cell(row=i, column=7).value = c_name
ws.cell(row=i, column=3).value = 2102
ws.cell(row=i, column=4).value = 1
if c_mat in self.hibe_mats:
ws.cell(row=i, column=5).value = 'N'
else:
ws.cell(row=i, column=5).value = 'L'
ws.cell(row=i, column=15).value = 'X'
ws.cell(row=i, column=8).value = float(value[5])
i += 1
ws1 = wb.get_sheet_by_name('BOM')
i = 5
for it in self.bom_items:
p_mat = self.mat_tree.item(it, 'values')[1]
logger.info('正在构建物料' + p_mat + '的BOM导入清单...')
children = self.mat_tree.get_children(it)
for child in children:
value = self.mat_tree.item(child, 'values')
c_mat = value[1]
ws1.cell(row=i, column=1).value = p_mat
ws1.cell(row=i, column=5).value = c_mat
ws1.cell(row=i, column=2).value = 2102
ws1.cell(row=i, column=3).value = 1
if c_mat in self.hibe_mats:
ws1.cell(row=i, column=4).value = 'N'
else:
ws1.cell(row=i, column=4).value = 'L'
ws1.cell(row=i, column=13).value = 'X'
ws1.cell(row=i, column=6).value = float(value[5])
i += 1
if writer.excel.save_workbook(workbook=wb, filename=file_str):
logger.info('生成BOM导入清单文件:' + file_str + ' 成功!')
else:
logger.info('文件保存失败!')
def import_bom_list(self):
if len(self.bom_items) == 0:
logger.warning('没有bom结构,请先搜索物料BOM')
return
if self.sap_thread is not None and self.sap_thread.is_alive():
messagebox.showinfo('提示', '正在后台检查SAP非标物料,请等待完成后再点击!')
return
if len(self.nstd_mat_list) != 0:
# logger.warning('此物料BOM中包含未维护进SAP系统的物料,请等待其维护完成')
if messagebox.askyesno('确认导出', '此物料BOM中包含未维护进SAP系统的物料,是否继续(YES/NO)?') == NO:
return
file_str = filedialog.asksaveasfilename(
title="导出文件", initialfile="temp", filetypes=[('excel file', '.xls')])
if not file_str:
return
if not file_str.endswith(".xls"):
file_str += ".xls"
temp_file = os.path.join(cur_dir(), 'bom.xls')
rb = xlrd.open_workbook(temp_file, formatting_info=True)
wb = copy(rb)
ws = wb.get_sheet(0)
logger.info('正在生成文件' + file_str)
i = 4
for it in self.bom_items:
p_mat = self.mat_tree.item(it, 'values')[1]
logger.info('正在构建物料' + p_mat + '的BOM导入清单...')
p_name = self.mat_tree.item(it, 'values')[2]
children = self.mat_tree.get_children(it)
for child in children:
value = self.mat_tree.item(child, 'values')
c_mat = value[1]
c_name = value[2]
ws.write(i, 0, p_mat)
ws.write(i, 1, p_name)
ws.write(i, 5, c_mat)
ws.write(i, 6, c_name)
ws.write(i, 2, 2102)
ws.write(i, 3, 1)
if c_mat not in self.nstd_mat_list:
if c_mat in self.hibe_mats:
ws.write(i, 4, 'N')
else:
ws.write(i, 4, 'L')
ws.write(i, 14, 'X')
ws.write(i, 7, float(value[5]))
i += 1
wb.save(file_str)
logger.info(file_str + '保存完成!')
def generate_app(self):
if len(self.nstd_mat_list) == 0:
logger.warning('没有非标物料,无法生成非标物料申请表')
return
nstd_id = simpledialog.askstring(
'非标申请编号', '请输入完整非标申请编号(不区分大小写),系统将自动关联项目:')
if nstd_id is None:
return
nstd_id = nstd_id.upper().strip()
basic_info = self.get_rel_nstd_info(nstd_id)
if not basic_info:
logger.warning('非标申请:' + nstd_id + '在流程软件中未创建,请先创建后再生成非标物料申请表!')
return
file_str = filedialog.asksaveasfilename(
title="导出文件", initialfile=nstd_id, filetypes=[('excel file', '.xlsx')])
if not file_str:
return
if not file_str.endswith(".xlsx"):
file_str += ".xlsx"
if not self.create_nstd_mat_table(nstd_id, basic_info):
logger.warning('由于非标物料均已经在其他非标申请中提交,故中止创建非标申请清单文件。')
return
temp_file = os.path.join(cur_dir(), 'temp_eds.xlsx')
logger.info('正在根据模板文件:' + temp_file + '生成申请表...')
wb = load_workbook(temp_file)
temp_ws = wb.get_sheet_by_name('template')
m_qty = len(self.nstd_mat_list)
if m_qty % 28 == 0:
s_qty = int(m_qty / 28)
else:
s_qty = int(m_qty / 28) + 1
for i in range(1, s_qty + 1):
ws = wb.copy_worksheet(temp_ws)
ws.sheet_state = 'visible'
ws.title = 'page' + str(i)
self.style_worksheet(ws)
ws.cell(row=5, column=1).value = 'Page ' + \
str(i) + '/' + str(s_qty)
logger.info('正在向第' + str(i) + '页填入物料数据...')
self.fill_nstd_app_table(ws, i, nstd_id, basic_info, m_qty)
wb.remove_sheet(temp_ws)
self.nstd_app_id = nstd_id
if writer.excel.save_workbook(workbook=wb, filename=file_str):
logger.info('生成非标物料申请文件:' + file_str + ' 成功!')
else:
logger.info('文件保存失败!')
def create_nstd_mat_table(self, nstd_id, res):
logger.info('正在保存非标物料到维护列表中...')
no_need_mats = []
try:
nstd_app_head.get(nstd_app_head.nstd_app == nstd_id)
logger.warning('非标申请:' + nstd_id + '已经存在,故未重新创建!')
#q= nstd_app_head.update(project=res['project_id'], contract=res['contract'], index_mat=res['index_mat_id'], app_person=res['app_person']).where(nstd_app_head.nstd_app == nstd_id)
# q.execute()
except nstd_app_head.DoesNotExist:
nstd_app_head.create(nstd_app=nstd_id, project=res['project_id'], contract=res[
'contract'], index_mat=res['index_mat_id'], app_person=res['app_person'])
wbs_list = res['units']
for wbs in wbs_list:
if len(wbs.strip()) == 0 and len(wbs_list) > 1:
continue
nstd_app_link.get_or_create(
nstd_app=nstd_id, wbs_no=wbs, mbom_fin=False)
for mat in self.nstd_mat_list:
line = self.mat_items[mat]
try:
r = nstd_app_head.select().join(nstd_mat_table).where(
nstd_mat_table.mat_no == mat).naive().get()
nstd_app = none2str(r.nstd_app)
logger.error('非标物料:' + mat + '已经在非标申请:' +
nstd_app + '中提交,请勿重复提交!')
if nstd_id != nstd_app and mat not in no_need_mats:
no_need_mats.append(mat)
except nstd_app_head.DoesNotExist:
rp_sj = ''
box_sj = ''
rp_zs = ''
box_zs = ''
if mat in self.mat_tops.keys():
rp_box = self.mat_tops[mat]['rp_box']
if rp_box is not None:
rp_sj = rp_box['2101'][0]
box_sj = rp_box['2101'][1]
rp_zs = rp_box['2001'][0]
box_zs = rp_box['2001'][1]
nstd_mat_table.create(mat_no=mat, mat_name_cn=line[mat_heads[2]],
mat_name_en=line[mat_heads[3]], drawing_no=line[
mat_heads[4]],
mat_unit=line[mat_heads[6]], comments=line[
mat_heads[9]],
rp=rp_sj, box_code_sj=box_sj, rp_zs=rp_zs, box_code_zs=box_zs,
nstd_app=nstd_id, mat_app_person=res['app_person'])
try:
nstd_mat_fin.get(nstd_mat_fin.mat_no == mat)
except nstd_mat_fin.DoesNotExist:
nstd_mat_fin.create(mat_no=mat, justify=-1, mbom_fin=False,
pu_price_fin=False, co_run_fin=False, modify_by=login_info['uid'], modify_on=datetime.datetime.now())
for mat in no_need_mats:
self.nstd_mat_list.remove(mat)
if len(self.nstd_mat_list) == 0:
logger.error(' 所有非标物料已经在另外的非标申请中提交,请勿重复提交!')
return False
else:
logger.info('非标物料维护列表保存进程完成.')
return True
def fill_nstd_app_table(self, ws, page, nstd, res, count):
ws.cell(row=6, column=2).value = nstd
ws.cell(row=7, column=4).value = res['project_name']
ws.cell(row=7, column=20).value = res['contract']
wbses = res['units']
ws.cell(row=7, column=12).value = self.combine_wbs(wbses)
if count - count % (page * 28) > 0:
ran = 28
else:
ran = count % 28
for i in range(1, ran + 1):
mat = self.nstd_mat_list[((page - 1) * 28 + i - 1)]
line = self.mat_items[mat]
ws.cell(row=i + 10, column=3).value = line[mat_heads[2]]
ws.cell(row=i + 10, column=4).value = line[mat_heads[3]]
ws.cell(row=i + 10, column=5).value = mat
drawing_id = line[mat_heads[4]]
ws.cell(row=i + 10, column=7).value = drawing_id
if mat in self.mat_branch:
ws.cell(row=i + 10, column=9).value = 'L' + drawing_id
ws.cell(row=i + 10, column=10).value = line[mat_heads[9]]
ws.cell(row=i + 10, column=20).value = line[mat_heads[6]]
if drawing_id == 'NO' or len(drawing_id) == 0:
ws.cell(row=i + 10, column=21).value = '否'
else:
ws.cell(row=i + 10, column=21).value = '是'
if mat in self.mat_tops.keys():
rp_box = self.mat_tops[mat]['rp_box']
if rp_box is not None:
ws.cell(
row=i + 10, column=15).value = rp_box[login_info['plant']][1]
ws.cell(
row=i + 10, column=17).value = rp_box[login_info['plant']][0]
def style_worksheet(self, ws):
thin = Side(border_style="thin", color="000000")
dash = Side(border_style="dashed", color="000000")
other_border = Border(top=dash, left=dash, right=dash)
self.set_border(ws, 'T5:V5', other_border)
main_border = Border(top=thin, left=thin, right=thin, bottom=thin)
self.set_border(ws, 'A6:V40', main_border)
logo = Image(img=os.path.join(cur_dir(), 'logo.png'))
logo.drawing.top = 0
logo.drawing.left = 30
logo.drawing.width = 110
logo.drawing.height = 71
head = Image(img=os.path.join(cur_dir(), 'head.png'))
head.drawing.width = 221
head.drawing.height = 51
ws.add_image(head, 'A2')
ws.add_image(logo, 'T1')
ws.print_area = 'A1:V40'
ws.print_options.horizontalCentered = True
ws.print_options.verticalCentered = True
ws.page_setup.orientation = ws.ORIENTATION_LANDSCAPE
ws.page_setup.paperSize = ws.PAPERSIZE_A4
ws.page_margins.left = 0.24
ws.page_margins.right = 0.24
ws.page_margins.top = 0.19
ws.page_margins.bottom = 0.63
ws.page_margins.header = 0
ws.page_margins.footer = 0
ws.page_setup.scale = 80
ws.sheet_properties.pageSetUpPr.fitToPage = True
ws.oddFooter.left.text = '''Songjiang Plant,ThyssenKrupp Elevator ( Shanghai ) Co., Ltd.
No.2, Xunye Road, Sheshan Subarea, Songjiang Industrial Area, Shanghai
Tel.: +86 (21) 37869898 Fax: +86 (21) 57793363
TKEC.SJ-F-03-03'''
ws.oddFooter.left.font = 'TKTypeMedium, Regular'
ws.oddFooter.left.size = 7
ws.oddFooter.right.text = '项目非标物料汇总表V2.01'
ws.oddFooter.right.font = '宋体, Regular'
#ws.oddFooter.right.size =8
def set_border(self, ws, cell_range, border):
top = Border(top=border.top)
left = Border(left=border.left)
right = Border(right=border.right)
bottom = Border(bottom=border.bottom)
rows = ws[cell_range]
for cell in rows[-1]:
cell.border = cell.border + bottom
for row in rows:
r = row[-1]
r.border = r.border + right
for cell in row:
cell.border = cell.border + top + left
    def combine_wbs(self, li):
        # Collapse consecutive unit numbers (the suffix after the first 11
        # characters) into ranges, e.g. '...001~003,005'.
        if li is None or len(li) == 0:
            return ''
        li.sort()
        if len(li) == 1:
            return li[0]
        head = li[0]
        start = int(li[0][11:])
        j = 1
        end = li[0][11:]
        for i in range(1, len(li)):
            if int(li[i][11:]) == start + j:
                j += 1
            else:
                if j > 1:
                    head = head + '~' + end
                head = head + ',' + li[i][11:]
                start = int(li[i][11:])
                j = 1
            end = li[i][11:]
        if j > 1:
            head = head + '~' + end
        return head
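    # Worked example (made-up WBS codes): for
    #   ['C.12345678.001', 'C.12345678.002', 'C.12345678.003', 'C.12345678.005']
    # the 3-digit unit suffixes collapse to 'C.12345678.001~003,005'.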
def get_rel_nstd_info(self, nstd_id):
try:
nstd_result = NonstdAppItem.select(NonstdAppItem.link_list, NonstdAppItemInstance.index_mat, NonstdAppItemInstance.res_engineer, NonstdAppItemInstance.create_emp).join(NonstdAppItemInstance, on=(NonstdAppItem.index == NonstdAppItemInstance.index))\
.where((NonstdAppItemInstance.nstd_mat_app == nstd_id) & (NonstdAppItem.status >= 0) & (NonstdAppItemInstance.status >= 0)).naive().get()
except NonstdAppItem.DoesNotExist:
return None
res = {}
wbs_res = nstd_result.link_list
index_mat = nstd_result.index_mat
try:
emp_res = SEmployee.get(SEmployee.employee == login_info['uid'])
app_per = emp_res.name
except SEmployee.DoesNotExist:
app_per = ''
i_pos = index_mat.find('-')
nstd_app_id = index_mat[0:i_pos]
try:
nstd_app_result = NonstdAppHeader.get(
(NonstdAppHeader.nonstd == nstd_app_id) & (NonstdAppHeader.status >= 0))
except NonstdAppHeader.DoesNotExist:
return None
project_id = nstd_app_result.project
contract_id = nstd_app_result.contract
try:
p_r = ProjectInfo.get(ProjectInfo.project == project_id)
except ProjectInfo.DoesNotExist:
return None
project_name = p_r.project_name
if isinstance(wbs_res, str):
wbs_list = wbs_res.split(';')
else:
wbs_list = ['']
wbses = []
for wbs in wbs_list:
if len(wbs.strip()) == 0 and len(wbs_list) > 1:
continue
w = wbs.strip()
w = w[0:14]
wbses.append(w)
res['units'] = wbses
res['contract'] = contract_id
res['project_id'] = project_id
res['project_name'] = project_name
res['app_person'] = app_per
res['index_mat_id'] = index_mat
return res
def run_check_in_sap(self):
if self.sap_thread is not None:
if self.sap_thread.is_alive():
messagebox.showinfo('提示', '正在后台检查SAP非标物料,请等待完成后再点击!')
return
self.sap_thread = refresh_thread(self)
self.sap_thread.setDaemon(True)
self.sap_thread.start()
def refresh(self):
self.nstd_mat_list = []
self.nstd_app_id = ''
self.hibe_mats = []
logger.info("正在登陆SAP...")
config = ConfigParser()
config.read('sapnwrfc.cfg')
para_conn = config._sections['connection']
para_conn['user'] = base64.b64decode(para_conn['user']).decode()
para_conn['passwd'] = base64.b64decode(para_conn['passwd']).decode()
mats = self.mat_items.keys()
try:
conn = pyrfc.Connection(**para_conn)
imp = []
for mat in mats:
line = dict(MATNR=mat, WERKS='2101')
imp.append(line)
logger.info("正在调用RFC函数...")
result = conn.call('ZAP_PS_MATERIAL_INFO',
IT_CE_MARA=imp, CE_SPRAS='1')
std_mats = []
for re in result['OT_CE_MARA']:
std_mats.append(re['MATNR'])
if re['BKLAS'] == '3030' and re['MATNR'] not in self.hibe_mats:
self.hibe_mats.append(re['MATNR'])
for mat in mats:
if mat not in std_mats:
logger.info("标记非标物料:" + mat)
self.nstd_mat_list.append(mat)
self.mark_nstd_mat(mat, True)
else:
self.mark_nstd_mat(mat, False)
logger.info("非标物料确认完成,共计" +
str(len(self.nstd_mat_list)) + "个非标物料。")
except pyrfc.CommunicationError:
logger.error("无法连接服务器")
return -1
except pyrfc.LogonError:
logger.error("无法登陆,帐户密码错误!")
return -1
except (pyrfc.ABAPApplicationError, pyrfc.ABAPRuntimeError):
logger.error("函数执行错误。")
return -1
conn.close()
return len(self.nstd_mat_list)
def mark_nstd_mat(self, mat, non=True):
re = mat_info.get(mat_info.mat_no == mat)
if re.is_nonstd == non:
return 0
else:
q = mat_info.update(is_nonstd=non).where(mat_info.mat_no == mat)
r = q.execute()
if r > 0:
self.change_log('mat_info', 'is_nonstd', mat, (not non), non)
return r
def multi_find(self):
d = ask_list('物料拷贝器', 2)
if not d:
logger.warning('物料清单不能为空,请务必填写物料号')
return
self.mat_tops = {}
self.mat_items = {}
self.mat_list = {}
self.bom_items = []
self.mat_branch = []
self.nstd_mat_list = []
for row in self.mat_tree.get_children():
self.mat_tree.delete(row)
logger.info('开始搜索匹配的物料号...')
res = mat_info.select(mat_info, bom_header.struct_code, bom_header.bom_id, bom_header.revision, bom_header.is_active).join(bom_header, on=(bom_header.mat_no == mat_info.mat_no)).where((mat_info.mat_no.in_(d)) & (bom_header.is_active == True))\
.order_by(mat_info.mat_no.asc()).naive()
if not res:
logger.warning("没有与搜索条件匹配的物料BOM.")
return
self.get_res(res)
def search(self, event=None):
if len(self.find_var.get()) == 0:
logger.warning("物料号不能为空,请务必填写物料号")
return
self.mat_tops = {}
self.mat_items = {}
self.mat_list = {}
self.bom_items = []
self.mat_branch = []
self.nstd_mat_list = []
for row in self.mat_tree.get_children():
self.mat_tree.delete(row)
logger.info('开始搜索匹配的物料号...')
if len(self.version_var.get()) == 0:
res = mat_info.select(mat_info, bom_header.struct_code, bom_header.bom_id, bom_header.revision, bom_header.is_active).join(bom_header, on=(bom_header.mat_no == mat_info.mat_no)).where((mat_info.mat_no.contains(self.find_var.get().strip())) & (bom_header.is_active == True))\
.order_by(mat_info.mat_no.asc()).naive()
else:
res = mat_info.select(mat_info, bom_header.bom_id, bom_header.struct_code, bom_header.revision, bom_header.is_active).join(bom_header, on=(bom_header.mat_no == mat_info.mat_no)).where((mat_info.mat_no.contains(self.find_var.get().strip())) & (bom_header.revision == self.version_var.get()) & (bom_header.is_active == True))\
.order_by(mat_info.mat_no.asc()).naive()
if not res:
logger.warning("没有与搜索条件匹配的物料BOM.")
return
self.get_res(res)
def get_res(self, res):
for l in res:
line = {}
re = {}
mat = none2str(l.mat_no)
rev = none2str(l.revision)
line[mat_heads[0]] = rev
line[mat_heads[1]] = mat
line[mat_heads[2]] = none2str(l.mat_name_cn)
line[mat_heads[3]] = none2str(l.mat_name_en)
line[mat_heads[4]] = none2str(l.drawing_no)
line[mat_heads[5]] = 0
line[mat_heads[6]] = none2str(l.mat_unit)
line[mat_heads[7]] = none2str(l.mat_material)
line[mat_heads[8]] = none2str(l.part_weight)
line[mat_heads[9]] = ''
#revision = none2str(l.revision)
#struct_code = none2str(l.struct_code)
re['revision'] = none2str(l.revision)
re['struct_code'] = none2str(l.struct_code)
# if len(struct_code)>0 and mat not in self.mat_tops:
# re['revision']=revision
# re['struct_code']=struct_code
rp_box = {}
if len(none2str(l.rp)) != 0 or len(none2str(l.box_code_sj)) != 0 or \
len(none2str(l.rp_zs)) != 0 or len(none2str(l.box_code_zs)) != 0:
lt = []
lt.append(none2str(l.rp))
lt.append(none2str(l.box_code_sj))
rp_box['2101'] = lt
lt = []
lt.append(none2str(l.rp_zs))
lt.append(none2str(l.box_code_zs))
rp_box['2001'] = lt
re['rp_box'] = rp_box
self.mat_tops[mat] = re
# else:
# rp_box=None
#re['rp_box'] = rp_box
# self.mat_tops[mat]=re
is_nstd = l.is_nonstd
if is_nstd and mat not in self.nstd_mat_list:
self.nstd_mat_list.append(mat)
if mat not in self.mat_items.keys():
self.mat_items[mat] = line
item = self.mat_tree.insert('', END, values=dict2list(line))
self.mat_list[item] = line
if self.get_sub_bom(item, mat, rev):
self.bom_items.append(item)
self.mat_branch.append(mat)
logger.info('正在与SAP匹配确认非标物料,请勿进行其他操作!')
self.run_check_in_sap()
def para_search(self, event=None):
if len(self.para_var.get()) == 0:
logger.warning("参数不能为空,请务必填写 参数")
return
self.mat_tops = {}
self.mat_items = {}
self.mat_list = {}
self.bom_items = []
self.mat_branch = []
self.nstd_mat_list = []
for row in self.mat_tree.get_children():
self.mat_tree.delete(row)
logger.info('开始搜索匹配的物料号...')
res = mat_info.select(mat_info, bom_header.bom_id, bom_header.struct_code, bom_header.revision, bom_header.is_active).join(bom_header, on=(bom_header.mat_no == mat_info.mat_no)).where((mat_info.mat_name_cn.contains(self.para_var.get()) | mat_info.comments.contains(self.para_var.get())) & (bom_header.is_active == True))\
.order_by(mat_info.mat_no.asc()).naive()
if not res:
logger.warning("没有与搜索条件匹配的物料号.")
return
for l in res:
line = {}
re = {}
mat = none2str(l.mat_no)
rev = none2str(l.revision)
line[mat_heads[0]] = rev
line[mat_heads[1]] = mat
line[mat_heads[2]] = none2str(l.mat_name_cn)
line[mat_heads[3]] = none2str(l.mat_name_en)
line[mat_heads[4]] = none2str(l.drawing_no)
line[mat_heads[5]] = 0
line[mat_heads[6]] = none2str(l.mat_unit)
line[mat_heads[7]] = none2str(l.mat_material)
line[mat_heads[8]] = none2str(l.part_weight)
line[mat_heads[9]] = ''
re['revision'] = none2str(l.revision)
re['struct_code'] = none2str(l.struct_code)
rp_box = {}
if len(none2str(l.rp)) != 0 or len(none2str(l.box_code_sj)) != 0 or \
len(none2str(l.rp_zs)) != 0 or len(none2str(l.box_code_zs)) != 0:
lt = []
lt.append(none2str(l.rp))
lt.append(none2str(l.box_code_sj))
rp_box['2101'] = lt
lt = []
lt.append(none2str(l.rp_zs))
lt.append(none2str(l.box_code_zs))
rp_box['2001'] = lt
re['rp_box'] = rp_box
self.mat_tops[mat] = re
if mat not in self.mat_items.keys():
self.mat_items[mat] = line
item = self.mat_tree.insert('', END, values=dict2list(line))
self.mat_list[item] = line
if self.get_sub_bom(item, mat, rev, False):
self.bom_items.append(item)
self.mat_branch.append(mat)
def get_sub_bom(self, item, mat, rev='', nstd_check=True):
r = bom_header.select(bom_header, bom_item, mat_info).join(bom_item, on=(bom_header.bom_id == bom_item.bom_id)).switch(bom_item).join(mat_info, on=(bom_item.component == mat_info.mat_no))\
.where((bom_header.mat_no == mat) & (bom_header.revision == rev) & (bom_header.is_active == True)).order_by(bom_item.index.asc()).naive()
if not r:
return False
logger.info('开始搜索物料:' + mat + '的下层BOM')
for l in r:
line = {}
re = {}
line[mat_heads[0]] = none2str(l.st_no)
mat = none2str(l.component)
line[mat_heads[1]] = mat
line[mat_heads[2]] = none2str(l.mat_name_cn)
line[mat_heads[3]] = none2str(l.mat_name_en)
line[mat_heads[4]] = none2str(l.drawing_no)
line[mat_heads[5]] = l.qty
line[mat_heads[6]] = none2str(l.mat_unit)
line[mat_heads[7]] = none2str(l.mat_material)
line[mat_heads[8]] = none2str(l.part_weight)
line[mat_heads[9]] = none2str(l.bom_remark)
if nstd_check == True:
is_nstd = l.is_nonstd
if is_nstd and mat not in self.nstd_mat_list:
self.nstd_mat_list.append(mat)
tree_item = self.mat_tree.insert(item, END, values=dict2list(line))
self.mat_list[tree_item] = line
if mat not in self.mat_items.keys():
self.mat_items[mat] = line
if self.get_sub_bom(tree_item, mat, '', nstd_check):
self.bom_items.append(tree_item)
self.mat_branch.append(mat)
logger.info('构建物料:' + mat + '下层BOM完成!')
return True
def check_sub_bom(self, mat, ver=''):
try:
bom_header.get((bom_header.mat_no == mat) &
(bom_header.revision == ver))
return True
except bom_header.DoesNotExist:
return False
def excel_import(self):
file_list = filedialog.askopenfilenames(
title="导入文件", filetypes=[('excel file', '.xlsx'), ('excel file', '.xlsm')])
if not file_list:
return
self.mat_list = {}
self.mat_pos = 0
self.mat_tops = {}
self.mat_items = {}
for row in self.mat_tree.get_children():
self.mat_tree.delete(row)
# for node in self.bom_tree.children(0):
# self.bom_tree.remove_node(node.identifier)
for file in file_list:
logger.info("正在读取文件:" + file + ",转换保存物料信息,同时构建数据Model")
c = self.read_excel_files(file)
logger.info("文件:" + file + "读取完成, 共计处理 " + str(c) + " 个物料。")
#df = pd.DataFrame(self.mat_list,index=mat_heads, columns=[ i for i in range(1, self.mat_pos+1)])
#model = TableModel(dataframe=df.T)
# self.mat_table.updateModel(model)
# self.mat_table.redraw()
logger.info("正在生成BOM层次结构...")
c = self.build_tree_struct()
logger.info("Bom结构生成完成,共为" + str(c) + "个发运层物料生成BOM.")
logger.info("正在保存BOM...")
c = self.save_mats_bom()
logger.info("共保存" + str(c) + "个物料BOM")
logger.info("正在核查非标物料...")
self.run_check_in_sap()
def save_mat_info(self, method=False, **para):
b_level = False
if para['mat_no'] in self.mat_tops.keys():
rp_box = self.mat_tops[para['mat_no']]['rp_box']
if rp_box is not None:
b_level = True
try:
mat_info.get(mat_info.mat_no == para['mat_no'])
if method:
if b_level:
q = mat_info.update(mat_name_en=para['mat_name_en'], mat_name_cn=para['mat_name_cn'], drawing_no=para['drawing_no'], mat_material=para['mat_material'], mat_unit=para['mat_unit'], rp=rp_box['2101'][0], box_code_sj=rp_box['2101'][1],
rp_zs=rp_box['2001'][0], box_code_zs=rp_box['2001'][1], mat_material_en=para['mat_material_en'], part_weight=para['part_weight'], comments=para['comments'], modify_by=login_info['uid'], modify_on=datetime.datetime.now()).where(mat_info.mat_no == para['mat_no'])
else:
q = mat_info.update(mat_name_en=para['mat_name_en'], mat_name_cn=para['mat_name_cn'], drawing_no=para['drawing_no'], mat_material=para['mat_material'], mat_unit=para['mat_unit'],
mat_material_en=para['mat_material_en'], part_weight=para['part_weight'], comments=para['comments'], modify_by=login_info['uid'], modify_on=datetime.datetime.now()).where(mat_info.mat_no == para['mat_no'])
return q.execute()
except mat_info.DoesNotExist:
if b_level:
q = mat_info.insert(mat_no=para['mat_no'], mat_name_en=para['mat_name_en'], mat_name_cn=para['mat_name_cn'], drawing_no=para['drawing_no'], mat_material=para['mat_material'], mat_unit=para['mat_unit'],
mat_material_en=para['mat_material_en'], part_weight=para['part_weight'], rp=rp_box['2101'][0], box_code_sj=rp_box['2101'][1], rp_zs=rp_box['2001'][0], box_code_zs=rp_box['2001'][1], comments=para['comments'], modify_by=login_info['uid'], modify_on=datetime.datetime.now())
else:
q = mat_info.insert(mat_no=para['mat_no'], mat_name_en=para['mat_name_en'], mat_name_cn=para['mat_name_cn'], drawing_no=para['drawing_no'], mat_material=para['mat_material'], mat_unit=para['mat_unit'],
mat_material_en=para['mat_material_en'], part_weight=para['part_weight'], comments=para['comments'], modify_by=login_info['uid'], modify_on=datetime.datetime.now())
return q.execute()
return 0
def check_branch(self, item):
mat = self.mat_tree.item(item, "values")[1]
for li in self.bom_items:
if mat == self.mat_tree.item(li, "values")[1]:
return False
self.bom_items.append(item)
self.mat_branch.append(mat)
return True
def save_bom_list(self, item):
it_list = self.mat_tree.item(item, "values")
mat = it_list[1]
drawing = it_list[4]
if mat in self.mat_tops.keys():
revision = self.mat_tops[mat]['revision']
st_code = self.mat_tops[mat]['struct_code']
else:
revision = ''
st_code = ''
try:
bom_header.get((bom_header.mat_no == mat) & (
bom_header.revision == revision) & (bom_header.is_active == True))
logger.warning(mat + "BOM已经存在,无需重新创建!")
return 0
except bom_header.DoesNotExist:
b_id = self.bom_id_generator()
q = bom_header.insert(bom_id=b_id, mat_no=mat, revision=revision, drawing_no=drawing, struct_code=st_code, is_active=True, plant=login_info['plant'],
modify_by=login_info['uid'], modify_on=datetime.datetime.now(), create_by=login_info['uid'], create_on=datetime.datetime.now())
q.execute()
children = self.mat_tree.get_children(item)
data = []
for child in children:
d_line = {}
d_line['bom_id'] = b_id
d_line['index'] = int(self.mat_tree.item(child, "values")[0])
d_line['st_no'] = self.mat_tree.item(child, "values")[0]
d_line['component'] = self.mat_tree.item(child, "values")[1]
d_line['qty'] = Decimal(self.mat_tree.item(child, "values")[5])
d_line['bom_remark'] = self.mat_tree.item(child, "values")[9]
d_line['parent_mat'] = mat
d_line['modify_by'] = login_info['uid']
d_line['modify_on'] = datetime.datetime.now()
d_line['create_by'] = login_info['uid']
d_line['create_on'] = datetime.datetime.now()
data.append(d_line)
q = bom_item.insert_many(data)
return q.execute()
def get_rp_boxid(self, struct, plant='2101'):
rp_box = {}
res = struct_gc_rel.select().where(struct_gc_rel.st_code == struct)
for r in res:
lt = []
lt.append(r.rp)
lt.append(r.box_code)
rp_box[r.plant] = lt
return rp_box
def save_mats_bom(self):
if len(self.bom_items) == 0:
return 0
i = 0
for item in self.bom_items:
if self.save_bom_list(item) > 0:
i += 1
return i
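    # build_tree_struct below rebuilds the treeview from self.mat_list using the
    # level encoded in each position code (see tree_level): a deeper level opens
    # a new parent node, a shallower one walks back up via mat_tree.parent().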
def build_tree_struct(self):
self.bom_items = []
self.mat_branch = []
if len(self.mat_list) == 0:
return 0
cur_level = 0
pre_level = 0
parent_node = self.mat_tree.insert(
'', END, values=dict2list(self.mat_list[1]))
counter = 0
cur_node = parent_node
self.check_branch(parent_node)
self.mat_tree.item(parent_node, open=True)
for i in range(1, self.mat_pos + 1):
cur_level = tree_level(self.mat_list[i][mat_heads[0]])
if cur_level == 0:
counter += 1
if (pre_level == cur_level) and pre_level != 0:
cur_node = self.mat_tree.insert(
parent_node, END, values=dict2list(self.mat_list[i]))
if pre_level < cur_level:
parent_node = cur_node
self.check_branch(parent_node)
cur_node = self.mat_tree.insert(
parent_node, END, values=dict2list(self.mat_list[i]))
if pre_level > cur_level:
while pre_level >= cur_level:
parent_node = self.mat_tree.parent(parent_node)
if pre_level != 0:
if len(self.mat_tree.item(parent_node, 'values'))==0:
pre_level=0
else:
pre_level = tree_level(
self.mat_tree.item(parent_node, 'values')[0])
else:
pre_level = -1
cur_node = self.mat_tree.insert(
parent_node, END, values=dict2list(self.mat_list[i]))
if cur_level == 0:
self.mat_tree.item(cur_node, open=True)
pre_level = cur_level
return counter
'''
def build_tree_struct(self):
if len(self.mat_list)==0:
return
cur_level=0
pre_level=0
parent_node=0
counter=0
for i in range(1, self.mat_pos+1):
cur_level = tree_level(self.mat_list[i][mat_heads[0]])
if cur_level==0:
counter+=1
if pre_level == cur_level:
self.bom_tree.create_node(i,i,parent_node)
if pre_level < cur_level:
parent_node = i-1
self.bom_tree.create_node(i,i,parent_node)
if pre_level > cur_level:
while pre_level > cur_level:
parent_node = self.bom_tree.parent(parent_node).identifier
pre_level = tree_level(self.mat_list[parent_node][mat_heads[0]])
self.bom_tree.create_node(i,i,parent_node)
pre_level = cur_level
return counter
'''
def is_num(self, t):
try:
int(t)
return True
except ValueError:
return False
    def addition_line_qty(self, ws):
        # Scan column A from row 3 until the first non-numeric cell, then
        # report how many data rows extend past the standard last row (38).
        b_finish = False
        i = 2
        while not b_finish:
            i = i + 1
            s = cell2str(ws.cell(row=i, column=1).value)
            if not self.is_num(s):
                b_finish = True
        if i - 1 > 38:
            return i - 38 - 1
        else:
            return 0
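    # Sketch of the intent (an assumption based on how i_add is used below): the
    # Excel template holds BOM rows up to row 38, so the return value is the
    # number of extra rows appended beyond the standard sheet, or 0 if none.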
def read_excel_files(self, file):
'''
        Return value:
        -2: failed to read the Excel file (no sheets)
        -1: the head material position is empty
        0: this revision of the head material already exists
        1:
        (in practice the method returns -2 on read failure or the number of materials processed)
'''
wb = load_workbook(file, read_only=True, data_only=True)
sheetnames = wb.get_sheet_names()
if len(sheetnames) == 0:
return -2
counter = 0
for i in range(0, len(sheetnames)):
if not str(sheetnames[i]).isdigit():
continue
ws = wb.get_sheet_by_name(sheetnames[i])
i_add = self.addition_line_qty(ws)
for j in range(1, 19+(i_add//2)):
mat_line = {}
mat_top_line = {}
mat = ''
mat_line[mat_heads[0]] = cell2str(
ws.cell(row=2 * j + 1, column=2).value)
mat = cell2str(ws.cell(row=2 * j + 1, column=5).value)
if len(mat) == 0:
break
mat_line[mat_heads[1]] = mat
mat_line[mat_heads[2]] = cell2str(
ws.cell(row=2 * j + 1, column=7).value)
mat_line[mat_heads[3]] = cell2str(
ws.cell(row=2 * j + 2, column=7).value)
mat_line[mat_heads[4]] = cell2str(
ws.cell(row=2 * j + 1, column=6).value)
qty = cell2str(ws.cell(row=2 * j + 1, column=3).value)
if len(qty) == 0:
continue
self.mat_pos += 1
counter += 1
mat_line[mat_heads[5]] = Decimal(qty)
mat_line[mat_heads[6]] = cell2str(
ws.cell(row=2 * j + 1, column=4).value)
mat_line[mat_heads[7]] = cell2str(
ws.cell(row=2 * j + 1, column=9).value)
material_en = cell2str(ws.cell(row=2 * j + 2, column=9).value)
weight = cell2str(ws.cell(row=2 * j + 1, column=10).value)
if len(weight) == 0:
mat_line[mat_heads[8]] = 0
else:
mat_line[mat_heads[8]] = Decimal(weight)
mat_line[mat_heads[9]] = cell2str(
ws.cell(row=2 * j + 1, column=11).value)
len_of_st = len(mat_line[mat_heads[0]])
str_code = cell2str(ws.cell(row=39+i_add, column=12).value)
if len_of_st <= 1:
if len_of_st == 0:
mat_top_line['revision'] = cell2str(
ws.cell(row=43+i_add, column=8).value)
mat_top_line['struct_code'] = str_code
else:
mat_top_line['revision'] = ''
mat_top_line['struct_code'] = ''
rp_box = self.get_rp_boxid(str_code)
mat_top_line['rp_box'] = rp_box
self.mat_tops[mat_line[mat_heads[1]]] = mat_top_line
                # save the basic material information
if self.save_mat_info(True, mat_no=mat_line[mat_heads[1]], mat_name_en=mat_line[mat_heads[3]], mat_name_cn=mat_line[mat_heads[2]], drawing_no=mat_line[mat_heads[4]], mat_material=mat_line[mat_heads[7]], mat_unit=mat_line[mat_heads[6]],
mat_material_en=material_en, part_weight=mat_line[mat_heads[8]], comments=mat_line[mat_heads[9]]) == 0:
logger.info(mat_line[mat_heads[1]] + '数据库中已经存在,故没有保存')
else:
logger.info(mat_line[mat_heads[1]] + '保存成功。')
self.mat_list[self.mat_pos] = mat_line
if mat not in self.mat_items.keys():
self.mat_items[mat] = mat_line
return counter
def bom_id_generator(self):
try:
bom_res = id_generator.get(id_generator.id == 1)
except id_generator.DoesNotExist:
return None
pre_char = none2str(bom_res.pre_character)
fol_char = none2str(bom_res.fol_character)
c_len = bom_res.id_length
cur_id = bom_res.current
step = bom_res.step
new_id = str(cur_id + step)
        # new BOM id = prefix + id zero-padded to c_len digits + suffix
id_char = pre_char + new_id.zfill(c_len) + fol_char
q = id_generator.update(
current=cur_id + step).where(id_generator.id == 1)
q.execute()
return id_char
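    # Illustrative run (values assumed, not taken from the real id_generator row):
    # with pre_character='B', fol_character='', id_length=8, current=41, step=1,
    # the next call yields 'B00000042' and bumps `current` to 42.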
def change_log(self, table, section, key, old, new):
q = s_change_log.insert(table_name=table, change_section=section, key_word=str(key), old_value=str(
old), new_value=str(new), log_on=datetime.datetime.now(), log_by=login_info['uid'])
q.execute()
| unlicense |
lenovor/scikit-learn | sklearn/utils/fixes.py | 133 | 12882 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
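# Illustrative sketch (not part of the original scikit-learn module): whichever
# branch defines expit, it should agree with the closed form 1 / (1 + exp(-x)).
def _example_expit_usage():
    x = np.array([-2.0, 0.0, 2.0])
    assert np.allclose(expit(x), 1.0 / (1.0 + np.exp(-x)))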
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
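# Illustrative sketch (not part of the original scikit-learn module): the
# divide name behaves like ordinary division for scalars and arrays.
def _example_divide_usage():
    assert divide(1.0, 4.0) == 0.25
    assert np.allclose(divide(np.array([1.0, 3.0]), 2.0), [0.5, 1.5])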
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
# in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
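# Illustrative sketch (not part of the original scikit-learn module):
# sparse_min_max returns per-axis minima and maxima as dense 1d arrays,
# with implicit zeros taken into account.
def _example_sparse_min_max_usage():
    X = sp.csr_matrix(np.array([[1.0, 0.0], [3.0, 2.0]]))
    mins, maxes = sparse_min_max(X, axis=0)
    assert np.array_equal(mins, [1.0, 0.0])
    assert np.array_equal(maxes, [3.0, 2.0])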
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
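# Illustrative sketch (not part of the original scikit-learn module): matches
# the documented behaviour for 'ABC' taken two at a time.
def _example_combinations_with_replacement_usage():
    result = list(combinations_with_replacement('ABC', 2))
    assert result == [('A', 'A'), ('A', 'B'), ('A', 'C'),
                      ('B', 'B'), ('B', 'C'), ('C', 'C')]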
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
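# Illustrative sketch (not part of the original scikit-learn module): isclose
# compares element-wise within a relative and an absolute tolerance.
def _example_isclose_usage():
    assert bool(np.all(isclose([1.0, 2.0], [1.0 + 1e-9, 2.0])))
    assert not bool(np.all(isclose(1.0, 1.1)))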
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
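# Illustrative sketch (not part of the original scikit-learn module): in1d is
# an element-wise membership test, optionally inverted.
def _example_in1d_usage():
    a = np.array([1, 2, 3, 4])
    b = np.array([2, 4])
    assert np.array_equal(in1d(a, b), [False, True, False, True])
    assert np.array_equal(in1d(a, b, invert=True), [True, False, True, False])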
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
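# Illustrative sketch (not part of the original scikit-learn module): with
# exist_ok=True a second call on the same path must not raise.
def _example_makedirs_exist_ok():
    import tempfile
    base = tempfile.mkdtemp()
    target = os.path.join(base, "a", "b")
    makedirs(target, exist_ok=True)
    makedirs(target, exist_ok=True)  # second call is a no-op, not an OSError
    assert os.path.isdir(target)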
| bsd-3-clause |
paulgradie/SeqPyPlot | main_app/seqpyplot/plot/de_tally_plotter.py | 1 | 4840 | import os
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rcParams
import seaborn as sns
from seqpyplot.plot.base.plot_base import PlotBase
from tqdm import tqdm
from ..analyzer.paired_sample_filter import PairedSampleFilter
class TallyDe(PlotBase):
def __init__(
self,
output_dir,
container_obj,
experiment_name,
log2fold,
expression_max,
expression_min,
min_diff,
max_diff,
file_name_pairs,
time_point_names
):
plt.close()
super().__init__()
self.cutoffs = [x/100. for x in range(200) if x % 5 == 0][1:]
self.container_obj = container_obj
self.output_dir = output_dir
self.experiment_name = experiment_name
self.log2fold = log2fold
self.expression_min = expression_min
self.expression_max = expression_max
self.diff = [min_diff, max_diff]
self.file_name_pairs = file_name_pairs
self.time_point_names = time_point_names
def compute_tally(self, input_df_list):
y_values = []
print("\nIterating over log2fold cutoff values... ")
for idx, cutoff in tqdm(enumerate(self.cutoffs), total=len(self.cutoffs)):
analyzer = PairedSampleFilter(
log2fold=cutoff,
expression_min=self.expression_min,
expression_max=self.expression_max,
min_diff=self.diff[0],
max_diff=self.diff[1],
time_point_names=self.time_point_names,
file_name_pairs=self.file_name_pairs
)
_ = analyzer.main_filter_process(input_df_list)
# print temp_de_count
y_values.append(len(analyzer.complete_de_gene_list))
text = "{} of {}.".format(idx + 1, len(self.cutoffs)+1)
# print('{:^43}'.format(text))
return y_values
def set_figure(self, handles):
# create the figure for this plot
fig = plt.figure(num=1,
figsize=(7, 7),
dpi=600,
edgecolor='black',
frameon=False,
)
# set the figure title
fig.suptitle("DE genes Detected vs log2Fold cutoff",
verticalalignment='top',
horizontalalignment='center',
fontsize=12,
x=0.315
)
labels = [
" ".join(['Upper:', str(self.expression_max)]),
" ".join(['Lower:', str(self.expression_min)]),
" ".join(['Dif:', str(self.diff[0]), '-', str(self.diff[1])])
]
fig.legend(handles=handles,
labels=(labels),
loc='upper right')
rcParams['legend.frameon'] = 'False'
return fig
def create_subplot(self, ax, y_values):
ax.plot(self.cutoffs,
y_values,
'bo',
color="black",
marker='o',
linewidth=1.4,
linestyle="-",
dash_capstyle='round',
dash_joinstyle='bevel',
label="DE cutoffs by log2fold",
fillstyle='full',
markeredgecolor='black',
markerfacecolor='white',
markeredgewidth=.95,
markersize=6)
return ax
def format_tally_plot(self, ax):
# xlim is the length of the label list
ax.set_xlim(0, 3)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['bottom'].set_position(('data', 0))
ax.set_ylabel('Number of DE genes Detected')
ax.set_xlabel('log2Fold Cutoff')
return ax
def save_fig(self, fig):
path_ = os.path.join(self.output_dir, self.experiment_name)
file_name = "_".join([path_, "_DE_Tally_Plot.png"])
fig.savefig(file_name, format='png', bbox_inches='tight')
plt.close(fig)
def save_plot(self, fig):
path_ = os.path.join(self.output_dir, self.experiment_name)
file_name = "_".join([path_,"DE_Tally_Plot.png"])
fig.savefig(file_name, format='png', bbox_inches='tight')
return fig
def create_tally_plot(self, input_df_list):
handles = [self.set_line() for _ in range(3)]
fig = self.set_figure(handles)
ax = plt.subplot()
y_values = self.compute_tally(input_df_list)
ax = self.create_subplot(ax, y_values)
ax = self.format_tally_plot(ax)
self.save_plot(fig)
plt.close()
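# Illustrative sketch (not part of the original SeqPyPlot module): the cutoff
# grid built in TallyDe.__init__ spans 0.05 to 1.95 in steps of 0.05.
def _example_log2fold_cutoff_grid():
    cutoffs = [x / 100. for x in range(200) if x % 5 == 0][1:]
    assert cutoffs[0] == 0.05
    assert cutoffs[-1] == 1.95
    assert len(cutoffs) == 39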
| gpl-3.0 |
rvraghav93/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 35 | 7372 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
jreback/pandas | pandas/tests/frame/methods/test_sort_index.py | 1 | 29215 | import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
IntervalIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDataFrameSortIndex:
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame(
{"value": [1, 2, 3, 4]},
index=MultiIndex(
levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame(
{"value": [2, 1, 4, 3]},
index=MultiIndex(
levels=[["a", "b"], ["aa", "bb"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_non_existent_label_multiindex(self):
# GH#12261
df = DataFrame(0, columns=[], index=MultiIndex.from_product([[], []]))
df.loc["b", "2"] = 1
df.loc["a", "3"] = 1
result = df.sort_index().index.is_monotonic
assert result is True
def test_sort_index_reorder_on_ops(self):
# GH#15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["red", "blu"]],
names=["letter", "size", "color"],
),
columns=["near", "far"],
)
df = df.sort_index()
def my_func(group):
group.index = ["newz", "newa"]
return group
result = df.groupby(level=["letter", "size"]).apply(my_func).sort_index()
expected = MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["newa", "newz"]],
names=["letter", "size", None],
)
tm.assert_index_equal(result.index, expected)
def test_sort_index_nan_multiindex(self):
# GH#14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4), index=mi, columns=list("ABCD"))
s = Series(np.arange(4), index=mi)
df2 = DataFrame(
{
"date": pd.DatetimeIndex(
[
"20121002",
"20121007",
"20130130",
"20130202",
"20130305",
"20121002",
"20121207",
"20130130",
"20130202",
"20130305",
"20130202",
"20130305",
]
),
"user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
"whole_cost": [
1790,
np.nan,
280,
259,
np.nan,
623,
90,
312,
np.nan,
301,
359,
801,
],
"cost": [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12],
}
).set_index(["date", "user_id"])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position="last")
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position="first")
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position="last")
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position="first")
expected = s.iloc[[1, 2, 3, 0]]
tm.assert_series_equal(result, expected)
def test_sort_index_nan(self):
# GH#3917
# Test DataFrame with nan label
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=True, na_position="last")
expected = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position="first")
expected = DataFrame(
{"A": [4, 1, 2, np.nan, 1, 6, 8], "B": [5, 9, np.nan, 5, 2, 5, 4]},
index=[np.nan, 1, 2, 3, 4, 5, 6],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=False)
expected = DataFrame(
{"A": [8, 6, 1, np.nan, 2, 1, 4], "B": [4, 5, 2, 5, np.nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind="quicksort", ascending=False, na_position="first"
)
expected = DataFrame(
{"A": [4, 8, 6, 1, np.nan, 2, 1], "B": [5, 4, 5, 2, 5, np.nan, 9]},
index=[np.nan, 6, 5, 4, 3, 2, 1],
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_index_multi_index(self):
# GH#25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")}
)
result = df.set_index(list("abc")).sort_index(level=list("ba"))
expected = DataFrame(
{"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")}
)
expected = expected.set_index(list("abc"))
tm.assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered["A"])
df = unordered.copy()
return_value = df.sort_index(inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
assert a_id != id(df["A"])
df = unordered.copy()
return_value = df.sort_index(ascending=False, inplace=True)
assert return_value is None
expected = frame[::-1]
tm.assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ["D", "B", "C", "A"]]
df = unordered.copy()
return_value = df.sort_index(axis=1, inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
df = unordered.copy()
return_value = df.sort_index(axis=1, ascending=False, inplace=True)
assert return_value is None
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
# test with multiindex, too
idf = df.set_index(["A", "B"])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
tm.assert_frame_equal(result, expected)
# also, Series!
result = idf["C"].sort_index(ascending=[1, 0])
tm.assert_series_equal(result, expected["C"])
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
df = DataFrame([[1, 2], [3, 4]], mi)
result = df.sort_index(level="A", sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["A", "B"], sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
# Error thrown by sort_index when
# first index is sorted last (GH#26053)
result = df.sort_index(level=["C", "B", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["B", "C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
def test_sort_index_categorical_index(self):
df = DataFrame(
{
"A": np.arange(6, dtype="int64"),
"B": Series(list("aabbca")).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[2, 3, 0, 1, 5, 4]]
tm.assert_frame_equal(result, expected)
def test_sort_index(self):
# GH#13496
frame = DataFrame(
np.arange(16).reshape(4, 4),
index=[1, 2, 3, 4],
columns=["A", "B", "C", "D"],
)
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
tm.assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
tm.assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
tm.assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("level", ["A", 0]) # GH#21052
def test_sort_index_multiindex(self, level):
# GH#13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples(
[[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list("ABC")
)
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 2], [2, 1, 3]], names=list("ABC")
)
expected = DataFrame([[5, 6], [3, 4], [1, 2]], index=expected_mi)
result = df.sort_index(level=level)
tm.assert_frame_equal(result, expected)
# sort_remaining=False
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 3], [2, 1, 2]], names=list("ABC")
)
expected = DataFrame([[5, 6], [1, 2], [3, 4]], index=expected_mi)
result = df.sort_index(level=level, sort_remaining=False)
tm.assert_frame_equal(result, expected)
def test_sort_index_intervalindex(self):
# this is a de-facto sort via unstack
# confirming that we sort in the order of the bins
y = Series(np.random.randn(100))
x1 = Series(np.sign(np.random.randn(100)))
x2 = pd.cut(Series(np.random.randn(100)), bins=[-3, -0.5, 0, 0.5, 3])
model = pd.concat([y, x1, x2], axis=1, keys=["Y", "X1", "X2"])
result = model.groupby(["X1", "X2"], observed=True).mean().unstack()
expected = IntervalIndex.from_tuples(
[(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], closed="right"
)
result = result.columns.levels[1].categories
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, False, [5, 3, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, False, [2, 3, 5]),
],
)
def test_sort_index_ignore_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
# GH 30114
original_index = [2, 5, 3]
df = DataFrame(original_dict, index=original_index)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=original_index))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
False,
MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB")),
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
False,
MultiIndex.from_tuples([[3, 4], [2, 1]], names=list("AB")),
),
],
)
def test_sort_index_ignore_index_multi_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
# GH 30114, this is to test ignore_index on a MultiIndex index
mi = MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB"))
df = DataFrame(original_dict, index=mi)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=mi))
def test_sort_index_categorical_multiindex(self):
# GH#15058
df = DataFrame(
{
"a": range(6),
"l1": pd.Categorical(
["a", "a", "b", "b", "c", "c"],
categories=["c", "a", "b"],
ordered=True,
),
"l2": [0, 1, 0, 1, 0, 1],
}
)
result = df.set_index(["l1", "l2"]).sort_index()
expected = DataFrame(
[4, 5, 0, 1, 2, 3],
columns=["a"],
index=MultiIndex(
levels=[
CategoricalIndex(
["c", "a", "b"],
categories=["c", "a", "b"],
ordered=True,
name="l1",
dtype="category",
),
[0, 1],
],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=["l1", "l2"],
),
)
tm.assert_frame_equal(result, expected)
def test_sort_index_and_reconstruction(self):
# GH#15622
# lexsortedness should be identical
# across MultiIndex construction methods
df = DataFrame([[1, 1], [2, 2]], index=list("ab"))
expected = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_tuples(
[(0.5, "a"), (0.5, "b"), (0.8, "a"), (0.8, "b")]
),
)
assert expected.index.is_lexsorted()
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_product([[0.5, 0.8], list("ab")]),
)
result = result.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex(
levels=[[0.5, 0.8], ["a", "b"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = result.sort_index()
assert result.index.is_lexsorted()
tm.assert_frame_equal(result, expected)
concatted = pd.concat([df, df], keys=[0.8, 0.5])
result = concatted.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# GH#14015
df = DataFrame(
[[1, 2], [6, 7]],
columns=MultiIndex.from_tuples(
[(0, "20160811 12:00:00"), (0, "20160809 12:00:00")],
names=["l1", "Date"],
),
)
df.columns = df.columns.set_levels(
pd.to_datetime(df.columns.levels[1]), level=1
)
assert not df.columns.is_lexsorted()
assert not df.columns.is_monotonic
result = df.sort_index(axis=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
result = df.sort_index(axis=1, level=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
# TODO: better name, de-duplicate with test_sort_index_level above
def test_sort_index_level2(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
df = frame.copy()
df.index = np.arange(len(df))
# axis=1
# series
a_sorted = frame["A"].sort_index(level=0)
# preserve names
assert a_sorted.index.names == frame.index.names
# inplace
rs = frame.copy()
return_value = rs.sort_index(level=0, inplace=True)
assert return_value is None
tm.assert_frame_equal(rs, frame.sort_index(level=0))
def test_sort_index_level_large_cardinality(self):
# GH#2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int64)
# it works!
result = df.sort_index(level=0)
assert result.index.lexsort_depth == 3
# GH#2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int32)
# it works!
result = df.sort_index(level=0)
assert (result.dtypes.values == df.dtypes.values).all()
assert result.index.lexsort_depth == 3
def test_sort_index_level_by_name(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
frame.index.names = ["first", "second"]
result = frame.sort_index(level="second")
expected = frame.sort_index(level=1)
tm.assert_frame_equal(result, expected)
def test_sort_index_level_mixed(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
sorted_before = frame.sort_index(level=1)
df = frame.copy()
df["foo"] = "bar"
sorted_after = df.sort_index(level=1)
tm.assert_frame_equal(sorted_before, sorted_after.drop(["foo"], axis=1))
dft = frame.T
sorted_before = dft.sort_index(level=1, axis=1)
dft["foo", "three"] = "bar"
sorted_after = dft.sort_index(level=1, axis=1)
tm.assert_frame_equal(
sorted_before.drop([("foo", "three")], axis=1),
sorted_after.drop([("foo", "three")], axis=1),
)
def test_sort_index_preserve_levels(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.sort_index()
assert result.index.names == frame.index.names
@pytest.mark.parametrize(
"gen,extra",
[
([1.0, 3.0, 2.0, 5.0], 4.0),
([1, 3, 2, 5], 4),
(
[
Timestamp("20130101"),
Timestamp("20130103"),
Timestamp("20130102"),
Timestamp("20130105"),
],
Timestamp("20130104"),
),
(["1one", "3one", "2one", "5one"], "4one"),
],
)
def test_sort_index_multilevel_repr_8017(self, gen, extra):
np.random.seed(0)
data = np.random.randn(3, 4)
columns = MultiIndex.from_tuples([("red", i) for i in gen])
df = DataFrame(data, index=list("def"), columns=columns)
df2 = pd.concat(
[
df,
DataFrame(
"world",
index=list("def"),
columns=MultiIndex.from_tuples([("red", extra)]),
),
],
axis=1,
)
# check that the repr is good
# make sure that we have a correct sparsified repr
# e.g. only 1 header of 'red'
assert str(df2).splitlines()[0].split() == ["red"]
# GH 8017
# sorting fails after columns added
# construct single-dtype then sort
result = df.copy().sort_index(axis=1)
expected = df.iloc[:, [0, 2, 1, 3]]
tm.assert_frame_equal(result, expected)
result = df2.sort_index(axis=1)
expected = df2.iloc[:, [0, 2, 1, 4, 3]]
tm.assert_frame_equal(result, expected)
# setitem then sort
result = df.copy()
result[("red", extra)] = "world"
result = result.sort_index(axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"categories",
[
pytest.param(["a", "b", "c"], id="str"),
pytest.param(
[pd.Interval(0, 1), pd.Interval(1, 2), pd.Interval(2, 3)],
id="pd.Interval",
),
],
)
def test_sort_index_with_categories(self, categories):
# GH#23452
df = DataFrame(
{"foo": range(len(categories))},
index=CategoricalIndex(
data=categories, categories=categories, ordered=True
),
)
df.index = df.index.reorder_categories(df.index.categories[::-1])
result = df.sort_index()
expected = DataFrame(
{"foo": reversed(range(len(categories)))},
index=CategoricalIndex(
data=categories[::-1], categories=categories[::-1], ordered=True
),
)
tm.assert_frame_equal(result, expected)
class TestDataFrameSortIndexKey:
def test_sort_multi_index_key(self):
# GH 25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")}
).set_index(list("abc"))
result = df.sort_index(level=list("ac"), key=lambda x: x)
expected = DataFrame(
{"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")}
).set_index(list("abc"))
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=list("ac"), key=lambda x: -x)
expected = DataFrame(
{"a": [3, 2, 1], "b": [0, 0, 0], "c": [0, 2, 1], "d": list("acb")}
).set_index(list("abc"))
tm.assert_frame_equal(result, expected)
def test_sort_index_key(self): # issue 27237
df = DataFrame(np.arange(6, dtype="int64"), index=list("aaBBca"))
result = df.sort_index()
expected = df.iloc[[2, 3, 0, 1, 5, 4]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(key=lambda x: x.str.lower())
expected = df.iloc[[0, 1, 5, 2, 3, 4]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(key=lambda x: x.str.lower(), ascending=False)
expected = df.iloc[[4, 2, 3, 0, 1, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_index_key_int(self):
df = DataFrame(np.arange(6, dtype="int64"), index=np.arange(6, dtype="int64"))
result = df.sort_index()
tm.assert_frame_equal(result, df)
result = df.sort_index(key=lambda x: -x)
expected = df.sort_index(ascending=False)
tm.assert_frame_equal(result, expected)
result = df.sort_index(key=lambda x: 2 * x)
tm.assert_frame_equal(result, df)
def test_sort_multi_index_key_str(self):
# GH 25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": ["B", "a", "C"], "b": [0, 1, 0], "c": list("abc"), "d": [0, 1, 2]}
).set_index(list("abc"))
result = df.sort_index(level="a", key=lambda x: x.str.lower())
expected = DataFrame(
{"a": ["a", "B", "C"], "b": [1, 0, 0], "c": list("bac"), "d": [1, 0, 2]}
).set_index(list("abc"))
tm.assert_frame_equal(result, expected)
result = df.sort_index(
level=list("abc"), # can refer to names
key=lambda x: x.str.lower() if x.name in ["a", "c"] else -x,
)
expected = DataFrame(
{"a": ["a", "B", "C"], "b": [1, 0, 0], "c": list("bac"), "d": [1, 0, 2]}
).set_index(list("abc"))
tm.assert_frame_equal(result, expected)
def test_changes_length_raises(self):
df = DataFrame({"A": [1, 2, 3]})
with pytest.raises(ValueError, match="change the shape"):
df.sort_index(key=lambda x: x[:1])
def test_sort_index_multiindex_sparse_column(self):
# GH 29735, testing that sort_index on a multiindexed frame with sparse
# columns fills with 0.
expected = DataFrame(
{
i: pd.array([0.0, 0.0, 0.0, 0.0], dtype=pd.SparseDtype("float64", 0.0))
for i in range(0, 4)
},
index=MultiIndex.from_product([[1, 2], [1, 2]]),
)
result = expected.sort_index(level=0)
tm.assert_frame_equal(result, expected)
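# Illustrative sketch (not part of the original pandas test suite): the key
# callable receives the Index and must return an equally shaped Index used
# only for ordering; the original labels are preserved in the result.
def _example_sort_index_key_usage():
    df = DataFrame({"x": [1, 2, 3]}, index=["b", "A", "c"])
    result = df.sort_index(key=lambda idx: idx.str.lower())
    tm.assert_index_equal(result.index, Index(["A", "b", "c"]))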
| bsd-3-clause |
YinongLong/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
alekz112/statsmodels | statsmodels/stats/tests/test_weightstats.py | 30 | 21864 | '''tests for weightstats, compares with replication
no failures but needs cleanup
update 2012-09-09:
added test after fixing bug in covariance
TODOs:
- I don't remember what all the commented out code is doing
- should be refactored to use generator or inherited tests
- still gaps in test coverage
- value/diff in ttest_ind is tested in test_tost.py
- what about pandas data structures?
Author: Josef Perktold
License: BSD (3-clause)
'''
import numpy as np
from scipy import stats
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose
from statsmodels.stats.weightstats import \
DescrStatsW, CompareMeans, ttest_ind, ztest, zconfint
#import statsmodels.stats.weightstats as smws
class Holder(object):
pass
class TestWeightstats(object):
def __init__(self):
np.random.seed(9876789)
n1, n2 = 20,20
m1, m2 = 1, 1.2
x1 = m1 + np.random.randn(n1)
x2 = m2 + np.random.randn(n2)
x1_2d = m1 + np.random.randn(n1, 3)
x2_2d = m2 + np.random.randn(n2, 3)
w1_ = 2. * np.ones(n1)
w2_ = 2. * np.ones(n2)
w1 = np.random.randint(1,4, n1)
w2 = np.random.randint(1,4, n2)
self.x1, self.x2 = x1, x2
self.w1, self.w2 = w1, w2
self.x1_2d, self.x2_2d = x1_2d, x2_2d
def test_weightstats_1(self):
x1, x2 = self.x1, self.x2
w1, w2 = self.w1, self.w2
w1_ = 2. * np.ones(len(x1))
w2_ = 2. * np.ones(len(x2))
d1 = DescrStatsW(x1)
# print ttest_ind(x1, x2)
# print ttest_ind(x1, x2, usevar='unequal')
# #print ttest_ind(x1, x2, usevar='unequal')
# print stats.ttest_ind(x1, x2)
# print ttest_ind(x1, x2, usevar='unequal', alternative='larger')
# print ttest_ind(x1, x2, usevar='unequal', alternative='smaller')
# print ttest_ind(x1, x2, usevar='unequal', weights=(w1_, w2_))
# print stats.ttest_ind(np.r_[x1, x1], np.r_[x2,x2])
assert_almost_equal(ttest_ind(x1, x2, weights=(w1_, w2_))[:2],
stats.ttest_ind(np.r_[x1, x1], np.r_[x2,x2]))
def test_weightstats_2(self):
x1, x2 = self.x1, self.x2
w1, w2 = self.w1, self.w2
d1 = DescrStatsW(x1)
d1w = DescrStatsW(x1, weights=w1)
d2w = DescrStatsW(x2, weights=w2)
x1r = d1w.asrepeats()
x2r = d2w.asrepeats()
# print 'random weights'
# print ttest_ind(x1, x2, weights=(w1, w2))
# print stats.ttest_ind(x1r, x2r)
assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2],
stats.ttest_ind(x1r, x2r), 14)
#not the same as new version with random weights/replication
# assert x1r.shape[0] == d1w.sum_weights
# assert x2r.shape[0] == d2w.sum_weights
assert_almost_equal(x2r.mean(0), d2w.mean, 14)
assert_almost_equal(x2r.var(), d2w.var, 14)
assert_almost_equal(x2r.std(), d2w.std, 14)
#note: the following is for 1d
assert_almost_equal(np.cov(x2r, bias=1), d2w.cov, 14)
#assert_almost_equal(np.corrcoef(np.x2r), d2w.corrcoef, 19)
#TODO: exception in corrcoef (scalar case)
#one-sample tests
# print d1.ttest_mean(3)
# print stats.ttest_1samp(x1, 3)
# print d1w.ttest_mean(3)
# print stats.ttest_1samp(x1r, 3)
assert_almost_equal(d1.ttest_mean(3)[:2], stats.ttest_1samp(x1, 3), 11)
assert_almost_equal(d1w.ttest_mean(3)[:2], stats.ttest_1samp(x1r, 3), 11)
def test_weightstats_3(self):
x1_2d, x2_2d = self.x1_2d, self.x2_2d
w1, w2 = self.w1, self.w2
d1w_2d = DescrStatsW(x1_2d, weights=w1)
d2w_2d = DescrStatsW(x2_2d, weights=w2)
x1r_2d = d1w_2d.asrepeats()
x2r_2d = d2w_2d.asrepeats()
assert_almost_equal(x2r_2d.mean(0), d2w_2d.mean, 14)
assert_almost_equal(x2r_2d.var(0), d2w_2d.var, 14)
assert_almost_equal(x2r_2d.std(0), d2w_2d.std, 14)
assert_almost_equal(np.cov(x2r_2d.T, bias=1), d2w_2d.cov, 14)
assert_almost_equal(np.corrcoef(x2r_2d.T), d2w_2d.corrcoef, 14)
# print d1w_2d.ttest_mean(3)
# #scipy.stats.ttest is also vectorized
# print stats.ttest_1samp(x1r_2d, 3)
t,p,d = d1w_2d.ttest_mean(3)
assert_almost_equal([t, p], stats.ttest_1samp(x1r_2d, 3), 11)
#print [stats.ttest_1samp(xi, 3) for xi in x1r_2d.T]
cm = CompareMeans(d1w_2d, d2w_2d)
ressm = cm.ttest_ind()
resss = stats.ttest_ind(x1r_2d, x2r_2d)
assert_almost_equal(ressm[:2], resss, 14)
## #doesn't work for 2d, levene doesn't use weights
## cm = CompareMeans(d1w_2d, d2w_2d)
## ressm = cm.test_equal_var()
## resss = stats.levene(x1r_2d, x2r_2d)
## assert_almost_equal(ressm[:2], resss, 14)
def test_weightstats_ddof_tests(self):
# explicit test that ttest and confint are independent of ddof
# one sample case
x1_2d = self.x1_2d
w1 = self.w1
d1w_d0 = DescrStatsW(x1_2d, weights=w1, ddof=0)
d1w_d1 = DescrStatsW(x1_2d, weights=w1, ddof=1)
d1w_d2 = DescrStatsW(x1_2d, weights=w1, ddof=2)
#check confint independent of user ddof
res0 = d1w_d0.ttest_mean()
res1 = d1w_d1.ttest_mean()
res2 = d1w_d2.ttest_mean()
# concatenate into one array with np.r_
assert_almost_equal(np.r_[res1], np.r_[res0], 14)
assert_almost_equal(np.r_[res2], np.r_[res0], 14)
res0 = d1w_d0.ttest_mean(0.5)
res1 = d1w_d1.ttest_mean(0.5)
res2 = d1w_d2.ttest_mean(0.5)
assert_almost_equal(np.r_[res1], np.r_[res0], 14)
assert_almost_equal(np.r_[res2], np.r_[res0], 14)
#check confint independent of user ddof
res0 = d1w_d0.tconfint_mean()
res1 = d1w_d1.tconfint_mean()
res2 = d1w_d2.tconfint_mean()
assert_almost_equal(res1, res0, 14)
assert_almost_equal(res2, res0, 14)
class CheckWeightstats1dMixin(object):
def test_basic(self):
x1r = self.x1r
d1w = self.d1w
assert_almost_equal(x1r.mean(0), d1w.mean, 14)
assert_almost_equal(x1r.var(0, ddof=d1w.ddof), d1w.var, 14)
assert_almost_equal(x1r.std(0, ddof=d1w.ddof), d1w.std, 14)
var1 = d1w.var_ddof(ddof=1)
assert_almost_equal(x1r.var(0, ddof=1), var1, 14)
std1 = d1w.std_ddof(ddof=1)
assert_almost_equal(x1r.std(0, ddof=1), std1, 14)
assert_almost_equal(np.cov(x1r.T, bias=1-d1w.ddof), d1w.cov, 14)
#
#assert_almost_equal(np.corrcoef(x1r.T), d1w.corrcoef, 14)
def test_ttest(self):
x1r = self.x1r
d1w = self.d1w
assert_almost_equal(d1w.ttest_mean(3)[:2],
stats.ttest_1samp(x1r, 3), 11)
# def
# assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2],
# stats.ttest_ind(x1r, x2r), 14)
def test_ttest_2sample(self):
x1, x2 = self.x1, self.x2
x1r, x2r = self.x1r, self.x2r
w1, w2 = self.w1, self.w2
#Note: stats.ttest_ind handles 2d/nd arguments
res_sp = stats.ttest_ind(x1r, x2r)
assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2],
res_sp, 14)
#check correct ttest independent of user ddof
cm = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0),
DescrStatsW(x2, weights=w2, ddof=1))
assert_almost_equal(cm.ttest_ind()[:2], res_sp, 14)
cm = CompareMeans(DescrStatsW(x1, weights=w1, ddof=1),
DescrStatsW(x2, weights=w2, ddof=2))
assert_almost_equal(cm.ttest_ind()[:2], res_sp, 14)
cm0 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0),
DescrStatsW(x2, weights=w2, ddof=0))
cm1 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0),
DescrStatsW(x2, weights=w2, ddof=1))
cm2 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=1),
DescrStatsW(x2, weights=w2, ddof=2))
res0 = cm0.ttest_ind(usevar='unequal')
res1 = cm1.ttest_ind(usevar='unequal')
res2 = cm2.ttest_ind(usevar='unequal')
assert_almost_equal(res1, res0, 14)
assert_almost_equal(res2, res0, 14)
#check confint independent of user ddof
res0 = cm0.tconfint_diff(usevar='pooled')
res1 = cm1.tconfint_diff(usevar='pooled')
res2 = cm2.tconfint_diff(usevar='pooled')
assert_almost_equal(res1, res0, 14)
assert_almost_equal(res2, res0, 14)
res0 = cm0.tconfint_diff(usevar='unequal')
res1 = cm1.tconfint_diff(usevar='unequal')
res2 = cm2.tconfint_diff(usevar='unequal')
assert_almost_equal(res1, res0, 14)
assert_almost_equal(res2, res0, 14)
def test_confint_mean(self):
#compare confint_mean with ttest
d1w = self.d1w
alpha = 0.05
low, upp = d1w.tconfint_mean()
t, p, d = d1w.ttest_mean(low)
assert_almost_equal(p, alpha * np.ones(p.shape), 8)
t, p, d = d1w.ttest_mean(upp)
assert_almost_equal(p, alpha * np.ones(p.shape), 8)
t, p, d = d1w.ttest_mean(np.vstack((low, upp)))
assert_almost_equal(p, alpha * np.ones(p.shape), 8)
class CheckWeightstats2dMixin(CheckWeightstats1dMixin):
def test_corr(self):
x1r = self.x1r
d1w = self.d1w
assert_almost_equal(np.corrcoef(x1r.T), d1w.corrcoef, 14)
class TestWeightstats1d_ddof(CheckWeightstats1dMixin):
@classmethod
def setup_class(self):
np.random.seed(9876789)
n1, n2 = 20,20
m1, m2 = 1, 1.2
x1 = m1 + np.random.randn(n1, 1)
x2 = m2 + np.random.randn(n2, 1)
w1 = np.random.randint(1,4, n1)
w2 = np.random.randint(1,4, n2)
self.x1, self.x2 = x1, x2
self.w1, self.w2 = w1, w2
self.d1w = DescrStatsW(x1, weights=w1, ddof=1)
self.d2w = DescrStatsW(x2, weights=w2, ddof=1)
self.x1r = self.d1w.asrepeats()
self.x2r = self.d2w.asrepeats()
class TestWeightstats2d(CheckWeightstats2dMixin):
@classmethod
def setup_class(self):
np.random.seed(9876789)
n1, n2 = 20,20
m1, m2 = 1, 1.2
x1 = m1 + np.random.randn(n1, 3)
x2 = m2 + np.random.randn(n2, 3)
w1_ = 2. * np.ones(n1)
w2_ = 2. * np.ones(n2)
w1 = np.random.randint(1,4, n1)
w2 = np.random.randint(1,4, n2)
self.x1, self.x2 = x1, x2
self.w1, self.w2 = w1, w2
self.d1w = DescrStatsW(x1, weights=w1)
self.d2w = DescrStatsW(x2, weights=w2)
self.x1r = self.d1w.asrepeats()
self.x2r = self.d2w.asrepeats()
class TestWeightstats2d_ddof(CheckWeightstats2dMixin):
@classmethod
def setup_class(self):
np.random.seed(9876789)
n1, n2 = 20,20
m1, m2 = 1, 1.2
x1 = m1 + np.random.randn(n1, 3)
x2 = m2 + np.random.randn(n2, 3)
w1 = np.random.randint(1,4, n1)
w2 = np.random.randint(1,4, n2)
self.x1, self.x2 = x1, x2
self.w1, self.w2 = w1, w2
self.d1w = DescrStatsW(x1, weights=w1, ddof=1)
self.d2w = DescrStatsW(x2, weights=w2, ddof=1)
self.x1r = self.d1w.asrepeats()
self.x2r = self.d2w.asrepeats()
class TestWeightstats2d_nobs(CheckWeightstats2dMixin):
@classmethod
def setup_class(self):
np.random.seed(9876789)
n1, n2 = 20,30
m1, m2 = 1, 1.2
x1 = m1 + np.random.randn(n1, 3)
x2 = m2 + np.random.randn(n2, 3)
w1 = np.random.randint(1,4, n1)
w2 = np.random.randint(1,4, n2)
self.x1, self.x2 = x1, x2
self.w1, self.w2 = w1, w2
self.d1w = DescrStatsW(x1, weights=w1, ddof=0)
self.d2w = DescrStatsW(x2, weights=w2, ddof=1)
self.x1r = self.d1w.asrepeats()
self.x2r = self.d2w.asrepeats()
def test_ttest_ind_with_uneq_var():
#from scipy
# check vs. R
a = (1, 2, 3)
b = (1.1, 2.9, 4.2)
pr = 0.53619490753126731
tr = -0.68649512735572582
t, p, df = ttest_ind(a, b, usevar='unequal')
assert_almost_equal([t,p], [tr, pr], 13)
a = (1, 2, 3, 4)
pr = 0.84354139131608286
tr = -0.2108663315950719
t, p, df = ttest_ind(a, b, usevar='unequal')
assert_almost_equal([t,p], [tr, pr], 13)
def test_ztest_ztost():
# compare weightstats with separately tested proportion ztest ztost
import statsmodels.stats.proportion as smprop
x1 = [0, 1]
w1 = [5, 15]
res2 = smprop.proportions_ztest(15, 20., value=0.5)
d1 = DescrStatsW(x1, w1)
res1 = d1.ztest_mean(0.5)
assert_allclose(res1, res2, rtol=0.03, atol=0.003)
d2 = DescrStatsW(x1, np.array(w1)*21./20)
res1 = d2.ztest_mean(0.5)
assert_almost_equal(res1, res2, decimal=12)
res1 = d2.ztost_mean(0.4, 0.6)
res2 = smprop.proportions_ztost(15, 20., 0.4, 0.6)
assert_almost_equal(res1[0], res2[0], decimal=12)
x2 = [0, 1]
w2 = [10, 10]
#d2 = DescrStatsW(x1, np.array(w1)*21./20)
d2 = DescrStatsW(x2, w2)
res1 = ztest(d1.asrepeats(), d2.asrepeats())
res2 = smprop.proportions_chisquare(np.asarray([15, 10]),
np.asarray([20., 20]))
#TODO: check this is this difference expected?, see test_proportion
assert_allclose(res1[1], res2[1], rtol=0.03)
res1a = CompareMeans(d1, d2).ztest_ind()
assert_allclose(res1a[1], res2[1], rtol=0.03)
assert_almost_equal(res1a, res1, decimal=12)
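# Illustrative sketch (not part of the original statsmodels test suite):
# with integer weights, DescrStatsW matches plain statistics computed on the
# expanded (repeated) data, as the tests above exploit via asrepeats().
def _example_descrstatsw_weights():
    x = np.array([1.0, 2.0, 3.0])
    w = np.array([2, 1, 3])
    d = DescrStatsW(x, weights=w)
    x_rep = np.repeat(x, w)
    assert_almost_equal(d.mean, x_rep.mean(), 14)
    assert_almost_equal(d.var, x_rep.var(), 14)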
###### test for ztest and z confidence interval against R BSDA z.test
# Note: I needed to calculate the pooled standard deviation for R
# std = np.std(np.concatenate((x-x.mean(),y-y.mean())), ddof=2)
#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667)
#> cat_items(zt, "ztest.")
ztest_ = Holder()
ztest_.statistic = 6.55109865675183
ztest_.p_value = 5.711530850508982e-11
ztest_.conf_int = np.array([1.230415246535603, 2.280948389828034])
ztest_.estimate = np.array([7.01818181818182, 5.2625])
ztest_.null_value = 0
ztest_.alternative = 'two.sided'
ztest_.method = 'Two-sample z-Test'
ztest_.data_name = 'x and y'
#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, alternative="less")
#> cat_items(zt, "ztest_smaller.")
ztest_smaller = Holder()
ztest_smaller.statistic = 6.55109865675183
ztest_smaller.p_value = 0.999999999971442
ztest_smaller.conf_int = np.array([np.nan, 2.196499421109045])
ztest_smaller.estimate = np.array([7.01818181818182, 5.2625])
ztest_smaller.null_value = 0
ztest_smaller.alternative = 'less'
ztest_smaller.method = 'Two-sample z-Test'
ztest_smaller.data_name = 'x and y'
#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, alternative="greater")
#> cat_items(zt, "ztest_larger.")
ztest_larger = Holder()
ztest_larger.statistic = 6.55109865675183
ztest_larger.p_value = 2.855760072861813e-11
ztest_larger.conf_int = np.array([1.314864215254592, np.nan])
ztest_larger.estimate = np.array([7.01818181818182, 5.2625 ])
ztest_larger.null_value = 0
ztest_larger.alternative = 'greater'
ztest_larger.method = 'Two-sample z-Test'
ztest_larger.data_name = 'x and y'
#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=1, alternative="two.sided")
#> cat_items(zt, "ztest_mu.")
ztest_mu = Holder()
ztest_mu.statistic = 2.81972854805176
ztest_mu.p_value = 0.00480642898427981
ztest_mu.conf_int = np.array([1.230415246535603, 2.280948389828034])
ztest_mu.estimate = np.array([7.01818181818182, 5.2625])
ztest_mu.null_value = 1
ztest_mu.alternative = 'two.sided'
ztest_mu.method = 'Two-sample z-Test'
ztest_mu.data_name = 'x and y'
#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=1, alternative="greater")
#> cat_items(zt, "ztest_larger_mu.")
ztest_larger_mu = Holder()
ztest_larger_mu.statistic = 2.81972854805176
ztest_larger_mu.p_value = 0.002403214492139871
ztest_larger_mu.conf_int = np.array([1.314864215254592, np.nan])
ztest_larger_mu.estimate = np.array([7.01818181818182, 5.2625])
ztest_larger_mu.null_value = 1
ztest_larger_mu.alternative = 'greater'
ztest_larger_mu.method = 'Two-sample z-Test'
ztest_larger_mu.data_name = 'x and y'
#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=2, alternative="less")
#> cat_items(zt, "ztest_smaller_mu.")
ztest_smaller_mu = Holder()
ztest_smaller_mu.statistic = -0.911641560648313
ztest_smaller_mu.p_value = 0.1809787183191324
ztest_smaller_mu.conf_int = np.array([np.nan, 2.196499421109045])
ztest_smaller_mu.estimate = np.array([7.01818181818182, 5.2625])
ztest_smaller_mu.null_value = 2
ztest_smaller_mu.alternative = 'less'
ztest_smaller_mu.method = 'Two-sample z-Test'
ztest_smaller_mu.data_name = 'x and y'
#> zt = z.test(x, sigma.x=0.46436662631627995, mu=6.4, alternative="two.sided")
#> cat_items(zt, "ztest_mu_1s.")
ztest_mu_1s = Holder()
ztest_mu_1s.statistic = 4.415212090914452
ztest_mu_1s.p_value = 1.009110038015147e-05
ztest_mu_1s.conf_int = np.array([6.74376372125119, 7.29259991511245])
ztest_mu_1s.estimate = 7.01818181818182
ztest_mu_1s.null_value = 6.4
ztest_mu_1s.alternative = 'two.sided'
ztest_mu_1s.method = 'One-sample z-Test'
ztest_mu_1s.data_name = 'x'
#> zt = z.test(x, sigma.x=0.46436662631627995, mu=7.4, alternative="less")
#> cat_items(zt, "ztest_smaller_mu_1s.")
ztest_smaller_mu_1s = Holder()
ztest_smaller_mu_1s.statistic = -2.727042762035397
ztest_smaller_mu_1s.p_value = 0.00319523783881176
ztest_smaller_mu_1s.conf_int = np.array([np.nan, 7.248480744895716])
ztest_smaller_mu_1s.estimate = 7.01818181818182
ztest_smaller_mu_1s.null_value = 7.4
ztest_smaller_mu_1s.alternative = 'less'
ztest_smaller_mu_1s.method = 'One-sample z-Test'
ztest_smaller_mu_1s.data_name = 'x'
#> zt = z.test(x, sigma.x=0.46436662631627995, mu=6.4, alternative="greater")
#> cat_items(zt, "ztest_greater_mu_1s.")
ztest_larger_mu_1s = Holder()
ztest_larger_mu_1s.statistic = 4.415212090914452
ztest_larger_mu_1s.p_value = 5.045550190097003e-06
ztest_larger_mu_1s.conf_int = np.array([6.78788289146792, np.nan])
ztest_larger_mu_1s.estimate = 7.01818181818182
ztest_larger_mu_1s.null_value = 6.4
ztest_larger_mu_1s.alternative = 'greater'
ztest_larger_mu_1s.method = 'One-sample z-Test'
ztest_larger_mu_1s.data_name = 'x'
alternatives = {'less' : 'smaller',
'greater' : 'larger',
'two.sided' : 'two-sided'}
class TestZTest(object):
# all examples use the same data
# no weights used in tests
@classmethod
def setup_class(cls):
cls.x1 = np.array([7.8, 6.6, 6.5, 7.4, 7.3, 7., 6.4, 7.1, 6.7, 7.6, 6.8])
cls.x2 = np.array([4.5, 5.4, 6.1, 6.1, 5.4, 5., 4.1, 5.5])
cls.d1 = DescrStatsW(cls.x1)
cls.d2 = DescrStatsW(cls.x2)
cls.cm = CompareMeans(cls.d1, cls.d2)
def test(self):
x1, x2 = self.x1, self.x2
cm = self.cm
# tc : test cases
for tc in [ztest_, ztest_smaller, ztest_larger,
ztest_mu, ztest_smaller_mu, ztest_larger_mu]:
zstat, pval = ztest(x1, x2, value=tc.null_value,
alternative=alternatives[tc.alternative])
assert_allclose(zstat, tc.statistic, rtol=1e-10)
assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16)
zstat, pval = cm.ztest_ind(value=tc.null_value,
alternative=alternatives[tc.alternative])
assert_allclose(zstat, tc.statistic, rtol=1e-10)
assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16)
#overwrite nan in R's confint
tc_conf_int = tc.conf_int.copy()
if np.isnan(tc_conf_int[0]):
tc_conf_int[0] = - np.inf
if np.isnan(tc_conf_int[1]):
tc_conf_int[1] = np.inf
# Note: value is shifting our confidence interval in zconfint
ci = zconfint(x1, x2, value=0,
alternative=alternatives[tc.alternative])
assert_allclose(ci, tc_conf_int, rtol=1e-10)
ci = cm.zconfint_diff(alternative=alternatives[tc.alternative])
assert_allclose(ci, tc_conf_int, rtol=1e-10)
ci = zconfint(x1, x2, value=tc.null_value,
alternative=alternatives[tc.alternative])
assert_allclose(ci, tc_conf_int - tc.null_value, rtol=1e-10)
# 1 sample test copy-paste
d1 = self.d1
for tc in [ztest_mu_1s, ztest_smaller_mu_1s, ztest_larger_mu_1s]:
zstat, pval = ztest(x1, value=tc.null_value,
alternative=alternatives[tc.alternative])
assert_allclose(zstat, tc.statistic, rtol=1e-10)
assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16)
zstat, pval = d1.ztest_mean(value=tc.null_value,
alternative=alternatives[tc.alternative])
assert_allclose(zstat, tc.statistic, rtol=1e-10)
assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16)
#overwrite nan in R's confint
tc_conf_int = tc.conf_int.copy()
if np.isnan(tc_conf_int[0]):
tc_conf_int[0] = - np.inf
if np.isnan(tc_conf_int[1]):
tc_conf_int[1] = np.inf
# Note: value is shifting our confidence interval in zconfint
ci = zconfint(x1, value=0,
alternative=alternatives[tc.alternative])
assert_allclose(ci, tc_conf_int, rtol=1e-10)
ci = d1.zconfint_mean(alternative=alternatives[tc.alternative])
assert_allclose(ci, tc_conf_int, rtol=1e-10)
| bsd-3-clause |
pmav99/satellite-change-detect | tiledelta/scripts/cli.py | 2 | 2678 | #!/usr/bin/env python
import rasterio as rio
import numpy as np
import click, json, os
import tiledelta, mercantile
@click.group()
def cli():
pass
@click.command(short_help="Load tile bounds and echo the bounding box")
@click.argument('bounds', default='-', required=False)
@click.option('--stride', default=1)
def loaddata(bounds, stride):
"""Does something"""
try:
inBounds = click.open_file(bounds).readlines()
except IOError:
inBounds = [bounds]
bounds = json.loads(inBounds[0])
click.echo(bounds['bbox'])
# with rio.drivers():
# with rio.open('src_path', 'r') as src:
cli.add_command(loaddata)
@click.command()
@click.argument('filedir', type=click.Path(exists=True))
@click.argument('comparedir', type=click.Path(exists=True))
@click.option('--sampling', '-s', type=int, default=1)
@click.option('--filetype', '-f', type=(str), default='png')
@click.option('--plotdir', '-p', type=click.Path(exists=True))
def comptiles(filedir, comparedir, sampling, filetype, plotdir):
# plotdir = '/Users/dnomadb/Documents/pcomp'
files = os.listdir(filedir)
cfiles = os.listdir(comparedir)
if plotdir:
import matplotlib.pyplot as plot
for f in files:
fileinfo = f.split('-')
        if len(fileinfo[-1].split('.')) > 1 and fileinfo[-1].split('.')[-1] == filetype:
x, y, z = tiledelta.getXYZ(fileinfo)
bbox = mercantile.bounds(x, y, z)
with rio.drivers():
with rio.open(os.path.join(filedir, f), 'r') as src:
greyimage_before = (src.read(1).astype(np.uint16) + src.read(2).astype(np.uint16) + src.read(3).astype(np.uint16))
with rio.open(os.path.join(comparedir, f), 'r') as src:
greyimage_after = (src.read(1).astype(np.uint16) + src.read(2).astype(np.uint16) + src.read(3).astype(np.uint16))
pcplo = tiledelta.compareGreys(greyimage_after, greyimage_before, 10, 20)
pcplo = pcplo[::sampling,::sampling]
if plotdir:
fig = plot.figure(figsize=(20,10))
before = fig.add_subplot(131)
before.imshow(greyimage_after,cmap='Greys_r')
after = fig.add_subplot(132)
after.imshow(greyimage_before, cmap='Greys_r')
pc2 = fig.add_subplot(133)
pc2.imshow(pcplo, cmap='YlGnBu')
fig.savefig(os.path.join(plotdir, f))
else:
tiledelta.makeVectors(pcplo, tiledelta.makeAffine(pcplo.shape, bbox))
cli.add_command(comptiles)
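# Example invocations (hypothetical paths, shown for illustration only):
#   python cli.py loaddata bounds.geojson --stride 2
#   python cli.py comptiles ./tiles-before ./tiles-after -s 4 -f png -p ./plots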
if __name__ == '__main__':
cli()
| mit |
jougs/nest-simulator | pynest/examples/sinusoidal_poisson_generator.py | 7 | 5526 | # -*- coding: utf-8 -*-
#
# sinusoidal_poisson_generator.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
"""
Sinusoidal poisson generator example
------------------------------------
This script demonstrates the use of the ``sinusoidal_poisson_generator``
and its different parameters and modes. The source code of the model
can be found in ``models/sinusoidal_poisson_generator.h``.
The script is structured into two parts and creates one common figure.
In Part 1, two instances of the ``sinusoidal_poisson_generator`` are
created with different parameters. Part 2 illustrates the effect of
the ``individual_spike_trains`` switch.
"""
###############################################################################
# We import the modules required to simulate, analyze and plot this example.
import nest
import matplotlib.pyplot as plt
import numpy as np
nest.ResetKernel() # in case we run the script multiple times from iPython
####################################################################################
# We create two instances of the ``sinusoidal_poisson_generator`` with two
# different parameter sets using ``Create``. Moreover, we create devices to
# record firing rates (``multimeter``) and spikes (``spike_recorder``) and connect
# them to the generators using ``Connect``.
nest.SetKernelStatus({'resolution': 0.01})
g = nest.Create('sinusoidal_poisson_generator', n=2,
params=[{'rate': 10000.0,
'amplitude': 5000.0,
'frequency': 10.0,
'phase': 0.0},
{'rate': 0.0,
'amplitude': 10000.0,
'frequency': 5.0,
'phase': 90.0}])
m = nest.Create('multimeter', 2, {'interval': 0.1, 'record_from': ['rate']})
s = nest.Create('spike_recorder', 2)
nest.Connect(m, g, 'one_to_one')
nest.Connect(g, s, 'one_to_one')
print(m.get())
nest.Simulate(200)
###############################################################################
# After simulating, the spikes are extracted from the ``spike_recorder`` using
# ``GetStatus`` and plots are created with panels for the PST and ISI histograms.
colors = ['b', 'g']
for j in range(2):
ev = m[j].events
t = ev['times']
r = ev['rate']
sp = nest.GetStatus(s[j])[0]['events']['times']
plt.subplot(221)
h, e = np.histogram(sp, bins=np.arange(0., 201., 5.))
plt.plot(t, r, color=colors[j])
plt.step(e[:-1], h * 1000 / 5., color=colors[j], where='post')
plt.title('PST histogram and firing rates')
plt.ylabel('Spikes per second')
plt.subplot(223)
plt.hist(np.diff(sp), bins=np.arange(0., 1.005, 0.02),
histtype='step', color=colors[j])
plt.title('ISI histogram')
###############################################################################
# The kernel is reset and the number of threads set to 4.
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})
###############################################################################
# A ``sinusoidal_poisson_generator`` with ``individual_spike_trains`` set to
# `True` is created and connected to 20 parrot neurons whose spikes are
# recorded by a ``spike_recorder``. After simulating, a raster plot of the spikes
# is created.
g = nest.Create('sinusoidal_poisson_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0,
'individual_spike_trains': True})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_recorder')
nest.Connect(g, p, 'all_to_all')
nest.Connect(p, s, 'all_to_all')
nest.Simulate(200)
ev = s.events
plt.subplot(222)
plt.plot(ev['times'], ev['senders'] - min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('Individual spike trains for each target')
###############################################################################
# The kernel is reset again and the whole procedure is repeated for a
# ``sinusoidal_poisson_generator`` with `individual_spike_trains` set to
# `False`. The plot shows that in this case, all neurons receive the same
# spike train from the ``sinusoidal_poisson_generator``.
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4})
g = nest.Create('sinusoidal_poisson_generator',
params={'rate': 100.0, 'amplitude': 50.0,
'frequency': 10.0, 'phase': 0.0,
'individual_spike_trains': False})
p = nest.Create('parrot_neuron', 20)
s = nest.Create('spike_recorder')
nest.Connect(g, p, 'all_to_all')
nest.Connect(p, s, 'all_to_all')
nest.Simulate(200)
ev = s.events
plt.subplot(224)
plt.plot(ev['times'], ev['senders'] - min(ev['senders']), 'o')
plt.ylim([-0.5, 19.5])
plt.yticks([])
plt.title('One spike train for all targets')
plt.show()
| gpl-2.0 |
r-mart/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 130 | 6059 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
        W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
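# Editorial sketch (not in the original suite): basic shape contract of the
# decomposition, W is (n_samples, n_components) and components_ is
# (n_components, n_features).
def test_projgrad_nmf_shapes():
    A = np.abs(random_state.randn(6, 4))
    m = nmf.ProjectedGradientNMF(n_components=2, init='nndsvd', random_state=0)
    W = m.fit_transform(A)
    assert_true(W.shape == (6, 2))
    assert_true(m.components_.shape == (2, 4))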
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF(random_state=42)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
| bsd-3-clause |
omad/damootils | scripts/gqa_report.py | 1 | 1420 | import io
import pandas as pd
'''select name, dataset.metadata ? 'gqa' as has_gqa, count(dataset.id)
from agdc.dataset
left join agdc.dataset_type on dataset_type.id = dataset.dataset_type_ref
where name like '%level1%'
group by name, has_gqa
order by name;'''
csv = io.StringIO('''
name,has_gqa,count
ls7_pq_scene,false,130858
ls7_nbar_scene,false,130883
ls5_nbart_scene,false,128734
ls8_pq_scene,false,47817
ls5_level1_scene,true,161173
ls7_level1_scene,true,161894
ls8_level1_oli_scene,true,272
ls8_level1_scene,true,54990
ls8_nbart_scene,false,44514
ls5_nbar_scene,false,128797
ls5_level1_scene,false,65972
ls7_level1_scene,false,55570
ls8_level1_oli_scene,false,56
ls5_pq_scene,false,128766
ls8_nbar_scene,false,47867
ls7_nbart_scene,false,128193
ls8_level1_scene,false,762
''')
df = pd.read_csv(csv)
gqadf = df[df['name'].str.contains('level1')]
total = gqadf.groupby('name', as_index=False).sum().drop(['has_gqa'], axis=1).rename(columns={'count': 'total'})
gqadf = pd.merge(gqadf, total, on='name')
gqadf['pct'] = gqadf['count'] / gqadf['total']
gqadf['indexed_scenes'] = gqadf['count'].sum()
gqadf['pct_all'] = gqadf['count'] / gqadf.indexed_scenes
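# pct: within each level1 product, the share of scenes with/without GQA metadata.
# pct_all: the same counts expressed as a share of all indexed level1 scenes.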
print(gqadf[['name', 'has_gqa', 'count', 'pct', 'pct_all']].to_html(
formatters={'pct': '{:,.2%}'.format, 'pct_all': '{:,.2%}'.format}))
print(gqadf.groupby('has_gqa').sum()[['pct_all']].to_string(
formatters={'pct_all': '{:,.2%}'.format}))
| apache-2.0 |
horance-liu/tensorflow | tensorflow/contrib/losses/python/metric_learning/metric_loss_ops.py | 24 | 40534 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements various metric learning losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.summary import summary
try:
# pylint: disable=g-import-not-at-top
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance(feature, squared=False):
"""Computes the pairwise distance matrix with numerical stability.
output[i, j] = || feature[i, :] - feature[j, :] ||_2
Args:
feature: 2-D Tensor of size [number of data, feature dimension].
squared: Boolean, whether or not to square the pairwise distances.
Returns:
pairwise_distances: 2-D Tensor of size [number of data, number of data].
"""
pairwise_distances_squared = math_ops.add(
math_ops.reduce_sum(
math_ops.square(feature),
axis=[1],
keep_dims=True),
math_ops.reduce_sum(
math_ops.square(
array_ops.transpose(feature)),
axis=[0],
keep_dims=True)) - 2.0 * math_ops.matmul(
feature, array_ops.transpose(feature))
# Deal with numerical inaccuracies. Set small negatives to zero.
pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared, 0.0)
# Get the mask where the zero distances are at.
error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)
# Optionally take the sqrt.
if squared:
pairwise_distances = pairwise_distances_squared
else:
pairwise_distances = math_ops.sqrt(
pairwise_distances_squared + math_ops.to_float(error_mask) * 1e-16)
# Undo conditionally adding 1e-16.
pairwise_distances = math_ops.multiply(
pairwise_distances, math_ops.to_float(math_ops.logical_not(error_mask)))
num_data = array_ops.shape(feature)[0]
# Explicitly set diagonals to zero.
mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(
array_ops.ones([num_data]))
pairwise_distances = math_ops.multiply(pairwise_distances, mask_offdiagonals)
return pairwise_distances
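# Minimal usage sketch (illustrative only, not executed by this module);
# `some_feature_tensor` is a placeholder name for the caller's features:
#
#   embeddings = nn.l2_normalize(some_feature_tensor, dim=1)  # [batch, dim]
#   dists = pairwise_distance(embeddings, squared=False)      # [batch, batch]
#
# The result has zeros on the diagonal and dists[i, j] ~ ||f_i - f_j||_2.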
def contrastive_loss(labels, embeddings_anchor, embeddings_positive,
margin=1.0):
"""Computes the contrastive loss.
This loss encourages the embedding to be close to each other for
the samples of the same label and the embedding to be far apart at least
by the margin constant for the samples of different labels.
See: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
binary labels indicating positive vs negative pair.
embeddings_anchor: 2-D float `Tensor` of embedding vectors for the anchor
images. Embeddings should be l2 normalized.
embeddings_positive: 2-D float `Tensor` of embedding vectors for the
positive images. Embeddings should be l2 normalized.
margin: margin term in the loss definition.
Returns:
contrastive_loss: tf.float32 scalar.
"""
# Get per pair distances
distances = math_ops.sqrt(
math_ops.reduce_sum(
math_ops.square(embeddings_anchor - embeddings_positive), 1))
# Add contrastive loss for the siamese network.
# label here is {0,1} for neg, pos.
return math_ops.reduce_mean(
math_ops.to_float(labels) * math_ops.square(distances) +
(1. - math_ops.to_float(labels)) *
math_ops.square(math_ops.maximum(margin - distances, 0.)),
name='contrastive_loss')
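# Usage sketch (illustrative only): `anchor` and `positive` are assumed to be
# l2-normalized embedding tensors of shape [batch, dim], and `pair_labels` is
# an int32 tensor of 0/1 values marking negative/positive pairs.
#
#   loss = contrastive_loss(pair_labels, anchor, positive, margin=1.0)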
def masked_maximum(data, mask, dim=1):
"""Computes the axis wise maximum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the maximum.
Returns:
masked_maximums: N-D `Tensor`.
The maximized dimension is of size 1 after the operation.
"""
axis_minimums = math_ops.reduce_min(data, dim, keep_dims=True)
masked_maximums = math_ops.reduce_max(
math_ops.multiply(
data - axis_minimums, mask), dim, keep_dims=True) + axis_minimums
return masked_maximums
def masked_minimum(data, mask, dim=1):
"""Computes the axis wise minimum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the minimum.
Returns:
masked_minimums: N-D `Tensor`.
The minimized dimension is of size 1 after the operation.
"""
axis_maximums = math_ops.reduce_max(data, dim, keep_dims=True)
masked_minimums = math_ops.reduce_min(
math_ops.multiply(
data - axis_maximums, mask), dim, keep_dims=True) + axis_maximums
return masked_minimums
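# Worked example for the two masked reductions above (values chosen for
# illustration): with data = [[1., 2., 3.]] and mask = [[1., 0., 1.]],
# masked_maximum(data, mask) evaluates to [[3.]] and
# masked_minimum(data, mask) evaluates to [[1.]], i.e. the max/min taken
# only over the unmasked entries {1., 3.}.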
def triplet_semihard_loss(labels, embeddings, margin=1.0):
"""Computes the triplet loss with semi-hard negative mining.
  The loss encourages the positive distance (between a pair of embeddings with
  the same labels) to be smaller, by at least the margin constant, than the
  minimum negative distance among the negatives that are farther away than the
  positive distance (the semi-hard negatives) in the mini-batch. If no such
  negative exists, the largest negative distance is used instead.
See: https://arxiv.org/abs/1503.03832.
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multiclass integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
margin: Float, margin term in the loss definition.
Returns:
triplet_loss: tf.float32 scalar.
"""
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
pdist_matrix = pairwise_distance(embeddings, squared=True)
# Build pairwise binary adjacency matrix.
adjacency = math_ops.equal(labels, array_ops.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = math_ops.logical_not(adjacency)
batch_size = array_ops.size(labels)
# Compute the mask.
pdist_matrix_tile = array_ops.tile(pdist_matrix, [batch_size, 1])
mask = math_ops.logical_and(
array_ops.tile(adjacency_not, [batch_size, 1]),
math_ops.greater(
pdist_matrix_tile, array_ops.reshape(
array_ops.transpose(pdist_matrix), [-1, 1])))
mask_final = array_ops.reshape(
math_ops.greater(
math_ops.reduce_sum(
math_ops.cast(
mask, dtype=dtypes.float32), 1, keep_dims=True),
0.0), [batch_size, batch_size])
mask_final = array_ops.transpose(mask_final)
adjacency_not = math_ops.cast(adjacency_not, dtype=dtypes.float32)
mask = math_ops.cast(mask, dtype=dtypes.float32)
# negatives_outside: smallest D_an where D_an > D_ap.
negatives_outside = array_ops.reshape(
masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
negatives_outside = array_ops.transpose(negatives_outside)
# negatives_inside: largest D_an.
negatives_inside = array_ops.tile(
masked_maximum(pdist_matrix, adjacency_not), [1, batch_size])
semi_hard_negatives = array_ops.where(
mask_final, negatives_outside, negatives_inside)
loss_mat = math_ops.add(margin, pdist_matrix - semi_hard_negatives)
mask_positives = math_ops.cast(
adjacency, dtype=dtypes.float32) - array_ops.diag(
array_ops.ones([batch_size]))
  # In lifted-struct, the authors multiply by 0.5 for the upper triangular part;
  # in semihard, all positive pairs except the diagonal are taken.
num_positives = math_ops.reduce_sum(mask_positives)
triplet_loss = math_ops.truediv(
math_ops.reduce_sum(
math_ops.maximum(
math_ops.multiply(loss_mat, mask_positives), 0.0)),
num_positives,
name='triplet_semihard_loss')
return triplet_loss
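# Usage sketch (illustrative only): `labels` is a 1-D int32 tensor of class ids
# and `embeddings` an l2-normalized [batch, dim] float tensor.
#
#   loss = triplet_semihard_loss(labels, embeddings, margin=1.0)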
# pylint: disable=line-too-long
def npairs_loss(labels, embeddings_anchor, embeddings_positive,
reg_lambda=0.002, print_losses=False):
"""Computes the npairs loss.
  Npairs loss expects paired data where a pair is composed of samples from the
  same label and each pair in the minibatch has a different label. The loss
has two components. The first component is the L2 regularizer on the
embedding vectors. The second component is the sum of cross entropy loss
which takes each row of the pair-wise similarity matrix as logits and
the remapped one-hot labels as labels.
See: http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/nips16_npairmetriclearning.pdf
Args:
labels: 1-D tf.int32 `Tensor` of shape [batch_size/2].
embeddings_anchor: 2-D Tensor of shape [batch_size/2, embedding_dim] for the
embedding vectors for the anchor images. Embeddings should not be
l2 normalized.
embeddings_positive: 2-D Tensor of shape [batch_size/2, embedding_dim] for the
embedding vectors for the positive images. Embeddings should not be
l2 normalized.
reg_lambda: Float. L2 regularization term on the embedding vectors.
print_losses: Boolean. Option to print the xent and l2loss.
Returns:
npairs_loss: tf.float32 scalar.
"""
# pylint: enable=line-too-long
# Add the regularizer on the embedding.
reg_anchor = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_anchor), 1))
reg_positive = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_positive), 1))
l2loss = math_ops.multiply(
0.25 * reg_lambda, reg_anchor + reg_positive, name='l2loss')
# Get per pair similarities.
similarity_matrix = math_ops.matmul(
embeddings_anchor, embeddings_positive, transpose_a=False,
transpose_b=True)
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
labels_remapped = math_ops.to_float(
math_ops.equal(labels, array_ops.transpose(labels)))
labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keep_dims=True)
# Add the softmax loss.
xent_loss = nn.softmax_cross_entropy_with_logits(
logits=similarity_matrix, labels=labels_remapped)
xent_loss = math_ops.reduce_mean(xent_loss, name='xentropy')
if print_losses:
xent_loss = logging_ops.Print(
xent_loss, ['cross entropy:', xent_loss, 'l2loss:', l2loss])
return l2loss + xent_loss
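# Usage sketch (illustrative only): for a batch of n pairs, `pair_class_ids`
# has shape [n] with a distinct class per pair, and the two embedding tensors
# have shape [n, dim] and are NOT l2-normalized.
#
#   loss = npairs_loss(pair_class_ids, anchor_embeddings, positive_embeddings,
#                      reg_lambda=0.002)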
def _build_multilabel_adjacency(sparse_labels):
"""Builds multilabel adjacency matrix.
As of March 14th, 2017, there's no op for the dot product between
  two sparse tensors in TF. However, there is a `sparse_minimum` op which is
equivalent to an AND op between two sparse boolean tensors.
This computes the dot product between two sparse boolean inputs.
Args:
sparse_labels: List of 1-D boolean sparse tensors.
Returns:
adjacency_matrix: 2-D dense `Tensor`.
"""
num_pairs = len(sparse_labels)
adjacency_matrix = array_ops.zeros([num_pairs, num_pairs])
for i in range(num_pairs):
for j in range(num_pairs):
sparse_dot_product = math_ops.to_float(
sparse_ops.sparse_reduce_sum(sparse_ops.sparse_minimum(
sparse_labels[i], sparse_labels[j])))
sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 0)
sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 1)
one_hot_matrix = array_ops.pad(sparse_dot_product,
[[i, num_pairs-i-1],
[j, num_pairs-j-1]], 'CONSTANT')
adjacency_matrix += one_hot_matrix
return adjacency_matrix
def npairs_loss_multilabel(sparse_labels, embeddings_anchor,
embeddings_positive, reg_lambda=0.002,
print_losses=False):
r"""Computes the npairs loss with multilabel data.
  Npairs loss expects paired data where a pair is composed of samples from the
  same label and each pair in the minibatch has a different label. The loss
has two components. The first component is the L2 regularizer on the
embedding vectors. The second component is the sum of cross entropy loss
which takes each row of the pair-wise similarity matrix as logits and
the remapped one-hot labels as labels. Here, the similarity is defined by the
dot product between two embedding vectors. S_{i,j} = f(x_i)^T f(x_j)
To deal with multilabel inputs, we use the count of label intersection
i.e. L_{i,j} = | set_of_labels_for(i) \cap set_of_labels_for(j) |
Then we normalize each rows of the count based label matrix so that each row
sums to one.
Args:
sparse_labels: List of 1-D Boolean `SparseTensor` of dense_shape
[batch_size/2, num_classes] labels for the anchor-pos pairs.
embeddings_anchor: 2-D `Tensor` of shape [batch_size/2, embedding_dim] for
the embedding vectors for the anchor images. Embeddings should not be
l2 normalized.
embeddings_positive: 2-D `Tensor` of shape [batch_size/2, embedding_dim] for
the embedding vectors for the positive images. Embeddings should not be
l2 normalized.
reg_lambda: Float. L2 regularization term on the embedding vectors.
print_losses: Boolean. Option to print the xent and l2loss.
Returns:
npairs_loss: tf.float32 scalar.
Raises:
TypeError: When the specified sparse_labels is not a `SparseTensor`.
"""
if False in [isinstance(
l, sparse_tensor.SparseTensor) for l in sparse_labels]:
raise TypeError(
'sparse_labels must be a list of SparseTensors, but got %s' % str(
sparse_labels))
with ops.name_scope('NpairsLossMultiLabel'):
# Add the regularizer on the embedding.
reg_anchor = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_anchor), 1))
reg_positive = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_positive), 1))
l2loss = math_ops.multiply(0.25 * reg_lambda,
reg_anchor + reg_positive, name='l2loss')
# Get per pair similarities.
similarity_matrix = math_ops.matmul(
embeddings_anchor, embeddings_positive, transpose_a=False,
transpose_b=True)
# TODO(coreylynch): need to check the sparse values
# TODO(coreylynch): are composed only of 0's and 1's.
multilabel_adjacency_matrix = _build_multilabel_adjacency(sparse_labels)
labels_remapped = math_ops.to_float(multilabel_adjacency_matrix)
labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keep_dims=True)
# Add the softmax loss.
xent_loss = nn.softmax_cross_entropy_with_logits(
logits=similarity_matrix, labels=labels_remapped)
xent_loss = math_ops.reduce_mean(xent_loss, name='xentropy')
if print_losses:
xent_loss = logging_ops.Print(
xent_loss, ['cross entropy:', xent_loss, 'l2loss:', l2loss])
return l2loss + xent_loss
def lifted_struct_loss(labels, embeddings, margin=1.0):
"""Computes the lifted structured loss.
The loss encourages the positive distances (between a pair of embeddings
with the same labels) to be smaller than any negative distances (between a
pair of embeddings with different labels) in the mini-batch in a way
that is differentiable with respect to the embedding vectors.
See: https://arxiv.org/abs/1511.06452.
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multiclass integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should not
be l2 normalized.
margin: Float, margin term in the loss definition.
Returns:
lifted_loss: tf.float32 scalar.
"""
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
pairwise_distances = pairwise_distance(embeddings)
# Build pairwise binary adjacency matrix.
adjacency = math_ops.equal(labels, array_ops.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = math_ops.logical_not(adjacency)
batch_size = array_ops.size(labels)
diff = margin - pairwise_distances
mask = math_ops.cast(adjacency_not, dtype=dtypes.float32)
# Safe maximum: Temporarily shift negative distances
# above zero before taking max.
# this is to take the max only among negatives.
row_minimums = math_ops.reduce_min(diff, 1, keep_dims=True)
row_negative_maximums = math_ops.reduce_max(
math_ops.multiply(
diff - row_minimums, mask), 1, keep_dims=True) + row_minimums
# Compute the loss.
# Keep track of matrix of maximums where M_ij = max(m_i, m_j)
# where m_i is the max of alpha - negative D_i's.
# This matches the Caffe loss layer implementation at:
# https://github.com/rksltnl/Caffe-Deep-Metric-Learning-CVPR16/blob/0efd7544a9846f58df923c8b992198ba5c355454/src/caffe/layers/lifted_struct_similarity_softmax_layer.cpp # pylint: disable=line-too-long
max_elements = math_ops.maximum(
row_negative_maximums, array_ops.transpose(row_negative_maximums))
diff_tiled = array_ops.tile(diff, [batch_size, 1])
mask_tiled = array_ops.tile(mask, [batch_size, 1])
max_elements_vect = array_ops.reshape(
array_ops.transpose(max_elements), [-1, 1])
loss_exp_left = array_ops.reshape(
math_ops.reduce_sum(math_ops.multiply(
math_ops.exp(
diff_tiled - max_elements_vect),
mask_tiled), 1, keep_dims=True), [batch_size, batch_size])
loss_mat = max_elements + math_ops.log(
loss_exp_left + array_ops.transpose(loss_exp_left))
# Add the positive distance.
loss_mat += pairwise_distances
mask_positives = math_ops.cast(
adjacency, dtype=dtypes.float32) - array_ops.diag(
array_ops.ones([batch_size]))
# *0.5 for upper triangular, and another *0.5 for 1/2 factor for loss^2.
num_positives = math_ops.reduce_sum(mask_positives) / 2.0
lifted_loss = math_ops.truediv(
0.25 * math_ops.reduce_sum(
math_ops.square(
math_ops.maximum(
math_ops.multiply(loss_mat, mask_positives), 0.0))),
num_positives,
name='liftedstruct_loss')
return lifted_loss
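# Usage sketch (illustrative only): same inputs as triplet_semihard_loss, but
# the embeddings are expected NOT to be l2-normalized here.
#
#   loss = lifted_struct_loss(labels, embeddings, margin=1.0)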
def update_1d_tensor(y, index, value):
"""Updates 1d tensor y so that y[index] = value.
Args:
y: 1-D Tensor.
index: index of y to modify.
value: new value to write at y[index].
Returns:
y_mod: 1-D Tensor. Tensor y after the update.
"""
value = array_ops.squeeze(value)
# modify the 1D tensor x at index with value.
# ex) chosen_ids = update_1D_tensor(chosen_ids, cluster_idx, best_medoid)
y_before = array_ops.slice(y, [0], [index])
y_after = array_ops.slice(y, [index + 1], [-1])
y_mod = array_ops.concat([y_before, [value], y_after], 0)
return y_mod
def get_cluster_assignment(pairwise_distances, centroid_ids):
"""Assign data points to the neareset centroids.
Tensorflow has numerical instability and doesn't always choose
the data point with theoretically zero distance as it's nearest neighbor.
Thus, for each centroid in centroid_ids, explicitly assign
the centroid itself as the nearest centroid.
This is done through the mask tensor and the constraint_vect tensor.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
centroid_ids: 1-D Tensor of centroid indices.
Returns:
y_fixed: 1-D tensor of cluster assignment.
"""
predictions = math_ops.argmin(
array_ops.gather(pairwise_distances, centroid_ids), dimension=0)
batch_size = array_ops.shape(pairwise_distances)[0]
# Deal with numerical instability
mask = math_ops.reduce_any(array_ops.one_hot(
centroid_ids, batch_size, True, False, axis=-1, dtype=dtypes.bool),
axis=0)
constraint_one_hot = math_ops.multiply(
array_ops.one_hot(centroid_ids,
batch_size,
array_ops.constant(1, dtype=dtypes.int64),
array_ops.constant(0, dtype=dtypes.int64),
axis=0,
dtype=dtypes.int64),
math_ops.to_int64(math_ops.range(array_ops.shape(centroid_ids)[0])))
constraint_vect = math_ops.reduce_sum(
array_ops.transpose(constraint_one_hot), axis=0)
y_fixed = array_ops.where(mask, constraint_vect, predictions)
return y_fixed
def compute_facility_energy(pairwise_distances, centroid_ids):
"""Compute the average travel distance to the assigned centroid.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
centroid_ids: 1-D Tensor of indices.
Returns:
facility_energy: dtypes.float32 scalar.
"""
return -1.0 * math_ops.reduce_sum(
math_ops.reduce_min(
array_ops.gather(pairwise_distances, centroid_ids), axis=0))
def compute_clustering_score(labels, predictions, margin_type):
"""Computes the clustering score via sklearn.metrics functions.
  There are various ways to compute the clustering score. Intuitively,
  we want to measure the agreement of two clustering assignments (labels vs
  predictions), ignoring permutations, and output a score from zero to one,
  where values close to one indicate strong agreement.
  This code supports the following scoring functions:
    nmi: normalized mutual information
    ami: adjusted mutual information
    ari: adjusted Rand index
vmeasure: v-measure
const: indicator checking whether the two clusterings are the same.
See http://scikit-learn.org/stable/modules/classes.html#clustering-metrics
for the detailed descriptions.
Args:
labels: 1-D Tensor. ground truth cluster assignment.
predictions: 1-D Tensor. predicted cluster assignment.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
clustering_score: dtypes.float32 scalar.
The possible valid values are from zero to one.
Zero means the worst clustering and one means the perfect clustering.
Raises:
ValueError: margin_type is not recognized.
"""
margin_type_to_func = {
'nmi': _compute_nmi_score,
'ami': _compute_ami_score,
'ari': _compute_ari_score,
'vmeasure': _compute_vmeasure_score,
'const': _compute_zeroone_score
}
if margin_type not in margin_type_to_func:
raise ValueError('Unrecognized margin_type: %s' % margin_type)
clustering_score_fn = margin_type_to_func[margin_type]
return array_ops.squeeze(clustering_score_fn(labels, predictions))
def _compute_nmi_score(labels, predictions):
return math_ops.to_float(
script_ops.py_func(
metrics.normalized_mutual_info_score, [labels, predictions],
[dtypes.float64],
name='nmi'))
def _compute_ami_score(labels, predictions):
ami_score = math_ops.to_float(
script_ops.py_func(
metrics.adjusted_mutual_info_score, [labels, predictions],
[dtypes.float64],
name='ami'))
return math_ops.maximum(0.0, ami_score)
def _compute_ari_score(labels, predictions):
ari_score = math_ops.to_float(
script_ops.py_func(
metrics.adjusted_rand_score, [labels, predictions], [dtypes.float64],
name='ari'))
# ari score can go below 0
# http://scikit-learn.org/stable/modules/clustering.html#adjusted-rand-score
return math_ops.maximum(0.0, ari_score)
def _compute_vmeasure_score(labels, predictions):
vmeasure_score = math_ops.to_float(
script_ops.py_func(
metrics.v_measure_score, [labels, predictions], [dtypes.float64],
name='vmeasure'))
return math_ops.maximum(0.0, vmeasure_score)
def _compute_zeroone_score(labels, predictions):
zeroone_score = math_ops.to_float(
math_ops.equal(
math_ops.reduce_sum(
math_ops.to_int32(math_ops.equal(labels, predictions))),
array_ops.shape(labels)[0]))
return zeroone_score
def _find_loss_augmented_facility_idx(pairwise_distances, labels, chosen_ids,
candidate_ids, margin_multiplier,
margin_type):
"""Find the next centroid that maximizes the loss augmented inference.
This function is a subroutine called from compute_augmented_facility_locations
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of current centroid indices.
candidate_ids: 1-D Tensor of candidate indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
integer index.
"""
num_candidates = array_ops.shape(candidate_ids)[0]
pairwise_distances_chosen = array_ops.gather(pairwise_distances, chosen_ids)
pairwise_distances_candidate = array_ops.gather(
pairwise_distances, candidate_ids)
pairwise_distances_chosen_tile = array_ops.tile(
pairwise_distances_chosen, [1, num_candidates])
candidate_scores = -1.0 * math_ops.reduce_sum(
array_ops.reshape(
math_ops.reduce_min(
array_ops.concat([
pairwise_distances_chosen_tile,
array_ops.reshape(pairwise_distances_candidate, [1, -1])
], 0),
axis=0,
keep_dims=True), [num_candidates, -1]),
axis=1)
nmi_scores = array_ops.zeros([num_candidates])
iteration = array_ops.constant(0)
def func_cond(iteration, nmi_scores):
del nmi_scores # Unused in func_cond()
return iteration < num_candidates
def func_body(iteration, nmi_scores):
predictions = get_cluster_assignment(
pairwise_distances,
array_ops.concat([chosen_ids, [candidate_ids[iteration]]], 0))
nmi_score_i = compute_clustering_score(labels, predictions, margin_type)
pad_before = array_ops.zeros([iteration])
pad_after = array_ops.zeros([num_candidates - 1 - iteration])
# return 1 - NMI score as the structured loss.
# because NMI is higher the better [0,1].
return iteration + 1, nmi_scores + array_ops.concat(
[pad_before, [1.0 - nmi_score_i], pad_after], 0)
_, nmi_scores = control_flow_ops.while_loop(
func_cond, func_body, [iteration, nmi_scores])
candidate_scores = math_ops.add(
candidate_scores, margin_multiplier * nmi_scores)
argmax_index = math_ops.to_int32(
math_ops.argmax(candidate_scores, dimension=0))
return candidate_ids[argmax_index]
def compute_augmented_facility_locations(pairwise_distances, labels, all_ids,
margin_multiplier, margin_type):
"""Computes the centroid locations.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
all_ids: 1-D Tensor of all data indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: 1-D Tensor of chosen centroid indices.
"""
def func_cond_augmented(iteration, chosen_ids):
del chosen_ids # Unused argument in func_cond_augmented.
return iteration < num_classes
def func_body_augmented(iteration, chosen_ids):
# find a new facility location to add
# based on the clustering score and the NMI score
candidate_ids = array_ops.setdiff1d(all_ids, chosen_ids)[0]
new_chosen_idx = _find_loss_augmented_facility_idx(pairwise_distances,
labels, chosen_ids,
candidate_ids,
margin_multiplier,
margin_type)
chosen_ids = array_ops.concat([chosen_ids, [new_chosen_idx]], 0)
return iteration + 1, chosen_ids
num_classes = array_ops.size(array_ops.unique(labels)[0])
chosen_ids = array_ops.constant(0, dtype=dtypes.int32, shape=[0])
# num_classes get determined at run time based on the sampled batch.
iteration = array_ops.constant(0)
_, chosen_ids = control_flow_ops.while_loop(
func_cond_augmented,
func_body_augmented, [iteration, chosen_ids],
shape_invariants=[iteration.get_shape(), tensor_shape.TensorShape(
[None])])
return chosen_ids
def update_medoid_per_cluster(pairwise_distances, pairwise_distances_subset,
labels, chosen_ids, cluster_member_ids,
cluster_idx, margin_multiplier, margin_type):
"""Updates the cluster medoid per cluster.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
pairwise_distances_subset: 2-D Tensor of pairwise distances for one cluster.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of cluster centroid indices.
cluster_member_ids: 1-D Tensor of cluster member indices for one cluster.
cluster_idx: Index of this one cluster.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
def func_cond(iteration, scores_margin):
del scores_margin # Unused variable scores_margin.
return iteration < num_candidates
def func_body(iteration, scores_margin):
# swap the current medoid with the candidate cluster member
candidate_medoid = math_ops.to_int32(cluster_member_ids[iteration])
tmp_chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, candidate_medoid)
predictions = get_cluster_assignment(pairwise_distances, tmp_chosen_ids)
metric_score = compute_clustering_score(labels, predictions, margin_type)
pad_before = array_ops.zeros([iteration])
pad_after = array_ops.zeros([num_candidates - 1 - iteration])
return iteration + 1, scores_margin + array_ops.concat(
[pad_before, [1.0 - metric_score], pad_after], 0)
# pairwise_distances_subset is of size [p, 1, 1, p],
# the intermediate dummy dimensions at
# [1, 2] makes this code work in the edge case where p=1.
# this happens if the cluster size is one.
scores_fac = -1.0 * math_ops.reduce_sum(
array_ops.squeeze(pairwise_distances_subset, [1, 2]), axis=0)
iteration = array_ops.constant(0)
num_candidates = array_ops.size(cluster_member_ids)
scores_margin = array_ops.zeros([num_candidates])
_, scores_margin = control_flow_ops.while_loop(func_cond, func_body,
[iteration, scores_margin])
candidate_scores = math_ops.add(scores_fac, margin_multiplier * scores_margin)
argmax_index = math_ops.to_int32(
math_ops.argmax(candidate_scores, dimension=0))
best_medoid = math_ops.to_int32(cluster_member_ids[argmax_index])
chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, best_medoid)
return chosen_ids
def update_all_medoids(pairwise_distances, predictions, labels, chosen_ids,
margin_multiplier, margin_type):
"""Updates all cluster medoids a cluster at a time.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
predictions: 1-D Tensor of predicted cluster assignment.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of cluster centroid indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
def func_cond_augmented_pam(iteration, chosen_ids):
del chosen_ids # Unused argument.
return iteration < num_classes
def func_body_augmented_pam(iteration, chosen_ids):
"""Call the update_medoid_per_cluster subroutine."""
mask = math_ops.equal(
math_ops.to_int64(predictions), math_ops.to_int64(iteration))
this_cluster_ids = array_ops.where(mask)
pairwise_distances_subset = array_ops.transpose(
array_ops.gather(
array_ops.transpose(
array_ops.gather(pairwise_distances, this_cluster_ids)),
this_cluster_ids))
chosen_ids = update_medoid_per_cluster(pairwise_distances,
pairwise_distances_subset, labels,
chosen_ids, this_cluster_ids,
iteration, margin_multiplier,
margin_type)
return iteration + 1, chosen_ids
unique_class_ids = array_ops.unique(labels)[0]
num_classes = array_ops.size(unique_class_ids)
iteration = array_ops.constant(0)
_, chosen_ids = control_flow_ops.while_loop(
func_cond_augmented_pam, func_body_augmented_pam, [iteration, chosen_ids])
return chosen_ids
def compute_augmented_facility_locations_pam(pairwise_distances,
labels,
margin_multiplier,
margin_type,
chosen_ids,
pam_max_iter=5):
"""Refine the cluster centroids with PAM local search.
For fixed iterations, alternate between updating the cluster assignment
and updating cluster medoids.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
chosen_ids: 1-D Tensor of initial estimate of cluster centroids.
pam_max_iter: Number of refinement iterations.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
for _ in range(pam_max_iter):
# update the cluster assignment given the chosen_ids (S_pred)
predictions = get_cluster_assignment(pairwise_distances, chosen_ids)
# update the medoids per each cluster
chosen_ids = update_all_medoids(pairwise_distances, predictions, labels,
chosen_ids, margin_multiplier, margin_type)
return chosen_ids
def compute_gt_cluster_score(pairwise_distances, labels):
"""Compute ground truth facility location score.
Loop over each unique classes and compute average travel distances.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
Returns:
gt_cluster_score: dtypes.float32 score.
"""
unique_class_ids = array_ops.unique(labels)[0]
num_classes = array_ops.size(unique_class_ids)
iteration = array_ops.constant(0)
gt_cluster_score = array_ops.constant(0.0, dtype=dtypes.float32)
def func_cond(iteration, gt_cluster_score):
del gt_cluster_score # Unused argument.
return iteration < num_classes
def func_body(iteration, gt_cluster_score):
"""Per each cluster, compute the average travel distance."""
mask = math_ops.equal(labels, unique_class_ids[iteration])
this_cluster_ids = array_ops.where(mask)
pairwise_distances_subset = array_ops.transpose(
array_ops.gather(
array_ops.transpose(
array_ops.gather(pairwise_distances, this_cluster_ids)),
this_cluster_ids))
this_cluster_score = -1.0 * math_ops.reduce_min(
math_ops.reduce_sum(
pairwise_distances_subset, axis=0))
return iteration + 1, gt_cluster_score + this_cluster_score
_, gt_cluster_score = control_flow_ops.while_loop(
func_cond, func_body, [iteration, gt_cluster_score])
return gt_cluster_score
def cluster_loss(labels,
embeddings,
margin_multiplier,
enable_pam_finetuning=True,
margin_type='nmi',
print_losses=False):
"""Computes the clustering loss.
The following structured margins are supported:
nmi: normalized mutual information
ami: adjusted mutual information
    ari: adjusted Rand index
vmeasure: v-measure
const: indicator checking whether the two clusterings are the same.
Args:
labels: 2-D Tensor of labels of shape [batch size, 1]
embeddings: 2-D Tensor of embeddings of shape
[batch size, embedding dimension]. Embeddings should be l2 normalized.
margin_multiplier: float32 scalar. multiplier on the structured margin term
See section 3.2 of paper for discussion.
enable_pam_finetuning: Boolean, Whether to run local pam refinement.
See section 3.4 of paper for discussion.
margin_type: Type of structured margin to use. See section 3.2 of
paper for discussion. Can be 'nmi', 'ami', 'ari', 'vmeasure', 'const'.
print_losses: Boolean. Option to print the loss.
Paper: https://arxiv.org/abs/1612.01213.
Returns:
clustering_loss: A float32 scalar `Tensor`.
Raises:
ImportError: If sklearn dependency is not installed.
"""
if not HAS_SKLEARN:
raise ImportError('Cluster loss depends on sklearn.')
pairwise_distances = pairwise_distance(embeddings)
labels = array_ops.squeeze(labels)
all_ids = math_ops.range(array_ops.shape(embeddings)[0])
# Compute the loss augmented inference and get the cluster centroids.
chosen_ids = compute_augmented_facility_locations(pairwise_distances, labels,
all_ids, margin_multiplier,
margin_type)
# Given the predicted centroids, compute the clustering score.
score_pred = compute_facility_energy(pairwise_distances, chosen_ids)
# Branch whether to use PAM finetuning.
if enable_pam_finetuning:
# Initialize with augmented facility solution.
chosen_ids = compute_augmented_facility_locations_pam(pairwise_distances,
labels,
margin_multiplier,
margin_type,
chosen_ids)
score_pred = compute_facility_energy(pairwise_distances, chosen_ids)
# Given the predicted centroids, compute the cluster assignments.
predictions = get_cluster_assignment(pairwise_distances, chosen_ids)
# Compute the clustering (i.e. NMI) score between the two assignments.
clustering_score_pred = compute_clustering_score(labels, predictions,
margin_type)
# Compute the clustering score from labels.
score_gt = compute_gt_cluster_score(pairwise_distances, labels)
# Compute the hinge loss.
clustering_loss = math_ops.maximum(
score_pred + margin_multiplier * (1.0 - clustering_score_pred) - score_gt,
0.0,
name='clustering_loss')
clustering_loss.set_shape([])
if print_losses:
clustering_loss = logging_ops.Print(
clustering_loss,
['clustering_loss: ', clustering_loss, array_ops.shape(
clustering_loss)])
# Clustering specific summary.
summary.scalar('losses/score_pred', score_pred)
summary.scalar('losses/' + margin_type, clustering_score_pred)
summary.scalar('losses/score_gt', score_gt)
return clustering_loss
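# Usage sketch (illustrative only): `labels` has shape [batch, 1] and
# `embeddings` is an l2-normalized [batch, dim] tensor; sklearn must be
# installed for the structured margin computation.
#
#   loss = cluster_loss(labels, embeddings, margin_multiplier=1.0,
#                       enable_pam_finetuning=True, margin_type='nmi')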
| apache-2.0 |
arabenjamin/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
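# A minimal sketch of the pattern this script implements (illustrative only;
# `minibatches` and `all_classes` stand in for objects defined further below):
#
#   vec = HashingVectorizer(decode_error='ignore', n_features=2 ** 18)
#   clf = SGDClassifier()
#   for X_text, y in minibatches:
#       clf.partial_fit(vec.transform(X_text), y, classes=all_classes)
#
# Because the hashing vectorizer is stateless, every batch is projected into
# the same fixed-size feature space, which is what makes partial_fit usable.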
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop: iterate over mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
AnasGhrab/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
xuyongzhi/scan_volume | src/rotate3D/scripts/rotate_to_3D.py | 1 | 7338 | #!/usr/bin/env python
import rospy
import rosbag
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointField
from std_msgs.msg import Int64
from laser_geometry import LaserProjection
import numpy as np
import matplotlib.pyplot as plt
import math
import os
BASE_DIR = os.path.dirname( os.path.abspath(__file__) )
# each scan is along x axis
# raw z is negative
def rotate_2d ( angle ):
R = np.array( [ [ np.cos(angle), -np.sin( angle ) ],[ np.sin( angle ), np.cos( angle ) ] ] )
return R
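# For example, rotate_2d(math.pi / 2).dot([1.0, 0.0]) is approximately
# [0.0, 1.0], i.e. a counter-clockwise rotation by 90 degrees.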
class RotateTo3D:
'''
self.status: 'waiting' --start--> 'scanning' --stop--> 'waiting'
'''
def __init__(self):
self.separate_models = False
self.auto_pub_ref_at_frame = 5
pi_angle = 41
speed = 1.0 * math.pi / pi_angle
fre = 49.5
self.increment_theta = 1.0 * speed / fre
self.z0_offset = 4 * 0.01
#self.z0_offset = 10 * 0.01
#self.z0_offset = 0 * 0.01
self.status = 'waiting'
self.pcl_3d = None
self.all_3d_points_ls = []
self.scanN = 0
self.theta = math.pi * 0.2
self.pcl_n = 0
self.pcl_3d_pub = rospy.Publisher('pcl_3d',PointCloud2,queue_size=10)
self.fig_dif = plt.figure()
self.ax_dif = self.fig_dif.add_subplot(111)
self.received_n = 0
res_path = os.path.join( BASE_DIR,'3d_res' )
if not os.path.exists( res_path ):
os.makedirs( res_path )
self.res_bag_name = os.path.join( res_path, 'pcl_3d-zofs_%d-piangle_%d-fre_%d.bag'%(self.z0_offset*100, pi_angle, fre*10) )
self.pcl3d_bag = rosbag.Bag( self.res_bag_name,'w')
rospy.loginfo( 'res path:%s'%(self.res_bag_name) )
def start(self):
self.status = 'start'
self.scanN = 0
self.pcl_3d = None
self.all_3d_points_ls = []
rospy.loginfo('received start command')
def stop(self):
self.status = 'stop'
rospy.loginfo('received stop command, theta: %0.2f'%(self.theta*180.0/math.pi))
def from_2D_to_3D( self, point_2d ):
x0 = point_2d[0]
y0 = point_2d[1]
self.theta = theta = self.scanN * self.increment_theta
xy = np.matmul( rotate_2d( self.theta ), np.array( [[y0],[self.z0_offset]] ) )
point_3d = [ xy[0,0], xy[1,0], x0, point_2d[3], point_2d[4] ]
return point_3d
#def from_2D_to_3D( self, point_2d ):
# x0 = point_2d[1]
# y0 = point_2d[0]
# self.theta = theta = self.scanN * self.increment_theta
# x = x0 * math.cos(theta)
# y = -x0 * math.sin(theta)
# z = y0
# point_3d = [x, y, z, point_2d[3], point_2d[4]]
# return point_3d
def add_data( self, pcl_LaserScan, dif_start=None, dif_end=None ) :
gen_data = pc2.read_points(pcl_LaserScan, field_names=None, skip_nans=True)
curscan_points = []
#if self.pcl_3d != None:
# gen_trunk = pc2.read_points(self.pcl_3d, field_names=None,skip_nans=True)
# for p in gen_trunk:
# curscan_points.append(list(p))
for idx, p in enumerate(gen_data):
if dif_start==None or ( idx >= dif_start and idx <= dif_end ):
point_2d = list(p) #[ x,y,z,?,? ] z==0
point_3d = self.from_2D_to_3D( point_2d )
curscan_points.append(point_3d)
#if self.scanN % 100 == 0 and idx==0:
# rospy.loginfo( 'scanN= %d, point_2d:%s, point_3d:%s'%( self.scanN, point_2d, point_3d ) )
self.all_3d_points_ls += curscan_points
self.pcl_3d = pc2.create_cloud(pcl_LaserScan.header, pcl_LaserScan.fields, curscan_points)
def xyz_from_pcl(self,pcl):
gen = pc2.read_points(pcl, field_names=None, skip_nans=True)
points = []
for p in gen:
xyz = np.array(list(p)[1:4])
if points == []:
points = xyz
else:
points = np.vstack((points,xyz))
return points
def update_scan_increment(self):
'''
do this at the end
'''
self.increment = self.trunk_length / self.scanN
rospy.loginfo('increment = %f / %d = %f',self.trunk_length,self.scanN,self.increment)
def push(self,data_LaserScan):
# rospy.loginfo('project data_LaserScan to PointCloud OK')
pcl_LaserScan = LaserProjection().projectLaser(data_LaserScan)
points_xyz = self.xyz_from_pcl(pcl_LaserScan) # points_xyz: [N,3] [:,1]=0
# print "scan point N = ",points_xyz.shape[0]," / ", pcl_LaserScan.width, " rangesN = ",len(data_LaserScan.ranges)
if self.status == 'start' or self.status == 'scanning':
if self.status == 'start':
self.status = 'scanning'
self.add_data( pcl_LaserScan )
self.scanN += 1
self.pcl_3d_pub.publish(self.pcl_3d)
self.pcl3d_bag.write( 'pcl_3d', self.pcl_3d )
elif self.status == 'stop':
self.status = 'waiting'
if self.separate_models:
self.pcl_n = self.pcl_n + 1
self.reset()
self.pcl3d_bag.close()
rospy.loginfo('stop recording, save this model: ' + self.res_bag_name )
if self.status == 'scanning' and self.theta > 181.0 * math.pi / 180:
self.stop()
return self.scanN, self.theta
def dif_range(self,points_xyz):
'''
Compare points_xyz with self.ref_points_xyz.
Return the indices dif_start and dif_end of the differing span.
'''
min_N = min(points_xyz.shape[0],self.ref_points_xyz.shape[0])
dif = points_xyz[0:min_N,self.height_axis] - self.ref_points_xyz[0:min_N,self.height_axis]
dif = np.fabs(dif)
threshold = self.dif_threshold
dif_N = sum([ d > threshold for d in dif ])
self.scan_difN_pub.publish(dif_N)
if dif_N > 5:
dif_start = len(dif)
dif_end = 0
for i,d in enumerate(dif):
if dif_start==len(dif) and d > threshold and i+3<len(dif) and dif[i+1] > threshold and dif[i+3] > threshold:
dif_start = i
self.scan_difStart_pub.publish(dif_start)
if dif_start < len(dif) and i > dif_start and ( d < threshold or (d > threshold and i==len(dif)-1 ) ):
dif_end = i
self.scan_difEnd_pub.publish(dif_end)
if dif_end - dif_start > 3:
break
else:
# rospy.loginfo('short dif_range: dif_start= %d dif_end= %d dif_len= %d',dif_start,dif_end,dif_end-dif_start)
dif_start = len(dif)
dif_end = 0
return True,dif_start,dif_end
else:
return False,0,0
def volume_from_bag(self,model_bag_file):
model_bag = rosbag.Bag(model_bag_file)
msg_gen = model_bag.read_messages(topics='pcl_3d')
for topic,msg,t in msg_gen:
self. pcl_volume(msg)
if __name__ == '__main__':
print 'in main'
#TVD = RotateTo3D()
#TVD.volume_from_bag('model_result_new/empty.bag')
| mit |
probml/pyprobml | scripts/Old/boss_motifs.py | 1 | 1696 |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import boss_utils
import utils
np.random.seed(0)
def motif_distance(x, m):
# hamming distance of x to motif
# If m[i]=nan, it means locn i is a don't care
mask = [not(np.isnan(v)) for v in m] #np.where(m>0)
return np.sum(x[mask] != m[mask])
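# Example: with x = np.array([3., 3., 0.]) and m = np.array([np.nan, 3., 3.]),
# position 0 is a don't-care, position 1 matches and position 2 differs,
# so motif_distance(x, m) == 1.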
seq_len = 3 # L
alpha_size = 4 # A
nseq = alpha_size ** seq_len
print("Generating {} sequences of length {}".format(nseq, seq_len))
motifs = [];
#m = np.arange(seq_len, dtype=float)
m = np.repeat(3.0, seq_len)
m1 = np.copy(m)
m1[0] = np.nan
m2 = np.copy(m)
m2[seq_len-1] = np.nan
#motifs = [m1, m2]
motifs = [m2]
print("Motifs")
print(motifs)
def oracle(x):
d = np.inf
for motif in motifs:
d = min(d, motif_distance(x, motif))
d = seq_len - d # closer implies higher score
d = d + np.random.normal(0, 0.01)
return d
def oracle_batch(X):
return np.apply_along_axis(oracle, 1, X)
Xall = utils.gen_all_strings(seq_len) # (N,L) array of ints (in 0..A)
yall = oracle_batch(Xall)
plt.figure()
plt.plot(yall)
Xtrain = Xall
ytrain = yall
predictor = boss_utils.learn_supervised_model(Xtrain, ytrain)
ypred = predictor.predict(Xall)
plt.figure()
plt.scatter(yall, ypred)
plt.xlabel('True Values')
plt.ylabel('Predictions')
plt.show()
embedder = boss_utils.convert_to_embedder(predictor, seq_len)
def embed_fn(x):
return embedder.predict(x)
Xinit = Xtrain[:10]
yinit = ytrain[:10]
n_iter=2
methods = []
methods.append('bayes')
methods.append('random')
for method in methods:
np.random.seed(0)
ytrace = boss_utils.boss_maximize(method, oracle, Xinit, yinit, embed_fn, n_iter=n_iter)
plt.figure()
plt.plot(ytrace)
plt.title(method) | mit |
hainm/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
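# In words: at C = l1_min_c(...) the l1-penalized model is entirely null
# (all coefficients and intercepts are exactly zero), while any C slightly
# above that bound yields at least one non-zero parameter.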
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
tmerrick1/spack | var/spack/repos/builtin/packages/py-localcider/package.py | 5 | 1786 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyLocalcider(PythonPackage):
"""Tools for calculating sequence properties of disordered proteins"""
homepage = "http://pappulab.github.io/localCIDER"
url = "https://pypi.io/packages/source/l/localcider/localcider-0.1.14.tar.gz"
version('0.1.14', 'cd3c992595c5cb280374de3750663cfa')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
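# Once this recipe is available to a Spack instance, the package would
# typically be built with `spack install py-localcider` (shown for context).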
| lgpl-2.1 |
spallavolu/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-0.18.1/examples/classification/plot_classification_probability.py | 138 | 2871 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting, and Gaussian process classification.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
kernel = 1.0 * RBF([1.0, 1.0]) # for GPC
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'),
'GPC': GaussianProcessClassifier(kernel)
}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
# View probabilities=
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
whereaswhile/DLSR | convnet-folk_master/shownet_bbx.py | 1 | 22862 | # Copyright (c) 2011, Alex Krizhevsky ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy
import sys
import getopt as opt
from util import *
from math import sqrt, ceil, floor
import os
import scipy.io as sio
from gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from options import *
from data import DataProvider, dp_types
from w_util import readLines
try:
import pylab as pl
except:
print "This script requires the matplotlib python library (Ubuntu/Fedora package name python-matplotlib). Please install it."
# sys.exit(1)
import matplotlib.cm as cm
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def get_gpus(self):
self.need_gpu = self.op.get_value('show_preds') or self.op.get_value('write_features') or self.op.get_value('write_pixel_proj')
if self.need_gpu:
ConvNet.get_gpus(self)
def init_data_providers(self):
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_data_providers(self):
self.dp_params['convnet'] = self
self.dp_params['imgprovider'] = self.img_provider_file
try:
if self.need_gpu:
self.test_data_provider = DataProvider.get_instance(self.data_path_test, self.test_batch_range,
type=self.dp_type_test, dp_params=self.dp_params, test=True)
self.test_batch_range = self.test_data_provider.batch_range
except Exception, e:
print "Unable to create data provider: %s" % e
self.print_data_providers()
sys.exit()
def init_model_state(self):
#ConvNet.init_model_state(self)
if self.op.get_value('show_preds'):
self.sotmax_idx = self.get_layer_idx(self.op.get_value('show_preds'), check_type='softmax')
if self.op.get_value('write_features'):
self.ftr_layer_idx = self.get_layer_idx(self.op.get_value('write_features'))
if self.op.get_value('write_pixel_proj'):
tmp = self.op.get_value('write_pixel_proj')
tmp = tmp.split(',')
self.ftr_layer_idx = self.get_layer_idx(tmp[0])
self.ftr_res_idx = int(tmp[1])
def init_model_lib(self):
if self.need_gpu:
if self.op.get_value('write_pixel_proj'):
# in pixel projection model, activation matrix cannot be shared
for l in self.model_state['layers']:
l['usesActs'] = True
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
train_errors = [o[0][self.show_cost][self.cost_idx] for o in self.train_outputs]
test_errors = [o[0][self.show_cost][self.cost_idx] for o in self.test_outputs]
# numbatches = len(self.train_batch_range)
numbatches = self.model_state['batchnum']
test_errors = numpy.row_stack(test_errors)
test_errors = numpy.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
print numepochs, numbatches
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.)) # aim for about 20 labels
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) # but round to nearest 10
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title(self.show_cost)
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans):
FILTERS_PER_ROW = 16
MAX_ROWS = 16
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
filter_start = 0 # First filter to show
layer_names = [l['name'] for l in self.layers]
if self.show_filters not in layer_names:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[layer_names.index(self.show_filters)]
filters = layer['weights'][self.input_idx]
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], layer['filterPixels'][self.input_idx] * channels, num_filters))
filter_start = r.randint(0, layer['modules']-1)*num_filters # pick out some random modules
filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
num_filters *= layer['modules']
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans)
def plot_predictions(self):
data = self.get_next_batch(train=False)[2] # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS
NUM_TOP_CLASSES = min(num_classes, 4) # show this many top labels
label_names = self.test_data_provider.batch_meta['label_names']
if self.only_errors:
preds = n.zeros((data[0].shape[1], num_classes), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, num_classes), dtype=n.single)
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
data[0] = n.require(data[0][:,rand_idx], requirements='C')
data[1] = n.require(data[1][:,rand_idx], requirements='C')
data += [preds]
# Run the model
self.libmodel.startFeatureWriter(data, self.sotmax_idx)
self.finish_batch()
fig = pl.figure(3)
fig.text(.4, .95, '%s test case predictions' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
err_idx = nr.permutation(n.where(preds.argmax(axis=1) != data[1][0,:])[0])[:NUM_IMGS] # what the net got wrong
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
data[0] = self.test_data_provider.get_plottable_data(data[0])
for r in xrange(NUM_ROWS):
for c in xrange(NUM_COLS):
img_idx = r * NUM_COLS + c
if data[0].shape[0] <= img_idx:
break
pl.subplot(NUM_ROWS*2, NUM_COLS, r * 2 * NUM_COLS + c + 1)
pl.xticks([])
pl.yticks([])
greyscale = False
try:
img = data[0][img_idx,:,:,:]
except IndexError:
# maybe greyscale?
greyscale = True
img = data[0][img_idx,:,:]
if len(img.shape) == 3 and img.shape[2]==1:
img = img.reshape(img.shape[:2])
greyscale = True
if not greyscale:
pl.imshow(img, interpolation='nearest')
else:
pl.imshow(img, interpolation='nearest', cmap=cm.Greys_r)
true_label = int(data[1][0,img_idx])
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
pl.subplot(NUM_ROWS*2, NUM_COLS, (r * 2 + 1) * NUM_COLS + c + 1, aspect='equal')
ylocs = n.array(range(NUM_TOP_CLASSES)) + 0.5
height = 0.5
width = max(ylocs)
pl.barh(ylocs, [l[0]*width for l in img_labels], height=height, \
color=['r' if l[1] == label_names[true_label] else 'b' for l in img_labels])
pl.title(label_names[true_label])
pl.yticks(ylocs + height/2, [l[1] for l in img_labels])
pl.xticks([width/2.0, width], ['50%', ''])
pl.ylim(0, ylocs[-1] + height*2)
def rect_overlap(self, bbgt, bb):
bi=[max(bb[0],bbgt[0]), max(bb[1],bbgt[1]), min(bb[2],bbgt[2]), min(bb[3],bbgt[3])];
iw=bi[2]-bi[0]+1;
ih=bi[3]-bi[1]+1;
ov=0
if iw>0 and ih>0:
ua=(bb[2]-bb[0]+1)*(bb[3]-bb[1]+1) + (bbgt[2]-bbgt[0]+1)*(bbgt[3]-bbgt[1]+1) - iw*ih
ov=iw*ih*1.0/ua
return ov
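# Example: rect_overlap([1, 1, 10, 10], [6, 6, 15, 15]) intersects on a
# 5x5 patch (25 px) out of a union of 175 px, giving roughly 0.143.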
def increase_acc_count(self, pred, nacc, ncnt):
if ncnt==0:
self.remain_pred=pred[0:0]
pred=numpy.concatenate((self.remain_pred, pred), axis=0)
base_view=int(numpy.sqrt(self.mult_view))
idx=range(0, len(pred), self.mult_view)
if len(pred)%self.mult_view!=0:
idx=idx[:-1]
for i in idx:
imid=self.imgList[ncnt]
ncnt+=1
sz=self.imgSize[imid-1]
l=int(self.scale_view*min(sz[0], sz[1]))
ll=224.0
b=[1, 1, sz[1], sz[0]] #global bbx
#b=self.bbx[imid-1][0] #gt bbx
bx = (b[1]+b[3])/2.0
by = (b[0]+b[2])/2.0
x0 = max(0, bx-l)
x1 = min(sz[0]-l, bx)
y0 = max(0, by-l)
y1 = min(sz[1]-l, by)
bpred = numpy.array([0, 0, 0, 0])
for j in range(self.mult_view):
cidx=j%base_view
ridx=j/base_view
dx = int(x0+(x1-x0)/(base_view-1)*cidx)
dy = int(y0+(y1-y0)/(base_view-1)*ridx)
bpred += pred[i+j]*l/ll + numpy.array([dy, dx, dy, dx]) + 1
bpred=numpy.round(bpred/self.mult_view).astype(int)
ov = self.rect_overlap(self.bbx[imid-1][0], bpred)
if ov > 0.5:
nacc+=1;
self.remain_pred=pred[len(idx)*self.mult_view:]
#print 'remain: ', self.remain_label
return (nacc, ncnt)
def do_write_features(self):
if len(self.feature_path)==0: #evaluate only
print "evaluation mode, no feature will be saved"
nacc = 0
ncnt = 0
self.mult_view=max(1, self.mult_view)
elif not os.path.exists(self.feature_path):
os.makedirs(self.feature_path)
next_data = self.get_next_batch(train=False)
b1 = next_data[1]
num_ftrs = self.layers[self.ftr_layer_idx]['outputs']
data_dims = [_.shape[0] for _ in next_data[2]]
print "input data dimensions: {}".format(data_dims)
assert(data_dims.count(4)==1)
label_idx = data_dims.index(4) # regression data
sz=sio.loadmat(self.size_file)
self.imgSize=[(sz['imsize'][0,i][0,0][0][0,0], sz['imsize'][0,i][0,0][1][0,0]) for i in range(len(sz['imsize'][0]))]
if len(self.feature_path)==0:
self.imgList=readLines(self.img_list)
self.imgList=[int(_.rstrip()) for _ in self.imgList]
print "%d images found" % len(self.imgList)
bb=sio.loadmat(self.bbx_file)
self.bbx=[bb['res'][0][i][0,0][0] for i in range(len(bb['res'][0]))]
assert(self.bbx[0].shape[1]==4)
print "%d bbxes loaded" % len(self.bbx)
print "writing features: layer idx={}, {} fitlers, label_idx={}".format(self.ftr_layer_idx, num_ftrs, label_idx)
print "starting from batch: {}".format(b1)
while True:
batch = next_data[1]
data = next_data[2]
ftrs = n.zeros((data[0].shape[1], num_ftrs), dtype=n.single)
self.libmodel.startFeatureWriter(data + [ftrs], self.ftr_layer_idx)
# load the next batch while the current one is computing
next_data = self.get_next_batch(train=False)
self.finish_batch()
ftrs=ftrs*self.lin_scale #predited, zero data input, bbx multiplier
output = {'source_model':self.load_file, 'num_vis':num_ftrs, 'data': ftrs, 'labels': data[label_idx]}
try:
output['aux'] = self.test_data_provider.getftraux()
except AttributeError:
pass
if len(self.feature_path)==0: #evaluate only
nacc, ncnt=self.increase_acc_count(ftrs, nacc, ncnt)
if ncnt>0:
print "Batch %d evaluated: %.2f" % (batch, 1.0*nacc/ncnt*100)
else:
path_out = os.path.join(self.feature_path, 'data_batch_%d' % batch)
pickle(path_out,output)
print "Wrote feature file %s" % path_out
sys.stdout.flush()
if next_data[1] == b1:
break
if len(self.feature_path)==0: #evaluate only
print "overall accuracy: %.3f%%" % (1.0*nacc/ncnt*100)
def do_write_pixel_proj(self):
if not os.path.exists(self.feature_path):
os.makedirs(self.feature_path)
next_data = self.get_next_batch(train=False)
b1 = next_data[1]
num_ftrs = self.layers[self.ftr_layer_idx]['outputs']
while True:
batch = next_data[1]
data = next_data[2]
ftrs = n.zeros((data[0].shape[1], num_ftrs), dtype=n.single)
projs = n.zeros(data[0].shape, dtype=n.single)
print data[0].shape
self.libmodel.startProjWriter(data + [projs], self.ftr_layer_idx, self.ftr_res_idx)
# load the next batch while the current one is computing
next_data = self.get_next_batch(train=False)
self.finish_batch()
path_out = os.path.join(self.feature_path, 'data_batch_%d' % batch)
output = {'source_model':self.load_file, 'ftridx':self.ftr_layer_idx, 'data': projs, 'labels':data[1]}
try:
output['aux'] = self.test_data_provider.getftraux()
except AttributeError:
pass
pickle(path_out,output)
print "Wrote res file %s" % path_out
if next_data[1] == b1:
break
# pickle(os.path.join(self.feature_path, 'batches.meta'), output)
def start(self):
if self.verbose:
self.op.print_values()
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if self.write_features:
self.do_write_features()
if self.write_pixel_proj:
self.do_write_pixel_proj()
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('data_path_train', 'data_path_test', 'dp_type_train', 'dp_type_test', 'gpu', 'rnorm_const', 'img_provider_file', 'load_file', 'train_batch_range', 'test_batch_range', 'verbose'):
op.delete_option(option)
op.add_option("test-only", "test_only", BooleanOptionParser, "Test and quit?", default=1)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("write-features", "write_features", StringOptionParser, "Write test data features from given layer", default="", requires=['feature-path'])
op.add_option("feature-path", "feature_path", StringOptionParser, "Write test data features to this path (to be used with --write-features)", default="")
op.add_option("write-pixel-proj", "write_pixel_proj", StringOptionParser, "Write the projection of some response on pixel space", default = "", requires=['response_idx'])
op.add_option("multiview", "mult_view", IntegerOptionParser, "Number of views for multi-view testing", default=1)
op.add_option("scaleview", "scale_view", FloatOptionParser, "Scaling factor of the views in multi-view testing", default=1.0)
op.add_option("bbxfile", "bbx_file", StringOptionParser, "Contains ground truth bounding box for each image", default="")
op.add_option("sizefile", "size_file", StringOptionParser, "Contains size of each image", default="")
op.add_option("imglist", "img_list", StringOptionParser, "Image list file", default="")
op.add_option("linscale", "lin_scale", FloatOptionParser, "Linear scale of bbx prediction", default=1.0)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| gpl-2.0 |
YihaoLu/statsmodels | statsmodels/examples/l1_demo/sklearn_compare.py | 33 | 3710 | """
For comparison with sklearn.linear_model.LogisticRegression
Computes a regularization path with both packages. The coefficient values in
either path are related by a "constant" in the sense that for any fixed
value of the constraint C and log likelihood, there exists an l1
regularization constant alpha such that the optimal solutions should be
the same. Note that alpha(C) is a nonlinear function in general. Here we
find alpha(C) by finding a reparameterization of the statsmodels path that
makes the paths match up. An equation is available, but to use it I would
need to hack the sklearn code to extract the gradient of the log
likelihood.
The results "prove" that the regularization paths are the same. Note that
finding the reparameterization is non-trivial since the coefficient paths
are NOT monotonic. As a result, the paths don't match up perfectly.
"""
from __future__ import print_function
from statsmodels.compat.python import range, lrange
from sklearn import linear_model
from sklearn import datasets
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt
import pdb # pdb.set_trace
import sys
## Decide which dataset to use
# Use either spector or anes96
use_spector = False
#### Load data
## The Spector and Mazzeo (1980) data from statsmodels
if use_spector:
spector_data = sm.datasets.spector.load()
X = spector_data.exog
Y = spector_data.endog
else:
raise Exception(
"The anes96 dataset is now loaded in as a short version that cannot "\
"be used here")
anes96_data = sm.datasets.anes96.load_pandas()
Y = anes96_data.exog.vote
#### Fit and plot results
N = 200 # number of points to solve at
K = X.shape[1]
## Statsmodels
logit_mod = sm.Logit(Y, X)
sm_coeff = np.zeros((N, K)) # Holds the coefficients
if use_spector:
alphas = 1 / np.logspace(-1, 2, N) # for spector_data
else:
alphas = 1 / np.logspace(-3, 2, N) # for anes96_data
for n, alpha in enumerate(alphas):
logit_res = logit_mod.fit_regularized(
method='l1', alpha=alpha, disp=False, trim_mode='off')
sm_coeff[n,:] = logit_res.params
## Sklearn
sk_coeff = np.zeros((N, K))
if use_spector:
Cs = np.logspace(-0.45, 2, N)
else:
Cs = np.logspace(-2.6, 0, N)
for n, C in enumerate(Cs):
clf = linear_model.LogisticRegression(
C=C, penalty='l1', fit_intercept=False)
clf.fit(X, Y)
sk_coeff[n, :] = clf.coef_
## Get the reparametrization of sm_coeff that makes the paths equal
# Do this by finding one single re-parameterization of the second coefficient
# that makes the path for the second coefficient (almost) identical. This
# same parameterization will work for the other two coefficients since the
# regularization coefficients (in sk and sm) are related by a constant.
#
# special_X is chosen since this coefficient becomes non-zero before the
# other two...and is relatively monotonic...with both datasets.
sk_special_X = np.fabs(sk_coeff[:,2])
sm_special_X = np.fabs(sm_coeff[:,2])
s = np.zeros(N)
# Note that sk_special_X will not always be perfectly sorted...
s = np.searchsorted(sk_special_X, sm_special_X)
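# A hedged aside (illustration only, nothing below uses it): since sk_special_X is not
# guaranteed to be monotonic, a slightly more robust mapping could sort it first and
# translate the resulting indices back to the original (unsorted) path, e.g.:
_order = np.argsort(sk_special_X)
_idx = np.searchsorted(sk_special_X[_order], sm_special_X)
s_robust = _order[np.clip(_idx, 0, N - 1)]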
## Plot
plt.figure(2);plt.clf();plt.grid()
plt.xlabel('Index in sklearn simulation')
plt.ylabel('Coefficient value')
plt.title('Regularization Paths')
colors = ['b', 'r', 'k', 'g', 'm', 'c', 'y']
for coeff, name in [(sm_coeff, 'sm'), (sk_coeff, 'sk')]:
if name == 'sk':
ltype = 'x' # linetype
t = lrange(N) # The 'time' parameter
else:
ltype = 'o'
t = s
for i in range(K):
plt.plot(t, coeff[:,i], ltype+colors[i], label=name+'-X'+str(i))
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
lucidfrontier45/scikit-learn | sklearn/utils/setup.py | 1 | 2475 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('arraybuilder', sources=['arraybuilder.c'])
config.add_extension('sparsefuncs', sources=['sparsefuncs.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.cpp', join('src', 'Gamma.cpp')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("random",
sources=["random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
3324fr/spinalcordtoolbox | dev/old/sct_straighten_spinalcord__testing.py | 1 | 144941 | #!/usr/bin/env python
## @package sct_straighten_spinalcord
#
# - run a gaussian weighted slice by slice registration over a machine to straighten the spinal cord.
# - use the centerline previously calculated during the registration but fitted with spline interpolation to improve the spinal cord straightening
# - find the corresponding warping field (non-linear transformation) to straighten the spine relatively to each orthogonal plane of the centerline fitted
#
#
# Description about how the function works:
#
# 1. slice-wise realignement
# ------------------------------------------------
# input: center
# the algorithm iterates in the upward and then downward direction (along Z). For each direction, it registers slice i+1 onto slice i. It does that using a gaussian mask, centered on the spinal cord, which is applied to both slices i and i+1 (inweight and refweight from flirt).
# NB: the code works on APRLIS
# output:
# - centerline
# - straightened image (imperfect, because only plane-wise translations are applied, so structures are distorted where curvature is high)
# - gaussian mask
# - transformation matrices Tx,Ty
#
# 2. smoothing of the centerline
# ------------------------------------------------
# input: centerline, transfo matrices
# it fits in 3d (using an independent decomposition of the planes XZ and YZ) a spline function of order 1.
# apply the smoothed transformation matrices to the image --> gives a "smoothly" straightened spinal cord (but still with errors due to deformation of structure related to curvature)
# output:
# - centerline fitted
# - transformation matrices Tx,Ty smoothed
# - straightened spinal cord
#
# 3. estimation of warping
# ------------------------------------------------
# input: centerline smooth
# parametrizes the centerline, i.e., finds its equation
# create two landmark images:
# - source landmarks: landmarks that represents a cross (i.e., 5 landmarks to define the cross), the cross being centered at the centerline, and oriented along a plane orthogonal to the centerline (calculated using the above equation).
# - destination landmarks: landmarks that represents a cross, the cross being centered at a vertical centerline (the one we want to register the input centerline to), and oriented along AP and RL.
# N.B.: a "landmark" corresponds to a voxel with a given integer value; landmarks are positioned at equal distances.
#
# Once the two sets of crosses are created, a volume representing the straightened spinal cord is generated. The way this volume is generated is by:
# - sampling the smooth centerline at a specific step. This step corresponds to the size of the pixel in the z direction. E.g., for a 1x1x1mm acquisition, the step is 1 mm.
# - for each sample, the plane orthogonal to the centerline will be used to create a new slice in a destination volume. The output of this procedure is a stack of slices orthogonal to the centerline. Note that the destination (straight) centerline is positioned at the XY center of the destination volume. (An illustrative numpy sketch of such an orthogonal-plane basis is placed right after this header comment.)
# NB: The advantage of this approach is that, conversely to the previously straightened spinal cord, which had unwanted curvature-dependent deformations, this reconstruction will correctly XXX reconstruct the spinal cord by straightening it along its curvilinear abscissa. Moreover, as opposed to the previous attempts for straightening the spinal cord [ref: horsfield], the step along the centerline will NOT correspond to the z of the input image, but to a fixed step based on the native z-voxel resolution. This is important, as in case of large curvature, a non-fixed step (i.e., based on the z of the input image) will result in compression/extension of the structure. This is not desirable if the end goal is to register the spinal cord to an anatomical template.
#
# Once the destination volume with the straightened spinal cord is created, the volumes are padded by 50 voxels on the top and bottom of the volume. The reason for padding is that, from our experience, when ANTS generates a deformation field, this deformation field is constrained at the edges of the images, i.e., without padding, there would be no effective deformation at Z=0 and Z=end. The padded volumes are the following:
# - src
# - src landmarks ("orthogonal_landmarks" in the code)
# - dest (straightened spinal cord-- using orthogonal interpolation)
# - dest landmarks ("horizontal_landmarks" in the code)
#
# When the above things are done, ANTS is applied to estimate a deformation field. The methods employed are: PSE (landmark-based) and CC (using the src and dest images).
# The warping field is then applied to the src image in order to give the user the straightened spinal cord.
# output:
# - warping field (and inverted) + affine
# - straightened spinal cord
#
#
# USAGE
# ---------------------------------------------------------------------------------------
# sct_straighten_spinalcord.py -i <data> -p <binary>
#
# - -h help. Show this message.
#
# MANDATORY ARGUMENTS
# ---------------------------------------------------------------------------------------
# - -i anatomic nifti file. Image to straighten.
# - -p binary nifti file. Image used as initialization for the straightening process.
#
# OPTIONAL ARGUMENTS
# ---------------------------------------------------------------------------------------
# - -o nifti file. Spinal cord straightened using slice-by-slice gaussian weighted registration.
# - -m nifti file. Gaussian mask centered along the spinal cord.
# - -g [gap] integer. Gap between slices used for registration. [Default: gap=1].
# - -d [deform 0|1] deformation field. Determine a non-linear transformation (Warping field + Affine transform) to straighten the spinal cord based on an orthogonal centerline plane resampling approach. [Default: deform=0].
#
# EXAMPLES
# ---------------------------------------------------------------------------------------
# - Straighten the spine using only a slice-by-slice gaussian-weighted registration. For example:
# sct_straighten_spinalcord.py -i t2.nii.gz -p binary_t2.nii.gz
#
# - Straighten the spine using a slice-by-slice gaussian-weighted registration and a fitted centerline (spline interpolation). For example:
# sct_straighten_spinalcord.py -i t2.nii.gz -p binary_t2.nii.gz -r 1
#
# - Find the warping transformation (warping field + affine transform) to straighten the spine using an orthogonal centerline planes resampling approach. For example:
# sct_straighten_spinalcord.py -i t2.nii.gz -p binary_t2.nii.gz -d 1
#
#
# DEPENDENCIES
# ---------------------------------------------------------------------------------------
# EXTERNAL PYTHON PACKAGES
# - nibabel: <http://nipy.sourceforge.net/nibabel/>
# - scipy: <http://www.scipy.org>
# - numpy: <http://www.numpy.org>
# - sympy: <http://sympy.org/en/index.html>
# - PIL: <http://www.pythonware.com/products/pil/>
#
# EXTERNAL SOFTWARE
# - FSL: <http://fsl.fmrib.ox.ac.uk/fsl/>
# - ANTs: <http://stnava.github.io/ANTs/>
# - sct_orientation: get the spatial orientation of an input image
#
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 NeuroPoly, Polytechnique Montreal <www.neuropoly.info>
# Author: Geoffrey LEVEQUE
# Modified: 2013-12-06
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# TODO: output png of sagittal slice centered at the middle of the destination straightened spinal cord -> done -- functioning
# TODO: for the orthogonal resampling, reduce the XY size of the destination image. -> not functioning 2013-12-19
# TODO: at the end, apply the deformation field on the UN-PADDED image (i.e., the source) -> done -- functioning
# TODO: use .nii throughout the whole code to be user-environment-variable-independent : fsloutput = 'export FSLOUTPUTTYPE=NIFTI; ' # for faster processing, all outputs are in NIFTI -> later
# TODO: things to test with batch
# - landmarks with different values -> test on /scripts -> done -- functioning
# - 3d smooth of the input image (e.g., 3mm gaussian kernel) -> later
# TODO: crop the warping field to match the size of the input volume -> not functioning 2013-12-19
# NOTE FOR THE DEVELOPER:
# ---------------------------------------------------------------------------------------
# for the module that creates the landmarks, if you want to generate landmarks with different or similar values, go to the line of code that says: "landmarks_orthogonal"...
# there are 5 lines of code to change!
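# ---------------------------------------------------------------------------------------
# Illustrative sketch (NOT called by this script; the actual implementation relies on FSL
# and ANTs tools): one possible way to build, with numpy and scipy only, an orthonormal
# basis of the plane orthogonal to the fitted centerline at a given z, as described in the
# "estimation of warping" step above. The spline representations tck_x and tck_y are
# assumed to come from scipy.interpolate.splrep applied to the centerline coordinates.
def _example_orthogonal_plane_basis(tck_x, tck_y, z):
    import numpy
    from scipy import interpolate
    # tangent of the curve (x(z), y(z), z): spline derivatives with respect to z
    dx = float(interpolate.splev(z, tck_x, der=1))
    dy = float(interpolate.splev(z, tck_y, der=1))
    tangent = numpy.array([dx, dy, 1.0])
    tangent = tangent / numpy.linalg.norm(tangent)
    # first in-plane vector: cross the tangent with a vector that cannot be colinear
    # with it (the tangent always has a non-zero z component here)
    u = numpy.cross(tangent, numpy.array([1.0, 0.0, 0.0]))
    u = u / numpy.linalg.norm(u)
    # second in-plane vector completes the orthonormal basis
    v = numpy.cross(tangent, u)
    return u, v, tangent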
## Create a structure to pass important user parameters to the main function
class parameters:
## The constructor
def __init__(self):
self.schedule_file = 'schedule_TxTy_8mm_4mm.sch'
## @var schedule_file
self.debug = 1
## @var debug
self.order_interp_ZX = 1
## @var spline interpolation order ZX plane (with gap != 1)
self.order_interp_ZY = 1
## @var spline interpolation order ZY plane (with gap != 1)
self.order_interp_ZX_ZY = 1
## @var spline interpolation order ZX_ZY plane (with gap = 1)
self.step = 25
## @var gap between landmarks
self.landmarks_cross_size = 5
## @var distance between landmarks within the cross structure
# check if needed Python libraries are already installed or not
print 'Verify if needed Python libraries are already installed ...'
try:
from nibabel import load, save, Nifti1Image
print '--- nibabel already installed ---'
except ImportError:
print '--- nibabel not already installed ---'
exit(2)
try:
from numpy import array, loadtxt, cross
print '--- numpy already installed ---'
except ImportError:
print '--- numpy not already installed ---'
exit(2)
try:
from scipy.integrate import quad
from scipy import interpolate
print '--- scipy already installed ---'
except ImportError:
print '--- scipy not already installed ---'
exit(2)
try:
from sympy import Symbol, nsolve
print '--- sympy already installed ---'
except ImportError:
print '--- sympy not already installed ---'
exit(2)
from fnmatch import filter
from csv import reader
from math import sqrt
from os import path, walk
from getopt import getopt, GetoptError
from commands import getstatusoutput
from sys import exit, argv
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from scipy import ndimage
import numpy as np
import operator
## Extracts path, file and extension
def extract_fname(fname):
# extract path
path_fname = path.dirname(fname)+'/'
# check if only single file was entered (without path)
if path_fname == '/':
path_fname = ''
# extract file and extension
file_fname = fname
file_fname = file_fname.replace(path_fname,'')
file_fname, ext_fname = path.splitext(file_fname)
# check if .nii.gz file
if ext_fname == '.gz':
file_fname = file_fname[0:len(file_fname)-4]
ext_fname = ".nii.gz"
return path_fname, file_fname, ext_fname
## Check existence of a file
def exist_image(fname):
if path.isfile(fname) or path.isfile(fname + '.nii') or path.isfile(fname + '.nii.gz'):
pass
else:
print('\nERROR: ' + fname + ' does not exist. Exit program.\n')
exit(2)
## Find the coordinate in the sagittal plane of the binary point needed for the centerline initilization
def find_initial_mask_z_coordinate(fname):
# read file
initial_file_name = fname
file = load(initial_file_name)
# get the image data
data = file.get_data()
X, Y, Z = (data > 0).nonzero()
Z = Z[0]
reference = Z
return reference
## Find the dimension of the binary image needed for the centerline initilization in the coronal plane
def find_initial_mask_lenght(fname):
#read binary file
initial_file_name = fname
file = load(initial_file_name)
dimX, dimY, dimZ = file.get_header().get_data_shape()
return dimX
## Find the dimension of the binary image needed for the centerline initilization in the sagital plane
def find_initial_mask_width(fname):
#read binary file
initial_file_name = fname
file = load(initial_file_name)
dimX, dimY, dimZ = file.get_header().get_data_shape()
return dimY
## Find the centerline coordinates points used for the centerline initialization
def find_centerline_coordinates(volume, variable, reference, distance):
Z = 0
if variable == reference:
# create txt centerline files only one time (up move)
if distance > 0:
fileID = open('tmp.centerX.txt','w')
fileID.close()
fileID = open('tmp.centerY.txt','w')
fileID.close()
fileID = open('tmp.centerZ.txt','w')
fileID.close()
fileID = open('tmp.center.txt','w')
fileID.close()
reference = int(reference)
Z=int(variable)
# read centerline file
initial_mask_name = volume + '_splitZ' + str(reference).zfill(4) + '-mask.nii.gz'
print "mask name : " + initial_mask_name
centerline = load(initial_mask_name)
#dimX, dimY = centerline.get_header().get_data_shape()
#print "dimX : " + str(dimX)
#print "dimY : " + str(dimY)
#Z = variable
#print "Z : " + str(Z)
# get the image data
data = centerline.get_data()
X, Y = (data > 0).nonzero()
X = X[0]
Y = Y[0]
print 'reference point coordinates: ' + str(X) + ' ; ' + str(Y) + ' ; ' + str(Z)
centerX = X
centerY = Y
centerZ = Z
# write reference centerline point only one time (down move)
if distance < 0:
fileID = open('tmp.centerX.txt','a')
fileID.write("%f \n" %centerX)
fileID.close()
fileID = open('tmp.centerY.txt','a')
fileID.write("%f \n" %centerY)
fileID.close()
fileID = open('tmp.centerZ.txt','a')
fileID.write("%f \n" %centerZ)
fileID.close()
fileID = open('tmp.center.txt','a')
fileID.write('%f\t%f\t%f\n' %(centerX, centerY, centerZ))
fileID.close()
Z = Z + distance
# import .mat transformation matrix
omat_inv = loadtxt('tmp.omat_cumul_inv.mat')
X = X + omat_inv[0][3]
Y = Y + omat_inv[1][3]
print 'centerline coordinates: ' + str(X) + ' ; ' + str(Y) + ' ; ' + str(Z)
centerX = X
centerY = Y
centerZ = Z
fileID = open('tmp.centerX.txt','a')
fileID.write("%f \n" %centerX)
fileID.close()
fileID = open('tmp.centerY.txt','a')
fileID.write("%f \n" %centerY)
fileID.close()
fileID = open('tmp.centerZ.txt','a')
fileID.write("%f \n" %centerZ)
fileID.close()
fileID = open('tmp.center.txt','a')
fileID.write('%f\t%f\t%f\n' %(centerX, centerY, centerZ))
fileID.close()
else:
Z1 = int(variable) + distance
reference = int(reference)
# read centerline file
initial_mask_name = volume + '_splitZ' + str(reference).zfill(4) + '-mask.nii.gz'
print "mask name : " + initial_mask_name
centerline = load(initial_mask_name)
#dimX, dimY = centerline.get_header().get_data_shape()
#print "dimX : " + str(dimX)
#print "dimY : " + str(dimY)
#Z = variable
#print "dimZ : " + str(Z)
# get the image data
data = centerline.get_data()
X, Y = (data > 0).nonzero()
X = X[0]
Y = Y[0]
print 'reference point coordinates: ' + str(X) + ' ; ' + str(Y) + ' ; ' + str(Z)
# import .mat matrix
omat_cumul_inv = loadtxt('tmp.omat_cumul_inv.mat')
X = X + omat_cumul_inv[0][3]
Y = Y + omat_cumul_inv[1][3]
print 'centerline coordinates: ' + str(X) + ' ; ' + str(Y) + ' ; ' + str(Z1)
centerX = X
centerY = Y
centerZ = Z1
fileID = open('tmp.centerX.txt','a')
fileID.write("%f \n" %centerX)
fileID.close()
fileID = open('tmp.centerY.txt','a')
fileID.write("%f \n" %centerY)
fileID.close()
fileID = open('tmp.centerZ.txt','a')
fileID.write("%f \n" %centerZ)
fileID.close()
fileID = open('tmp.center.txt','a')
fileID.write('%f\t%f\t%f\n' %(centerX, centerY, centerZ))
fileID.close()
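# Illustrative helper (not called anywhere; assumption: the FLIRT .mat files handled by this
# script are plain-text 4x4 affines whose only non-trivial entries are the X/Y translations).
# It simply restates what the code above does when it adds omat_inv[0][3] and omat_inv[1][3]
# to the in-plane coordinates of the reference point.
def _example_apply_flirt_translation(x, y, omat):
    # omat: 4x4 matrix loaded with numpy.loadtxt; returns the translated in-plane point
    return x + omat[0][3], y + omat[1][3]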
## Create slice by slice transformation matrices from fitted centerline
def apply_fitted_transfo_to_matrices( file_volume, binary, fname, reference, slice, distance ):
# recover the centerline points coordinates stored in center.txt file
orderedcenter = 'tmp.center.txt'
file = open(orderedcenter, 'rb')
data_ordered_center = reader(file, delimiter='\t')
table_ordered_center = [row for row in data_ordered_center]
lenght = len(table_ordered_center)
for i in range(0,lenght):
table_ordered_center[i][2] = float(table_ordered_center[i][2])
# sort the list in z expanded way
table_ordered_center = sorted(table_ordered_center, key=operator.itemgetter(2))
# count all the lines not empty in the txt file to determine the size of the M matrix defined below
lines_counter = 0
with open(orderedcenter) as f:
for line in f:
if line != '\n':
lines_counter += 1
lenght = lines_counter
print "Number of centerline points:"
print lenght
X_init = [0 for x in xrange(0, lenght)]
Y_init = [0 for x in xrange(0, lenght)]
Z_init = [0 for x in xrange(0, lenght)]
i = 0
while (i < lenght):
X_init[i]=float(table_ordered_center[i][0])
Y_init[i]=float(table_ordered_center[i][1])
Z_init[i]=float(table_ordered_center[i][2])
i = i + 1
X = array(X_init)
Y = array(Y_init)
Z = array(Z_init)
if distance != 1:
# centerline fitting using InterpolatedUnivariateSpline
tck_X = interpolate.splrep(Z,X,s=parameters.order_interp_ZX)
Xnew = interpolate.splev(Z,tck_X,der=0)
tck_X_order_2 = interpolate.splrep(Z,X,s=2)
Xnew_order_2 = interpolate.splev(Z,tck_X_order_2,der=0)
tck_X_order_10 = interpolate.splrep(Z,X,s=10)
Xnew_order_10 = interpolate.splev(Z,tck_X_order_10,der=0)
#plt.figure()
#plt.plot(Z,X,'.-',label='Linear')
#plt.plot(Z,Xnew,'r',label='Spline interpolation: order=' + parameters.order_interp_ZX)
#plt.plot(Z,Xnew_order_2,'g',label='Spline interpolation: order=2')
#plt.plot(Z,Xnew_order_10,'c',label='Spline interpolation: order=10')
#plt.legend(loc='upper right')
#plt.title('Z-X plane polynomial interpolation')
#plt.show()
tck_Y = interpolate.splrep(Z,Y,s=parameters.order_interp_ZY)
Ynew = interpolate.splev(Z,tck_Y,der=0)
tck_Y_order_2 = interpolate.splrep(Z,Y,s=2)
Ynew_order_2 = interpolate.splev(Z,tck_Y_order_2,der=0)
tck_Y_order_10 = interpolate.splrep(Z,Y,s=10)
Ynew_order_10 = interpolate.splev(Z,tck_Y_order_10,der=0)
#plt.figure()
#plt.plot(Z,Y,'.-',label='Linear')
#plt.plot(Z,Ynew,'r',label='Spline interpolation: order=' + parameters.order_interp_ZY)
#plt.plot(Z,Ynew_order_2,'g',label='Spline interpolation: order=2')
#plt.plot(Z,Ynew_order_10,'c',label='Spline interpolation: order=10')
#plt.legend(loc='upper right')
#plt.title('Z-Y plane polynomial interpolation')
#plt.show()
# calculate the missing centerline point due to the slice gap with the fitted centerline curve equation
xf = [0 for x in xrange(0, slice+2)]
yf = [0 for x in xrange(0, slice+2)]
zf = [0 for x in xrange(0, slice+2)]
z = 0
while z < slice+2:
x_gap = interpolate.splev(z,tck_X,der=0)
xf[z] = x_gap
y_gap = interpolate.splev(z,tck_Y,der=0)
yf[z] = y_gap
zf[z] = z
# next iteration
z = z + 1
Xf = array(xf)
Yf = array(yf)
Zf = array(zf)
#plt.figure()
#plt.plot(Zf,Xf,'.-')
#plt.legend(['spline interpolation: order=' + str(parameters.order_interp_ZX)])
#plt.title('Z-X plane polynomial interpolation extended to all centerline points')
#plt.show()
#plt.figure()
#plt.plot(Zf,Yf,'.-')
#plt.legend(['spline interpolation: order=' + str(parameters.order_interp_ZY)])
#plt.title('Z-Y plane polynomial interpolation extended to all centerline points')
#plt.show()
print '******************************************************************************'
print 'Write txt files for a slice gap: ' + str(distance)
fileID = open('tmp.centerline_fitted.txt', 'w')
a = len(xf)
for i in range(0, a):
fileID.write('%f\t%f\t%f\n' %(float(xf[i]), float(yf[i]), float(zf[i])))
fileID.close()
print 'Write: ' + 'tmp.centerline_fitted.txt'
if parameters.debug == 1:
fileID = open('txt_files/centerline_fitted.txt', 'w')
a = len(xf)
for i in range(0, a):
fileID.write('%f\t%f\t%f\n' %(float(xf[i]), float(yf[i]), float(zf[i])))
fileID.close()
print 'Write: ' + 'txt_files/centerline_fitted.txt'
fileID = open('txt_files/centerline_fitted_pse.txt', 'w')
a = len(xf)
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
for i in range(0, a):
fileID.write('%f\t%f\t%f\t%f\n' %(float(xf[i]), float(yf[i]), float(zf[i]), 1))
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
print 'Write: ' + 'txt_files/centerline_fitted_pse.txt'
fileID = open('txt_files/centerline_fitted_pse_pad50.txt', 'w')
a = len(xf)
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
for i in range(0, a):
fileID.write('%f\t%f\t%f\t%f\n' %(float(xf[i]), float(yf[i]), float(zf[i])+50, 1))
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
print 'Write: ' + 'txt_files/centerline_fitted_pse_pad50.txt'
print '******************************************************************************'
else:
print str(lenght)
print str(slice+1)
x = [0 for x in xrange(slice+2)]
y = [0 for x in xrange(slice+2)]
z = [0 for x in xrange(slice+2)]
x = X
y = Y
z = Z
print '******************************************************************************'
print 'Write txt files for slice gap: ' + str(distance)
if parameters.debug ==1:
fileID = open('txt_files/centerline.txt', 'w')
a = len(x)
for i in range(0, a):
fileID.write('%f\t%f\t%f\n' %(float(x[i]), float(y[i]), float(z[i])))
fileID.close()
print 'Write: ' + 'txt_files/centerline.txt'
fileID = open('txt_files/centerline_pse.txt', 'w')
a = len(x)
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
for i in range(0, a):
fileID.write('%f\t%f\t%f\t%f\n' %(float(x[i]), float(y[i]), float(z[i]), 1))
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
print 'Write: ' + 'txt_files/centerline_pse.txt'
fileID = open('txt_files/centerline_pse_pad50.txt', 'w')
a = len(x)
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
for i in range(0, a):
fileID.write('%f\t%f\t%f\t%f\n' %(float(x[i]), float(y[i]), float(z[i])+50, 1))
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
print 'Write: ' + 'txt_files/centerline_pse_pad50.txt'
reference=int(reference)
fileID = open('txt_files/centerline_straightened_pse.txt', 'w')
a = len(x)
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
for i in range(0, a):
fileID.write('%f\t%f\t%f\t%f\n' %(float(x[reference]), float(y[reference]), float(z[i]), 1))
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
print 'Write: ' + 'txt_files/centerline_straightened_pse.txt'
reference=int(reference)
fileID = open('txt_files/centerline_straightened_pse_pad50.txt', 'w')
a = len(x)
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
for i in range(0, a):
fileID.write('%f\t%f\t%f\t%f\n' %(float(x[reference]), float(y[reference]), float(z[i])+50, 1))
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
print 'Write: ' + 'txt_files/centerline_straightened_pse_pad50.txt'
print '******************************************************************************'
# centerline fitting using InterpolatedUnivariateSpline
tck_X = interpolate.splrep(Z, X, s=parameters.order_interp_ZX_ZY)
Xnew = interpolate.splev(Z,tck_X,der=0)
#plt.figure()
#plt.plot(Z,X,'.-',Z,Xnew,'r')
#plt.legend(['Linear','spline interpolation: order=' + str(parameters.order_interp_ZX_ZY)])
#plt.title('Z-X plane polynomial interpolation')
#plt.show()
tck_Y = interpolate.splrep(Z, Y, s=parameters.order_interp_ZX_ZY)
Ynew = interpolate.splev(Z,tck_Y,der=0)
#plt.figure()
#plt.plot(Z,Y,'.-',Z,Ynew,'r')
#plt.legend(['Linear','spline interpolation: order=' + str(parameters.order_interp_ZX_ZY)])
#plt.title('Z-Y plane polynomial interpolation')
#plt.show()
x_final = [0 for x in xrange(slice+2)]
y_final = [0 for x in xrange(slice+2)]
z_final = [0 for x in xrange(slice+2)]
x_final = Xnew
y_final = Ynew
z_final = Z
print '******************************************************************************'
print 'Write txt files for slice gap: ' + str(distance)
fileID = open('tmp.centerline_fitted.txt', 'w')
a = len(x_final)
for i in range(0, a):
fileID.write('%f\t%f\t%f\n' %(float(x_final[i]), float(y_final[i]), float(z_final[i])))
fileID.close()
print 'Write: ' + 'tmp.centerline_fitted.txt'
if parameters.debug == 1:
fileID = open('txt_files/centerline_fitted.txt', 'w')
a = len(x_final)
for i in range(0, a):
fileID.write('%f\t%f\t%f\n' %(float(x_final[i]), float(y_final[i]), float(z_final[i])))
fileID.close()
print 'Write: ' + 'txt_files/centerline_fitted.txt'
fileID = open('txt_files/centerline_fitted_pse.txt', 'w')
a = len(x_final)
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
for i in range(0, a):
fileID.write('%f\t%f\t%f\t%f\n' %(float(x_final[i]), float(y_final[i]), float(z_final[i]), 1))
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
print 'Write: ' + 'txt_files/centerline_fitted_pse.txt'
fileID = open('txt_files/centerline_fitted_pse_pad50.txt', 'w')
a = len(x_final)
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
for i in range(0, a):
fileID.write('%f\t%f\t%f\t%f\n' %(float(x_final[i]), float(y_final[i]), float(z_final[i])+50, 1))
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
print 'Write: ' + 'txt_files/centerline_fitted_pse_pad50.txt'
print '******************************************************************************'
fileID = 'tmp.centerline_fitted.txt'
# read file
fid = open(fileID,'rb')
data = reader(fid, delimiter='\t')
table = [row for row in data]
fid.close()
table_numpy = array(table)
x_fitted = table_numpy[:, 0]
y_fitted = table_numpy[:, 1]
z_fitted = table_numpy[:, 2]
# use nibabel to read the binary volume
centerline = load(binary + '.nii.gz')
img = centerline.get_data()
shape = img.shape
print "Input volume size:"
print shape
# copy header of input volume
hdr_binary = centerline.get_header()
hdr_binary_copy = hdr_binary.copy()
reference = int(reference)
for i in range(0,a):
if i != reference:
img[int(float(x_fitted[i]))][int(float(y_fitted[i]))][int(float(z_fitted[i]))]=1
fcenterline = fname + '_APRLIS_centerline_fitted.nii.gz'
# save the new fitted centerline volume
data_numpy = array(img)
img = Nifti1Image(data_numpy, None, hdr_binary_copy)
save(img, fcenterline)
centerline = load(binary + '.nii.gz')
img = centerline.get_data()
for i in range(0,a):
if i != reference:
img[int(float(x_fitted[reference]))][int(float(y_fitted[reference]))][int(float(z_fitted[i]))]=1
fcenterline_straightened = fname + '_APRLIS_centerline_straightened.nii.gz'
# save the new straightened centerline volume
data_numpy = array(img)
img = Nifti1Image(data_numpy, None, hdr_binary_copy)
save(img, fcenterline_straightened)
# create all slice by slice cumulative transformation matrices
a = len(x_fitted)
for ref in range(0, a):
x_tansform = float(x_fitted[ref]) - float(x_fitted[0])
y_tansform = float(y_fitted[ref]) - float(y_fitted[0])
initial_mat_name = file_volume + '_splitZ' + str(ref).zfill(4) + '-omat_cumul.txt'
print '>> ' + initial_mat_name + ' created'
fid = open(initial_mat_name,'w')
fid.write('%f %f %f %f\n%f %f %f %f\n%f %f %f %f\n%f %f %f %f' %(1, 0, 0, -x_tansform, 0, 1, 0, -y_tansform, 0, 0, 1, 0, 0, 0, 0, 1))
fid.close()
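# For reference (illustration only, nothing in the script uses it): the cumulative matrices
# written above are pure in-plane translations; expressed as a numpy array they would read:
def _example_translation_matrix(dx, dy):
    import numpy
    return numpy.array([[1.0, 0.0, 0.0, -dx],
                        [0.0, 1.0, 0.0, -dy],
                        [0.0, 0.0, 1.0, 0.0],
                        [0.0, 0.0, 0.0, 1.0]])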
## Realize a slice by slice gaussian weighted registration over the anatomical volume to straighten the spinal cord using the fitted centerline and corresponding transformation matrices previously calculated
def apply_fitted_transfo_to_image(volume, reference, volume_fitted_straightened, distance):
VOLUME=volume
REFERENCE=reference
OUTPUT=volume_fitted_straightened
DISTANCE=distance
print '*************************************************************************************************************************'
print '* Image to straighten: ' + str(VOLUME)
print '* Reference slice: ' + str(REFERENCE)
print '* Spinal cord straightened using fitted centerline (spline interpolation): ' + str(OUTPUT)
print '* Gap between slices used for registration. [Default=1]: ' + str(DISTANCE)
print '*************************************************************************************************************************'
FILE_VOLUME = VOLUME
# extract slices from nii volumes
cmd = 'fslsplit ' + str(FILE_VOLUME) + ' ' + str(FILE_VOLUME) + '_splitZ -z'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
#height of the entire input volume
FILE_VOLUME_SLICE = load(FILE_VOLUME + '.nii.gz')
FILE_VOLUME_DATA = FILE_VOLUME_SLICE.get_data()
FILE_VOLUME_SHAPE = FILE_VOLUME_DATA.shape
HEIGHT = FILE_VOLUME_SHAPE[2] - 2
SLICE = HEIGHT
#print 'Slice number of interest: ' + str(SLICE+1)
REFERENCE_MOINS = REFERENCE - 1
FILE_REF_FIT = FILE_VOLUME + '_splitZ' + str(REFERENCE).zfill(4)
cmd = 'convert_xfm -omat ' + FILE_REF_FIT + '-omat_cumul_inv.txt -inverse ' + FILE_REF_FIT + '-omat_cumul.txt'
print('>> '+ cmd)
status, PWD = getstatusoutput(cmd)
VARIABLE=0
if VARIABLE <= SLICE:
while VARIABLE <= SLICE:
# iterative variable
VARIABLE_PLUS = VARIABLE + 1
# slice variables
FILE_DEST = FILE_VOLUME + '_splitZ' + str(VARIABLE).zfill(4)
FILE_SRC = FILE_VOLUME + '_splitZ' + str(VARIABLE_PLUS).zfill(4)
#print 'slice by slice registration'
if VARIABLE == 0:
cmd = 'flirt -in ' + FILE_DEST + ' -ref ' + FILE_DEST + ' -applyxfm -init ' + FILE_REF_FIT + '-omat_cumul_inv.txt -out ' + FILE_DEST
print('>> '+ cmd)
status, PWD = getstatusoutput(cmd)
cmd = 'flirt -in ' + FILE_SRC + ' -ref ' + FILE_SRC + ' -applyxfm -init ' + FILE_REF_FIT + '-omat_cumul_inv.txt -out ' + FILE_SRC
print('>> '+ cmd)
status, PWD = getstatusoutput(cmd)
if VARIABLE == REFERENCE_MOINS:
cmd = 'cp ' + FILE_SRC + '.nii.gz ' + FILE_SRC + '_reg_mask.nii.gz'
print('>> '+ cmd)
status, PWD = getstatusoutput(cmd)
if VARIABLE != REFERENCE_MOINS and VARIABLE != 0:
cmd = 'flirt -in ' + FILE_SRC + ' -ref ' + FILE_SRC + ' -applyxfm -init ' + FILE_REF_FIT + '-omat_cumul_inv.txt -out ' + FILE_SRC
print('>> '+ cmd)
status, PWD = getstatusoutput(cmd)
if VARIABLE != REFERENCE_MOINS:
cmd = 'flirt -in ' + FILE_SRC + ' -ref ' + FILE_DEST + ' -applyxfm -init ' + FILE_SRC + '-omat_cumul.txt -out ' + FILE_SRC + '_reg_mask'
print('>> '+ cmd)
status, PWD = getstatusoutput(cmd)
VARIABLE = VARIABLE + 1
VARIABLE=0
#merging of registered spinal cord slices
while VARIABLE <= SLICE:
# iterative variable
VARIABLE_PLUS=VARIABLE + 1
# input volume slice variables
FILE_DEST = FILE_VOLUME + '_splitZ' + str(VARIABLE).zfill(4)
FILE_SRC = FILE_VOLUME + '_splitZ' + str(VARIABLE_PLUS).zfill(4)
# merge each slice file into a pseudo list of image registered files
if VARIABLE == 0:
FILE_MASK_REG_LIST = FILE_DEST
elif VARIABLE == SLICE:
FILE_MASK_REG_LIST = FILE_MASK_REG_LIST + ' ' + FILE_DEST + '_reg_mask' + ' ' + FILE_SRC + '_reg_mask'
else:
FILE_MASK_REG_LIST = FILE_MASK_REG_LIST + ' ' + FILE_DEST + '_reg_mask'
VARIABLE = VARIABLE + 1
# merge the new registered images with -z axis [concatenate]
cmd = 'fslmerge -z ' + OUTPUT + '.nii.gz ' + FILE_MASK_REG_LIST
print('>> ' + cmd)
status, PWD = getstatusoutput(cmd)
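# Conceptual sketch (not used by the script, which relies on `fslmerge -z`): merging the
# registered 2D slices back into a 3D volume could also be done directly with nibabel, e.g.:
def _example_merge_slices_z(slice_fnames, out_fname):
    import numpy
    from nibabel import load, save, Nifti1Image
    slices = [load(f) for f in slice_fnames]
    # stack the slice data along the third (z) axis, reusing the first slice header
    data = numpy.concatenate([numpy.atleast_3d(s.get_data()) for s in slices], axis=2)
    save(Nifti1Image(data, None, slices[0].get_header()), out_fname)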
## Print usage
def usage():
print 'USAGE: \n' \
'Spinal cord straightening:\n' \
' sct_straighten_spinalcord.py -i <data> -p <binary>\n' \
'\n'\
' -h help. Show this message.\n' \
'\n'\
'MANDATORY ARGUMENTS\n' \
' -i anatomic nifti file. Image to straighten.\n' \
' -p binary nifti file. Image used as initialization for the straightening process.\n' \
'\n'\
'OPTIONAL ARGUMENTS\n' \
' -o nifti file. Spinal cord straightened using slice-by-slice gaussian weighted registration.\n' \
' -m nifti file. Gaussian mask centered along the spinal cord.\n' \
' -g integer. Gap between slices used for registration. [Default=1].\n' \
' -d deformation field. Determine a transformation (Warping field + Affine transform) to straighten the spinal cord based on an orthogonal centerline plane resampling approach. [Default=0].\n' \
'\n'\
'EXAMPLES:\n' \
'\n'\
'Straighten the spine using only a slice-by-slice gaussian-weighted registration. For example:\n' \
' sct_straighten_spinalcord.py -i t2.nii.gz -p binary_t2.nii.gz\n' \
'\n'\
'Straighten the spine using a slice-by-slice gaussian-weighted registration and a fitted centerline (spline interpolation). For example:\n' \
' sct_straighten_spinalcord.py -i t2.nii.gz -p binary_t2.nii.gz -r 1\n' \
'\n'\
        'Find the warping transformation (warping field + affine transform) to straighten the spine using an orthogonal centerline planes resampling approach. For example:\n' \
' sct_straighten_spinalcord.py -i t2.nii.gz -p binary_t2.nii.gz -d 1\n'
exit(2)
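# Illustrative sketch (not called by the script): the gaussian weighting image built in main()
# with `fslmaths -kernel gauss 6.5 -dilM -s 3` can be thought of, conceptually, as a 2D
# gaussian centered on the binary point. A pure scipy analogue could look like this (the
# sigma value below is an assumption, not the exact FSL kernel):
def _example_gaussian_weight_mask(binary_slice, sigma=3.0):
    # binary_slice: 2D numpy array with a single non-zero voxel marking the cord center
    from scipy import ndimage
    return ndimage.gaussian_filter(binary_slice.astype(float), sigma=sigma)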
## Main function
def main():
# Initialization
VOLUME = ''
BINARY = ''
OUTPUT = ''
GAUSS = ''
DISTANCE = ''
DEFORMATION = ''
# manage intermediary files into python script directory
if parameters.debug == 1:
#create directories
cmd = 'mkdir output_images'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'rm output_images/*'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'mkdir txt_files'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'rm txt_files/*'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'rm PSE*'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
# Check input parameters
try:
opts, args = getopt(argv[1:],'hi:p:o:m:g:d:')
except GetoptError as err:
print str(err)
usage()
exit(2)
if not opts:
# no option supplied
print 'no option supplied'
usage()
for opt, arg in opts:
if opt == '-h':
usage()
elif opt in ('-i'):
VOLUME = arg
elif opt in ('-p'):
BINARY = arg
elif opt in ('-o'):
OUTPUT = arg
elif opt in ('-m'):
GAUSS = arg
elif opt in ('-g'):
DISTANCE = arg
elif opt in ('-d'):
DEFORMATION = arg
# display usage if a mandatory argument is not provided
if VOLUME == '' and BINARY == '':
#print "no argument provided"
usage()
# run the registration if both mandatory argument are provided
# detect the schedule file location
# extract path of the script
print 'Find the FSL schedule file for the FLIRT registration ...'
path_script = path.dirname(__file__)+'/'
# extract path of schedule file
schedule_path = path_script[0:-8]+'src/' + parameters.schedule_file
print schedule_path
print '***************************************************************************************************************************************************'
print '* Image to straighten: ' + VOLUME
print '* Binary image used as initialization for the straightening: ' + BINARY
print '* Straightened spinal cord using slice by slice gaussian weighted registration: ' + OUTPUT
print '* Gaussian mask centered along the spinal cord: ' + GAUSS
print '* Gap between slices used for registration. [Default=1]: ' + DISTANCE
    print '* Deformation. Determine a warping transformation to straighten the spinal cord. [Default=0]: ' + DEFORMATION
print '***************************************************************************************************************************************************'
# copy the input volume into the script directory
print 'Verify if anatomical input volume exists ...'
exist_image(VOLUME)
print 'Anatomical input volume exists.'
path_func_VOLUME, file_func_VOLUME, ext_func_VOLUME = extract_fname(VOLUME)
cmd = 'cp ' + VOLUME + ' ' + file_func_VOLUME + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
VOLUME = file_func_VOLUME
if parameters.debug == 1:
        # remove unnecessary files present in the script directory
cmd = 'rm ' + VOLUME + '_*'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
# copy the binary volume into the script directory
print 'Verify if input binary volume exists ...'
exist_image(BINARY)
print 'Input binary volume exists.'
path_func_BINARY, file_func_BINARY, ext_func_BINARY = extract_fname(BINARY)
cmd = 'cp ' + BINARY + ' ' + file_func_BINARY + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
BINARY = file_func_BINARY
if parameters.debug == 1:
        # remove unnecessary files present in the script directory
cmd = 'rm ' + BINARY + '_*'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
# recover the local name of future output images without absolute paths
if OUTPUT != '':
path_func_OUTPUT, file_func_OUTPUT, ext_func_OUTPUT = extract_fname(OUTPUT)
OUTPUT = file_func_OUTPUT
if GAUSS != '':
path_func_GAUSS, file_func_GAUSS, ext_func_GAUSS = extract_fname(GAUSS)
GAUSS = file_func_GAUSS
# reorient binary image into AP RL IS orientation
cmd = 'fslswapdim ' + BINARY + ' AP RL IS tmp.' + BINARY + '_APRLIS'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
FILE_BINARY = 'tmp.' + BINARY + "_APRLIS"
# reorient input anatomical volume into AP RL IS orientation
cmd = 'fslswapdim ' + VOLUME + ' AP RL IS tmp.' + VOLUME + '_APRLIS'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
FILE_VOLUME = 'tmp.' + VOLUME + '_APRLIS'
FILE_VOLUME_OUTPUT = VOLUME
# extract slices from nii volume
cmd = 'fslsplit ' + FILE_VOLUME + ' ' + FILE_VOLUME + '_splitZ' + " -z"
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
#height of the entire input volume
FILE_VOLUME_SLICE = load(FILE_VOLUME + '.nii.gz')
FILE_VOLUME_DATA = FILE_VOLUME_SLICE.get_data()
FILE_VOLUME_SHAPE = FILE_VOLUME_DATA.shape
HEIGHT = FILE_VOLUME_SHAPE[2] - 2
# get the user binary point height and the lenght and width of the input binary volume
REFERENCE = find_initial_mask_z_coordinate(FILE_BINARY + '.nii.gz')
print 'Binary point: height=' + str(REFERENCE)
LENGTH = find_initial_mask_lenght(FILE_BINARY + '.nii.gz')
    print 'Binary volume slice dim.: length= ' + str(LENGTH)
WIDTH = find_initial_mask_width(FILE_BINARY + '.nii.gz')
print 'Binary volume slice dim.: width= ' + str(WIDTH)
print 'Input volume slices number: ' + str(HEIGHT)
SLICE=HEIGHT
if DISTANCE == '':
print 'Gap between input slices not defined. [Default gap=1]'
# set default gap
DISTANCE=1
else:
print 'Gap between input slices defined: gap=' + str(DISTANCE)
# set defined gap
DISTANCE = int(DISTANCE)
# go up, then down in reference to the binary point
for iUpDown in range(1, 3):
print '*************************'
print 'iUpDown: ' + str(iUpDown)
print '*************************'
VARIABLE = REFERENCE
while VARIABLE <= SLICE and VARIABLE >= DISTANCE:
# define iterative variables
if iUpDown == 1:
# inter gap variable - up move
VARIABLE_PLUS = VARIABLE + DISTANCE
VARIABLE_PLUS_SUM = VARIABLE - REFERENCE + 1
else:
# inter gap variable - down move
VARIABLE_PLUS = VARIABLE - DISTANCE
VARIABLE_PLUS_SUM = REFERENCE - VARIABLE_PLUS
# define input volume slice variables
# inter gap transformation
FILE_DEST = FILE_VOLUME + '_splitZ' + str(VARIABLE).zfill(4)
FILE_SRC = FILE_VOLUME + '_splitZ' + str(VARIABLE_PLUS).zfill(4)
# use to create the reference mask
FILE_MASK = FILE_DEST + '-mask'
            #print 'Slice by slice registration using gaussian mask and iterative mean'
if VARIABLE == REFERENCE:
print '***************************'
print 'z = ' + str(VARIABLE)
print '***************************'
# binary point centered in the centerline used as initialization for the straightening
# get the reference slice from the binary point
cmd = 'fslroi ' + FILE_BINARY + '.nii.gz ' + FILE_MASK + '.nii.gz' + ' 0 ' + str(LENGTH) + ' 0 ' + str(WIDTH) + ' ' + str(REFERENCE) + ' 1'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
# FSL gaussian mask creation
cmd = 'fslmaths ' + FILE_MASK + ' -kernel gauss 6.5 -dilM -s 3 ' + FILE_MASK + '_gaussian'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
# implement the gap straightening
VARIABLE_DISTANCE = VARIABLE
for i in range(0, DISTANCE):
if iUpDown == 1:
# inner gap variable
VARIABLE_DISTANCE_PLUS = VARIABLE_DISTANCE + 1
else:
VARIABLE_DISTANCE_PLUS = VARIABLE_DISTANCE - 1
# inner gap transformation
FILE_DEST_DISTANCE = FILE_VOLUME + '_splitZ' + str(VARIABLE_DISTANCE).zfill(4)
FILE_SRC_DISTANCE = FILE_VOLUME + '_splitZ' + str(VARIABLE_DISTANCE_PLUS).zfill(4)
# use to define slice by slice gaussian mask
FILE_MASK_DISTANCE = FILE_DEST_DISTANCE + '-mask'
FILE_MASK_PLUS_DISTANCE = FILE_SRC_DISTANCE + '-mask'
if i == 0:
# using two gaussian masks for both src and ref images
cmd = 'flirt -in ' + FILE_SRC_DISTANCE + ' -ref ' + FILE_DEST_DISTANCE + ' -schedule ' + schedule_path + ' -verbose 0 -omat tmp.omat_tmp.mat -cost normcorr -forcescaling -inweight ' + FILE_MASK_DISTANCE + '_gaussian' + ' -refweight ' + FILE_MASK_DISTANCE + '_gaussian -out ' + FILE_SRC_DISTANCE + '_reg_mask -paddingsize 3'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
# if registration fails
omat_inv = loadtxt('tmp.omat_tmp.mat')
if (abs(omat_inv[0][3]) > DISTANCE or abs(omat_inv[1][3]) > DISTANCE):
                            print 'Incorrect transformation matrix'
cmd = 'flirt -in ' + FILE_SRC_DISTANCE + ' -ref ' + FILE_DEST_DISTANCE + ' -schedule ' + schedule_path + ' -verbose 0 -omat tmp.omat_tmp.mat -out ' + FILE_SRC_DISTANCE + '_reg_mask -paddingsize 3'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
#cmd = 'cat tmp.omat_tmp.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
cmd = 'cp tmp.omat_tmp.mat tmp.omat_cumul_tmp.mat'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
# apply the inverse transformation matrix to the first gaussian mask
cmd = 'convert_xfm -omat tmp.omat_cumul_inv_tmp.mat -inverse tmp.omat_cumul_tmp.mat'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
#cmd = 'cat tmp.omat_cumul_inv_tmp.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
cmd = 'flirt -in ' + FILE_VOLUME + '_splitZ' + str(REFERENCE).zfill(4) + '-mask_gaussian -ref ' + FILE_SRC_DISTANCE + ' -applyxfm -init tmp.omat_cumul_inv_tmp.mat -out ' + FILE_MASK_PLUS_DISTANCE + '_gaussian -paddingsize 3'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
if iUpDown == 1:
VARIABLE_DISTANCE = VARIABLE_DISTANCE + 1
else:
VARIABLE_DISTANCE = VARIABLE_DISTANCE - 1
else:
cmd = 'flirt -in ' + FILE_SRC_DISTANCE + ' -ref ' + FILE_DEST_DISTANCE + ' -schedule ' + schedule_path + ' -verbose 0 -omat tmp.omat_tmp.mat -cost normcorr -forcescaling -inweight ' + FILE_MASK_DISTANCE + '_gaussian' + ' -refweight ' + FILE_MASK_DISTANCE + '_gaussian -out ' + FILE_SRC_DISTANCE + '_reg_mask -paddingsize 3'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
# if registration fails
omat_inv = loadtxt('tmp.omat_tmp.mat')
if (abs(omat_inv[0][3]) > DISTANCE or abs(omat_inv[1][3]) > DISTANCE):
                            print 'Incorrect transformation matrix'
cmd = 'flirt -in ' + FILE_SRC_DISTANCE + ' -ref ' + FILE_DEST_DISTANCE + ' -schedule ' + schedule_path + ' -verbose 0 -omat tmp.omat_tmp.mat -out ' + FILE_SRC_DISTANCE + '_reg_mask -paddingsize 3'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
#cmd = 'cat tmp.omat_tmp.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
cmd = 'convert_xfm -omat tmp.omat_cumul_tmp.mat -concat tmp.omat_cumul_tmp.mat tmp.omat_tmp.mat'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
#cmd = 'cat tmp.omat_cumul_tmp.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
cmd = 'convert_xfm -omat tmp.omat_cumul_inv_tmp.mat -inverse tmp.omat_cumul_tmp.mat'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
#cmd = 'cat tmp.omat_cumul_inv_tmp.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
cmd = 'flirt -in ' + FILE_VOLUME + '_splitZ' + str(REFERENCE).zfill(4) + '-mask_gaussian -ref ' + FILE_SRC_DISTANCE + ' -applyxfm -init tmp.omat_cumul_inv_tmp.mat -out ' + FILE_MASK_PLUS_DISTANCE + '_gaussian -paddingsize 3'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
if iUpDown == 1:
VARIABLE_DISTANCE = VARIABLE_DISTANCE + 1
else:
VARIABLE_DISTANCE = VARIABLE_DISTANCE - 1
cmd = 'cp tmp.omat_cumul_inv_tmp.mat tmp.omat_cumul_inv.mat'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp tmp.omat_cumul_tmp.mat tmp.omat_cumul.mat'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
# apply the cumulative transformation over the slice gap
cmd = 'flirt -in ' + FILE_SRC + ' -ref ' + FILE_DEST + ' -applyxfm -init tmp.omat_cumul.mat -out ' + FILE_SRC + '_reg_mask'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
# store the centerline points coordinates
if iUpDown == 1:
# find and store in txt files all the centerline points coordinates above reference slice
print 'find_centerline_coordinates(' + str(VARIABLE) + ', ' + str(REFERENCE) + ', ' + str(DISTANCE) + ')'
find_centerline_coordinates(FILE_VOLUME, VARIABLE, REFERENCE, DISTANCE)
else:
# find and store in txt files centerline points coordinates below reference slice
print 'find_centerline_coordinates(' + str(VARIABLE) + ', ' + str(REFERENCE) + ', -' + str(DISTANCE) + ')'
find_centerline_coordinates(FILE_VOLUME, VARIABLE, REFERENCE, -DISTANCE)
# verify the cumulative transformation matrix
#cmd = 'cat tmp.omat_cumul.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
#cmd = 'cat tmp.omat_cumul_inv.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
elif VARIABLE != REFERENCE:
                # implement the gap straightening
VARIABLE_DISTANCE = VARIABLE
for i in range(0, DISTANCE):
# inner gap variable
if iUpDown == 1:
VARIABLE_DISTANCE_PLUS = VARIABLE_DISTANCE + 1
else:
VARIABLE_DISTANCE_PLUS = VARIABLE_DISTANCE - 1
# inner gap transformation
FILE_DEST_DISTANCE = FILE_VOLUME + '_splitZ' + str(VARIABLE_DISTANCE).zfill(4)
FILE_SRC_DISTANCE = FILE_VOLUME + '_splitZ' + str(VARIABLE_DISTANCE_PLUS).zfill(4)
# use to define slice by slice gaussian mask
FILE_MASK_DISTANCE = FILE_DEST_DISTANCE + '-mask'
FILE_MASK_PLUS_DISTANCE = FILE_SRC_DISTANCE + '-mask'
if i == 0:
# do not use iterative mean for t2 image
cmd = 'flirt -in ' + FILE_SRC_DISTANCE + ' -ref ' + FILE_DEST_DISTANCE + ' -schedule ' + schedule_path + ' -verbose 0 -omat tmp.omat_tmp.mat -cost normcorr -forcescaling -inweight ' + FILE_MASK_DISTANCE + '_gaussian' + ' -refweight ' + FILE_MASK_DISTANCE + '_gaussian -out ' + FILE_SRC_DISTANCE + '_reg_mask -paddingsize 3'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
# if registration fails
omat_inv = loadtxt('tmp.omat_tmp.mat')
if (abs(omat_inv[0][3]) > DISTANCE or abs(omat_inv[1][3]) > DISTANCE):
                            print 'Incorrect transformation matrix'
cmd = 'flirt -in ' + FILE_SRC_DISTANCE + ' -ref ' + FILE_DEST_DISTANCE + ' -schedule ' + schedule_path + ' -verbose 0 -omat tmp.omat_tmp.mat -out ' + FILE_SRC_DISTANCE + '_reg_mask -paddingsize 3'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
#cmd = 'cat tmp.omat_tmp.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
cmd = 'convert_xfm -omat tmp.omat_cumul_tmp.mat -concat tmp.omat_cumul_tmp.mat tmp.omat_tmp.mat'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
#cmd = 'cat tmp.omat_cumul_tmp.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
# apply the inverse transformation matrix to the first gaussian mask
cmd = 'convert_xfm -omat tmp.omat_cumul_inv_tmp.mat -inverse tmp.omat_cumul_tmp.mat'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
#cmd = 'cat tmp.omat_cumul_inv_tmp.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
cmd = 'flirt -in ' + FILE_VOLUME + '_splitZ' + str(REFERENCE).zfill(4) + '-mask_gaussian -ref ' + FILE_SRC_DISTANCE + ' -applyxfm -init tmp.omat_cumul_inv_tmp.mat -out ' + FILE_MASK_PLUS_DISTANCE + '_gaussian -paddingsize 3'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
if iUpDown == 1:
VARIABLE_DISTANCE = VARIABLE_DISTANCE + 1
else:
VARIABLE_DISTANCE = VARIABLE_DISTANCE - 1
else:
cmd = 'flirt -in ' + FILE_SRC_DISTANCE + ' -ref ' + FILE_DEST_DISTANCE + ' -schedule ' + schedule_path + ' -verbose 0 -omat tmp.omat_tmp.mat -cost normcorr -forcescaling -inweight ' + FILE_MASK_DISTANCE + '_gaussian' + ' -refweight ' + FILE_MASK_DISTANCE + '_gaussian -out ' + FILE_SRC_DISTANCE + '_reg_mask -paddingsize 3'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
# if registration fails
omat_inv = loadtxt('tmp.omat_tmp.mat')
if (abs(omat_inv[0][3]) > DISTANCE or abs(omat_inv[1][3]) > DISTANCE):
                            print 'Incorrect transformation matrix'
cmd = 'flirt -in ' + FILE_SRC_DISTANCE + ' -ref ' + FILE_DEST_DISTANCE + ' -schedule ' + schedule_path + ' -verbose 0 -omat tmp.omat_tmp.mat -out ' + FILE_SRC_DISTANCE + '_reg_mask -paddingsize 3'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
#cmd = 'cat tmp.omat_tmp.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
cmd = 'convert_xfm -omat tmp.omat_cumul_tmp.mat -concat tmp.omat_cumul_tmp.mat tmp.omat_tmp.mat'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
#cmd = 'cat tmp.omat_cumul_tmp.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
cmd = 'convert_xfm -omat tmp.omat_cumul_inv_tmp.mat -inverse tmp.omat_cumul_tmp.mat'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
#cmd = 'cat tmp.omat_cumul_inv_tmp.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
cmd = 'flirt -in ' + FILE_VOLUME + '_splitZ' + str(REFERENCE).zfill(4) + '-mask_gaussian -ref ' + FILE_SRC_DISTANCE + ' -applyxfm -init tmp.omat_cumul_inv_tmp.mat -out ' + FILE_MASK_PLUS_DISTANCE + '_gaussian -paddingsize 3'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
if iUpDown == 1:
VARIABLE_DISTANCE = VARIABLE_DISTANCE + 1
else:
VARIABLE_DISTANCE = VARIABLE_DISTANCE - 1
cmd = 'cp tmp.omat_cumul_inv_tmp.mat tmp.omat_cumul_inv.mat'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp tmp.omat_cumul_tmp.mat tmp.omat_cumul.mat'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
# apply the cumulative transformation over the slice gap
cmd = 'flirt -in ' + FILE_SRC + ' -ref ' + FILE_DEST + ' -applyxfm -init tmp.omat_cumul.mat -out ' + FILE_SRC + '_reg_mask'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
# store the centerline points coordinates
if iUpDown == 1:
# find and store in txt files all the centerline points coordinates above reference slice
print 'find_centerline_coordinates(' + str(VARIABLE) + ', ' + str(REFERENCE) + ', ' + str(DISTANCE) + ')'
find_centerline_coordinates(FILE_VOLUME, VARIABLE, REFERENCE, DISTANCE)
else:
# find and store in txt files centerline points coordinates below reference slice
print 'find_centerline_coordinates(' + str(VARIABLE) + ', ' + str(REFERENCE) + ', -' + str(DISTANCE) + ')'
find_centerline_coordinates(FILE_VOLUME, VARIABLE, REFERENCE, -DISTANCE)
# verify the cumulative transformation matrix
#cmd = 'cat tmp.omat_cumul.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
#cmd = 'cat tmp.omat_cumul_inv.mat'
#print('>> ' + cmd)
#status, output = getstatusoutput(cmd)
#print output
# update the loop variable (move away from the reference slice)
if iUpDown == 1:
VARIABLE = VARIABLE + DISTANCE
else:
VARIABLE = VARIABLE - DISTANCE
print '***************************'
print 'z = ' + str(VARIABLE)
print '***************************'
# merge straightened and gaussian mask spinal cord slices
VARIABLE=0
while VARIABLE <= SLICE:
# iterative variables
VARIABLE_PLUS=VARIABLE + DISTANCE
# input volume slice variables
FILE_DEST = FILE_VOLUME + '_splitZ' + str(VARIABLE).zfill(4)
FILE_SRC = FILE_VOLUME + '_splitZ' + str(VARIABLE_PLUS).zfill(4)
FILE_MASK=FILE_DEST + '-mask'
FILE_MASK_PLUS=FILE_SRC + '-mask'
if VARIABLE == 0:
FILE_MASK_LIST=FILE_MASK + '_gaussian.nii.gz'
FILE_MASK_REG_LIST=FILE_DEST + '_reg_mask.nii.gz'
elif VARIABLE == REFERENCE:
FILE_MASK_REG_LIST=FILE_MASK_REG_LIST + ' ' + FILE_DEST + '.nii.gz'
FILE_MASK_LIST=FILE_MASK_LIST + ' ' + FILE_MASK + '_gaussian.nii.gz'
elif VARIABLE == SLICE:
FILE_MASK_REG_LIST=FILE_MASK_REG_LIST + ' ' + FILE_DEST + '_reg_mask.nii.gz ' + FILE_SRC + '_reg_mask.nii.gz'
FILE_MASK_LIST=FILE_MASK_LIST + ' ' + FILE_MASK + '_gaussian.nii.gz ' + FILE_MASK_PLUS + '_gaussian.nii.gz'
else:
FILE_MASK_LIST=FILE_MASK_LIST + ' ' + FILE_MASK + '_gaussian.nii.gz'
FILE_MASK_REG_LIST=FILE_MASK_REG_LIST + ' ' + FILE_DEST + '_reg_mask.nii.gz'
VARIABLE=VARIABLE + DISTANCE
if OUTPUT != '':
# merge the new straightened images
cmd = 'fslmerge -z ' + OUTPUT + '.nii.gz ' + FILE_MASK_REG_LIST
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
if OUTPUT == '':
# merge the new straightened images
cmd = 'fslmerge -z ' + FILE_VOLUME + '_straightened.nii.gz ' + FILE_MASK_REG_LIST
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
if GAUSS != '':
# merge the new mask images
cmd = 'fslmerge -z ' + GAUSS + '.nii.gz ' + FILE_MASK_LIST
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
if GAUSS == '':
# merge the new mask images
cmd = 'fslmerge -z ' + FILE_VOLUME + '_gaussian_mask.nii.gz ' + FILE_MASK_LIST
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
# get the original orientation of the input image
cmd = 'sct_orientation -i ' + VOLUME + ".nii.gz -get"
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
index = output.find(':')
orientation = output[index+2:len(output)]
final_orientation = [0 for x in xrange(6)]
# map each orientation letter to its anatomical opposite and build the 6-character output orientation
opposite = {'A': 'P', 'P': 'A', 'R': 'L', 'L': 'R', 'I': 'S', 'S': 'I'}
for j in range(0, 3):
    if orientation[j] in opposite:
        final_orientation[2*j] = orientation[j]
        final_orientation[2*j+1] = opposite[orientation[j]]
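# Example (illustrative): for an input orientation string 'AIL', the mapping above yields
# final_orientation = ['A', 'P', 'I', 'S', 'L', 'R'], whose pairs are then passed to fslswapdim below.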
# apply the original orientation of the input volume to the Gaussian mask and to the spinal cord straightened using only slice-by-slice Gaussian-weighted registration
if GAUSS != '':
cmd = 'fslswapdim ' + GAUSS + '.nii.gz ' + str(final_orientation[0]) + str(final_orientation[1]) + ' ' + str(final_orientation[2]) + str(final_orientation[3]) + ' ' + str(final_orientation[4]) + str(final_orientation[5]) + ' ' + GAUSS + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
if GAUSS == '':
cmd = 'fslswapdim ' + FILE_VOLUME + '_gaussian_mask.nii.gz ' + str(final_orientation[0]) + str(final_orientation[1]) + ' ' + str(final_orientation[2]) + str(final_orientation[3]) + ' ' + str(final_orientation[4]) + str(final_orientation[5]) + ' ' + FILE_VOLUME_OUTPUT + '_gaussian_mask.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
if OUTPUT != '':
cmd = 'fslswapdim ' + OUTPUT + '.nii.gz ' + str(final_orientation[0]) + str(final_orientation[1]) + ' ' + str(final_orientation[2]) + str(final_orientation[3]) + ' ' + str(final_orientation[4]) + str(final_orientation[5]) + ' ' + OUTPUT + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
if OUTPUT == '':
cmd = 'fslswapdim ' + FILE_VOLUME + '_straightened.nii.gz ' + str(final_orientation[0]) + str(final_orientation[1]) + ' ' + str(final_orientation[2]) + str(final_orientation[3]) + ' ' + str(final_orientation[4]) + str(final_orientation[5]) + ' ' + FILE_VOLUME_OUTPUT + '_straightened.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
if parameters.debug ==1:
if OUTPUT != '':
cmd = 'cp ' + OUTPUT + '.nii.gz ' + 'output_images/' + OUTPUT + '.nii.gz'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
if OUTPUT == '':
cmd = 'cp ' + FILE_VOLUME_OUTPUT + '_straightened.nii.gz ' + 'output_images/' + FILE_VOLUME_OUTPUT + '_straightened.nii.gz'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
if GAUSS != '':
cmd = 'cp ' + GAUSS + '.nii.gz ' + 'output_images/' + GAUSS + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
if GAUSS == '':
cmd = 'cp ' + FILE_VOLUME_OUTPUT + '_gaussian_mask.nii.gz ' + 'output_images/' + FILE_VOLUME_OUTPUT + '_gaussian_mask.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
################################################################################################################
##### Centerline fitting (spline interpolation) to increase straightening robustness #####
################################################################################################################
print 'Centerline regularized (spline interpolation) ...'
# centerline fitting using spline interpolation
apply_fitted_transfo_to_matrices(FILE_VOLUME, FILE_BINARY, FILE_VOLUME_OUTPUT, REFERENCE, SLICE, DISTANCE)
# calculate the new spinal cord straightened using the fitted centerline previously calculated
apply_fitted_transfo_to_image(FILE_VOLUME, REFERENCE, FILE_VOLUME_OUTPUT + '_fitted_straightened', DISTANCE)
cmd = 'fslswapdim ' + FILE_VOLUME_OUTPUT + '_fitted_straightened.nii.gz ' + str(final_orientation[0]) + str(final_orientation[1]) + ' ' + str(final_orientation[2]) + str(final_orientation[3]) + ' ' + str(final_orientation[4]) + str(final_orientation[5]) + ' ' + FILE_VOLUME_OUTPUT + '_fitted_straightened.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
if parameters.debug ==1:
cmd = 'cp ' + FILE_VOLUME_OUTPUT + '_fitted_straightened.nii.gz ' + 'output_images/' + FILE_VOLUME_OUTPUT + '_fitted_straightened.nii.gz'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
cmd = 'mv ' + FILE_VOLUME_OUTPUT + '_APRLIS_centerline_straightened.nii.gz ' + 'output_images/' + FILE_VOLUME_OUTPUT + '_APRLIS_centerline_straightened.nii.gz'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
cmd = 'mv ' + FILE_VOLUME_OUTPUT + '_APRLIS_centerline_fitted.nii.gz ' + 'output_images/' + FILE_VOLUME_OUTPUT + '_APRLIS_centerline_fitted.nii.gz'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
################################################################################################################
##### FIND THE WARPING FIELD TO STRAIGHTEN THE SPINE CONSIDERING ORTHOGONAL CENTERLINE PLANES #####
################################################################################################################
if DEFORMATION != '':
if DEFORMATION == '1':
# read the fitted centerline txt file
file = open('tmp.centerline_fitted.txt', 'rb')
data_centerline = reader(file, delimiter='\t')
table_centerline_fitted = [row for row in data_centerline]
# count all the lines not empty in the txt file to determine the size of the M matrix defined below
lines_counter = 0
with open('tmp.centerline_fitted.txt') as f:
for line in f:
if line != '\n':
lines_counter += 1
lenght = lines_counter - 1
print "Number of centerline points:"
print lenght
X_init = [0 for x in xrange(lenght)]
Y_init = [0 for x in xrange(lenght)]
Y = array(Y_init)
Z_init = [0 for x in xrange(lenght)]
Z = array(Z_init)
for i in range(0, lenght):
X_init[i]=float(table_centerline_fitted[i][0])
Y_init[i]=float(table_centerline_fitted[i][1])
Z_init[i]=float(table_centerline_fitted[i][2])
X = array(X_init)
Y = array(Y_init)
Z = array(Z_init)
# centerline fitting using cubic B-spline interpolation (splrep/splev)
tck_X = interpolate.splrep(Z, X, s=0)
Xnew = interpolate.splev(Z,tck_X,der=0)
#plt.figure()
#plt.plot(Z,X,'.-',Z,Xnew,'r')
#plt.legend(['Linear','InterpolatedUnivariateSpline'])
#plt.title('Z-X plane interpolation')
#plt.show()
tck_Y = interpolate.splrep(Z, Y, s=0)
Ynew = interpolate.splev(Z,tck_Y,der=0)
#plt.figure()
#plt.plot(Z,Y,'.-',Z,Ynew,'r')
#plt.legend(['Linear','InterpolatedUnivariateSpline'])
#plt.title('Z-Y plane interpolation')
#plt.show()
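# Illustrative sketch (not part of the pipeline, the helper name is hypothetical): once tck_X and
# tck_Y are known, the fitted centerline can be evaluated at any, possibly non-integer, slice index.
def _example_centerline_point(z_query):
    # returns the (x, y) position of the fitted centerline at slice index z_query
    return float(interpolate.splev(z_query, tck_X, der=0)), float(interpolate.splev(z_query, tck_Y, der=0))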
# the idea now is to compute the coordinates of each point along the curved spine
# calculate the derivatives of the two spline functions
sprime = interpolate.splev(Z,tck_X,der=1)
tprime = interpolate.splev(Z,tck_Y,der=1)
# Local functions
# calculate the length of a 3D curve portion (arc length)
# ==========================================================================================
def integrand(z):
# int(z) reproduces the float-index truncation that older numpy versions performed implicitly
return (sqrt(1 + sprime[int(z)]*sprime[int(z)] + tprime[int(z)]*tprime[int(z)]))
def integrale(x):
return quad(integrand, 0, x)[0]
# vector normalization helpers
# ==========================================================================================
def mag(V):
return sqrt(sum([x * x for x in V]))
def n(V):
v_m = mag(V)
return [vi / v_m for vi in V]
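# Illustrative sketch (assumption: helper not called by the pipeline): the arc length of the fitted
# centerline between slice 0 and slice z_eval is the integral of sqrt(1 + x'(z)^2 + y'(z)^2) dz,
# which is exactly what integrand/quad compute in the selection loops below.
def _example_arc_length(z_eval):
    # curvilinear length from slice 0 to slice z_eval (in slice units, mm if slices are 1 mm apart)
    return quad(integrand, 0, z_eval)[0]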
# txt file of centerline points spaced 'step' mm apart along the spine (useful to create future PSE landmarks)
step = parameters.step
fileID = open('tmp.centerline_fitted_orthogonal_resampling_pad' + str(step) + '.txt','w')
fileID.close()
# txt file of centerline points spaced 1 mm apart along the spine (useful to resample the spine in each orthogonal plane of the centerline points considered above)
nostep = 1
fileID = open('tmp.centerline_fitted_orthogonal_resampling.txt','w')
fileID.close()
# calculate coordinates of centerline points spaced 'step' mm apart along the spine (useful to create future PSE landmarks)
# use of a while loop
count = 0
while (count <= lenght):
if count == 0:
#if round(quad(integrand, 0, count)[0],1) % step == 0:
if round(quad(integrand, 0, count)[0],0) % step == 0:
print 'The point of z: ' + str(count) + ' is selected'
#print round(quad(integrand, 0, count)[0],1)
print round(quad(integrand, 0, count)[0],0)
z = count
# find x and y of the point
x = interpolate.splev(z,tck_X,der=0)
y = interpolate.splev(z,tck_Y,der=0)
fileID = open('tmp.centerline_fitted_orthogonal_resampling_pad' + str(step) + '.txt','a')
#fileID.write("%f %f %f %f\n" %(x, y, z, round(quad(integrand, 0, count)[0],1)))
fileID.write("%f %f %f %f\n" %(x, y, z, round(quad(integrand, 0, count)[0],0)))
fileID.close()
#count = count + 0.01
count = count + 0.1
else:
#if round(quad(integrand, 0, count)[0],1) % step == 0 and round(quad(integrand, 0, count)[0],1)!=round(quad(integrand, 0, z)[0],1):
if round(quad(integrand, 0, count)[0],0) % step == 0 and round(quad(integrand, 0, count)[0],0)!=round(quad(integrand, 0, z)[0],0):
print "Test"
#print round(quad(integrand, 0, count)[0],1)
#print round(quad(integrand, 0, z)[0],1)+step
print round(quad(integrand, 0, count)[0],0)
print round(quad(integrand, 0, z)[0],0)+step
#if round(quad(integrand, 0, count)[0],1)==round(quad(integrand, 0, z)[0],1)+step:
if round(quad(integrand, 0, count)[0],0)==round(quad(integrand, 0, z)[0],0)+step:
print "Consecutive values"
print 'The point of z: ' + str(count) + ' is selected'
#print round(quad(integrand, 0, count)[0],1)
print round(quad(integrand, 0, count)[0],0)
z = count
# find x and y of the point
x = interpolate.splev(z,tck_X,der=0)
y = interpolate.splev(z,tck_Y,der=0)
fileID = open('tmp.centerline_fitted_orthogonal_resampling_pad' + str(step) + '.txt','a')
#fileID.write("%f %f %f %f\n" %(x, y, z, round(quad(integrand, 0, count)[0],1)))
fileID.write("%f %f %f %f\n" %(x, y, z, round(quad(integrand, 0, count)[0],0)))
fileID.close()
else:
print "Values not consecutive"
#count = count + 0.01
count = count + 0.1
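# Note: each selected line of tmp.centerline_fitted_orthogonal_resampling_pad<step>.txt holds
# "x y z s", where (x, y, z) is a fitted centerline point and s is its rounded curvilinear
# abscissa from slice 0, so consecutive selected points are roughly 'step' apart along the cord.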
if parameters.debug == 1:
cmd = 'cp ' + 'tmp.centerline_fitted_orthogonal_resampling_pad' + str(step) + '.txt txt_files/' + 'centerline_fitted_orthogonal_resampling_pad' + str(step) + '.txt'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
# calculate coordinates of centerline points spaced 1 mm apart along the spine (useful to resample the spine in each orthogonal plane of the centerline points considered above)
# use of a while loop
count = 0
while (count <= lenght):
if count == 0:
#if round(quad(integrand, 0, count)[0],1) % nostep == 0:
if round(quad(integrand, 0, count)[0],0) % nostep == 0:
print 'The point of z: ' + str(count) + ' is selected'
#print round(quad(integrand, 0, count)[0],1)
print round(quad(integrand, 0, count)[0],0)
z = count
# find x and y of the point
x = interpolate.splev(z,tck_X,der=0)
y = interpolate.splev(z,tck_Y,der=0)
fileID = open('tmp.centerline_fitted_orthogonal_resampling.txt','a')
fileID.write("%f %f %f\n" %(x, y, z))
fileID.close()
#count = count + 0.01
count = count + 0.1
else:
#if round(quad(integrand, 0, count)[0],1) % nostep == 0 and round(quad(integrand, 0, count)[0],1)!=round(quad(integrand, 0, z)[0],1):
if round(quad(integrand, 0, count)[0],0) % nostep == 0 and round(quad(integrand, 0, count)[0],0)!=round(quad(integrand, 0, z)[0],0):
print "Test"
#print round(quad(integrand, 0, count)[0],1)
#print round(quad(integrand, 0, z)[0],1)+nostep
print round(quad(integrand, 0, count)[0],0)
print round(quad(integrand, 0, z)[0],0)+nostep
#if round(quad(integrand, 0, count)[0],1)==round(quad(integrand, 0, z)[0],1)+nostep:
if round(quad(integrand, 0, count)[0],0)==round(quad(integrand, 0, z)[0],0)+nostep:
print "Consecutive values"
print 'The point of z: ' + str(count) + ' is selected'
#print round(quad(integrand, 0, count)[0],1)
print round(quad(integrand, 0, count)[0],0)
z = count
# find x and y of the point
x = interpolate.splev(z,tck_X,der=0)
y = interpolate.splev(z,tck_Y,der=0)
fileID = open('tmp.centerline_fitted_orthogonal_resampling.txt','a')
fileID.write("%f %f %f\n" %(x, y, z))
fileID.close()
else:
print "Values not consecutive"
#count = count + 0.01
count = count + 0.1
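# Note: tmp.centerline_fitted_orthogonal_resampling.txt follows the same selection logic with a
# 1-unit spacing (nostep) and holds only "x y z" per selected centerline point.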
#debug
#FILE_VOLUME = 'tmp.errsm_24_t2_cropped_APRLIS'
#FILE_VOLUME_OUTPUT = 'errsm_24_t2_cropped'
#step = 25
#nostep = 1
# function that computes the rotation of a vector in three dimensions using the Euler-Rodrigues formula
# axis = axis of rotation, theta = angle of rotation in radians
def rotation_matrix(axis,theta):
axis = axis/np.sqrt(np.dot(axis,axis))
a = np.cos(theta/2)
b,c,d = -axis*np.sin(theta/2)
return np.array([[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)],
[2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)],
[2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]])
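# Illustrative sketch (hypothetical helper, not called by the pipeline): quick numerical check of
# the rotation above. With this formulation (b, c, d = -axis*sin(theta/2)), rotating the x unit
# vector by theta = pi/2 about the z axis maps it onto (approximately) the negative y unit vector.
def _example_rotation_check():
    v = np.array([1.0, 0.0, 0.0])
    axis = np.array([0.0, 0.0, 1.0])
    rotated = np.dot(rotation_matrix(axis, np.pi / 2), v)
    # expected to be close to [0, -1, 0]
    return np.allclose(rotated, [0.0, -1.0, 0.0])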
if parameters.debug == 1:
cmd = 'cp ' + 'tmp.centerline_fitted_orthogonal_resampling.txt txt_files/' + 'centerline_fitted_orthogonal_resampling.txt'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
# read nifti input file
centerline = load(FILE_VOLUME + '.nii.gz')
# 3d array for each x y z voxel values for the input nifti image
data = centerline.get_data()
shape = data.shape
print "Input volume dim.:"
print shape
# read the txt file of fitted centerline points 25 mm away along the spine
file = open('tmp.centerline_fitted_orthogonal_resampling_pad' + str(step) + '.txt', 'rb')
data_centerline = reader(file, delimiter=' ')
table_centerline = [row for row in data_centerline]
# count all the lines not empty in the txt file to determine the size of the M matrix defined below
lines_counter = 0
with open('tmp.centerline_fitted_orthogonal_resampling_pad' + str(step) + '.txt') as f:
for line in f:
if line != '\n':
lines_counter += 1
lenght = lines_counter - 1
#print "Number of centerline points:"
#print lenght
lines_counter_nopad = 0
with open('tmp.centerline_fitted_orthogonal_resampling.txt') as f:
for line in f:
if line != '\n':
lines_counter_nopad += 1
lenght_nopad = lines_counter_nopad - 1
#print "Number of centerline points:"
#print lenght
# build a matrix summarizing, for each centerline point spaced 'step' mm apart, the coefficients of the plane orthogonal to the fitted centerline: Ai*x + Bi*y + Ci*z + Di = 0
# create a list containing other lists initialized to 0
M = [[0 for x in xrange(4)] for x in xrange(lenght)]
for i in range(0, lenght):
M[i][0] = float(table_centerline[i + 1][0]) - float(table_centerline[i][0])
M[i][1] = float(table_centerline[i + 1][1]) - float(table_centerline[i][1])
M[i][2] = float(table_centerline[i + 1][2]) - float(table_centerline[i][2])
M[i][3] = - float(table_centerline[i][0]) * (M[i][0]) - float(table_centerline[i][1]) * (M[i][1]) - float(table_centerline[i][2]) * (M[i][2])
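# Illustrative sketch (hypothetical helper, not used by the pipeline): for two consecutive
# centerline points p0 and p1, the plane orthogonal to the centerline at p0 has normal
# n = p1 - p0 and offset d = -dot(n, p0), so p0 itself satisfies n.p + d = 0; this is exactly
# how each row of M is filled above.
def _example_plane_coefficients(p0, p1):
    normal = [p1[k] - p0[k] for k in range(3)]
    d = -sum(normal[k] * p0[k] for k in range(3))
    return normal + [d]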
# initialize normal and tangent vectors for each plane orthogonal to the fitted centerline
Normal_vect_orthogonal = [[0 for x in xrange(3)] for x in xrange(lenght)]
Tangent_vect_orthogonal_x = [[0 for x in xrange(3)] for x in xrange(lenght)]
Tangent_vect_orthogonal_y = [[0 for x in xrange(3)] for x in xrange(lenght)]
# initialize normal and tangent vectors for each horizontal plane along the fitted centerline
Normal_vect_horizontal = [[0 for x in xrange(3)] for x in xrange(lenght)]
Tangent_vect_horizontal_x = [[0 for x in xrange(3)] for x in xrange(lenght)]
Tangent_vect_horizontal_y = [[0 for x in xrange(3)] for x in xrange(lenght)]
# compute normal and tangent vectors for each orthogonal plane to the fitted centerline
for i in range(0, lenght):
Normal_vect_orthogonal[i][0] = M[i][0]
Normal_vect_orthogonal[i][1] = M[i][1]
Normal_vect_orthogonal[i][2] = M[i][2]
# normalize the normal vector
Normal_vect_orthogonal[i] = n(Normal_vect_orthogonal[i])
# solve a set of two equations with two unknowns to find the tangent vector of each plane orthogonal to the centerline (tangent vector chosen in the y plane with x > x_centerline) -> old approach, not working
#x = Symbol('x')
#z = Symbol('z')
#x, z = nsolve([Normal_vect_orthogonal[i][0] * x + Normal_vect_orthogonal[i][2] * z, float(table_centerline[i][0]) + x + float(table_centerline[i][2]) + z + M[i][3]], [x, z], [1, 1])
# By default, the result chosen is the opposite of the vector we want
# define the intersection between the orthogonal plane of the centerline and an arbitrary plane (here the plane where each x equals the abscissa of the centerline point considered for the current orthogonal plane).
# This intersection, defined as a vector, will be our rotation axis
axis = cross(Normal_vect_orthogonal[i], [1, 0, 0])
v = [Normal_vect_orthogonal[i][0],Normal_vect_orthogonal[i][1],Normal_vect_orthogonal[i][2]]
theta = (np.pi)/2 #radian
Tangent_vect_orthogonal_x[i] = np.dot(rotation_matrix(axis,theta),v)
# TEST
#print Tangent_vect_orthogonal_x[i]
#print Normal_vect_orthogonal[i]
# print the dot product to make sure the tangent vector is in the plane.
#print sum([x * y for x, y in zip(Tangent_vect_orthogonal_x[i], Normal_vect_orthogonal[i])])
#print 'Tangent_vect_orthogonal_x[' + str(i) + '][0] = ' + str(Tangent_vect_orthogonal_x[i][0])
#print 'Tangent_vect_orthogonal_x[' + str(i) + '][1] = ' + str(Tangent_vect_orthogonal_x[i][1])
#print 'Tangent_vect_orthogonal_x[' + str(i) + '][2] = ' + str(Tangent_vect_orthogonal_x[i][2])
# normalize the tangent vector previously created
Tangent_vect_orthogonal_x[i] = n(Tangent_vect_orthogonal_x[i])
# calculate Tangent_vect_orthogonal_y: Normal_vect^Tangent_vect_orthogonal_x
Tangent_vect_orthogonal_y[i] = cross(Normal_vect_orthogonal[i], Tangent_vect_orthogonal_x[i])
# normalize tangent vector y
Tangent_vect_orthogonal_y[i] = n(Tangent_vect_orthogonal_y[i])
# compute normal and tangent vectors for each horizontal plane along the fitted centerline
for i in range(0, lenght):
Normal_vect_horizontal[i][0] = 0
Normal_vect_horizontal[i][1] = 0
Normal_vect_horizontal[i][2] = 1
# normalize normal vector
Normal_vect_horizontal[i] = n(Normal_vect_horizontal[i])
Tangent_vect_horizontal_x[i][0] = 1
Tangent_vect_horizontal_x[i][1] = 0
Tangent_vect_horizontal_x[i][2] = 0
# normalize tangent vector creation
Tangent_vect_horizontal_x[i] = n(Tangent_vect_horizontal_x[i])
#calculate Tangent_vect_horizontal_y: Normal_vect^Tangent_vect_horizontal_x
Tangent_vect_horizontal_y[i] = cross(Normal_vect_horizontal[i], Tangent_vect_horizontal_x[i])
Tangent_vect_horizontal_y[i] = n(Tangent_vect_horizontal_y[i])
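# Illustrative sketch (hypothetical helper, not called by the pipeline): for a given plane index l,
# the normal and the two tangent vectors built above should form an (approximately) orthonormal
# basis, i.e. pairwise dot products close to zero after the calls to n().
def _example_check_orthonormal_basis(l):
    n_vec = Normal_vect_orthogonal[l]
    tx = Tangent_vect_orthogonal_x[l]
    ty = Tangent_vect_orthogonal_y[l]
    dot_nt_x = sum(a * b for a, b in zip(n_vec, tx))
    dot_nt_y = sum(a * b for a, b in zip(n_vec, ty))
    dot_tx_ty = sum(a * b for a, b in zip(tx, ty))
    return max(abs(dot_nt_x), abs(dot_nt_y), abs(dot_tx_ty)) < 1e-6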
landmarks_orthogonal = [[[0 for x in xrange(shape[2])] for x in xrange(shape[1])] for x in xrange(shape[0])]
landmarks_orthogonal_size = array(landmarks_orthogonal).shape
landmarks_horizontal = [[[0 for x in xrange(lenght_nopad)] for x in xrange(shape[1])] for x in xrange(shape[0])]
landmarks_horizontal_size = array(landmarks_horizontal).shape
# create PSE landmarks
# counters used to increment the landmark values
landmark_value = 1
landmark_value_horizontal = 1
# define the padding value
padding = 50
# create txt files
fileID = open(FILE_VOLUME + '_orthogonal_landmarks.txt', 'w')
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
fileID = open(FILE_VOLUME + '_orthogonal_landmarks_pad' + str(padding) + '.txt', 'w')
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
fileID = open(FILE_VOLUME + '_horizontal_landmarks.txt', 'w')
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
fileID = open(FILE_VOLUME + '_horizontal_landmarks_pad' + str(padding) + '.txt', 'w')
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
for l in range(0, lenght):
# calculate the origin of the finite orthogonal mesh
orthogonal_mesh_origin = [0 for x in xrange(3)]
orthogonal_mesh_origin[0] = float(table_centerline[l][0])
orthogonal_mesh_origin[1] = float(table_centerline[l][1])
orthogonal_mesh_origin[2] = float(table_centerline[l][2])
horizontal_mesh_origin = [0 for x in xrange(3)]
horizontal_mesh_origin[0] = shape[0]/2
horizontal_mesh_origin[1] = shape[1]/2
horizontal_mesh_origin[2] = float(table_centerline[l][3])
orthogonal_mesh_pos_centerline = [0 for x in xrange(3)]
orthogonal_mesh_pos_x = [0 for x in xrange(3)]
orthogonal_mesh_pos__x = [0 for x in xrange(3)]
orthogonal_mesh_pos_y = [0 for x in xrange(3)]
orthogonal_mesh_pos__y = [0 for x in xrange(3)]
horizontal_mesh_pos_centerline = [0 for x in xrange(3)]
horizontal_mesh_pos_x = [0 for x in xrange(3)]
horizontal_mesh_pos__x = [0 for x in xrange(3)]
horizontal_mesh_pos_y = [0 for x in xrange(3)]
horizontal_mesh_pos__y = [0 for x in xrange(3)]
orthogonal_mesh_pos_centerline = orthogonal_mesh_origin
horizontal_mesh_pos_centerline = horizontal_mesh_origin
# define the coordinates of the orthogonal and horizontal landmark crosses
orthogonal_mesh_pos_x[0] = orthogonal_mesh_origin[0] + parameters.landmarks_cross_size*float(Tangent_vect_orthogonal_x[l][0])
orthogonal_mesh_pos_x[1] = orthogonal_mesh_origin[1] + parameters.landmarks_cross_size*float(Tangent_vect_orthogonal_x[l][1])
orthogonal_mesh_pos_x[2] = orthogonal_mesh_origin[2] + parameters.landmarks_cross_size*float(Tangent_vect_orthogonal_x[l][2])
print 'orthogonal_mesh_pos_x: ' + str(orthogonal_mesh_pos_x)
orthogonal_mesh_pos__x[0] = orthogonal_mesh_origin[0] - parameters.landmarks_cross_size*float(Tangent_vect_orthogonal_x[l][0])
orthogonal_mesh_pos__x[1] = orthogonal_mesh_origin[1] - parameters.landmarks_cross_size*float(Tangent_vect_orthogonal_x[l][1])
orthogonal_mesh_pos__x[2] = orthogonal_mesh_origin[2] - parameters.landmarks_cross_size*float(Tangent_vect_orthogonal_x[l][2])
print 'orthogonal_mesh_pos__x: ' + str(orthogonal_mesh_pos__x)
orthogonal_mesh_pos_y[0] = orthogonal_mesh_origin[0] + parameters.landmarks_cross_size*float(Tangent_vect_orthogonal_y[l][0])
orthogonal_mesh_pos_y[1] = orthogonal_mesh_origin[1] + parameters.landmarks_cross_size*float(Tangent_vect_orthogonal_y[l][1])
orthogonal_mesh_pos_y[2] = orthogonal_mesh_origin[2] + parameters.landmarks_cross_size*float(Tangent_vect_orthogonal_y[l][2])
print 'orthogonal_mesh_pos_y: ' + str(orthogonal_mesh_pos_y)
orthogonal_mesh_pos__y[0] = orthogonal_mesh_origin[0] - parameters.landmarks_cross_size*float(Tangent_vect_orthogonal_y[l][0])
orthogonal_mesh_pos__y[1] = orthogonal_mesh_origin[1] - parameters.landmarks_cross_size*float(Tangent_vect_orthogonal_y[l][1])
orthogonal_mesh_pos__y[2] = orthogonal_mesh_origin[2] - parameters.landmarks_cross_size*float(Tangent_vect_orthogonal_y[l][2])
print 'orthogonal_mesh_pos__y: ' + str(orthogonal_mesh_pos__y)
horizontal_mesh_pos_x[0] = horizontal_mesh_origin[0] + parameters.landmarks_cross_size*float(Tangent_vect_horizontal_x[l][0])
horizontal_mesh_pos_x[1] = horizontal_mesh_origin[1] + parameters.landmarks_cross_size*float(Tangent_vect_horizontal_x[l][1])
horizontal_mesh_pos_x[2] = horizontal_mesh_origin[2] + parameters.landmarks_cross_size*float(Tangent_vect_horizontal_x[l][2])
print 'horizontal_mesh_pos_x: ' + str(horizontal_mesh_pos_x)
horizontal_mesh_pos__x[0] = horizontal_mesh_origin[0] - parameters.landmarks_cross_size*float(Tangent_vect_horizontal_x[l][0])
horizontal_mesh_pos__x[1] = horizontal_mesh_origin[1] - parameters.landmarks_cross_size*float(Tangent_vect_horizontal_x[l][1])
horizontal_mesh_pos__x[2] = horizontal_mesh_origin[2] - parameters.landmarks_cross_size*float(Tangent_vect_horizontal_x[l][2])
print 'horizontal_mesh_pos__x: ' + str(horizontal_mesh_pos__x)
horizontal_mesh_pos_y[0] = horizontal_mesh_origin[0] + parameters.landmarks_cross_size*float(Tangent_vect_horizontal_y[l][0])
horizontal_mesh_pos_y[1] = horizontal_mesh_origin[1] + parameters.landmarks_cross_size*float(Tangent_vect_horizontal_y[l][1])
horizontal_mesh_pos_y[2] = horizontal_mesh_origin[2] + parameters.landmarks_cross_size*float(Tangent_vect_horizontal_y[l][2])
print 'horizontal_mesh_pos_y: ' + str(horizontal_mesh_pos_y)
horizontal_mesh_pos__y[0] = horizontal_mesh_origin[0] - parameters.landmarks_cross_size*float(Tangent_vect_horizontal_y[l][0])
horizontal_mesh_pos__y[1] = horizontal_mesh_origin[1] - parameters.landmarks_cross_size*float(Tangent_vect_horizontal_y[l][1])
horizontal_mesh_pos__y[2] = horizontal_mesh_origin[2] - parameters.landmarks_cross_size*float(Tangent_vect_horizontal_y[l][2])
print 'horizontal_mesh_pos__y: ' + str(horizontal_mesh_pos__y)
# allocate the value of the landmark to the center of the cross for the orthogonal case
landmarks_orthogonal[int(round(orthogonal_mesh_pos_centerline[0]))][int(round(orthogonal_mesh_pos_centerline[1]))][int(round(orthogonal_mesh_pos_centerline[2]))] = landmark_value
#landmarks_orthogonal[int(round(orthogonal_mesh_pos_centerline[0]))][int(round(orthogonal_mesh_pos_centerline[1]))][int(round(orthogonal_mesh_pos_centerline[2]))] = 1
# write the point in a txt file
fileID = open(FILE_VOLUME + '_orthogonal_landmarks.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(orthogonal_mesh_pos_centerline[0]), float(orthogonal_mesh_pos_centerline[1]), float(orthogonal_mesh_pos_centerline[2]), 1))
fileID.close()
# write the point in a txt file
fileID = open(FILE_VOLUME + '_orthogonal_landmarks_pad' + str(padding) + '.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(orthogonal_mesh_pos_centerline[0]), float(orthogonal_mesh_pos_centerline[1]), float(orthogonal_mesh_pos_centerline[2])+padding, 1))
fileID.close()
# allocate the value of the landmark to the center of the cross for the horizontal case
landmarks_horizontal[int(round(horizontal_mesh_pos_centerline[0]))][int(round(horizontal_mesh_pos_centerline[1]))][int(round(horizontal_mesh_pos_centerline[2]))] = landmark_value_horizontal
#landmarks_horizontal[int(round(horizontal_mesh_pos_centerline[0]))][int(round(horizontal_mesh_pos_centerline[1]))][int(round(horizontal_mesh_pos_centerline[2]))] = 1
# write the point in a txt file
fileID = open(FILE_VOLUME + '_horizontal_landmarks.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(horizontal_mesh_pos_centerline[0]), float(horizontal_mesh_pos_centerline[1]), float(horizontal_mesh_pos_centerline[2]), 1))
fileID.close()
# write the point in a txt file
fileID = open(FILE_VOLUME + '_horizontal_landmarks_pad' + str(padding) + '.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(horizontal_mesh_pos_centerline[0]), float(horizontal_mesh_pos_centerline[1]), float(horizontal_mesh_pos_centerline[2])+padding, 1))
fileID.close()
landmark_value=landmark_value + 1
print landmark_value
landmark_value_horizontal=landmark_value_horizontal + 1
print landmark_value_horizontal
if (orthogonal_mesh_pos_x[0] > shape[0]-1):
print "x outside (upper limit)"
print orthogonal_mesh_pos_x[0]
landmarks_orthogonal[int(round(orthogonal_mesh_pos_x[0]))][int(round(orthogonal_mesh_pos_x[1]))][int(round(orthogonal_mesh_pos_x[2]))] = 0
elif (orthogonal_mesh_pos_x[1] > shape[1]-1):
print "y outside (upper limit)"
print orthogonal_mesh_pos_x[1]
landmarks_orthogonal[int(round(orthogonal_mesh_pos_x[0]))][int(round(orthogonal_mesh_pos_x[1]))][int(round(orthogonal_mesh_pos_x[2]))] = 0
elif (orthogonal_mesh_pos_x[2] > shape[2]-1):
print "z outside (upper limit)"
print orthogonal_mesh_pos_x[2]
landmarks_orthogonal[int(round(orthogonal_mesh_pos_x[0]))][int(round(orthogonal_mesh_pos_x[1]))][int(round(orthogonal_mesh_pos_x[2]))] = 0
elif (orthogonal_mesh_pos_x[0] < 0):
print "x outside (lower limit)"
print orthogonal_mesh_pos_x[0]
landmarks_orthogonal[int(round(orthogonal_mesh_pos_x[0]))][int(round(orthogonal_mesh_pos_x[1]))][int(round(orthogonal_mesh_pos_x[2]))] = 0
elif (orthogonal_mesh_pos_x[1] < 0):
print "y outside (lower limit)"
print orthogonal_mesh_pos_x[1]
landmarks_orthogonal[int(round(orthogonal_mesh_pos_x[0]))][int(round(orthogonal_mesh_pos_x[1]))][int(round(orthogonal_mesh_pos_x[2]))] = 0
elif (orthogonal_mesh_pos_x[2] < 0):
print "z outside (lower limit)"
print orthogonal_mesh_pos_x[2]
landmarks_orthogonal[int(round(orthogonal_mesh_pos_x[0]))][int(round(orthogonal_mesh_pos_x[1]))][int(round(orthogonal_mesh_pos_x[2]))] = 0
else:
print "point inside"
landmarks_orthogonal[int(round(orthogonal_mesh_pos_x[0]))][int(round(orthogonal_mesh_pos_x[1]))][int(round(orthogonal_mesh_pos_x[2]))] = landmark_value
#landmarks_orthogonal[int(round(orthogonal_mesh_pos_x[0]))][int(round(orthogonal_mesh_pos_x[1]))][int(round(orthogonal_mesh_pos_x[2]))] = 1
# write the point in a txt file
fileID = open(FILE_VOLUME + '_orthogonal_landmarks.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(orthogonal_mesh_pos_x[0]), float(orthogonal_mesh_pos_x[1]), float(orthogonal_mesh_pos_x[2]), 1))
fileID.close()
fileID = open(FILE_VOLUME + '_orthogonal_landmarks_pad' + str(padding) + '.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(orthogonal_mesh_pos_x[0]), float(orthogonal_mesh_pos_x[1]), float(orthogonal_mesh_pos_x[2])+padding, 1))
fileID.close()
landmarks_horizontal[int(round(horizontal_mesh_pos_x[0]))][int(round(horizontal_mesh_pos_x[1]))][int(round(horizontal_mesh_pos_x[2]))] = landmark_value_horizontal
#landmarks_horizontal[int(round(horizontal_mesh_pos_x[0]))][int(round(horizontal_mesh_pos_x[1]))][int(round(horizontal_mesh_pos_x[2]))] = 1
# write the point in a txt file
fileID = open(FILE_VOLUME + '_horizontal_landmarks.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(horizontal_mesh_pos_x[0]), float(horizontal_mesh_pos_x[1]), float(horizontal_mesh_pos_x[2]), 1))
fileID.close()
fileID = open(FILE_VOLUME + '_horizontal_landmarks_pad' + str(padding) + '.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(horizontal_mesh_pos_x[0]), float(horizontal_mesh_pos_x[1]), float(horizontal_mesh_pos_x[2])+padding, 1))
fileID.close()
landmark_value=landmark_value+1
print landmark_value
landmark_value_horizontal=landmark_value_horizontal+1
print landmark_value_horizontal
if (orthogonal_mesh_pos__y[0] > shape[0]-1):
print "x outside (upper limit)"
print orthogonal_mesh_pos__y[0]
landmarks_orthogonal[int(round(orthogonal_mesh_pos__y[0]))][int(round(orthogonal_mesh_pos__y[1]))][int(round(orthogonal_mesh_pos__y[2]))] = 0
elif (orthogonal_mesh_pos__y[1] > shape[1]-1):
print "y outside (upper limit)"
print orthogonal_mesh_pos__y[1]
landmarks_orthogonal[int(round(orthogonal_mesh_pos__y[0]))][int(round(orthogonal_mesh_pos__y[1]))][int(round(orthogonal_mesh_pos__y[2]))] = 0
elif (orthogonal_mesh_pos__y[2] > shape[2]-1):
print "z outside (upper limit)"
print orthogonal_mesh_pos__y[2]
landmarks_orthogonal[int(round(orthogonal_mesh_pos__y[0]))][int(round(orthogonal_mesh_pos__y[1]))][int(round(orthogonal_mesh_pos__y[2]))] = 0
elif (orthogonal_mesh_pos__y[0] < 0):
print "x outside (lower limit)"
print orthogonal_mesh_pos__y[0]
landmarks_orthogonal[int(round(orthogonal_mesh_pos__y[0]))][int(round(orthogonal_mesh_pos__y[1]))][int(round(orthogonal_mesh_pos__y[2]))] = 0
elif (orthogonal_mesh_pos__y[1] < 0):
print "y outside (lower limit)"
print orthogonal_mesh_pos__y[1]
landmarks_orthogonal[int(round(orthogonal_mesh_pos__y[0]))][int(round(orthogonal_mesh_pos__y[1]))][int(round(orthogonal_mesh_pos__y[2]))] = 0
elif (orthogonal_mesh_pos__y[2] < 0):
print "z outside (lower limit)"
print orthogonal_mesh_pos__y[2]
landmarks_orthogonal[int(round(orthogonal_mesh_pos__y[0]))][int(round(orthogonal_mesh_pos__y[1]))][int(round(orthogonal_mesh_pos__y[2]))] = 0
else:
print "point inside"
landmarks_orthogonal[int(round(orthogonal_mesh_pos__y[0]))][int(round(orthogonal_mesh_pos__y[1]))][int(round(orthogonal_mesh_pos__y[2]))] = landmark_value
#landmarks_orthogonal[int(round(orthogonal_mesh_pos__y[0]))][int(round(orthogonal_mesh_pos__y[1]))][int(round(orthogonal_mesh_pos__y[2]))] = 1
# write the point in a txt file
fileID = open(FILE_VOLUME + '_orthogonal_landmarks.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(orthogonal_mesh_pos__y[0]), float(orthogonal_mesh_pos__y[1]), float(orthogonal_mesh_pos__y[2]), 1))
fileID.close()
fileID = open(FILE_VOLUME + '_orthogonal_landmarks_pad' + str(padding) + '.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(orthogonal_mesh_pos__y[0]), float(orthogonal_mesh_pos__y[1]), float(orthogonal_mesh_pos__y[2])+padding, 1))
fileID.close()
landmarks_horizontal[int(round(horizontal_mesh_pos__y[0]))][int(round(horizontal_mesh_pos__y[1]))][int(round(horizontal_mesh_pos__y[2]))] = landmark_value_horizontal
#landmarks_horizontal[int(round(horizontal_mesh_pos__y[0]))][int(round(horizontal_mesh_pos__y[1]))][int(round(horizontal_mesh_pos__y[2]))] = 1
# write the point in a txt file
fileID = open(FILE_VOLUME + '_horizontal_landmarks.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(horizontal_mesh_pos__y[0]), float(horizontal_mesh_pos__y[1]), float(horizontal_mesh_pos__y[2]), 1))
fileID.close()
fileID = open(FILE_VOLUME + '_horizontal_landmarks_pad' + str(padding) + '.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(horizontal_mesh_pos__y[0]), float(horizontal_mesh_pos__y[1]), float(horizontal_mesh_pos__y[2])+padding, 1))
fileID.close()
landmark_value=landmark_value+1
print landmark_value
landmark_value_horizontal=landmark_value_horizontal+1
print landmark_value_horizontal
if (orthogonal_mesh_pos__x[0] > shape[0]-1):
print "x outside (upper limit)"
print orthogonal_mesh_pos__x[0]
landmarks_orthogonal[int(round(orthogonal_mesh_pos__x[0]))][int(round(orthogonal_mesh_pos__x[1]))][int(round(orthogonal_mesh_pos__x[2]))] = 0
elif (orthogonal_mesh_pos__x[1] > shape[1]-1):
print "y outside (upper limit)"
print orthogonal_mesh_pos__x[1]
landmarks_orthogonal[int(round(orthogonal_mesh_pos__x[0]))][int(round(orthogonal_mesh_pos__x[1]))][int(round(orthogonal_mesh_pos__x[2]))] = 0
elif (orthogonal_mesh_pos__x[2] > shape[2]-1):
print "z outside (upper limit)"
print orthogonal_mesh_pos__x[2]
landmarks_orthogonal[int(round(orthogonal_mesh_pos__x[0]))][int(round(orthogonal_mesh_pos__x[1]))][int(round(orthogonal_mesh_pos__x[2]))] = 0
elif (orthogonal_mesh_pos__x[0] < 0):
print "x outside (lower limit)"
print orthogonal_mesh_pos__x[0]
landmarks_orthogonal[int(round(orthogonal_mesh_pos__x[0]))][int(round(orthogonal_mesh_pos__x[1]))][int(round(orthogonal_mesh_pos__x[2]))] = 0
elif (orthogonal_mesh_pos__x[1] < 0):
print "y outside (lower limit)"
print orthogonal_mesh_pos__x[1]
landmarks_orthogonal[int(round(orthogonal_mesh_pos__x[0]))][int(round(orthogonal_mesh_pos__x[1]))][int(round(orthogonal_mesh_pos__x[2]))] = 0
elif (orthogonal_mesh_pos__x[2] < 0):
print "z outside (lower limit)"
print orthogonal_mesh_pos__x[2]
landmarks_orthogonal[int(round(orthogonal_mesh_pos__x[0]))][int(round(orthogonal_mesh_pos__x[1]))][int(round(orthogonal_mesh_pos__x[2]))] = 0
else:
print "point inside"
landmarks_orthogonal[int(round(orthogonal_mesh_pos__x[0]))][int(round(orthogonal_mesh_pos__x[1]))][int(round(orthogonal_mesh_pos__x[2]))] = landmark_value
#landmarks_orthogonal[int(round(orthogonal_mesh_pos__x[0]))][int(round(orthogonal_mesh_pos__x[1]))][int(round(orthogonal_mesh_pos__x[2]))] = 1
# write the point in a txt file
fileID = open(FILE_VOLUME + '_orthogonal_landmarks.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(orthogonal_mesh_pos__x[0]), float(orthogonal_mesh_pos__x[1]), float(orthogonal_mesh_pos__x[2]), 1))
fileID.close()
fileID = open(FILE_VOLUME + '_orthogonal_landmarks_pad' + str(padding) + '.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(orthogonal_mesh_pos__x[0]), float(orthogonal_mesh_pos__x[1]), float(orthogonal_mesh_pos__x[2])+padding, 1))
fileID.close()
landmarks_horizontal[int(round(horizontal_mesh_pos__x[0]))][int(round(horizontal_mesh_pos__x[1]))][int(round(horizontal_mesh_pos__x[2]))] = landmark_value_horizontal
#landmarks_horizontal[int(round(horizontal_mesh_pos__x[0]))][int(round(horizontal_mesh_pos__x[1]))][int(round(horizontal_mesh_pos__x[2]))] = 1
# write the point in a txt file
fileID = open(FILE_VOLUME + '_horizontal_landmarks.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(horizontal_mesh_pos__x[0]), float(horizontal_mesh_pos__x[1]), float(horizontal_mesh_pos__x[2]), 1))
fileID.close()
fileID = open(FILE_VOLUME + '_horizontal_landmarks_pad' + str(padding) + '.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(horizontal_mesh_pos__x[0]), float(horizontal_mesh_pos__x[1]), float(horizontal_mesh_pos__x[2])+padding, 1))
fileID.close()
landmark_value=landmark_value+1
print landmark_value
landmark_value_horizontal=landmark_value_horizontal+1
print landmark_value_horizontal
if (orthogonal_mesh_pos_y[0] > shape[0]-1):
print "x outside (upper limit)"
print orthogonal_mesh_pos_y[0]
landmarks_orthogonal[int(round(orthogonal_mesh_pos_y[0]))][int(round(orthogonal_mesh_pos_y[1]))][int(round(orthogonal_mesh_pos_y[2]))] = 0
elif (orthogonal_mesh_pos_y[1] > shape[1]-1):
print "y outside (upper limit)"
print orthogonal_mesh_pos_y[1]
landmarks_orthogonal[int(round(orthogonal_mesh_pos_y[0]))][int(round(orthogonal_mesh_pos_y[1]))][int(round(orthogonal_mesh_pos_y[2]))] = 0
elif (orthogonal_mesh_pos_y[2] > shape[2]-1):
print "z outside (upper limit)"
print orthogonal_mesh_pos_y[2]
landmarks_orthogonal[int(round(orthogonal_mesh_pos_y[0]))][int(round(orthogonal_mesh_pos_y[1]))][int(round(orthogonal_mesh_pos_y[2]))] = 0
elif (orthogonal_mesh_pos_y[0] < 0):
print "x outside (lower limit)"
print orthogonal_mesh_pos_y[0]
landmarks_orthogonal[int(round(orthogonal_mesh_pos_y[0]))][int(round(orthogonal_mesh_pos_y[1]))][int(round(orthogonal_mesh_pos_y[2]))] = 0
elif (orthogonal_mesh_pos_y[1] < 0):
print "y outside (lower limit)"
print orthogonal_mesh_pos_y[1]
landmarks_orthogonal[int(round(orthogonal_mesh_pos_y[0]))][int(round(orthogonal_mesh_pos_y[1]))][int(round(orthogonal_mesh_pos_y[2]))] = 0
elif (orthogonal_mesh_pos_y[2] < 0):
print "z outside (lower limit)"
print orthogonal_mesh_pos_y[2]
landmarks_orthogonal[int(round(orthogonal_mesh_pos_y[0]))][int(round(orthogonal_mesh_pos_y[1]))][int(round(orthogonal_mesh_pos_y[2]))] = 0
else:
print "point inside"
landmarks_orthogonal[int(round(orthogonal_mesh_pos_y[0]))][int(round(orthogonal_mesh_pos_y[1]))][int(round(orthogonal_mesh_pos_y[2]))] = landmark_value
#landmarks_orthogonal[int(round(orthogonal_mesh_pos_y[0]))][int(round(orthogonal_mesh_pos_y[1]))][int(round(orthogonal_mesh_pos_y[2]))] = 1
# write the point in a txt file
fileID = open(FILE_VOLUME + '_orthogonal_landmarks.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(orthogonal_mesh_pos_y[0]), float(orthogonal_mesh_pos_y[1]), float(orthogonal_mesh_pos_y[2]), 1))
fileID.close()
fileID = open(FILE_VOLUME + '_orthogonal_landmarks_pad' + str(padding) + '.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(orthogonal_mesh_pos_y[0]), float(orthogonal_mesh_pos_y[1]), float(orthogonal_mesh_pos_y[2])+padding, 1))
fileID.close()
landmarks_horizontal[int(round(horizontal_mesh_pos_y[0]))][int(round(horizontal_mesh_pos_y[1]))][int(round(horizontal_mesh_pos_y[2]))] = landmark_value_horizontal
#landmarks_horizontal[int(round(horizontal_mesh_pos_y[0]))][int(round(horizontal_mesh_pos_y[1]))][int(round(horizontal_mesh_pos_y[2]))] = 1
# write the point in a txt file
fileID = open(FILE_VOLUME + '_horizontal_landmarks.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(horizontal_mesh_pos_y[0]), float(horizontal_mesh_pos_y[1]), float(horizontal_mesh_pos_y[2]), 1))
fileID.close()
fileID = open(FILE_VOLUME + '_horizontal_landmarks_pad' + str(padding) + '.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(float(horizontal_mesh_pos_y[0]), float(horizontal_mesh_pos_y[1]), float(horizontal_mesh_pos_y[2])+padding, 1))
fileID.close()
landmark_value=landmark_value+1
print landmark_value
landmark_value_horizontal=landmark_value_horizontal+1
print landmark_value_horizontal
# write the point in a txt file
fileID = open(FILE_VOLUME + '_orthogonal_landmarks.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
fileID = open(FILE_VOLUME + '_orthogonal_landmarks_pad' + str(padding) + '.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
fileID = open(FILE_VOLUME + '_horizontal_landmarks.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
fileID = open(FILE_VOLUME + '_horizontal_landmarks_pad' + str(padding) + '.txt', 'a')
fileID.write('%f\t%f\t%f\t%f\n' %(0, 0, 0, 0))
fileID.close()
if parameters.debug == 1:
cmd = 'cp ' + FILE_VOLUME + '_horizontal_landmarks_pad' + str(padding) + '.txt txt_files/' + FILE_VOLUME_OUTPUT + '_APRLIS_horizontal_landmarks_pad' + str(padding) + '.txt'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp ' + FILE_VOLUME + '_orthogonal_landmarks_pad' + str(padding) + '.txt txt_files/' + FILE_VOLUME_OUTPUT + '_APRLIS_orthogonal_landmarks_pad' + str(padding) + '.txt'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp ' + FILE_VOLUME + '_horizontal_landmarks.txt txt_files/' + FILE_VOLUME_OUTPUT + '_APRLIS_horizontal_landmarks.txt'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp ' + FILE_VOLUME + '_orthogonal_landmarks.txt txt_files/' + FILE_VOLUME_OUTPUT + '_APRLIS_orthogonal_landmarks.txt'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
# copy the header of the input volume
hdr = centerline.get_header()
hdr_copy = hdr.copy()
shape_output = (shape[1], shape[2], lenght)
hdr_copy_output = hdr.copy()
hdr_copy_output.set_data_shape(shape_output)
data_numpy = array(landmarks_orthogonal)
img = Nifti1Image(data_numpy, None, hdr_copy)
#img = nib.Nifti1Image(data_numpy, np.eye(4))
save(img, FILE_VOLUME + '_orthogonal_landmarks.nii.gz')
data_numpy = array(landmarks_horizontal)
img = Nifti1Image(data_numpy, None, hdr_copy_output)
save(img, FILE_VOLUME + '_horizontal_landmarks.nii.gz')
if parameters.debug == 1:
cmd = 'cp ' + FILE_VOLUME + '_horizontal_landmarks.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_APRLIS_horizontal_landmarks.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp ' + FILE_VOLUME + '_orthogonal_landmarks.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_APRLIS_orthogonal_landmarks.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
# read the txt file of fitted centerline points 1 mm away along the spine
file = open('tmp.centerline_fitted_orthogonal_resampling.txt', 'rb')
data_centerline = reader(file, delimiter=' ')
table_centerline = [row for row in data_centerline]
# count all the lines not empty in the txt file to determine the size of the M matrix defined below
lines_counter = 0
with open('tmp.centerline_fitted_orthogonal_resampling.txt') as f:
for line in f:
if line != '\n':
lines_counter += 1
lenght = lines_counter - 1
print "Number of centerline points:"
print lenght
# build a matrix summarizing, for each centerline point spaced 1 mm apart, the coefficients of the plane orthogonal to the fitted centerline: Ai*x + Bi*y + Ci*z + Di = 0
# create a list containing other lists initialized to 0
M = [[0 for x in xrange(4)] for x in xrange(lenght)]
for i in range(0, lenght):
M[i][0] = float(table_centerline[i + 1][0]) - float(table_centerline[i][0])
M[i][1] = float(table_centerline[i + 1][1]) - float(table_centerline[i][1])
M[i][2] = float(table_centerline[i + 1][2]) - float(table_centerline[i][2])
M[i][3] = - float(table_centerline[i][0]) * (M[i][0]) - float(table_centerline[i][1]) * (M[i][1]) - float(table_centerline[i][2]) * (M[i][2])
# initialize normal and tangent vectors for each plane orthogonal to the fitted centerline
Normal_vect_orthogonal = [[0 for x in xrange(3)] for x in xrange(lenght)]
Tangent_vect_orthogonal_x = [[0 for x in xrange(3)] for x in xrange(lenght)]
Tangent_vect_orthogonal_y = [[0 for x in xrange(3)] for x in xrange(lenght)]
# compute normal and tangent vectors for each plane orthogonal to the fitted centerline
for i in range(0, lenght):
Normal_vect_orthogonal[i][0] = M[i][0]
Normal_vect_orthogonal[i][1] = M[i][1]
Normal_vect_orthogonal[i][2] = M[i][2]
Normal_vect_orthogonal[i] = n(Normal_vect_orthogonal[i])
# solve a set of two equations with two unknowns to find the tangent vector of each plane orthogonal to the centerline (tangent vector chosen in the y plane with x > x_centerline)
x = Symbol('x')
z = Symbol('z')
x, z = nsolve([Normal_vect_orthogonal[i][0] * x + Normal_vect_orthogonal[i][2] * z, float(table_centerline[i][0]) + x + float(table_centerline[i][2]) + z + M[i][3]], [x, z], [1, 1])
# by default, the result chosen is the opposite of the vector we want, hence the sign flip below
Tangent_vect_orthogonal_x[i][0] = -x
Tangent_vect_orthogonal_x[i][1] = 0
Tangent_vect_orthogonal_x[i][2] = -z
# normalize tangent vector x
Tangent_vect_orthogonal_x[i] = n(Tangent_vect_orthogonal_x[i])
# calculate Tangent_vect_orthogonal_y: Normal_vect^Tangent_vect_orthogonal_x
Tangent_vect_orthogonal_y[i] = cross(Normal_vect_orthogonal[i], Tangent_vect_orthogonal_x[i])
# normalize tangent vector y
Tangent_vect_orthogonal_y[i] = n(Tangent_vect_orthogonal_y[i])
orthogonal_volume_resampled = [[[0 for x in xrange(lenght)] for x in xrange(shape[1])] for x in xrange(shape[0])]
orthogonal_volume_resampled_size = array(orthogonal_volume_resampled).shape
gaussian_mask_orthogonal_volume_resampled = [[[0 for x in xrange(lenght)] for x in xrange(shape[1])] for x in xrange(shape[0])]
gaussian_mask_orthogonal_volume_resampled_size = array(gaussian_mask_orthogonal_volume_resampled).shape
for l in range(0, lenght):
# create the centerline points used later to build the Gaussian mask
horizontal_mesh_origin = [0 for x in xrange(3)]
horizontal_mesh_origin[0] = (shape[0]/2)-10
horizontal_mesh_origin[1] = shape[1]/2
horizontal_mesh_origin[2] = l
gaussian_mask_orthogonal_volume_resampled[int(round(horizontal_mesh_origin[0]))][int(round(horizontal_mesh_origin[1]))][int(round(horizontal_mesh_origin[2]))] = 1
# write the centerline binary volume (used for the future Gaussian mask) to a NIfTI file
data_numpy = array(gaussian_mask_orthogonal_volume_resampled)
img = Nifti1Image(data_numpy, None, hdr_copy)
save(img, FILE_VOLUME + '_gaussian_mask_orthogonal_resampling.nii.gz')
if parameters.debug == 1:
cmd = 'cp ' + FILE_VOLUME + '_gaussian_mask_orthogonal_resampling.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_APRLIS_gaussian_mask_orthogonal_resampling.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
# create a new volume where spinal cord is resampled along all of its orthogonal planes
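# Illustrative sketch (hypothetical helper, the loop below inlines the same arithmetic): output
# voxel (i, j, l) is read from the input volume at
# origin_l + i * Tangent_vect_orthogonal_x[l] + j * Tangent_vect_orthogonal_y[l],
# i.e. the plane orthogonal to the centerline at point l is rastered with its two in-plane tangents.
def _example_plane_sample_position(origin, tangent_x, tangent_y, i, j):
    return [origin[k] + i * float(tangent_x[k]) + j * float(tangent_y[k]) for k in range(3)]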
for i in range(0, shape[0]):
for j in range(0, shape[1]):
for l in range(0, lenght):
# calculate the origin of the finite orthogonal mesh
orthogonal_mesh_origin = [0 for x in xrange(3)]
orthogonal_mesh_origin[0] = float(table_centerline[l][0]) - (shape[0]/2) * float(Tangent_vect_orthogonal_x[l][0]) - (shape[1]/2) * float(Tangent_vect_orthogonal_y[l][0])
orthogonal_mesh_origin[1] = float(table_centerline[l][1]) - (shape[0]/2) * float(Tangent_vect_orthogonal_x[l][1]) - (shape[1]/2) * float(Tangent_vect_orthogonal_y[l][1])
orthogonal_mesh_origin[2] = float(table_centerline[l][2]) - (shape[0]/2) * float(Tangent_vect_orthogonal_x[l][2]) - (shape[1]/2) * float(Tangent_vect_orthogonal_y[l][2])
# fill the orthogonal mesh plane
orthogonal_mesh_pos = [0 for x in xrange(3)]
orthogonal_mesh_pos[0] = orthogonal_mesh_origin[0] + i*float(Tangent_vect_orthogonal_x[l][0]) + j*float(Tangent_vect_orthogonal_y[l][0])
orthogonal_mesh_pos[1] = orthogonal_mesh_origin[1] + i*float(Tangent_vect_orthogonal_x[l][1]) + j*float(Tangent_vect_orthogonal_y[l][1])
orthogonal_mesh_pos[2] = orthogonal_mesh_origin[2] + i*float(Tangent_vect_orthogonal_x[l][2]) + j*float(Tangent_vect_orthogonal_y[l][2])
if (orthogonal_mesh_pos[0] > shape[0]-1):
print "x outside (upper limit)"
print orthogonal_mesh_pos[0]
orthogonal_volume_resampled[i][j][l] = 0
elif (orthogonal_mesh_pos[1] > shape[1]-1):
print "y outside (upper limit)"
print orthogonal_mesh_pos[1]
orthogonal_volume_resampled[i][j][l] = 0
elif (orthogonal_mesh_pos[2] > shape[2]-1):
print "z outside (upper limit)"
print orthogonal_mesh_pos[2]
orthogonal_volume_resampled[i][j][l] = 0
elif (orthogonal_mesh_pos[0] < 0):
print "x outside (lower limit)"
print orthogonal_mesh_pos[0]
orthogonal_volume_resampled[i][j][l] = 0
elif (orthogonal_mesh_pos[1] < 0):
print "y outside (lower limit)"
print orthogonal_mesh_pos[1]
orthogonal_volume_resampled[i][j][l] = 0
elif (orthogonal_mesh_pos[2] < 0):
print "z outside (lower limit)"
print orthogonal_mesh_pos[2]
orthogonal_volume_resampled[i][j][l] = 0
else:
print "x inside"
print orthogonal_mesh_pos[0]
print "y inside"
print orthogonal_mesh_pos[1]
print "z inside"
print orthogonal_mesh_pos[2]
orthogonal_volume_resampled[i][j][l] = data[int(round(orthogonal_mesh_pos[0]))][int(round(orthogonal_mesh_pos[1]))][int(round(orthogonal_mesh_pos[2]))]
# write the new volume, resampled orthogonally along the spine, to a NIfTI file
data_numpy = array(orthogonal_volume_resampled)
img = Nifti1Image(data_numpy, None, hdr_copy_output)
save(img, FILE_VOLUME + '_orthogonal_resampling.nii.gz')
if parameters.debug == 1:
cmd = 'cp ' + FILE_VOLUME + '_orthogonal_resampling.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_APRLIS_orthogonal_resampling.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
# create a gaussian mask centered along the centerline in the orthogonally resampled volume
cmd = 'fslsplit ' + str(FILE_VOLUME) + '_orthogonal_resampling.nii.gz ' + str(FILE_VOLUME) + '_orthogonal_resampling_splitZ -z'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'fslsplit ' + str(FILE_VOLUME) + '_gaussian_mask_orthogonal_resampling.nii.gz ' + str(FILE_VOLUME) + '_gaussian_mask_orthogonal_resampling_splitZ -z'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
# index of the last slice (z dimension - 1) of the orthogonally resampled volume
FILE_VOLUME_SLICE = load(str(FILE_VOLUME) + '_orthogonal_resampling.nii.gz')
FILE_VOLUME_DATA = FILE_VOLUME_SLICE.get_data()
FILE_VOLUME_SHAPE = FILE_VOLUME_DATA.shape
HEIGHT = FILE_VOLUME_SHAPE[2] - 1
if REFERENCE != HEIGHT:
VARIABLE=0
while VARIABLE <= HEIGHT:
FILE_DEST = FILE_VOLUME + '_orthogonal_resampling_splitZ' + str(VARIABLE).zfill(4)
FILE_BINARY = FILE_VOLUME + '_gaussian_mask_orthogonal_resampling_splitZ' + str(VARIABLE).zfill(4)
FILE_MASK = FILE_DEST + '-mask'
print 'Create a Gaussian mask in the orthogonally resampled space'
print '***************************'
print 'z = ' + str(VARIABLE)
print '***************************'
cmd = 'fslroi ' + FILE_BINARY + '.nii.gz ' + FILE_MASK + '.nii.gz' + ' 0 ' + '-1 ' + '0 ' + '-1 ' + '0 ' + '1'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
# FSL gaussian mask creation
cmd = 'fslmaths ' + FILE_MASK + ' -kernel gauss 6.5 -dilM -s 3 ' + FILE_MASK + '_gaussian'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
VARIABLE = VARIABLE + 1
# merge the new gaussian mask
VARIABLE=0
while VARIABLE <= HEIGHT:
FILE_DEST = FILE_VOLUME + '_orthogonal_resampling_splitZ' + str(VARIABLE).zfill(4)
FILE_MASK = FILE_DEST + '-mask_gaussian'
# append each slice file to a pseudo-list of registered image files
if VARIABLE == 0:
FILE_MASK_LIST = FILE_MASK
else:
FILE_MASK_LIST = FILE_MASK_LIST + ' ' + FILE_MASK
VARIABLE=VARIABLE + 1
# merge the images along the z axis (concatenate)
cmd = 'fslmerge -z ' + FILE_VOLUME + '_orthogonal_resampled_gaussian_mask.nii.gz ' + FILE_MASK_LIST
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
if parameters.debug == 1:
cmd = 'cp ' + FILE_VOLUME + '_orthogonal_resampled_gaussian_mask.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_APRLIS_orthogonal_resampled_gaussian_mask.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
print "End of the orthogonal resampling part"
############################################################################################################
# Estimate a deformation field between the input image and the orthogonally straightened one.
# considering the case when output file names have not been specified at the beginning by the user
############################################################################################################
# Padding of all images involved with ANTs
print 'Pad source image, straightened image, gaussian mask and landmarks ...'
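# Assumption about the isct_c3d calls below: '-pad 0x0x<padding>vox 0x0x<padding>vox 0' pads each
# volume by <padding> voxels at both ends of the z axis with the value 0, so the padded images and
# their landmarks stay consistent for the subsequent deformation-field estimation.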
cmd = 'isct_c3d ' + FILE_VOLUME + '.nii.gz' + ' -pad 0x0x'+str(padding)+'vox 0x0x' + str(padding) + 'vox 0 -o ' + FILE_VOLUME + '_pad' + str(padding) + '.nii.gz'
print(">> "+cmd)
status, output = getstatusoutput(cmd)
cmd = 'isct_c3d ' + FILE_VOLUME + '_orthogonal_landmarks.nii.gz' + ' -pad 0x0x' + str(padding) + 'vox 0x0x' + str(padding) + 'vox 0 -o ' + FILE_VOLUME + '_orthogonal_landmarks_pad' + str(padding) + '.nii.gz'
print(">> "+cmd)
status, output = getstatusoutput(cmd)
cmd = 'isct_c3d ' + FILE_VOLUME + '_horizontal_landmarks.nii.gz' + ' -pad 0x0x' + str(padding) + 'vox 0x0x' + str(padding) + 'vox 0 -o ' + FILE_VOLUME + '_horizontal_landmarks_pad' + str(padding) + '.nii.gz'
print(">> "+cmd)
status, output = getstatusoutput(cmd)
if GAUSS != '':
cmd = 'isct_c3d ' + GAUSS + '.nii.gz' + ' -pad 0x0x' + str(padding) + 'vox 0x0x' + str(padding) + 'vox 0 -o ' + GAUSS + '_pad' + str(padding) + '.nii.gz'
print(">> "+cmd)
status, output = getstatusoutput(cmd)
if GAUSS == '':
cmd = 'isct_c3d ' + FILE_VOLUME + '_gaussian_mask.nii.gz' + ' -pad 0x0x' + str(padding) + 'vox 0x0x' + str(padding) + 'vox 0 -o ' + FILE_VOLUME + '_gaussian_mask_pad' + str(padding) + '.nii.gz'
print(">> "+cmd)
status, output = getstatusoutput(cmd)
cmd = 'isct_c3d ' + FILE_VOLUME + '_fitted_straightened.nii.gz' + ' -pad 0x0x' + str(padding) + 'vox 0x0x' + str(padding) + 'vox 0 -o ' + FILE_VOLUME + '_fitted_straightened_pad' + str(padding) + '.nii.gz'
print(">> "+cmd)
status, output = getstatusoutput(cmd)
cmd = 'isct_c3d ' + FILE_VOLUME + '_orthogonal_resampling.nii.gz' + ' -pad 0x0x' + str(padding) + 'vox 0x0x' + str(padding) + 'vox 0 -o ' + FILE_VOLUME + '_orthogonal_resampling_pad' + str(padding) + '.nii.gz'
print(">> "+cmd)
status, output = getstatusoutput(cmd)
cmd = 'isct_c3d ' + FILE_VOLUME + '_orthogonal_resampled_gaussian_mask.nii.gz' + ' -pad 0x0x' + str(padding) + 'vox 0x0x' + str(padding) + 'vox 0 -o ' + FILE_VOLUME + '_orthogonal_resampled_gaussian_mask_pad' + str(padding) + '.nii.gz'
print(">> "+cmd)
status, output = getstatusoutput(cmd)
if parameters.debug == 1:
cmd = 'cp ' + FILE_VOLUME + '_pad' + str(padding) + '.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_APRLIS_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp ' + FILE_VOLUME + '_orthogonal_landmarks_pad' + str(padding) + '.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_APRLIS_orthogonal_landmarks_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp ' + FILE_VOLUME + '_gaussian_mask_pad' + str(padding) + '.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_APRLIS_gaussian_mask_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp ' + FILE_VOLUME + '_fitted_straightened_pad' + str(padding) + '.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_APRLIS_fitted_straightened_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp ' + FILE_VOLUME + '_orthogonal_resampling_pad' + str(padding) + '.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_APRLIS_orthogonal_resampling_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp ' + FILE_VOLUME + '_orthogonal_resampled_gaussian_mask_pad' + str(padding) + '.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_APRLIS_orthogonal_resampled_gaussian_mask_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp ' + FILE_VOLUME + '_horizontal_landmarks_pad' + str(padding) + '.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_APRLIS_horizontal_landmarks_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
        # put all images useful for ANTs (the two landmark images + orthogonal resampling + input image) back into the original input volume orientation, so that the warping field is computed in that orientation
cmd = 'fslswapdim ' + FILE_VOLUME + '_orthogonal_resampling_pad' + str(padding) + '.nii.gz ' + str(final_orientation[0]) + str(final_orientation[1]) + ' ' + str(final_orientation[2]) + str(final_orientation[3]) + ' ' + str(final_orientation[4]) + str(final_orientation[5]) + ' tmp.' + FILE_VOLUME_OUTPUT + '_orthogonal_resampling_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'fslswapdim ' + FILE_VOLUME + '_orthogonal_resampling.nii.gz ' + str(final_orientation[0]) + str(final_orientation[1]) + ' ' + str(final_orientation[2]) + str(final_orientation[3]) + ' ' + str(final_orientation[4]) + str(final_orientation[5]) + ' tmp.' + FILE_VOLUME_OUTPUT + '_orthogonal_resampling.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'fslswapdim ' + FILE_VOLUME + '_pad' + str(padding) + '.nii.gz ' + str(final_orientation[0]) + str(final_orientation[1]) + ' ' + str(final_orientation[2]) + str(final_orientation[3]) + ' ' + str(final_orientation[4]) + str(final_orientation[5]) + ' tmp.' + FILE_VOLUME_OUTPUT + '_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'fslswapdim ' + FILE_VOLUME + '_horizontal_landmarks_pad' + str(padding) + '.nii.gz ' + str(final_orientation[0]) + str(final_orientation[1]) + ' ' + str(final_orientation[2]) + str(final_orientation[3]) + ' ' + str(final_orientation[4]) + str(final_orientation[5]) + ' tmp.' + FILE_VOLUME_OUTPUT + '_horizontal_landmarks_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'fslswapdim ' + FILE_VOLUME + '_orthogonal_landmarks_pad' + str(padding) + '.nii.gz ' + str(final_orientation[0]) + str(final_orientation[1]) + ' ' + str(final_orientation[2]) + str(final_orientation[3]) + ' ' + str(final_orientation[4]) + str(final_orientation[5]) + ' tmp.' + FILE_VOLUME_OUTPUT + '_orthogonal_landmarks_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
if parameters.debug == 1:
cmd = 'cp tmp.' + FILE_VOLUME_OUTPUT + '_orthogonal_resampling_pad' + str(padding) + '.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_orthogonal_resampling_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp tmp.' + FILE_VOLUME_OUTPUT + '_orthogonal_resampling.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_orthogonal_resampling.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp tmp.' + FILE_VOLUME_OUTPUT + '_pad' + str(padding) + '.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp tmp.' + FILE_VOLUME_OUTPUT + '_horizontal_landmarks_pad' + str(padding) + '.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_horizontal_landmarks_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
        # use ANTs to find a warping field that orthogonally straightens the spine
        # apparently, using a gaussian mask on the orthogonally resampled image is not useful at all
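        # in the PSE (point-set expectation) metric below, the orthogonally resampled
        # volume is the fixed image and the padded input is the moving image; the two
        # landmark volumes drive the match with weight 0.2, while the CC
        # (cross-correlation) term keeps the image intensities aligned with weight 0.8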
cmd = 'ants 3 -m PSE[tmp.' + FILE_VOLUME_OUTPUT + '_orthogonal_resampling_pad' + str(padding) + '.nii.gz,tmp.' + FILE_VOLUME_OUTPUT + '_pad' + str(padding) + '.nii.gz,tmp.' + FILE_VOLUME_OUTPUT + '_horizontal_landmarks_pad' + str(padding) + '.nii.gz,tmp.' + FILE_VOLUME_OUTPUT + '_orthogonal_landmarks_pad' + str(padding) + '.nii.gz,' + '0.2,100,1,0,1,100000] -o PSE -i 1000x1000x0 --number-of-affine-iterations 1000x1000x1000 -m CC[tmp.' + FILE_VOLUME_OUTPUT + '_orthogonal_resampling_pad' + str(padding) + '.nii.gz,tmp.' + FILE_VOLUME_OUTPUT + '_pad' + str(padding) + '.nii.gz,' + '0.8,4] --use-all-metrics-for-convergence 1'
print(">> "+cmd)
status, output = getstatusoutput(cmd)
print output
# apply the PSE transformation previously calculated
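        # WarpImageMultiTransform resamples the moving image onto the grid of the
        # reference image given after -R, composing the deformable field
        # (PSEWarp.nii.gz) with the affine transform (PSEAffine.txt) and using
        # B-spline interpolation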
cmd = 'WarpImageMultiTransform 3 tmp.' + FILE_VOLUME_OUTPUT + '_pad' + str(padding) + '.nii.gz tmp.' + FILE_VOLUME_OUTPUT + '_reg_PSE_pad' + str(padding) + '.nii.gz -R tmp.' + FILE_VOLUME_OUTPUT + '_orthogonal_resampling_pad' + str(padding) + '.nii.gz --use-BSpline PSEWarp.nii.gz PSEAffine.txt'
print(">> "+cmd)
status, output = getstatusoutput(cmd)
print output
cmd = 'WarpImageMultiTransform 3 ' + FILE_VOLUME_OUTPUT + '.nii.gz ' + FILE_VOLUME_OUTPUT + '_reg_PSE.nii.gz -R tmp.' + FILE_VOLUME_OUTPUT + '_orthogonal_resampling.nii.gz --use-BSpline PSEWarp.nii.gz PSEAffine.txt'
print(">> "+cmd)
status, output = getstatusoutput(cmd)
print output
        # store a PNG snapshot of the middle characteristic slice of the result
# read nifti input file
img = load(FILE_VOLUME_OUTPUT + '_reg_PSE.nii.gz')
        # 3D array of voxel values (x, y, z) for the input NIfTI image
data = img.get_data()
shape = data.shape
print "Input volume dimensions:"
print shape
snapshot = [[0 for x in xrange(shape[1])] for x in xrange(shape[0])]
for i in range(0,shape[0]):
for j in range(0,shape[1]):
snapshot[i][j] = data[i][j][int(shape[2]/2)]
rotate_snapshot = ndimage.rotate(snapshot, 90)
plt.imshow(rotate_snapshot, cmap = cm.Greys_r)
plt.savefig('snapshot.png')
        # crop the warping field to undo the previous padding
        # caveat: imperfect, because the affine .txt file is not modified/cropped accordingly
cmd = 'fslroi PSEWarp.nii.gz PSEWarp_cropped.nii.gz 0 -1 0 -1 ' + str(padding) + ' ' + str(lenght-padding)
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'WarpImageMultiTransform 3 ' + FILE_VOLUME_OUTPUT + '.nii.gz tmp.' + FILE_VOLUME_OUTPUT + '_reg_PSE_warping_field_cropped.nii.gz -R tmp.' + FILE_VOLUME_OUTPUT + '_orthogonal_resampling.nii.gz --use-BSpline PSEWarp_cropped.nii.gz PSEAffine.txt'
print(">> "+cmd)
status, output = getstatusoutput(cmd)
print output
if parameters.debug == 1:
cmd = 'cp ' + 'tmp.' + FILE_VOLUME_OUTPUT + '_reg_PSE_pad' + str(padding) + '.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_reg_PSE_pad' + str(padding) + '.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp ' + FILE_VOLUME_OUTPUT + '_reg_PSE.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_reg_PSE.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
cmd = 'cp tmp.' + FILE_VOLUME_OUTPUT + '_reg_PSE_warping_field_cropped.nii.gz output_images/' + FILE_VOLUME_OUTPUT + '_reg_PSE_warping_field_cropped.nii.gz'
print('>> '+ cmd)
status, output = getstatusoutput(cmd)
elif DEFORMATION == '0':
print 'Warping filed not calculated (no orthogonal centerline plane resampling approach)'
elif DEFORMATION == '':
print 'Warping field not calculated (no orthogonal centerline plane resampling approach) [Default]'
################################################################################################################
##### Remove temporary files #####
################################################################################################################
cmd = 'rm tmp.*'
print('>> ' + cmd)
status, output = getstatusoutput(cmd)
print 'Temporary files deleted'
########################################################################################################################
# START PROGRAM
########################################################################################################################
if __name__ == "__main__":
# call the important variable structures
parameters = parameters()
main()
| mit |
JasonKessler/scattertext | scattertext/termscoring/MannWhitneyU.py | 1 | 3706 | import pandas as pd
import numpy as np
from scipy.stats import norm, mannwhitneyu, ranksums
from scattertext.termscoring.CorpusBasedTermScorer import CorpusBasedTermScorer
class MannWhitneyU(CorpusBasedTermScorer):
'''
term_scorer = (MannWhitneyU(corpus).set_categories('Positive', ['Negative'], ['Plot']))
html = st.produce_frequency_explorer(
corpus,
category='Positive',
not_categories=['Negative'],
neutral_categories=['Plot'],
term_scorer=term_scorer,
metadata=rdf['movie_name'],
grey_threshold=0,
show_neutral=True
)
file_name = 'rotten_fresh_mwu.html'
open(file_name, 'wb').write(html.encode('utf-8'))
IFrame(src=file_name, width=1300, height=700)
'''
def _set_scorer_args(self, **kwargs):
pass
def get_scores(self, *args):
return self.get_score_df()['mwu_z']
def get_score_df(self, correction_method=None):
'''
Computes Mann Whitney corrected p, z-values. Falls back to normal approximation when numerical limits are reached.
:param correction_method: str or None, correction method from statsmodels.stats.multitest.multipletests
'fdr_bh' is recommended.
:return: pd.DataFrame
'''
X = self._get_X().astype(np.float64)
X = X / X.sum(axis=1)
cat_X, ncat_X = self._get_cat_and_ncat(X)
def normal_apx(u, x, y):
# from https://stats.stackexchange.com/questions/116315/problem-with-mann-whitney-u-test-in-scipy
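            # under H0, U is approximately normal with mean n1*n2/2 and
            # standard deviation sqrt(n1*n2*(n1+n2+1)/12); z below is the
            # standardized statistic used for the fallback p-value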
m_u = len(x) * len(y) / 2
sigma_u = np.sqrt(len(x) * len(y) * (len(x) + len(y) + 1) / 12)
z = (u - m_u) / sigma_u
return 2*norm.cdf(z)
scores = []
for i in range(cat_X.shape[1]):
cat_list = cat_X.T[i].A1
ncat_list = ncat_X.T[i].A1
            try:
                if cat_list.mean() > ncat_list.mean():
                    mw = mannwhitneyu(cat_list, ncat_list, alternative='greater')
                    pvalue = mw.pvalue
                    if pvalue in (0, 1):
                        # fall back to the normal approximation when mannwhitneyu hits its numerical limits
                        pvalue = normal_apx(mw.statistic, cat_list, ncat_list)
                    scores.append({'mwu': mw.statistic, 'mwu_p': pvalue, 'mwu_z': norm.isf(float(pvalue)), 'valid': True})
                else:
                    mw = mannwhitneyu(ncat_list, cat_list, alternative='greater')
                    pvalue = mw.pvalue
                    if pvalue in (0, 1):
                        pvalue = normal_apx(mw.statistic, ncat_list, cat_list)
                    scores.append({'mwu': -mw.statistic, 'mwu_p': 1 - pvalue, 'mwu_z': 1. - norm.isf(float(pvalue)), 'valid': True})
            except:
                scores.append({'mwu': 0, 'mwu_p': 0, 'mwu_z': 0, 'valid': False})
score_df = pd.DataFrame(scores, index=self.corpus_.get_terms()).fillna(0)
if correction_method is not None:
from statsmodels.stats.multitest import multipletests
for method in ['mwu']:
valid_pvals = score_df[score_df.valid].mwu_p
valid_pvals_abs = np.min([valid_pvals, 1-valid_pvals], axis=0)
valid_pvals_abs_corr = multipletests(valid_pvals_abs, method=correction_method)[1]
score_df[method + '_p_corr'] = 0.5
valid_pvals_abs_corr[valid_pvals > 0.5] = 1. - valid_pvals_abs_corr[valid_pvals > 0.5]
valid_pvals_abs_corr[valid_pvals < 0.5] = valid_pvals_abs_corr[valid_pvals < 0.5]
score_df.loc[score_df.valid, method + '_p_corr'] = valid_pvals_abs_corr
score_df[method + '_z'] = -norm.ppf(score_df[method + '_p_corr'])
return score_df
def get_name(self):
return "Mann Whitney Z"
| apache-2.0 |
mikkokemppainen/complex-networks | ERgraphs.py | 1 | 6428 | """Random graph tools
This module contains tools for numerical demonstrations of basic
properties of Erdos-Renyi random graphs.
We use NetworkX to generate Erdos-Renyi random graphs of n nodes
with fixed expected average degree K.
We generate a graph G using nx.gnp_random_graph(n,p) with wiring
probability p = K/(n-1). Then the expected number of edges is
n*K/2, and the expected average degree is K.
The expected degree distribution of G is given by the binomial
distribution
choose (n-1,k) * p**k * (1-p)**(n-1-k) for 0 <= k <= n-1.
For large n, this can be approximated by the Poisson distribution
exp(-K) * K**k / k! for k >= 0,
when K = p*n stays fixed.
The expected average clustering coefficient is K/(n-1).
The expected average shortest path length grows comparably to
log(n)/log(K). The derivation of this estimate ignores the effect
of cycles, which causes a certain amount of inaccuracy.
Reference: A. Barrat, M. Barthelemy, A. Vespignani:
Dynamical Processes on Complex Networks,
Cambridge University Press, 2008.
"""
# Author: Mikko Kemppainen <[email protected]>
# License: MIT License
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import binom, poisson
"""
Generate an Erdos-Renyi random graph of n nodes with expected
average degree K.
"""
def graph(n,K):
p = K/(n-1)
G = nx.gnp_random_graph(n,p)
G.K = K # store the generating parameter K in G
inspect(G)
return G
"""
Inspect graph G generated with expected average degree K.
"""
def inspect(G):
n = G.number_of_nodes()
#try:
# K = G.K
#except AttributeError:
# K = '--'
print('Number of edges:', G.number_of_edges())
if hasattr(G,'K'):
print('Expected:', int(np.round(n*G.K/2)))
print('Average degree: {0:.3f}'.format(avgdeg(G)))
if hasattr(G,'K'):
print('Expected: {0:.3f}'.format(G.K))
print('Average clustering coefficient: {0:.3f}'.format(nx.average_clustering(G)))
if hasattr(G,'K'):
print('Expected: {0:.3f}'.format(G.K/(n-1)))
print('Average shortest path length: {0:.3f}'.format(nx.average_shortest_path_length(G)))
if hasattr(G,'K'):
print('Expected approximately: {0:.3f}'.format(np.log(n)/np.log(G.K)))
"""
Calculate average degree
"""
def avgdeg(G):
K = 2*G.number_of_edges() / G.number_of_nodes()
return K
"""
Plot degree histogram with average degree (and expected average degree).
"""
def plotdeghist(G):
hist = nx.degree_histogram(G)
degs = range(len(hist))
plt.bar(degs, hist)
ad = avgdeg(G)
avgdegline = plt.axvline(ad, color = 'g', linewidth = 2)
if hasattr(G,'K'):
expavgdegline = plt.axvline(G.K, color ='r', linewidth = 2)
plt.legend([avgdegline, expavgdegline], ['Average degree', 'Expected average degree'], loc=2)
else:
plt.legend([avgdegline], ['Average degree'], loc=2)
plt.title('Degree histogram')
plt.xlabel('Degrees')
plt.ylabel('Number of nodes')
"""
Given K, compare the degree distribution of G with the expected binomial
distribution and the limiting Poisson distribution.
"""
def compare(G):
n = G.number_of_nodes()
plt.figure(1)
plt.subplot(131)
hist = nx.degree_histogram(G)
while len(hist) < n:
hist.append(0)
dist = [k/n for k in hist]
plt.bar(range(n), dist)
M = 1.05 * max(dist)
plt.ylim(0,M)
plt.title('Degree distribution')
plt.xlabel('Degrees')
plt.ylabel('Ratio of nodes')
if hasattr(G, 'K'):
plt.subplot(132)
p = G.K/(n-1)
bm = [binom.pmf(k,n-1,p) for k in range(n)]
plt.bar(range(n), bm)
plt.ylim(0,M)
plt.title('Expected binomial distribution')
plt.xlabel('Degrees')
plt.ylabel('Ratio of nodes')
plt.subplot(133)
po = [poisson.pmf(k, G.K) for k in range(n)]
plt.bar(range(n), po)
plt.ylim(0,M)
plt.title('Limiting Poisson distribution')
plt.xlabel('Degrees')
plt.ylabel('Ratio of nodes')
plt.show()
"""
The first demo generates three graphs with n nodes and varies the ratio
K/n = 0.1, 0.5, 0.9. Use n > 30.
"""
def _plotdeghist(G):
hist = nx.degree_histogram(G)
degs = range(len(hist))
plt.bar(degs, hist)
ad = avgdeg(G)
avgdegline = plt.axvline(ad, color = 'g', linewidth = 2)
expavgdegline = plt.axvline(G.K, color ='r', linewidth = 2)
plt.legend([avgdegline, expavgdegline], ['Average degree', 'Expected average degree'], loc=2)
plt.title('n = {0}, K = {1:.1f}'.format( G.number_of_nodes(), G.K))
plt.xlabel('Degrees')
plt.ylabel('Number of nodes')
def demoK(n):
n = max(n,30)
K1 = 0.1 * n
K2 = 0.5 * n
K3 = 0.9 * n
print('Graph G1')
G1 = graph(n,K1)
print('')
print('Graph G2')
G2 = graph(n,K2)
print('')
print('Graph G3')
G3 = graph(n,K3)
plt.figure(1)
plt.subplot(131)
_plotdeghist(G1)
plt.subplot(132)
_plotdeghist(G2)
plt.subplot(133)
_plotdeghist(G3)
plt.figure(2)
plt.subplot(131)
nx.draw_random(G1)
plt.subplot(132)
nx.draw_random(G2)
plt.subplot(133)
nx.draw_random(G3)
plt.show()
return G1, G2, G3
"""The second demo generates three graphs with varying number of nodes
n = 100, 500, 1000 keeping the average degree K fixed. Use K > 5.
"""
def demon(K):
K = max(5,K)
n1 = int(1.2 * K)
n2 = int(5 * K)
n3 = int(10 * K)
print('Graph G1')
G1 = graph(n1,K)
print('')
print('Graph G2')
G2 = graph(n2,K)
print('')
print('Graph G3')
G3 = graph(n3,K)
plt.figure(1)
plt.subplot(131)
_plotdeghist(G1)
plt.subplot(132)
_plotdeghist(G2)
plt.subplot(133)
_plotdeghist(G3)
plt.figure(2)
plt.subplot(131)
nx.draw_random(G1)
plt.subplot(132)
nx.draw_random(G2)
plt.subplot(133)
nx.draw_random(G3)
plt.show()
return G1, G2, G3
| mit |
abhisg/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the mean squared error and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
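# (np.newaxis keeps the single feature as a column vector of shape (n_samples, 1),
# the 2-D input shape scikit-learn estimators expect)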
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
yonglehou/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
sly-ninja/python_for_ml | Module5/assignment7.py | 1 | 5807 | import pandas as pd
import numpy as np
# If you'd like to try this lab with PCA instead of Isomap,
# as the dimensionality reduction technique:
Test_PCA = True
def plotDecisionBoundary(model, X, y):
print("Plotting...")
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot') # Look Pretty
fig = plt.figure()
ax = fig.add_subplot(111)
padding = 0.1
resolution = 0.1
#(2 for benign, 4 for malignant)
colors = {2:'royalblue',4:'lightsalmon'}
    # Calculate the boundaries
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
x_range = x_max - x_min
y_range = y_max - y_min
x_min -= x_range * padding
y_min -= y_range * padding
x_max += x_range * padding
y_max += y_range * padding
    # Create a 2D Grid Matrix. The values stored in the matrix
    # are the predictions of the class at said location
import numpy as np
xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),
np.arange(y_min, y_max, resolution))
# What class does the classifier say?
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour map
plt.contourf(xx, yy, Z, cmap=plt.cm.seismic)
plt.axis('tight')
# Plot your testing points as well...
for label in np.unique(y):
indices = np.where(y == label)
plt.scatter(X[indices, 0], X[indices, 1], c=colors[label], alpha=0.8)
p = model.get_params()
plt.title('K = ' + str(p['n_neighbors']))
plt.show()
#
# TODO: Load in the dataset, identify nans, and set proper headers.
# Be sure to verify the rows line up by looking at the file in a text editor.
#
X = pd.read_csv('Datasets//breast-cancer-wisconsin.data', names = ['sample', 'thickness', 'size', 'shape', 'adhesion', 'epithelial', 'nuclei', 'chromatin', 'nucleoli', 'mitoses', 'status'])
#
# TODO: Copy out the status column into a slice, then drop it from the main
# dataframe. Always verify you properly executed the drop by double checking
# (printing out the resulting operating)! Many people forget to set the right
# axis here.
#
# If you goofed up on loading the dataset and notice you have a `sample` column,
# this would be a good place to drop that too if you haven't already.
#
y = X['status'].copy()
X.drop(['sample', 'status'], inplace=True, axis=1)
#
# TODO: With the labels safely extracted from the dataset, replace any nan values
# with the mean feature / column value
#
X.replace('?', np.NaN, inplace=True)
# the '?' placeholders force some columns to dtype object, so coerce back to numeric before imputing
X = X.apply(pd.to_numeric)
X = X.fillna(X.mean())
#
# TODO: Do train_test_split. Use the same variable names as on the EdX platform in
# the reading material, but set the random_state=7 for reproduceability, and keep
# the test_size at 0.5 (50%).
#
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=7)
#
# TODO: Experiment with the basic SKLearn preprocessing scalers. We know that
# the features consist of different units mixed in together, so it might be
# reasonable to assume feature scaling is necessary. Print out a description
# of the dataset, post transformation. Recall: when you do pre-processing,
# which portion of the dataset is your model trained upon? Also which portion(s)
# of your dataset actually get transformed?
#
from sklearn import preprocessing
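# Fit the scaler on the training split only, then apply the same transformation
# to both splits, so that no test-set statistics leak into preprocessing.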
scaler = preprocessing.StandardScaler().fit(X_train)
X_test = scaler.transform(X_test)
X_train = scaler.transform(X_train)
#
# PCA and Isomap are your new best friends
model = None
if Test_PCA:
print("Computing 2D Principle Components")
#
# TODO: Implement PCA here. Save your model into the variable 'model'.
# You should reduce down to two dimensions.
#
from sklearn.decomposition import PCA
model = PCA(n_components=2, svd_solver='randomized', random_state=1)
model.fit(X_train)
X_train = model.transform(X_train)
X_test = model.transform(X_test)
else:
print("Computing 2D Isomap Manifold")
#
# TODO: Implement Isomap here. Save your model into the variable 'model'
# Experiment with K values from 5-10.
# You should reduce down to two dimensions.
#
from sklearn import manifold
model = manifold.Isomap(n_neighbors = 5, n_components = 2)
model.fit(X_train)
X_train = model.transform(X_train)
X_test = model.transform(X_test)
#
# TODO: Train your model against data_train, then transform both
# data_train and data_test using your model. You can save the results right
# back into the variables themselves.
#
# .. your code here ..
#
# TODO: Implement and train KNeighborsClassifier on your projected 2D
# training data here. You can use any K value from 1 - 15, so play around
# with it and see what results you can come up. Your goal is to find a
# good balance where you aren't too specific (low-K), nor are you too
# general (high-K). You should also experiment with how changing the weights
# parameter affects the results.
#
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
model = KNeighborsClassifier(n_neighbors=9)
model.fit(X_train, y_train.values.ravel())
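# Optional sketch: sweep K and the weights parameter to see the
# specific-vs-general tradeoff described above (uncomment to experiment):
# for k in range(1, 16):
#     for w in ('uniform', 'distance'):
#         knn = KNeighborsClassifier(n_neighbors=k, weights=w).fit(X_train, y_train)
#         print(k, w, knn.score(X_test, y_test))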
#
# INFO: Be sure to always keep the domain of the problem in mind! It's
# WAY more important to errantly classify a benign tumor as malignant,
# and have it removed, than to incorrectly leave a malignant tumor, believing
# it to be benign, and then having the patient progress in cancer. Since the UDF
# weights don't give you any class information, the only way to introduce this
# data into SKLearn's KNN Classifier is by "baking" it into your data. For
# example, randomly reducing the ratio of benign samples compared to malignant
# samples from the training set.
#
# TODO: Calculate + Print the accuracy of the testing set
#
print(model.score(X_test, y_test))
plotDecisionBoundary(model, X_test, y_test)
| mit |
johnmgregoire/NanoCalorimetry | maxcovariancecompositiondirection.py | 1 | 5675 | import time, copy
import os
import sys
import numpy
import h5py
#from PnSC_ui import *
#from PnSC_dataimport import *
from PnSC_SCui import *
#from PnSC_math import *
from PnSC_h5io import *
from PnSC_main import *
from matplotlib.ticker import FuncFormatter
import scipy.integrate
p='C:/Users/JohnnyG/Documents/PythonCode/Vlassak/NanoCalorimetry/AuSiCu_pnsc_all.h5'
def myexpformat(x, pos):
for ndigs in range(2):
lab=(('%.'+'%d' %ndigs+'e') %x).replace('e+0','e').replace('e+','e').replace('e0','').replace('e-0','e-')
if eval(lab)==x:
return lab
return lab
ExpTickLabels=FuncFormatter(myexpformat)
def make_ticklabels_invisible(ax, x=True, y=True):
if x:
for tl in ax.get_xticklabels():
tl.set_visible(False)
if y:
for tl in ax.get_yticklabels():
tl.set_visible(False)
cycleindex=0
#p=mm.h5path
#f=h5py.File(p, mode='r+')
#f=h5py.File(p, mode='r')
savef='C:/Users/JohnnyG/Documents/HarvardWork/MG/PnSCplots/batchplotbycell_June2'
plotTlim=(50., 700.)
f=h5py.File(p, mode='r')
#metadictlist=[]
#allsegdict=[]
#cg=f['calbycellmetadata'][`selectcell`]
#for mg in cg.itervalues():
# if isinstance(mg, h5py.Group) and 'Cpregions_enthalpy' in mg.attrs.keys():
# d={}
# for k, v in mg.attrs.iteritems():
# d[k]=v
## if selectcell==1 and d['name'].startswith('heat1'):#heat1a was botched and heat1b we don't know cooling rate and the XRd for heat0 was questionable anyway
## continue
# metadictlist+=[d]
# allsegdict+=[CreateHeatProgSegDictList(p, d['name'], d['h5hpname'])]
comp=f['CompThick/atfrac'][:, :]
cell_comp_weight1_weight2_fomlist=[]
for selectcell in range(1, 26):
xrddictlist=[]
if 'xrdbycell' in f and `selectcell` in f['xrdbycell']:
cg=f['xrdbycell'][`selectcell`]
for mg in cg.itervalues():
if isinstance(mg, h5py.Group):
d={}
for k, v in mg.attrs.iteritems():
d[k]=v
xrddictlist+=[d]
if len(xrddictlist)>0:
amfrac=numpy.array([d['amfrac'] for d in xrddictlist])
cell_comp_weight1_weight2_fomlist+=[(selectcell, comp[selectcell-1, :], amfrac, [1./len(amfrac)]*len(amfrac), amfrac)]
f.close()
xl=[]
yl=[]
weightl1=[]
weightl2=[]
foml=[]
for cell, comp, weight1, weight2, fomlist in cell_comp_weight1_weight2_fomlist:
xl+=[comp[0]]*len(fomlist)
yl+=[comp[1]]*len(fomlist)
weightl1+=list(weight1)
weightl2+=list(weight2)
foml+=list(fomlist)
xl=numpy.float64(xl)
yl=numpy.float64(yl)
weightl1=numpy.float64(weightl1)
weightl2=numpy.float64(weightl2)
foml=numpy.float64(foml)
fomdev=foml-foml.mean()
##find composition that makes maximum covariance of amfrac and distance from composition
def minfcn(ab, x, y, wts, fomdevfrommean):
a=ab[0]
b=ab[1]
# xr=(xp+a*yp-a*b)/(1.+a**2)
# yr=a*xr+b
# b-=yr.mean()
# d=xp+a*yp-a*b
d=(((x-a)**2+(y-b)**2+(x+y-a-b)**2)/2.)**.5
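    # d is the Euclidean distance in ternary composition space between each sample
    # composition (x, y, 1-x-y) and the trial composition (a, b, 1-a-b); the objective
    # rewards trial compositions whose distance to the samples covaries strongly with
    # the (weighted) deviation of the figure of merit from its mean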
return -1./(fomdevfrommean*wts*(d-d.mean())).sum()
xp=1.-xl-yl/2.
yp=3**.5*yl/2.
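# (xp, yp) are standard Gibbs-triangle coordinates: the atomic fractions
# (x, y, 1-x-y) projected onto a 2-D equilateral composition triangle for plotting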
bestans=None
cov=None
for ag, bg in zip(xl, yl):
ans, evals, code=scipy.optimize.fmin_tnc(minfcn, [ag, bg], args=(xl, yl, weightl1*weightl2, fomdev), approx_grad=1)#, bounds=[(0., 1.), (0., 1.)])
if cov is None or minfcn(ans, xl, yl, weightl1*weightl2, fomdev)<cov:
bestans=ans
cov=minfcn(ans, xl, yl, weightl1*weightl2, fomdev)
print ans, cov
a=bestans[0]
b=bestans[1]
#***
#show optimal composition and distances
print a, b
xr=1.-a-b/2.
yr=3**.5*b/2.
pylab.plot(xp, yp, 'b.')
pylab.plot([xr], [yr], 'r.')
for x1, y1 in zip(xp, yp):
pylab.plot([x1, xr], [y1, yr], 'k-')
pylab.gca().set_aspect(1.)
pylab.show()
##find line that, when points are collapsed onto it, maximizes the covariance between amfrac and distance along the line - the slope is optimized and for each slope the intercept that centers the line on the data is chosen
#bminfcn=lambda b, xp, yp, a: ((yp-a*xp-b)**2).sum()
#def minfcn(a, xp, yp, wts, fomdevfrommean):
# b=scipy.optimize.fmin(bminfcn, 1., args=(xp, yp, a))
# xr=(xp+a*yp-a*b)/(1.+a**2)
# yr=a*xr+b
# d=(xp+a*yp-a*b)/(1.+a**2)**.5
# return numpy.abs(1./(fomdevfrommean*wts*(d-d.mean())).sum())
#xp=1.-xl-yl/2.
#yp=3**.5*yl/2.
#ans=scipy.optimize.fmin(minfcn, -.4, args=(xp, yp, weightl1*weightl2, fomdev))#, maxfun=50)
#a=ans[0]
##***
##find line that spaces out the projected points
#bminfcn=lambda b, xp, yp, a: ((yp-a*xp-b)**2).sum()
#def minfcn(a, xp, yp, wts, fomdevfrommean):
# b=scipy.optimize.fmin(bminfcn, 1., args=(xp, yp, a))
# xr=(xp+a*yp-a*b)/(1.+a**2)
# yr=a*xr+b
# d=(xp+a*yp-a*b)/(1.+a**2)**.5
# return (wts/numpy.array([numpy.min((d[d!=v]-v)**2) for i, v in enumerate(d)])).sum()
#
#xp=1.-xl-yl/2.
#yp=3**.5*yl/2.
#ans=scipy.optimize.fmin(minfcn, -.4, args=(xp, yp, weightl2, fomdev))#, maxfun=50)
#a=ans[0]
##****
#line collapse plot for above routines that have optimal lines
#b=scipy.optimize.fmin(bminfcn, 1., args=(xp, yp, a))[0]
#print a, b
#xr=(xp+a*yp-a*b)/(1.+a**2)
#yr=a*xr+b
#pylab.plot(xp, yp, 'b.')
#pylab.plot(xr, yr, 'r.')
#for x1, y1, x2, y2 in zip(xp, yp, xr, yr):
# pylab.plot([x1, x2], [y1, y2], 'k-')
#def minfcn(ab, x, y, wts, fomdevfrommean):
# a=ab[0]
# b=ab[1]
# d=(((x-a)**2+(y-b)**2+(x+y-a-b)**2)/2.)**.5
# return 1./(fomdevfrommean*(d-d.mean())*wts).sum()
#ans=scipy.optimize.fmin_tnc(minfcn, [.4, .3], args=(xl, yl, weightl, fomdev), bounds=[(0., 1.), (0., 1.)], approx_grad=1)
#a, b=ans[0]
#axp=1.-a-b/2.
#ayp=3**.5*b/2.
#xp=1.-xl-yl/2.
#yp=3**.5*yl/2.
#pylab.plot(xp, yp, 'b.')
#pylab.plot(axp, ayp, 'r.')
#pylab.gca().set_aspect(1.)
#pylab.show()
| bsd-3-clause |
xubenben/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
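            # choose the number of epochs so that roughly 10**4 SGD parameter updates
            # are performed in total, independent of the training-set size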
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
ankurankan/pgmpy | pgmpy/tests/test_models/test_BayesianNetwork.py | 2 | 45449 | import unittest
import networkx as nx
import pandas as pd
import numpy as np
import numpy.testing as np_test
from pgmpy.models import BayesianNetwork, MarkovNetwork
from pgmpy.base import DAG
import pgmpy.tests.help_functions as hf
from pgmpy.factors.discrete import (
TabularCPD,
JointProbabilityDistribution,
DiscreteFactor,
)
from pgmpy.independencies import Independencies
from pgmpy.estimators import (
BayesianEstimator,
BaseEstimator,
MaximumLikelihoodEstimator,
)
from pgmpy.base import DAG
from pgmpy.utils import get_example_model
from pgmpy.sampling import BayesianModelSampling
class TestBaseModelCreation(unittest.TestCase):
def setUp(self):
self.G = BayesianNetwork()
def test_class_init_without_data(self):
self.assertIsInstance(self.G, nx.DiGraph)
def test_class_init_with_data_string(self):
self.g = BayesianNetwork([("a", "b"), ("b", "c")])
self.assertListEqual(sorted(self.g.nodes()), ["a", "b", "c"])
self.assertListEqual(
hf.recursive_sorted(self.g.edges()), [["a", "b"], ["b", "c"]]
)
def test_class_init_with_data_nonstring(self):
BayesianNetwork([(1, 2), (2, 3)])
def test_add_node_string(self):
self.G.add_node("a")
self.assertListEqual(list(self.G.nodes()), ["a"])
def test_add_node_nonstring(self):
self.G.add_node(1)
def test_add_nodes_from_string(self):
self.G.add_nodes_from(["a", "b", "c", "d"])
self.assertListEqual(sorted(self.G.nodes()), ["a", "b", "c", "d"])
def test_add_nodes_from_non_string(self):
self.G.add_nodes_from([1, 2, 3, 4])
def test_add_edge_string(self):
self.G.add_edge("d", "e")
self.assertListEqual(sorted(self.G.nodes()), ["d", "e"])
self.assertListEqual(list(self.G.edges()), [("d", "e")])
self.G.add_nodes_from(["a", "b", "c"])
self.G.add_edge("a", "b")
self.assertListEqual(
hf.recursive_sorted(self.G.edges()), [["a", "b"], ["d", "e"]]
)
def test_add_edge_nonstring(self):
self.G.add_edge(1, 2)
def test_add_edge_selfloop(self):
self.assertRaises(ValueError, self.G.add_edge, "a", "a")
def test_add_edge_result_cycle(self):
self.G.add_edges_from([("a", "b"), ("a", "c")])
self.assertRaises(ValueError, self.G.add_edge, "c", "a")
def test_add_edges_from_string(self):
self.G.add_edges_from([("a", "b"), ("b", "c")])
self.assertListEqual(sorted(self.G.nodes()), ["a", "b", "c"])
self.assertListEqual(
hf.recursive_sorted(self.G.edges()), [["a", "b"], ["b", "c"]]
)
self.G.add_nodes_from(["d", "e", "f"])
self.G.add_edges_from([("d", "e"), ("e", "f")])
self.assertListEqual(sorted(self.G.nodes()), ["a", "b", "c", "d", "e", "f"])
self.assertListEqual(
hf.recursive_sorted(self.G.edges()),
hf.recursive_sorted([("a", "b"), ("b", "c"), ("d", "e"), ("e", "f")]),
)
def test_add_edges_from_nonstring(self):
self.G.add_edges_from([(1, 2), (2, 3)])
def test_add_edges_from_self_loop(self):
self.assertRaises(ValueError, self.G.add_edges_from, [("a", "a")])
def test_add_edges_from_result_cycle(self):
self.assertRaises(
ValueError, self.G.add_edges_from, [("a", "b"), ("b", "c"), ("c", "a")]
)
def test_update_node_parents_bm_constructor(self):
self.g = BayesianNetwork([("a", "b"), ("b", "c")])
self.assertListEqual(list(self.g.predecessors("a")), [])
self.assertListEqual(list(self.g.predecessors("b")), ["a"])
self.assertListEqual(list(self.g.predecessors("c")), ["b"])
def test_update_node_parents(self):
self.G.add_nodes_from(["a", "b", "c"])
self.G.add_edges_from([("a", "b"), ("b", "c")])
self.assertListEqual(list(self.G.predecessors("a")), [])
self.assertListEqual(list(self.G.predecessors("b")), ["a"])
self.assertListEqual(list(self.G.predecessors("c")), ["b"])
def tearDown(self):
del self.G
class TestBayesianModelMethods(unittest.TestCase):
def setUp(self):
self.G = BayesianNetwork([("a", "d"), ("b", "d"), ("d", "e"), ("b", "c")])
self.G1 = BayesianNetwork([("diff", "grade"), ("intel", "grade")])
diff_cpd = TabularCPD("diff", 2, values=[[0.2], [0.8]])
intel_cpd = TabularCPD("intel", 3, values=[[0.5], [0.3], [0.2]])
grade_cpd = TabularCPD(
"grade",
3,
values=[
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
[0.8, 0.8, 0.8, 0.8, 0.8, 0.8],
],
evidence=["diff", "intel"],
evidence_card=[2, 3],
)
self.G1.add_cpds(diff_cpd, intel_cpd, grade_cpd)
self.G2 = BayesianNetwork([("d", "g"), ("g", "l"), ("i", "g"), ("i", "l")])
self.G3 = BayesianNetwork(
[
("Pop", "EC"),
("Urb", "EC"),
("GDP", "EC"),
("EC", "FFEC"),
("EC", "REC"),
("EC", "EI"),
("REC", "CO2"),
("REC", "CH4"),
("REC", "N2O"),
("FFEC", "CO2"),
("FFEC", "CH4"),
("FFEC", "N2O"),
]
)
def test_moral_graph(self):
moral_graph = self.G.moralize()
self.assertListEqual(sorted(moral_graph.nodes()), ["a", "b", "c", "d", "e"])
for edge in moral_graph.edges():
self.assertTrue(
edge in [("a", "b"), ("a", "d"), ("b", "c"), ("d", "b"), ("e", "d")]
or (edge[1], edge[0])
in [("a", "b"), ("a", "d"), ("b", "c"), ("d", "b"), ("e", "d")]
)
def test_moral_graph_with_edge_present_over_parents(self):
G = BayesianNetwork(
[("a", "d"), ("d", "e"), ("b", "d"), ("b", "c"), ("a", "b")]
)
moral_graph = G.moralize()
self.assertListEqual(sorted(moral_graph.nodes()), ["a", "b", "c", "d", "e"])
for edge in moral_graph.edges():
self.assertTrue(
edge in [("a", "b"), ("c", "b"), ("d", "a"), ("d", "b"), ("d", "e")]
or (edge[1], edge[0])
in [("a", "b"), ("c", "b"), ("d", "a"), ("d", "b"), ("d", "e")]
)
def test_get_ancestors_of_success(self):
ancenstors1 = self.G2._get_ancestors_of("g")
ancenstors2 = self.G2._get_ancestors_of("d")
ancenstors3 = self.G2._get_ancestors_of(["i", "l"])
self.assertEqual(ancenstors1, {"d", "i", "g"})
self.assertEqual(ancenstors2, {"d"})
self.assertEqual(ancenstors3, {"g", "i", "l", "d"})
def test_get_ancestors_of_failure(self):
self.assertRaises(ValueError, self.G2._get_ancestors_of, "h")
def test_get_cardinality(self):
self.assertDictEqual(
self.G1.get_cardinality(), {"diff": 2, "intel": 3, "grade": 3}
)
def test_get_cardinality_with_node(self):
self.assertEqual(self.G1.get_cardinality("diff"), 2)
self.assertEqual(self.G1.get_cardinality("intel"), 3)
self.assertEqual(self.G1.get_cardinality("grade"), 3)
def test_local_independencies(self):
self.assertEqual(
self.G.local_independencies("a"), Independencies(["a", ["b", "c"]])
)
self.assertEqual(
self.G.local_independencies("c"),
Independencies(["c", ["a", "d", "e"], "b"]),
)
self.assertEqual(
self.G.local_independencies("d"), Independencies(["d", "c", ["b", "a"]])
)
self.assertEqual(
self.G.local_independencies("e"),
Independencies(["e", ["c", "b", "a"], "d"]),
)
self.assertEqual(self.G.local_independencies("b"), Independencies(["b", "a"]))
self.assertEqual(self.G1.local_independencies("grade"), Independencies())
def test_get_independencies(self):
chain = BayesianNetwork([("X", "Y"), ("Y", "Z")])
self.assertEqual(
chain.get_independencies(), Independencies(("X", "Z", "Y"), ("Z", "X", "Y"))
)
fork = BayesianNetwork([("Y", "X"), ("Y", "Z")])
self.assertEqual(
fork.get_independencies(), Independencies(("X", "Z", "Y"), ("Z", "X", "Y"))
)
collider = BayesianNetwork([("X", "Y"), ("Z", "Y")])
self.assertEqual(
collider.get_independencies(), Independencies(("X", "Z"), ("Z", "X"))
)
# Latent variables
fork = BayesianNetwork([("Y", "X"), ("Y", "Z")], latents=["Y"])
self.assertEqual(
fork.get_independencies(include_latents=True),
Independencies(("X", "Z", "Y"), ("Z", "X", "Y")),
)
self.assertEqual(
fork.get_independencies(include_latents=False), Independencies()
)
def test_is_imap(self):
val = [
0.01,
0.01,
0.08,
0.006,
0.006,
0.048,
0.004,
0.004,
0.032,
0.04,
0.04,
0.32,
0.024,
0.024,
0.192,
0.016,
0.016,
0.128,
]
JPD = JointProbabilityDistribution(["diff", "intel", "grade"], [2, 3, 3], val)
fac = DiscreteFactor(["diff", "intel", "grade"], [2, 3, 3], val)
self.assertTrue(self.G1.is_imap(JPD))
self.assertRaises(TypeError, self.G1.is_imap, fac)
def test_markov_blanet(self):
G = DAG(
[
("x", "y"),
("z", "y"),
("y", "w"),
("y", "v"),
("u", "w"),
("s", "v"),
("w", "t"),
("w", "m"),
("v", "n"),
("v", "q"),
]
)
self.assertEqual(
set(G.get_markov_blanket("y")), set(["s", "w", "x", "u", "z", "v"])
)
def test_markov_blanket_G3(self):
self.assertEqual(set(self.G3.get_markov_blanket("CH4")), set(["FFEC", "REC"]))
def test_get_immoralities(self):
G = BayesianNetwork([("x", "y"), ("z", "y"), ("x", "z"), ("w", "y")])
self.assertEqual(G.get_immoralities(), {("w", "x"), ("w", "z")})
G1 = BayesianNetwork([("x", "y"), ("z", "y"), ("z", "x"), ("w", "y")])
self.assertEqual(G1.get_immoralities(), {("w", "x"), ("w", "z")})
G2 = BayesianNetwork(
[("x", "y"), ("z", "y"), ("x", "z"), ("w", "y"), ("w", "x")]
)
self.assertEqual(G2.get_immoralities(), {("w", "z")})
def test_is_iequivalent(self):
G = BayesianNetwork([("x", "y"), ("z", "y"), ("x", "z"), ("w", "y")])
self.assertRaises(TypeError, G.is_iequivalent, MarkovNetwork())
G1 = BayesianNetwork([("V", "W"), ("W", "X"), ("X", "Y"), ("Z", "Y")])
G2 = BayesianNetwork([("W", "V"), ("X", "W"), ("X", "Y"), ("Z", "Y")])
self.assertTrue(G1.is_iequivalent(G2))
G3 = BayesianNetwork([("W", "V"), ("W", "X"), ("Y", "X"), ("Z", "Y")])
self.assertFalse(G3.is_iequivalent(G2))
def test_copy(self):
model_copy = self.G1.copy()
self.assertEqual(sorted(self.G1.nodes()), sorted(model_copy.nodes()))
self.assertEqual(sorted(self.G1.edges()), sorted(model_copy.edges()))
self.assertNotEqual(
id(self.G1.get_cpds("diff")), id(model_copy.get_cpds("diff"))
)
self.G1.remove_cpds("diff")
diff_cpd = TabularCPD("diff", 2, values=[[0.3], [0.7]])
self.G1.add_cpds(diff_cpd)
self.assertNotEqual(self.G1.get_cpds("diff"), model_copy.get_cpds("diff"))
self.G1.remove_node("intel")
self.assertNotEqual(sorted(self.G1.nodes()), sorted(model_copy.nodes()))
self.assertNotEqual(sorted(self.G1.edges()), sorted(model_copy.edges()))
def test_get_random(self):
model = BayesianNetwork.get_random(n_nodes=5, edge_prob=0.5)
self.assertEqual(len(model.nodes()), 5)
self.assertEqual(len(model.cpds), 5)
for cpd in model.cpds:
self.assertTrue(np.allclose(np.sum(cpd.get_values(), axis=0), 1, atol=0.01))
model = BayesianNetwork.get_random(n_nodes=5, edge_prob=0.6, n_states=5)
self.assertEqual(len(model.nodes()), 5)
self.assertEqual(len(model.cpds), 5)
for cpd in model.cpds:
self.assertTrue(np.allclose(np.sum(cpd.get_values(), axis=0), 1, atol=0.01))
model = BayesianNetwork.get_random(
n_nodes=5, edge_prob=0.6, n_states=range(2, 7)
)
self.assertEqual(len(model.nodes()), 5)
self.assertEqual(len(model.cpds), 5)
for cpd in model.cpds:
self.assertTrue(np.allclose(np.sum(cpd.get_values(), axis=0), 1, atol=0.01))
def test_remove_node(self):
self.G1.remove_node("diff")
self.assertEqual(sorted(self.G1.nodes()), sorted(["grade", "intel"]))
self.assertRaises(ValueError, self.G1.get_cpds, "diff")
def test_remove_nodes_from(self):
self.G1.remove_nodes_from(["diff", "grade"])
self.assertEqual(sorted(self.G1.nodes()), sorted(["intel"]))
self.assertRaises(ValueError, self.G1.get_cpds, "diff")
self.assertRaises(ValueError, self.G1.get_cpds, "grade")
def tearDown(self):
del self.G
del self.G1
class TestBayesianModelCPD(unittest.TestCase):
def setUp(self):
self.G = BayesianNetwork([("d", "g"), ("i", "g"), ("g", "l"), ("i", "s")])
self.G2 = DAG([("d", "g"), ("i", "g"), ("g", "l"), ("i", "s")])
self.G_latent = DAG(
[("d", "g"), ("i", "g"), ("g", "l"), ("i", "s")], latents=["d", "g"]
)
def test_active_trail_nodes(self):
self.assertEqual(sorted(self.G2.active_trail_nodes("d")["d"]), ["d", "g", "l"])
self.assertEqual(
sorted(self.G2.active_trail_nodes("i")["i"]), ["g", "i", "l", "s"]
)
self.assertEqual(
sorted(self.G2.active_trail_nodes(["d", "i"])["d"]), ["d", "g", "l"]
)
# For model with latent variables
self.assertEqual(
sorted(self.G_latent.active_trail_nodes("d", include_latents=True)["d"]),
["d", "g", "l"],
)
self.assertEqual(
sorted(self.G_latent.active_trail_nodes("i", include_latents=True)["i"]),
["g", "i", "l", "s"],
)
self.assertEqual(
sorted(
self.G_latent.active_trail_nodes(["d", "i"], include_latents=True)["d"]
),
["d", "g", "l"],
)
self.assertEqual(
sorted(self.G_latent.active_trail_nodes("d", include_latents=False)["d"]),
["l"],
)
self.assertEqual(
sorted(self.G_latent.active_trail_nodes("i", include_latents=False)["i"]),
["i", "l", "s"],
)
self.assertEqual(
sorted(
self.G_latent.active_trail_nodes(["d", "i"], include_latents=False)["d"]
),
["l"],
)
def test_active_trail_nodes_args(self):
self.assertEqual(
sorted(self.G2.active_trail_nodes(["d", "l"], observed="g")["d"]),
["d", "i", "s"],
)
self.assertEqual(
sorted(self.G2.active_trail_nodes(["d", "l"], observed="g")["l"]), ["l"]
)
self.assertEqual(
sorted(self.G2.active_trail_nodes("s", observed=["i", "l"])["s"]), ["s"]
)
self.assertEqual(
sorted(self.G2.active_trail_nodes("s", observed=["d", "l"])["s"]),
["g", "i", "s"],
)
def test_is_dconnected_triplets(self):
self.assertTrue(self.G.is_dconnected("d", "l"))
self.assertTrue(self.G.is_dconnected("g", "s"))
self.assertFalse(self.G.is_dconnected("d", "i"))
self.assertTrue(self.G.is_dconnected("d", "i", observed="g"))
self.assertFalse(self.G.is_dconnected("d", "l", observed="g"))
self.assertFalse(self.G.is_dconnected("i", "l", observed="g"))
self.assertTrue(self.G.is_dconnected("d", "i", observed="l"))
self.assertFalse(self.G.is_dconnected("g", "s", observed="i"))
def test_is_dconnected(self):
self.assertFalse(self.G.is_dconnected("d", "s"))
self.assertTrue(self.G.is_dconnected("s", "l"))
self.assertTrue(self.G.is_dconnected("d", "s", observed="g"))
self.assertFalse(self.G.is_dconnected("s", "l", observed="g"))
def test_is_dconnected_args(self):
self.assertFalse(self.G.is_dconnected("s", "l", "i"))
self.assertFalse(self.G.is_dconnected("s", "l", "g"))
self.assertTrue(self.G.is_dconnected("d", "s", "l"))
self.assertFalse(self.G.is_dconnected("d", "s", ["i", "l"]))
def test_get_cpds(self):
cpd_d = TabularCPD("d", 2, values=np.random.rand(2, 1))
cpd_i = TabularCPD("i", 2, values=np.random.rand(2, 1))
cpd_g = TabularCPD(
"g",
2,
values=np.random.rand(2, 4),
evidence=["d", "i"],
evidence_card=[2, 2],
)
cpd_l = TabularCPD(
"l", 2, values=np.random.rand(2, 2), evidence=["g"], evidence_card=[2]
)
cpd_s = TabularCPD(
"s", 2, values=np.random.rand(2, 2), evidence=["i"], evidence_card=[2]
)
self.G.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)
self.assertEqual(self.G.get_cpds("d").variable, "d")
def test_get_cpds1(self):
self.model = BayesianNetwork([("A", "AB")])
cpd_a = TabularCPD("A", 2, values=np.random.rand(2, 1))
cpd_ab = TabularCPD(
"AB", 2, values=np.random.rand(2, 2), evidence=["A"], evidence_card=[2]
)
self.model.add_cpds(cpd_a, cpd_ab)
self.assertEqual(self.model.get_cpds("A").variable, "A")
self.assertEqual(self.model.get_cpds("AB").variable, "AB")
self.assertRaises(ValueError, self.model.get_cpds, "B")
self.model.add_node("B")
self.assertIsNone(self.model.get_cpds("B"))
def test_add_single_cpd(self):
cpd_s = TabularCPD("s", 2, np.random.rand(2, 2), ["i"], [2])
self.G.add_cpds(cpd_s)
self.assertListEqual(self.G.get_cpds(), [cpd_s])
def test_add_multiple_cpds(self):
cpd_d = TabularCPD("d", 2, values=np.random.rand(2, 1))
cpd_i = TabularCPD("i", 2, values=np.random.rand(2, 1))
cpd_g = TabularCPD(
"g",
2,
values=np.random.rand(2, 4),
evidence=["d", "i"],
evidence_card=[2, 2],
)
cpd_l = TabularCPD(
"l", 2, values=np.random.rand(2, 2), evidence=["g"], evidence_card=[2]
)
cpd_s = TabularCPD(
"s", 2, values=np.random.rand(2, 2), evidence=["i"], evidence_card=[2]
)
self.G.add_cpds(cpd_d, cpd_i, cpd_g, cpd_l, cpd_s)
self.assertEqual(self.G.get_cpds("d"), cpd_d)
self.assertEqual(self.G.get_cpds("i"), cpd_i)
self.assertEqual(self.G.get_cpds("g"), cpd_g)
self.assertEqual(self.G.get_cpds("l"), cpd_l)
self.assertEqual(self.G.get_cpds("s"), cpd_s)
def test_check_model(self):
cpd_g = TabularCPD(
"g",
2,
values=np.array([[0.2, 0.3, 0.4, 0.6], [0.8, 0.7, 0.6, 0.4]]),
evidence=["d", "i"],
evidence_card=[2, 2],
)
cpd_s = TabularCPD(
"s",
2,
values=np.array([[0.2, 0.3], [0.8, 0.7]]),
evidence=["i"],
evidence_card=[2],
)
cpd_l = TabularCPD(
"l",
2,
values=np.array([[0.2, 0.3], [0.8, 0.7]]),
evidence=["g"],
evidence_card=[2],
)
self.G.add_cpds(cpd_g, cpd_s, cpd_l)
self.assertRaises(ValueError, self.G.check_model)
cpd_d = TabularCPD("d", 2, values=[[0.8], [0.2]])
cpd_i = TabularCPD("i", 2, values=[[0.7], [0.3]])
self.G.add_cpds(cpd_d, cpd_i)
self.assertTrue(self.G.check_model())
def test_check_model1(self):
cpd_g = TabularCPD(
"g",
2,
values=np.array([[0.2, 0.3], [0.8, 0.7]]),
evidence=["i"],
evidence_card=[2],
)
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_g = TabularCPD(
"g",
2,
values=np.array([[0.2, 0.3, 0.4, 0.6], [0.8, 0.7, 0.6, 0.4]]),
evidence=["d", "s"],
evidence_card=[2, 2],
)
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_g = TabularCPD(
"g",
2,
values=np.array([[0.2, 0.3], [0.8, 0.7]]),
evidence=["l"],
evidence_card=[2],
)
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_l = TabularCPD(
"l",
2,
values=np.array([[0.2, 0.3], [0.8, 0.7]]),
evidence=["d"],
evidence_card=[2],
)
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
cpd_l = TabularCPD(
"l",
2,
values=np.array([[0.2, 0.3, 0.4, 0.6], [0.8, 0.7, 0.6, 0.4]]),
evidence=["d", "i"],
evidence_card=[2, 2],
)
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
cpd_l = TabularCPD(
"l",
2,
values=np.array(
[
[0.2, 0.3, 0.4, 0.6, 0.2, 0.3, 0.4, 0.6],
[0.8, 0.7, 0.6, 0.4, 0.8, 0.7, 0.6, 0.4],
]
),
evidence=["g", "d", "i"],
evidence_card=[2, 2, 2],
)
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
def test_check_model2(self):
cpd_s = TabularCPD(
"s",
2,
values=np.array([[0.5, 0.3], [0.8, 0.7]]),
evidence=["i"],
evidence_card=[2],
)
self.G.add_cpds(cpd_s)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_s)
cpd_g = TabularCPD(
"g",
2,
values=np.array([[0.2, 0.3, 0.4, 0.6], [0.3, 0.7, 0.6, 0.4]]),
evidence=["d", "i"],
evidence_card=[2, 2],
)
self.G.add_cpds(cpd_g)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_g)
cpd_l = TabularCPD(
"l",
2,
values=np.array([[0.2, 0.3], [0.1, 0.7]]),
evidence=["g"],
evidence_card=[2],
)
self.G.add_cpds(cpd_l)
self.assertRaises(ValueError, self.G.check_model)
self.G.remove_cpds(cpd_l)
def tearDown(self):
del self.G
class TestBayesianModelFitPredict(unittest.TestCase):
def setUp(self):
self.model_disconnected = BayesianNetwork()
self.model_disconnected.add_nodes_from(["A", "B", "C", "D", "E"])
self.model_connected = BayesianNetwork(
[("A", "B"), ("C", "B"), ("C", "D"), ("B", "E")]
)
self.model2 = BayesianNetwork([("A", "C"), ("B", "C")])
self.data1 = pd.DataFrame(data={"A": [0, 0, 1], "B": [0, 1, 0], "C": [1, 1, 0]})
self.data2 = pd.DataFrame(
data={
"A": [0, np.NaN, 1],
"B": [0, 1, 0],
"C": [1, 1, np.NaN],
"D": [np.NaN, "Y", np.NaN],
}
)
# data_link - "https://www.kaggle.com/c/titanic/download/train.csv"
self.titanic_data = pd.read_csv(
"pgmpy/tests/test_estimators/testdata/titanic_train.csv", dtype=str
)
self.titanic_data2 = self.titanic_data[["Survived", "Sex", "Pclass"]]
def test_bayesian_fit(self):
print(isinstance(BayesianEstimator, BaseEstimator))
print(isinstance(MaximumLikelihoodEstimator, BaseEstimator))
self.model2.fit(
self.data1,
estimator=BayesianEstimator,
prior_type="dirichlet",
pseudo_counts={
"A": [[9], [3]],
"B": [[9], [3]],
"C": [[9, 9, 9, 9], [3, 3, 3, 3]],
},
)
self.assertEqual(
self.model2.get_cpds("B"), TabularCPD("B", 2, [[11.0 / 15], [4.0 / 15]])
)
def test_fit_update(self):
model = get_example_model("asia")
model_copy = model.copy()
data = BayesianModelSampling(model).forward_sample(int(1e3))
model.fit_update(data, n_prev_samples=int(1e3))
for var in model.nodes():
self.assertTrue(
model_copy.get_cpds(var).__eq__(model.get_cpds(var), atol=0.1)
)
model = model_copy.copy()
model.fit_update(data)
for var in model.nodes():
self.assertTrue(
model_copy.get_cpds(var).__eq__(model.get_cpds(var), atol=0.1)
)
def test_fit_missing_data(self):
self.model2.fit(
self.data2, state_names={"C": [0, 1]}, complete_samples_only=False
)
cpds = set(
[
TabularCPD("A", 2, [[0.5], [0.5]]),
TabularCPD("B", 2, [[2.0 / 3], [1.0 / 3]]),
TabularCPD(
"C",
2,
[[0, 0.5, 0.5, 0.5], [1, 0.5, 0.5, 0.5]],
evidence=["A", "B"],
evidence_card=[2, 2],
),
]
)
self.assertSetEqual(cpds, set(self.model2.get_cpds()))
def test_disconnected_fit(self):
values = pd.DataFrame(
np.random.randint(low=0, high=2, size=(1000, 5)),
columns=["A", "B", "C", "D", "E"],
)
self.model_disconnected.fit(values)
for node in ["A", "B", "C", "D", "E"]:
cpd = self.model_disconnected.get_cpds(node)
self.assertEqual(cpd.variable, node)
np_test.assert_array_equal(cpd.cardinality, np.array([2]))
value = (
values.loc[:, node].value_counts()
/ values.loc[:, node].value_counts().sum()
)
value = value.reindex(sorted(value.index)).values
np_test.assert_array_equal(cpd.values, value)
def test_predict(self):
titanic = BayesianNetwork()
titanic.add_edges_from([("Sex", "Survived"), ("Pclass", "Survived")])
titanic.fit(self.titanic_data2[500:])
p1 = titanic.predict(self.titanic_data2[["Sex", "Pclass"]][:30])
p2 = titanic.predict(self.titanic_data2[["Survived", "Pclass"]][:30])
p3 = titanic.predict(self.titanic_data2[["Survived", "Sex"]][:30])
p1_res = np.array(
[
"0",
"1",
"0",
"1",
"0",
"0",
"0",
"0",
"0",
"1",
"0",
"1",
"0",
"0",
"0",
"1",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
]
)
p2_res = np.array(
[
"male",
"female",
"female",
"female",
"male",
"male",
"male",
"male",
"female",
"female",
"female",
"female",
"male",
"male",
"male",
"female",
"male",
"female",
"male",
"female",
"male",
"female",
"female",
"female",
"male",
"female",
"male",
"male",
"female",
"male",
]
)
p3_res = np.array(
[
"3",
"1",
"1",
"1",
"3",
"3",
"3",
"3",
"1",
"1",
"1",
"1",
"3",
"3",
"3",
"1",
"3",
"1",
"3",
"1",
"3",
"1",
"1",
"1",
"3",
"1",
"3",
"3",
"1",
"3",
]
)
np_test.assert_array_equal(p1.values.ravel(), p1_res)
np_test.assert_array_equal(p2.values.ravel(), p2_res)
np_test.assert_array_equal(p3.values.ravel(), p3_res)
def test_predict_stochastic(self):
titanic = BayesianNetwork()
titanic.add_edges_from([("Sex", "Survived"), ("Pclass", "Survived")])
titanic.fit(self.titanic_data2[500:])
p1 = titanic.predict(
self.titanic_data2[["Sex", "Pclass"]][:30], stochastic=True
)
p2 = titanic.predict(
self.titanic_data2[["Survived", "Pclass"]][:30], stochastic=True
)
p3 = titanic.predict(
self.titanic_data2[["Survived", "Sex"]][:30], stochastic=True
)
# Acceptable range between 15 - 20.
# TODO: Is there a better way to test this?
self.assertTrue(p1.value_counts().values[0] <= 23)
self.assertTrue(p1.value_counts().values[0] >= 15)
self.assertTrue(p2.value_counts().values[0] <= 22)
self.assertTrue(p2.value_counts().values[0] >= 15)
self.assertTrue(p3.value_counts().values[0] <= 19)
self.assertTrue(p3.value_counts().values[0] >= 8)
def test_connected_predict(self):
np.random.seed(42)
values = pd.DataFrame(
np.array(np.random.randint(low=0, high=2, size=(1000, 5)), dtype=str),
columns=["A", "B", "C", "D", "E"],
)
fit_data = values[:800]
predict_data = values[800:].copy()
self.model_connected.fit(fit_data)
self.assertRaises(ValueError, self.model_connected.predict, predict_data)
predict_data.drop("E", axis=1, inplace=True)
e_predict = self.model_connected.predict(predict_data)
np_test.assert_array_equal(
e_predict.values.ravel(),
np.array(
[
1,
0,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
1,
1,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
0,
0,
1,
1,
0,
1,
1,
0,
1,
1,
1,
0,
0,
1,
1,
0,
1,
1,
1,
0,
1,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
1,
1,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
1,
0,
1,
1,
1,
1,
1,
1,
1,
0,
1,
0,
1,
1,
1,
1,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
1,
0,
1,
0,
1,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
0,
1,
1,
0,
1,
1,
1,
1,
0,
],
dtype=str,
),
)
def test_connected_predict_probability(self):
np.random.seed(42)
values = pd.DataFrame(
np.random.randint(low=0, high=2, size=(100, 5)),
columns=["A", "B", "C", "D", "E"],
)
fit_data = values[:80]
predict_data = values[80:].copy()
self.model_connected.fit(fit_data)
predict_data.drop("E", axis=1, inplace=True)
e_prob = self.model_connected.predict_probability(predict_data)
np_test.assert_allclose(
e_prob.values.ravel(),
np.array(
[
0.57894737,
0.42105263,
0.57894737,
0.42105263,
0.57894737,
0.42105263,
0.5,
0.5,
0.57894737,
0.42105263,
0.5,
0.5,
0.57894737,
0.42105263,
0.57894737,
0.42105263,
0.57894737,
0.42105263,
0.5,
0.5,
0.57894737,
0.42105263,
0.57894737,
0.42105263,
0.5,
0.5,
0.57894737,
0.42105263,
0.57894737,
0.42105263,
0.5,
0.5,
0.57894737,
0.42105263,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
]
),
atol=0,
)
predict_data = pd.DataFrame(
np.random.randint(low=0, high=2, size=(1, 5)),
columns=["A", "B", "C", "F", "E"],
)[:]
def test_predict_probability_errors(self):
np.random.seed(42)
values = pd.DataFrame(
np.random.randint(low=0, high=2, size=(2, 5)),
columns=["A", "B", "C", "D", "E"],
)
fit_data = values[:1]
predict_data = values[1:].copy()
self.model_connected.fit(fit_data)
self.assertRaises(
ValueError, self.model_connected.predict_probability, predict_data
)
predict_data = pd.DataFrame(
np.random.randint(low=0, high=2, size=(1, 5)),
columns=["A", "B", "C", "F", "E"],
)[:]
self.assertRaises(
ValueError, self.model_connected.predict_probability, predict_data
)
def test_do(self):
        # One confounder var with treatment T and outcome C: S -> T -> C ; S -> C
model = BayesianNetwork([("S", "T"), ("T", "C"), ("S", "C")])
cpd_s = TabularCPD(
variable="S",
variable_card=2,
values=[[0.5], [0.5]],
state_names={"S": ["m", "f"]},
)
cpd_t = TabularCPD(
variable="T",
variable_card=2,
values=[[0.25, 0.75], [0.75, 0.25]],
evidence=["S"],
evidence_card=[2],
state_names={"S": ["m", "f"], "T": [0, 1]},
)
cpd_c = TabularCPD(
variable="C",
variable_card=2,
values=[[0.3, 0.4, 0.7, 0.8], [0.7, 0.6, 0.3, 0.2]],
evidence=["S", "T"],
evidence_card=[2, 2],
state_names={"S": ["m", "f"], "T": [0, 1], "C": [0, 1]},
)
model.add_cpds(cpd_s, cpd_t, cpd_c)
model_do_inplace = model.do(["T"], inplace=True)
model_do_new = model.do(["T"], inplace=False)
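        # After do(["T"]) the edge into T should be gone and T should get a
        # uniform marginal, while the CPDs of S and C stay unchanged
        # (verified by the assertions below).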
for m in [model_do_inplace, model_do_new]:
self.assertEqual(sorted(list(m.edges())), sorted([("S", "C"), ("T", "C")]))
self.assertEqual(len(m.cpds), 3)
np_test.assert_array_equal(
m.get_cpds(node="S").values, np.array([0.5, 0.5])
)
np_test.assert_array_equal(
m.get_cpds(node="T").values, np.array([0.5, 0.5])
)
np_test.assert_array_equal(
m.get_cpds(node="C").values,
np.array([[[0.3, 0.4], [0.7, 0.8]], [[0.7, 0.6], [0.3, 0.2]]]),
)
def test_simulate(self):
asia = get_example_model("asia")
n_samples = int(1e3)
samples = asia.simulate(n_samples=n_samples)
self.assertEqual(samples.shape[0], n_samples)
def tearDown(self):
del self.model_connected
del self.model_disconnected
class TestDAGCPDOperations(unittest.TestCase):
def setUp(self):
self.graph = BayesianNetwork()
def test_add_single_cpd(self):
cpd = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd)
self.assertListEqual(self.graph.get_cpds(), [cpd])
def test_add_multiple_cpds(self):
cpd1 = TabularCPD("diff", 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD("intel", 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertListEqual(self.graph.get_cpds(), [cpd1, cpd2, cpd3])
def test_remove_single_cpd(self):
cpd1 = TabularCPD("diff", 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD("intel", 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds(cpd1)
self.assertListEqual(self.graph.get_cpds(), [cpd2, cpd3])
def test_remove_multiple_cpds(self):
cpd1 = TabularCPD("diff", 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD("intel", 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds(cpd1, cpd3)
self.assertListEqual(self.graph.get_cpds(), [cpd2])
def test_remove_single_cpd_string(self):
cpd1 = TabularCPD("diff", 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD("intel", 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds("diff")
self.assertListEqual(self.graph.get_cpds(), [cpd2, cpd3])
def test_remove_multiple_cpds_string(self):
cpd1 = TabularCPD("diff", 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD("intel", 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.graph.remove_cpds("diff", "grade")
self.assertListEqual(self.graph.get_cpds(), [cpd2])
def test_get_values_for_node(self):
cpd1 = TabularCPD("diff", 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD("intel", 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertEqual(self.graph.get_cpds("diff"), cpd1)
self.assertEqual(self.graph.get_cpds("intel"), cpd2)
self.assertEqual(self.graph.get_cpds("grade"), cpd3)
def test_get_values_raises_error(self):
cpd1 = TabularCPD("diff", 2, values=np.random.rand(2, 1))
cpd2 = TabularCPD("intel", 2, values=np.random.rand(2, 1))
cpd3 = TabularCPD(
"grade",
2,
values=np.random.rand(2, 4),
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.graph.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.graph.add_cpds(cpd1, cpd2, cpd3)
self.assertRaises(ValueError, self.graph.get_cpds, "sat")
def tearDown(self):
del self.graph
| mit |
camallen/aggregation | experimental/penguins/clusterAnalysis/penguins_at_5.py | 2 | 5268 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import urllib
import matplotlib.cbook as cbook
from PIL import Image
import matplotlib.pyplot as plt
import warnings
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
#from divisiveDBSCAN import DivisiveDBSCAN
from divisiveDBSCAN_multi import DivisiveDBSCAN
from clusterCompare import cluster_compare
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
client = pymongo.MongoClient()
db = client['penguin_2014-10-12']
collection = db["penguin_classifications"]
collection2 = db["penguin_subjects"]
steps = [5,20]
penguins_at = {k:[] for k in steps}
alreadyThere = False
subject_index = 0
import cPickle as pickle
to_sample = pickle.load(open(base_directory+"/Databases/sample.pickle","rb"))
import random
#for subject in collection2.find({"classification_count": 20}):
noise_list = {k:[] for k in steps}
for zooniverse_id in random.sample(to_sample,len(to_sample)):
zooniverse_id = "APZ00010ep"
subject = collection2.find_one({"zooniverse_id": zooniverse_id})
subject_index += 1
#if subject_index == 2:
# break
#zooniverse_id = subject["zooniverse_id"]
print "=== " + str(subject_index)
print zooniverse_id
alreadyThere = True
user_markings = {k:[] for k in steps}
user_ips = {k:[] for k in steps}
penguins_tagged = []
user_index = 0
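    # Gather the (x, y) markings tagged as "adult" or "chick" from up to 20
    # classifications of this subject, keeping separate marking/IP lists for
    # the first 5 and the first 20 users (the two step sizes in `steps`).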
for classification in collection.find({"subjects" : {"$elemMatch": {"zooniverse_id":zooniverse_id}}}):
user_index += 1
if user_index == 21:
break
per_user = []
ip = classification["user_ip"]
tt = 0
try:
markings_list = classification["annotations"][1]["value"]
if isinstance(markings_list,dict):
for marking in markings_list.values():
if marking["value"] in ["adult","chick"]:
x,y = (float(marking["x"]),float(marking["y"]))
if not((x,y) in per_user):
per_user.append((x,y))
for s in steps:
if user_index <= s:
print user_index,ip,(x,y)
user_markings[s].append((x,y))
user_ips[s].append(ip)
tt += 1
except (KeyError, ValueError):
#classification["annotations"]
user_index += -1
penguins_tagged.append(tt)
if user_markings[5] == []:
print "skipping empty"
subject_index += -1
continue
url = subject["location"]["standard"]
object_id= str(subject["_id"])
image_path = base_directory+"/Databases/penguins/images/"+object_id+".JPG"
if not(os.path.isfile(image_path)):
urllib.urlretrieve(url, image_path)
penguins = []
penguins_center = {}
noise_points = {}
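    # Cluster the markings for each step size with divisive DBSCAN; the
    # cluster centres are taken as the penguin positions for that many users,
    # and the leftover points are kept as noise.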
try:
for s in steps:
if s == 25:
user_identified_penguins,penguin_clusters,noise__ = DivisiveDBSCAN(3).fit(user_markings[s],user_ips[s],debug=True,jpeg_file=base_directory + "/Databases/penguins/images/"+object_id+".JPG")
else:
user_identified_penguins,penguin_clusters,noise__ = DivisiveDBSCAN(3).fit(user_markings[s],user_ips[s],debug=True)
penguins_at[s].append(len(user_identified_penguins))
penguins_center[s] = user_identified_penguins
#noise_list[s].append(noise)
penguins.append(penguin_clusters)
#print penguin_clusters
#print noise__
noise_points[s] = [x for x,u in noise__]
print str(s) + " - " + str(len(user_identified_penguins))
if len(user_identified_penguins) > 20:
break
except AssertionError:
continue
#if len(user_identified_penguins) == 0:
# continue
if len(user_identified_penguins) <= 20:
#print noise__
not_found = cluster_compare(penguins[0],penguins[-1])
#if not_found == []:
# continue
image_file = cbook.get_sample_data(base_directory + "/Databases/penguins/images/"+object_id+".JPG")
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
try:
X,Y = zip(*penguins_center[5])
plt.plot(X,Y,'.',color="red")
except ValueError:
pass
print penguins_tagged
print np.mean(penguins_tagged[0:5])
print np.mean(penguins_tagged)
#X,Y = zip(*noise_points[5])
print user_markings[5]
X,Y = zip(*user_markings[5])
#plt.plot(X,Y,'.',color="green")
#print [(x,y) for i,(x,y) in enumerate(user_identified_penguins) if i in not_found]
#X,Y = zip(*[(x,y) for i,(x,y) in enumerate(user_identified_penguins) if i in not_found])
#X,Y = zip(*noise)
#plt.plot(X,Y,'.',color="blue")
plt.show()
break | apache-2.0 |
cqychen/quants | quants/loaddata/skyeye_ods_classified_zz500s.py | 1 | 1324 | #coding=utf8
import tushare as ts;
import pymysql;
import time as dt
from datashape.coretypes import string
from pandas.io.sql import SQLDatabase
import sqlalchemy
import datetime
from sqlalchemy import create_engine
from pandas.io import sql
import threading
import pandas as pd;
import sys
sys.path.append('../') # add the parent directory to the path for the shared config/helper modules
from common_function import *
def load_data():
    # download the CSI 500 (zz500s) constituent/classification list and load it into MySQL
try:
rs=ts.get_zz500s()
pd.DataFrame.to_sql(rs, name=table_name, con=con , schema=db, if_exists='replace',index=True)
except Exception as e:
print(e.message)
print("公司基本分类信息数据出错")
if __name__ == '__main__':
    #-------------------- basic setup ---------------------------------
print("--------------加载公司基本分类信息开始-----------------------------")
startTime=dt.time()
con=get_mysql_conn()
db='ods_data'
table_name='ods_classified_zz500s'
#conn = pymysql.connect(user=user, passwd=passwd,host=iphost, db=db,charset=charset)
    #-------------------- script execution starts --------------------------------
load_data()
endTime=dt.time()
print("---------------脚本运行完毕,共计耗费时间%sS------------------"%(endTime-startTime))
| epl-1.0 |
madjelan/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
xubenben/scikit-learn | sklearn/mixture/gmm.py | 128 | 31069 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. the best results is kept
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
A initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
A initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
hirofumi0810/tensorflow_end2end_speech_recognition | utils/training/plot.py | 1 | 2592 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
plt.style.use('ggplot')
import seaborn as sns
blue = '#4682B4'
orange = '#D2691E'
def plot_loss(train_losses, dev_losses, steps, save_path):
"""Save history of training & dev loss as figure.
Args:
train_losses (list): train losses
dev_losses (list): dev losses
steps (list): steps
"""
# Save as csv file
loss_graph = np.column_stack((steps, train_losses, dev_losses))
if os.path.isfile(os.path.join(save_path, "ler.csv")):
os.remove(os.path.join(save_path, "ler.csv"))
np.savetxt(os.path.join(save_path, "loss.csv"), loss_graph, delimiter=",")
# TODO: error check for inf loss
# Plot & save as png file
plt.clf()
plt.plot(steps, train_losses, blue, label="Train")
plt.plot(steps, dev_losses, orange, label="Dev")
plt.xlabel('step', fontsize=12)
plt.ylabel('loss', fontsize=12)
plt.legend(loc="upper right", fontsize=12)
if os.path.isfile(os.path.join(save_path, "loss.png")):
os.remove(os.path.join(save_path, "loss.png"))
plt.savefig(os.path.join(save_path, "loss.png"), dvi=500)
def plot_ler(train_lers, dev_lers, steps, label_type, save_path):
"""Save history of training & dev LERs as figure.
Args:
train_lers (list): train losses
dev_lers (list): dev losses
steps (list): steps
"""
if 'word' in label_type:
name = 'WER'
elif 'char' in label_type or 'kana' in label_type or 'kanji' in label_type:
name = 'CER'
elif 'phone' in label_type:
name = 'PER'
else:
name = 'LER'
# Save as csv file
loss_graph = np.column_stack((steps, train_lers, dev_lers))
if os.path.isfile(os.path.join(save_path, "ler.csv")):
os.remove(os.path.join(save_path, "ler.csv"))
np.savetxt(os.path.join(save_path, "ler.csv"), loss_graph, delimiter=",")
# Plot & save as png file
plt.clf()
plt.plot(steps, train_lers, blue, label="Train")
plt.plot(steps, dev_lers, orange, label="Dev")
plt.xlabel('step', fontsize=12)
plt.ylabel(name, fontsize=12)
plt.legend(loc="upper right", fontsize=12)
if os.path.isfile(os.path.join(save_path, name.lower() + '.png')):
os.remove(os.path.join(save_path, name.lower() + '.png'))
    plt.savefig(os.path.join(save_path, name.lower() + '.png'), dpi=500)
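# Illustrative usage (not part of the original module; the variable values
# below are hypothetical):
#   plot_loss(train_losses, dev_losses, steps, save_path='./results')
#   plot_ler(train_lers, dev_lers, steps, label_type='character', save_path='./results')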
| mit |
allrod5/extra-trees | benchmarks/regression/regression.py | 1 | 2437 |
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
from extra_trees.ensemble.forest import ExtraTreesRegressor
# Create a random regression problem (noisy sine curve)
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression models
regr_1 = ExtraTreesRegressor(min_samples_split=5)
regr_2 = RandomForestRegressor()
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
print(len(y))
print(len(y_1))
# Print some statistics
print("ExtraForest\n-----------------")
print("explained_variance={}".format(explained_variance_score(y, regr_1.predict(X))))
print("mean_absolute_error={}".format(mean_absolute_error(y, regr_1.predict(X))))
print("mean_squared_error={}".format(mean_squared_error(y, regr_1.predict(X))))
#print("mean_squared_log_error={}".format(mean_squared_log_error(y, regr_1.predict(X))))
print("median_absolute_error={}".format(median_absolute_error(y, regr_1.predict(X))))
print("r2_score={}".format(r2_score(y, regr_1.predict(X))))
print()
print("RandomForest\n-----------------")
print("explained_variance={}".format(explained_variance_score(y, regr_2.predict(X))))
print("mean_absolute_error={}".format(mean_absolute_error(y, regr_2.predict(X))))
print("mean_squared_error={}".format(mean_squared_error(y, regr_2.predict(X))))
#print("mean_squared_log_error={}".format(mean_squared_log_error(y, regr_2.predict(X))))
print("median_absolute_error={}".format(median_absolute_error(y, regr_2.predict(X))))
print("r2_score={}".format(r2_score(y, regr_2.predict(X))))
print()
# Plot the results
plt.figure()
plt.scatter(X, y, s=20, edgecolor="black", c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label=regr_1.__class__.__name__, linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label=regr_2.__class__.__name__, linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Extremely Randomized Tree Regression")
plt.legend()
plt.show()
| mit |
vitale232/ves | ves/ves_inverse.py | 1 | 8387 | import os
import sys
import random # not for prod
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
os.chdir('./templates') # not for prod
print(os.getcwd())
print(os.listdir())
from templates.tempData import coefficients
schlumbergerFilterCoefficients, wennerFilterCoefficients = coefficients
# Schlumberger filter
# fltr1 = [0., .00046256, -.0010907, .0017122, -.0020687,
# .0043048, -.0021236, .015995, .017065, .098105, .21918, .64722,
# 1.1415, .47819, -3.515, 2.7743, -1.201, .4544, -.19427, .097364,
# -.054099, .031729, -.019109, .011656, -.0071544, .0044042,
# -.002715, .0016749, -.0010335, .00040124]
# #Wenner Filter
# fltr2 = [0., .000238935, .00011557, .00017034, .00024935,
# .00036665, .00053753, .0007896, .0011584, .0017008, .0024959,
# .003664, .0053773, .007893, .011583, .016998, .024934, .036558,
# .053507, .078121, .11319, .16192, .22363, .28821, .30276, .15523,
# -.32026, -.53557, .51787, -.196, .054394, -.015747, .0053941,
# -.0021446, .000665125]
#I know there must be a better method to assign lists. And probably numpy
#arrays would be best. But my Python wasn't up to it. If the last letter
#is an 'l' that means it is a log10 of the value
# 65 is completely arbitrary - > Nearing retirement?
p = [0] * 20
r = [0] * 65
rl = [0] * 65
t = [0] * 50
b = [0] * 65
asav = [0] * 65
asavl = [0] * 65
adatl = [0] * 65
rdatl = [0] * 65
adat = [0] * 65
rdat = [0] * 65
pkeep = [0] * 65
rkeep = [0] * 65
rkeepl = [0] * 65
pltanswer = [0] * 65
pltanswerl = [0] * 65
pltanswerkeep = [0] * 65
pltanswerkeepl = [0] * 65
rl = [0] * 65
small = [0] * 65
xlarge = [0] * 65
x=[0] * 100
y = [0] * 100
y2 = [0] * 100
u = [0] * 5000
new_x = [0] * 1000
new_y = [0] * 1000
ndat = 13
#hard coded data input - spacing and apparent resistivities measured
#in the field
adat = [0., 0.55, 0.95, 1.5, 2.5, 3., 4.5, 5.5, 9., 12., 20., 30., 70.]
rdat = [0., 125., 110., 95., 40., 24., 15., 10.5, 8., 6., 6.5, 11., 25.]
one30 = 1.e30
rms = one30
errmin = 1.e10
# this is where the range in parameters should be input from a GUI
# I'm hard coding this in for now
#enter thickness range for each layer and then resistivity range.
#for 3 layers small[1] and small[2] are low end of thickness range
# small[3], small[4] and small[5] are the low end of resistivities
small[1] = 1.
xlarge[1] = 5
small[2] = 10.
xlarge[2] = 75.
small[3] = 20.
xlarge[3] = 200.
small[4] = 2.
xlarge[4] = 100
small[5] = 500.
xlarge[5] = 3000.
iter_ = 10000 #number of iterations for the Monte Carlo guesses. to be input on GUI
# INPUT
arrayType = 'schlumberger'
e = 3 #number of layers
n = 2*e-1
spac = 0.2 # smallest electrode spacing
m = 20 # number of points where resistivity is calculated
spac = np.log(spac)
delx = np.log(10.0) / 6.
# these lines apparently find the computer precision ep
ep = 1.0
ep = ep / 2.0
fctr = ep + 1.
while fctr > 1.:
ep = ep / 2.0
fctr = ep + 1.
def readData():
#normally this is where the data would be read from the csv file
# but now I'm just hard coding it in as global lists
for i in range(1,ndat,1):
adatl[i] = np.log10(adat[i])
rdatl[i] = np.log10(rdat[i])
return adatl
def error():
sumerror = 0.
#pltanswer = [0]*64
spline(m, one30, one30, asavl, rl, y2)
for i in range(1,ndat, 1):
ans = splint(m, adatl[i], asavl, rl, y2)
sumerror = sumerror + (rdatl[i] - ans) * (rdatl[i] - ans)
#print(i,sum1,rdat[i],rdatl[i],ans)
pltanswerl[i] = ans
pltanswer[i] = np.power(10, ans)
rms = np.sqrt(sumerror / (ndat - 1))
return rms
def transf(y, i):
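    # My reading of this routine: it evaluates the resistivity transform of
    # the layered model at abscissa y by the usual bottom-up recurrence,
    # starting from the bottom-layer resistivity p[n] and folding in each
    # shallower layer via its thickness p[e+1-j] and resistivity p[n+1-j];
    # the result is stored in r[i].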
u = 1. / np.exp(y)
t[1] = p[n]
for j in range(2, e + 1, 1):
pwr = -2. * u * p[e + 1 - j]
if pwr < np.log(2. * ep):
pwr = np.log(2. * ep)
a = np.exp(pwr)
b = (1. - a) / (1. + a)
rs = p[n + 1 - j]
tpr = b * rs
t[j] = (tpr + t[j - 1]) / (1. + tpr * t[j - 1] / (rs * rs))
r[i] = t[e]
return
def filters(b, k):
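    # Convolve the sampled transform values in r[] with the k filter
    # coefficients b[] (the Ghosh-style linear filter chosen in rmsfit) to
    # turn them into apparent resistivities at the m output spacings;
    # r[1..m] is overwritten in place.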
for i in range(1, m + 1, 1):
re = 0.
for j in range(1, k + 1, 1):
re = re + b[j] * r[i + k - j]
r[i] = re
return
def rmsfit():
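    # Forward model for the current parameter vector p: sample the transform
    # at the abscissae required by the chosen array type, convolve with the
    # matching filter (Schlumberger or Wenner), then return the RMS of the
    # log-residuals between predicted and observed apparent resistivities.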
if arrayType.lower() == 'schlumberger':
y = spac - 19. * delx - 0.13069
mum1 = m + 28
for i in range(1, mum1 + 1, 1):
transf(y, i)
y = y + delx
filters(schlumbergerFilterCoefficients, 29)
elif arrayType.lower() == 'wenner':
s = np.log(2.)
y = spac - 10.8792495 * delx
mum2 = m + 33
for i in range(1, mum2 + 1, 1):
transf(y, i)
a = r[i]
y1 = y + s
transf(y1, i)
r[i] = 2. * a - r[i]
y = y + delx
filters(wennerFilterCoefficients, 34)
else:
print(" type of survey not indicated")
sys.exit()
x = spac
for i in range(1, m+1, 1):
a = np.exp(x)
asav[i] = a
asavl[i] = np.log10(a)
rl[i] = np.log10(r[i])
x = x + delx
rms = error()
return rms
# my code to do a spline fit to predicted data at the nice spacing of Ghosh
# use splint to determine the spline interpolated prediction at the
# spacing where the measured resistivity was taken - to compare observation
# to prediction
def spline(n, yp1, ypn, x=[] ,y=[] ,y2=[]):
u = [0] * 1000
one29 = 0.99e30
#print(x,y)
if yp1 > one29:
y2[0] = 0.
u[0] = 0.
else:
y2[0] = -0.5
u[0] = (3. / (x[1] - x[0])) * ((y[1] - y[0]) / (x[1] - x[0]) - yp1)
for i in range(1, n):
#print(i,x[i])
sig = (x[i] - x[i-1]) / (x[i+1] - x[i-1])
p=sig * y2[i - 1] + 2.
y2[i] = (sig-1.) / p
u[i] = (
            # note: the left slope's denominator is grouped as (x[i] - x[i-1]),
            # matching the standard natural cubic spline recurrence
            (6. * ((y[i + 1] - y[i]) / (x[i + 1] - x[i]) - (y[i] - y[i-1]) /
             (x[i] - x[i-1])) / (x[i + 1] - x[i - 1]) - sig * u[i - 1]) / p )
if ypn > one29:
qn = 0.
un = 0.
else:
qn = 0.5
un = (
(3. / (x[n] - x[n - 1])) *
(ypn - (y[n] - y[n - 1]) /
(x[n] - x[n - 1]))
)
y2[n] = (un - qn * u[n - 1]) / (qn * y2[n - 1] + 1.)
for k in range(n-1, -1, -1):
y2[k] = y2[k] * y2[k + 1] + u[k]
return
def splint(n, x ,xa=[], ya=[], y2a=[]):
klo = 0
khi = n
while khi - klo > 1:
k = int((khi + klo) // 2)
if xa[k] > x:
khi = k
else:
klo = k
h = xa[khi] - xa[klo]
if abs(h) < 1e-20:
print(" bad xa input")
a = (xa[khi] - x) / h
b = (x - xa[klo]) / h
y = (a * ya[klo] + b * ya[khi] + ((a * a * a - a) * y2a[klo] +
(b * b * b - b) * y2a[khi]) * (h * h) /6.)
return y
readData()
print(adat[1:ndat],rdat[1:ndat])
print('log stufffff')
print(adatl[1:ndat],rdatl[1:ndat])
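# Monte Carlo inversion: draw each model parameter uniformly from its
# [small, xlarge] range, forward-model the sounding curve, and keep the
# parameter set with the lowest RMS misfit found so far.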
for iloop in range(1, iter_ + 1):
#print( ' iloop is ', iloop)
for i in range(1, n + 1):
randNumber = random.random()
#print(randNumber, ' random')
p[i] = (xlarge[i] - small[i]) * randNumber + small[i]
rms = rmsfit()
if rms < errmin:
print('rms ', rms, ' errmin ', errmin)
for i in range(1,n+1,1):
pkeep[i] = p[i]
for i in range(1, m+1, 1):
rkeep[i] = r[i]
rkeepl[i] = rl[i]
for i in range(1,ndat+1,1):
pltanswerkeepl[i] = pltanswerl[i]
pltanswerkeep[i] = pltanswer[i]
errmin = rms
#output the best fitting earth model
print(' Layer ', ' Thickness ', ' Res_ohm-m ')
for i in range(1,e,1):
print(i, pkeep[i], pkeep[e+i-1])
print( e, ' Infinite ', pkeep[n])
for i in range(1,m+1, 1):
asavl[i] = np.log10(asav[i])
#output the error of fit
print( ' RMS error ', errmin)
print( ' Spacing', ' Res_pred ', ' Log10_spacing ', ' Log10_Res_pred ')
for i in range(1,m+1,1):
#print(asav[i], rkeep[i], asavl[i], rkeepl[i])
print("%7.2f %9.3f %9.3f %9.3f" % ( asav[i], rkeep[i],
asavl[i], rkeepl[i]))
plt.loglog(asav[1:m], rkeep[1:m], '-') # resistivity prediction curve
plt.loglog(adat[1:ndat], pltanswerkeep[1:ndat], 'ro') # predicted data red dots
s=7
plt.loglog(adat[1:ndat], rdat[1:ndat], 'bo', markersize=s)  # original data, blue dots
plt.show()
plt.grid(True)
sys.exit(0)
| lgpl-3.0 |
jorgemauricio/INIFAP_Course | algoritmos/algoritmo_ith.py | 1 | 1932 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 16:17:25 2017
@author: jorgemauricio
"""
#%% import libraries
import pandas as pd
import os
#%% Clear terminal
os.system('clear')
#%% load data from csv file
print('***** read csv file')
data = pd.read_csv('../data/data_course_aguascalientes.csv')
#%% Display head of file
data.head()
#%% How many rows are in the dataset
data['number'].count()
#%% Drop NA values from the rows
print('***** drop null values')
data = data.dropna()
#%% How many rows are in the dataset after dropping NA values
data['number'].count()
#%% Create dateFormat column (YYYY-MM)
print('***** create dateFormat column')
data['dateFormat'] = data.apply(lambda x: '%d-%d' % (x['year'], x['month']), axis=1)
#%% Function to compute the ITH (temperature-humidity index)
def calcularITH(ta, hr):
"""
Calculate ith from ta and hr
return ith
ith = 1.8 * ta + 32 - (0.55 - (0.55 * (hr/100.0))) * (1.8 * ta) - 26.0
param: ta: temperatura ambiente
param: hr: humedad relativa
param: ith: indice termohigrometrico
"""
cal1 = 1.8 * ta
cal2 = (0.55 - (0.55 * (hr/100.0)))
cal3 = (1.8 * ta) - 26.0
ith = cal1 + 32 - cal2 * cal3
return ith
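#%% Quick sanity check of the formula above (hypothetical values: ta = 30 °C, hr = 40 %)
# 1.8*30 + 32 - (0.55 - 0.55*0.40) * (1.8*30 - 26) = 86 - 0.33*28 = 76.76
print(calcularITH(30.0, 40.0))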
#%% create tmed column (mean temperature)
data['tmed'] = (data['tmax'] + data['tmin']) / 2
#%% display head of file
data.head()
#%% compute ith
data['ith'] = data.apply(lambda x: calcularITH(x['tmed'], x['humr']), axis=1)
#%% display head of file
data.head()
#%% Aggregation
aggregations = {
    'ith': ['min', 'max', 'median', 'mean', 'std']
}
#%% Apply aggregation
print('***** group data by dateFormat column')
data.groupby('dateFormat').agg(aggregations)
#%% data head
data.head()
#%% Save to CSV
print('***** save file')
grouped = data.groupby(['lat', 'long','number','dateFormat']).agg(aggregations)
grouped.columns = ["_".join(x) for x in grouped.columns.ravel()]
grouped.to_csv('../resultados/baseAgrupadaPorAnioMesITH.csv')
#%% grouped data head()
grouped.head()
| mit |
bavardage/statsmodels | statsmodels/tsa/base/tsa_model.py | 3 | 10216 | import statsmodels.base.model as base
from statsmodels.base import data
import statsmodels.base.wrapper as wrap
from statsmodels.tsa.base import datetools
from numpy import arange, asarray
from pandas import Index
from pandas import datetools as pandas_datetools
import datetime
_freq_to_pandas = datetools._freq_to_pandas
_tsa_doc = """
%(model)s
Parameters
----------
%(params)s
dates : array-like of datetime, optional
An array-like object of datetime objects. If a pandas object is given
for endog or exog, it is assumed to have a DateIndex.
freq : str, optional
The frequency of the time-series. A Pandas offset or 'B', 'D', 'W',
'M', 'A', or 'Q'. This is optional if dates are given.
%(extra_params)s
%(extra_sections)s
"""
_model_doc = "Timeseries model base class"
_generic_params = base._model_params_doc
_missing_param_doc = base._missing_param_doc
class TimeSeriesModel(base.LikelihoodModel):
__doc__ = _tsa_doc % {"model" : _model_doc, "params" : _generic_params,
"extra_params" : _missing_param_doc,
"extra_sections" : ""}
def __init__(self, endog, exog=None, dates=None, freq=None, missing='none'):
super(TimeSeriesModel, self).__init__(endog, exog, missing=missing)
self._init_dates(dates, freq)
def _init_dates(self, dates, freq):
if dates is None:
dates = self.data.row_labels
if dates is not None:
if (not isinstance(dates[0], datetime.datetime) and
isinstance(self.data, data.PandasData)):
raise ValueError("Given a pandas object and the index does "
"not contain dates")
if not freq:
try:
freq = datetools._infer_freq(dates)
except:
raise ValueError("Frequency inference failed. Use `freq` "
"keyword.")
dates = Index(dates)
self.data.dates = dates
if freq:
try: #NOTE: Can drop this once we move to pandas >= 0.8.x
_freq_to_pandas[freq]
except:
raise ValueError("freq %s not understood" % freq)
self.data.freq = freq
def _get_exog_names(self):
return self.data.xnames
def _set_exog_names(self, vals):
if not isinstance(vals, list):
vals = [vals]
self.data.xnames = vals
#overwrite with writable property for (V)AR models
exog_names = property(_get_exog_names, _set_exog_names)
def _get_dates_loc(self, dates, date):
if hasattr(dates, 'indexMap'): # 0.7.x
date = dates.indexMap[date]
else:
date = dates.get_loc(date)
try: # pandas 0.8.0 returns a boolean array
len(date)
from numpy import where
date = where(date)[0].item()
except TypeError: # this is expected behavior
pass
return date
def _str_to_date(self, date):
"""
Takes a string and returns a datetime object
"""
return datetools.date_parser(date)
def _set_predict_start_date(self, start):
dates = self.data.dates
if dates is None:
return
if start > len(dates):
raise ValueError("Start must be <= len(endog)")
if start == len(dates):
self.data.predict_start = datetools._date_from_idx(dates[-1],
start, self.data.freq)
elif start < len(dates):
self.data.predict_start = dates[start]
else:
raise ValueError("Start must be <= len(dates)")
def _get_predict_start(self, start):
"""
Returns the index of the given start date. Subclasses should define
default behavior for start = None. That isn't handled here.
Start can be a string or an integer if self.data.dates is None.
"""
dates = self.data.dates
if isinstance(start, str):
if dates is None:
raise ValueError("Got a string for start and dates is None")
dtstart = self._str_to_date(start)
self.data.predict_start = dtstart
try:
start = self._get_dates_loc(dates, dtstart)
except KeyError:
raise ValueError("Start must be in dates. Got %s | %s" %
(str(start), str(dtstart)))
self._set_predict_start_date(start)
return start
def _get_predict_end(self, end):
"""
See _get_predict_start for more information. Subclasses do not
need to define anything for this.
"""
out_of_sample = 0 # will be overwritten if needed
if end is None: # use data for ARIMA - endog changes
end = len(self.data.endog) - 1
dates = self.data.dates
freq = self.data.freq
if isinstance(end, str):
if dates is None:
raise ValueError("Got a string for end and dates is None")
try:
dtend = self._str_to_date(end)
self.data.predict_end = dtend
end = self._get_dates_loc(dates, dtend)
except KeyError, err: # end is greater than dates[-1]...probably
if dtend > self.data.dates[-1]:
end = len(self.data.endog) - 1
freq = self.data.freq
out_of_sample = datetools._idx_from_dates(dates[-1], dtend,
freq)
else:
if freq is None:
raise ValueError("There is no frequency for these "
"dates and date %s is not in dates "
"index. Try giving a date that is in "
"the dates index or use an integer."
% dtend)
else: #pragma: no cover
raise err # should never get here
self._make_predict_dates() # attaches self.data.predict_dates
elif isinstance(end, int) and dates is not None:
try:
self.data.predict_end = dates[end]
except IndexError, err:
nobs = len(self.data.endog) - 1 # as an index
out_of_sample = end - nobs
end = nobs
if freq is not None:
self.data.predict_end = datetools._date_from_idx(dates[-1],
out_of_sample, freq)
elif out_of_sample <= 0: # have no frequency but are in sample
#TODO: what error to catch here to make sure dates is
#on the index?
try:
self.data.predict_end = self._get_dates_loc(dates,
end)
except KeyError:
raise
else:
self.data.predict_end = end + out_of_sample
self.data.predict_start = self._get_dates_loc(dates,
self.data.predict_start)
self._make_predict_dates()
elif isinstance(end, int):
nobs = len(self.data.endog) - 1 # is an index
if end > nobs:
out_of_sample = end - nobs
end = nobs
elif freq is None: # should have a date with freq = None
raise ValueError("When freq is None, you must give an integer "
"index for end.")
return end, out_of_sample
def _make_predict_dates(self):
data = self.data
dtstart = data.predict_start
dtend = data.predict_end
freq = data.freq
if freq is not None:
pandas_freq = _freq_to_pandas[freq]
try:
from pandas import DatetimeIndex
dates = DatetimeIndex(start=dtstart, end=dtend,
freq=pandas_freq)
except ImportError, err:
from pandas import DateRange
dates = DateRange(dtstart, dtend, offset = pandas_freq).values
        # handle integer start/end when there is no frequency
elif freq is None and (isinstance(dtstart, int) and
isinstance(dtend, int)):
from pandas import Index
dates = Index(range(dtstart, dtend+1))
# if freq is None and dtstart and dtend aren't integers, we're
# in sample
else:
dates = self.data.dates
start = self._get_dates_loc(dates, dtstart)
end = self._get_dates_loc(dates, dtend)
dates = dates[start:end+1] # is this index inclusive?
self.data.predict_dates = dates
class TimeSeriesModelResults(base.LikelihoodModelResults):
def __init__(self, model, params, normalized_cov_params, scale=1.):
self.data = model.data
super(TimeSeriesModelResults,
self).__init__(model, params, normalized_cov_params, scale)
class TimeSeriesResultsWrapper(wrap.ResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_attrs,
_attrs)
_methods = {'predict' : 'dates'}
_wrap_methods = wrap.union_dicts(base.LikelihoodResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(TimeSeriesResultsWrapper,
TimeSeriesModelResults)
if __name__ == "__main__":
import statsmodels.api as sm
import datetime
import pandas
data = sm.datasets.macrodata.load()
#make a DataFrame
#TODO: attach a DataFrame to some of the datasets, for quicker use
dates = [str(int(x[0])) +':'+ str(int(x[1])) \
for x in data.data[['year','quarter']]]
df = pandas.DataFrame(data.data[['realgdp','realinv','realcons']], index=dates)
ex_mod = TimeSeriesModel(df)
#ts_series = pandas.TimeSeries()
| bsd-3-clause |