repo_name | path | copies | size | content | license
---|---|---|---|---|---|
vigilv/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
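A minimal usage sketch of the loaders and generators re-exported above (this assumes scikit-learn is installed; the names come straight from the import list in this __init__.py file):

from sklearn.datasets import load_iris, make_classification

iris = load_iris()                                    # bundled reference dataset
print(iris.data.shape, iris.target.shape)             # (150, 4) (150,)

# artificial data generator: 100 samples, 20 features, binary target
X, y = make_classification(n_samples=100, n_features=20, random_state=0)
print(X.shape, y.shape)                               # (100, 20) (100,)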
morelab/teseo2014 | data/analyzer.py | 2 | 25355 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 22 08:55:14 2014
@author: aitor
"""
import mysql.connector
import networkx as nx
from networkx.generators.random_graphs import barabasi_albert_graph
import json
import os.path
import numpy as np
import pandas as pd
from pandas import Series
from pandas import DataFrame
import matplotlib.pyplot as plt
config = {
'user': 'aitor',
'password': 'pelicano',
'host': 'thor.deusto.es',
'database': 'teseo_clean',
}
persons_university = []
persons_id = []
first_level_topic_list = {
11: 'Logic',
12: 'Mathematics',
21: 'Astronomy, Astrophysics',
22: 'Physics',
23: 'Chemistry',
24: 'Life Sciences',
25: 'Earth and space science',
31: 'Agricultural Sciences',
32: 'Medical Sciences',
33: 'Technological Sciences',
51: 'Anthropology',
52: 'Demography',
53: 'Economic Sciences',
54: 'Geography',
55: 'History',
56: 'Juridical Science and Law',
57: 'Linguistics',
58: 'Pedagogy',
59: 'Political Science',
61: 'Psychology',
62: 'Sciences of Arts and Letters',
63: 'Sociology',
71: 'Ethics',
72: 'Philosophy',
}
# Execute it once
def get_persons_university():
p_u = {}
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
query = "SELECT thesis.author_id, thesis.university_id, university.name, university.location, person.name FROM thesis, university, person WHERE thesis.university_id = university.id AND thesis.author_id = person.id"
cursor.execute(query)
for thesis in cursor:
p_u[thesis[0]] = {
"university" : {"id" : thesis[1], "name" : thesis[2], "location" : thesis[3]},
"author" : {"name" : thesis[4]}
}
cursor.close()
cnx.close()
json.dump(p_u, open("./cache/persons_university.json", "w"), indent=2)
def load_persons_university():
print "Loading the persons_university cache..."
if not os.path.isfile("./cache/persons_university.json"):
print " - Building the persons_university cache..."
get_persons_university()
p_u = json.load(open("./cache/persons_university.json", "r"))
print "done"
return p_u
def get_persons_id():
p_i = {}
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
query = "SELECT person.id, person.name FROM person"
cursor.execute(query)
for person in cursor:
p_i[person[0]] = person[1]
cursor.close()
cnx.close()
json.dump(p_i, open("./cache/persons_id.json", "w"), indent = 2)
def load_persons_id():
print "Loading the persons_id cache..."
if not os.path.isfile("./cache/persons_id.json"):
print " - Building the persons_id cache..."
get_persons_id()
p_u = json.load(open("./cache/persons_id.json", "r"))
print "done"
return p_u
persons_university = load_persons_university()
persons_id = load_persons_id()
def build_thesis_genealogy():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
query = "SELECT thesis.author_id, advisor.person_id FROM thesis, advisor WHERE thesis.id = advisor.thesis_id"
cursor.execute(query)
G = nx.DiGraph()
for thesis in cursor:
G.add_edge(thesis[1], thesis[0])
i = 0
for n in G.nodes():
try:
node = str(n)
G.node[n]["name"] = persons_id[node]
try:
G.node[n]["university"] = persons_university[node]["university"]["name"]
G.node[n]["location"] = persons_university[node]["university"]["location"]
i += 1
except:
G.node[n]["university"] = "none"
G.node[n]["location"] = "none"
except:
print n
print "Total persons with a location:", i
cursor.close()
cnx.close()
nx.write_gexf(G, "./networks/genealogy.gexf")
return G
def build_panel_network(with_weigh = True):
cnx = mysql.connector.connect(**config)
print "Recovering thesis ids"
cursor = cnx.cursor()
query = "SELECT id FROM thesis"
cursor.execute(query)
thesis_ids = []
for thesis in cursor:
thesis_ids.append(thesis[0])
cursor.close()
print "Creating panel network"
cursor = cnx.cursor()
G = nx.Graph()
for c, thesis_id in enumerate(thesis_ids):
if c % 1000 == 0:
print c, "of", len(thesis_ids)
cursor.execute("SELECT person_id FROM panel_member WHERE thesis_id = " + str(thesis_id))
members = []
for member in cursor:
members.append(member[0])
for i, m1 in enumerate(members):
for m2 in members[i+1:]:
if with_weigh:
if not G.has_edge(m1, m2):
G.add_edge(m1,m2, weight = 1)
else:
G.edge[m1][m2]['weight'] += 1
else:
G.add_edge(m1,m2)
cursor.close()
cnx.close()
nx.write_gexf(G, "./networks/panels.gexf")
return G
def get_first_level_descriptors():
cnx = mysql.connector.connect(**config)
print "Recovering first level descriptors"
cursor = cnx.cursor()
query = "select id, text, code from descriptor where parent_code IS NULL"
cursor.execute(query)
descriptors = {}
for d in cursor:
descriptors[d[2]] = {"id" : d[0], "text" : d[1]}
cursor.close()
cnx.close()
return descriptors
def build_panel_network_by_descriptor(unesco_code):
cnx = mysql.connector.connect(**config)
print "Recovering thesis ids"
cursor = cnx.cursor()
query = """SELECT thesis_id
FROM association_thesis_description, descriptor
WHERE association_thesis_description.descriptor_id = descriptor.id
AND descriptor.code DIV 10000 = """ + str(unesco_code)
cursor.execute(query)
thesis_ids = []
for thesis in cursor:
thesis_ids.append(thesis[0])
cursor.close()
print "Creating panel network"
cursor = cnx.cursor()
G = nx.Graph()
for c, thesis_id in enumerate(thesis_ids):
if c % 1000 == 0:
print c, "of", len(thesis_ids)
cursor.execute("SELECT person_id FROM panel_member WHERE thesis_id = " + str(thesis_id))
members = []
for member in cursor:
members.append(member[0])
for i, m1 in enumerate(members):
for m2 in members[i+1:]:
if not G.has_edge(m1, m2):
G.add_edge(m1,m2, weight = 1)
else:
G.edge[m1][m2]['weight'] += 1
cursor.close()
cnx.close()
nx.write_gexf(G, "./networks/panels-" + str(unesco_code) + ".gexf")
return G
def generate_random_graph(n, m):
print "Building random graph"
G = barabasi_albert_graph(n, m, 10)
return G
def analize_cliques(G):
print "Calculating cliques..."
cliques = nx.find_cliques(G)
print "Analysing the results..."
tot_cliques = 0
tot_size = 0
max_size = 0
min_size = 10000
high_5 = 0
hist_clic = {}
for c in cliques:
tot_cliques += 1
tot_size += len(c)
if len(c) > 5: #5 is the panel size in Spain
high_5 += 1
if len(c) > max_size :
max_size = len(c)
if len(c) < min_size:
min_size = len(c)
if hist_clic.has_key(len(c)):
hist_clic[len(c)] += 1
else:
hist_clic[len(c)] = 1
print "CLIQUES:"
print " - Total cliques:", tot_cliques
print " - Avg cliques size:", tot_size * 1.0 / tot_cliques
print " - Max clique:", max_size
print " - Min clique:", min_size
print " - Cliques with a size higher than 5:", high_5
print " - histogram:", hist_clic
results = {}
results['clique_tot'] = tot_cliques
results['clique_avg'] = tot_size * 1.0 / tot_cliques
results['clique_max'] = max_size
results['clique_min'] = min_size
results['clique_greater_5'] = high_5
results['clique_greater_5_norm'] = high_5 * 1.0 / tot_cliques
#results['clique_histogram'] = hist_clic
return results
def analize_degrees(G):
print "Calculating degrees..."
degrees = nx.degree(G)
hist = nx.degree_histogram(G)
print "DEGREES:"
print " - Max degree:", max(degrees.values())
print " - Min degree:", min(degrees.values())
print " - Avg. degree:", sum(degrees.values()) * 1.0 / len(degrees)
print " - histogram:", hist
results = {}
results['degree_avg'] = sum(degrees.values()) * 1.0 / len(degrees)
results['degree_max'] = max(degrees.values())
results['degree_min'] = min(degrees.values())
#results['degree_histogram'] = hist
return results
def analize_edges(G):
print "Analizing edges..."
min_weight = 10000
max_weight = 0
acum_weight = 0
hist_weight = {}
for e in G.edges(data=True):
acum_weight += e[2]['weight']
if max_weight < e[2]['weight']:
max_weight = e[2]['weight']
if min_weight > e[2]['weight']:
min_weight = e[2]['weight']
if hist_weight.has_key(e[2]['weight']):
hist_weight[e[2]['weight']] += 1
else:
hist_weight[e[2]['weight']] = 1
print "EDGES:"
print " - Max weight:", max_weight
print " - Min weight:", min_weight
print " - Avg weight:", acum_weight * 1.0 / len(G.edges())
print " - histogram:", hist_weight
results = {}
results['weight_avg'] = acum_weight * 1.0 / len(G.edges())
results['weight_max'] = max_weight
results['weight_min'] = min_weight
#results['weight_histogram'] = hist_weight
return results
def analyze_rdn_graph():
G = generate_random_graph(188979, 7) # number of nodes, edges attached per new node
nx.write_gexf(G, "./networks/barabasi_panel.gexf")
print "Nodes:", G.number_of_nodes()
print "Edges:", G.number_of_edges()
analize_cliques(G)
analize_degrees(G)
def analyze_first_level_panels():
results = {}
for d in first_level_topic_list:
print "\n*********DESCRIPTOR: " + first_level_topic_list[d] + "(" + str(d) + ")"
G = build_panel_network_by_descriptor(d)
print "\nDESCRIPTOR: " + first_level_topic_list[d] + "(" + str(d) + ")"
print "Nodes:", G.number_of_nodes()
print "Edges:", G.number_of_edges()
res_clique = analize_cliques(G)
res_degree = analize_degrees(G)
res_weight = analize_edges(G)
d_final = dict(res_clique)
d_final.update(res_degree)
d_final.update(res_weight)
d_final['id'] = d
d_final['avg_clustering'] = nx.average_clustering(G)
results[first_level_topic_list[d]] = d_final
print "Writing json..."
json.dump(results, open('./networks/first_level_panels_analysis.json','w'), indent = 2)
print "Writing csvs..."
df = DataFrame(results)
df.to_csv('./networks/first_level_panels_analysis.csv')
dfinv = df.transpose()
dfinv.to_csv('./networks/first_level_panels_analysis_inv.csv')
def from_json_to_dataframe():
results = json.load(open('./networks/first_level_analysis.json','r'))
df = DataFrame(results)
df.to_csv("panels.csv")
dft = df.transpose()
dft.to_csv("panels_trans.csv")
return df
#df = DataFrame(['id', 'name', 'clique_tot', 'clique_avg', 'clique_max', 'clique_min', 'clique_greater_5', 'degree_max', 'degree_min', 'degree_avg', 'weight_max', 'weight_min', 'weight_avg']);
def panel_repetition_per_advisor():
cnx = mysql.connector.connect(**config)
print "Recovering thesis ids for each advisor..."
cursor = cnx.cursor()
query = "SELECT person_id, thesis_id FROM advisor"
cursor.execute(query)
thesis_advisor = {}
for thesis in cursor:
adv_id = thesis[0]
thesis_id = thesis[1]
if thesis_advisor.has_key(adv_id):
thesis_advisor[adv_id].append(thesis_id)
else:
thesis_advisor[adv_id] = [thesis_id]
cursor.close()
print "Counting repetitions..."
cursor = cnx.cursor()
results = {}
for c, adv in enumerate(thesis_advisor):
if c % 500 == 0:
print c, "of", len(thesis_advisor)
thesis_ids = thesis_advisor[adv]
adv_id = adv
for thesis_id in thesis_ids:
cursor.execute("SELECT person_id FROM panel_member WHERE thesis_id = " + str(thesis_id))
for member in cursor:
if results.has_key(adv_id):
if results[adv_id].has_key(member[0]):
results[adv_id][member[0]] += 1
else:
results[adv_id][member[0]] = 0
else:
results[adv_id] = {member[0] : 0}
cursor.close()
cnx.close()
json.dump(results, open('./networks/repetitions_per_advisor.json', 'w'), indent=2)
print "Procesing total repetitons"
repetitions_per_advisor = {}
for adv in results:
total_rep = 0
for rep in results[adv]:
total_rep += results[adv][rep]
repetitions_per_advisor[adv] = total_rep
return repetitions_per_advisor
def thesis_per_year():
results = {}
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
for year in range(1977,2015):
query = "SELECT count(defense_date) FROM thesis WHERE year(defense_date)=year('" + str(year) + "-01-01')"
cursor.execute(query)
for r in cursor:
results[year] = r[0]
cursor.close()
cnx.close()
return results
def thesis_per_location():
results = {}
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
cursor.execute("select distinct(location) from university")
locations = []
for l in cursor:
locations.append(l[0])
results = {}
for location in locations:
query = "SELECT count(thesis.id) FROM thesis, university WHERE university.location = '" + location + "'"
cursor.execute(query)
for r in cursor:
results[location] = r[0]
cursor.close()
cnx.close()
return results
def advisor_genders_by_topic():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
results = {}
for topic in first_level_topic_list:
print "Topic:", topic
print 'Getting thesis ids for topic...'
thesis_ids = []
cursor.execute("SELECT thesis_id FROM association_thesis_description, descriptor WHERE descriptor.id = association_thesis_description.descriptor_id AND descriptor.code DIV 10000 = " + str(topic))
for t_id in cursor:
thesis_ids.append(t_id)
print 'Number of thesis:', len(thesis_ids)
print 'Counting genders...'
male = 0
female = 0
unknown = 0
for thesis in thesis_ids:
query = "SELECT COUNT(advisor.person_id) FROM advisor, person, thesis WHERE thesis.id = advisor.thesis_id AND person.id = advisor.person_id AND person.gender = 'male' AND thesis.id = " + str(thesis[0])
cursor.execute(query)
for r in cursor:
male += r[0]
query = "SELECT COUNT(advisor.person_id) FROM advisor, person, thesis WHERE thesis.id = advisor.thesis_id AND person.id = advisor.person_id AND person.gender = 'female' AND thesis.id = " + str(thesis[0])
cursor.execute(query)
for r in cursor:
female += r[0]
query = "SELECT COUNT(advisor.person_id) FROM advisor, person, thesis WHERE thesis.id = advisor.thesis_id AND person.id = advisor.person_id AND person.gender = 'none' AND thesis.id = " + str(thesis[0])
cursor.execute(query)
for r in cursor:
unknown += r[0]
if len(thesis_ids) > 0:
results[first_level_topic_list[topic]] = {'male' : male, 'female' : female, 'unknown' : unknown}
cursor.close()
cnx.close()
print "Saving json"
json.dump(results, open('advisor_gender_by_topic.json','w'))
print "Saving csv"
df = DataFrame(results)
df.to_csv("advisor_gender_by_topic.csv")
return results
def analyze_advisor_student_genders():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
print "Recovering advisor-student pairs..."
cursor.execute("SELECT thesis.author_id, advisor.person_id FROM thesis, advisor WHERE thesis.id = advisor.thesis_id")
adv_stu = []
for advisor in cursor:
adv_stu.append([advisor[1], advisor[0]])
print "Recovering genders..."
genders = {}
cursor.execute("SELECT person.id, person.gender FROM person")
for person in cursor:
genders[person[0]] = person[1]
cursor.close()
cnx.close()
print "Counting..."
results = {}
results["MM"] = 0
results["FF"] = 0
results["FM"] = 0
results["MF"] = 0
for pair in adv_stu:
try:
adv_gender = genders[pair[0]]
stu_gender = genders[pair[1]]
except:
adv_gender = 'none'
stu_gender = 'none'
if adv_gender == 'male':
if stu_gender == 'male':
results['MM'] += 1
elif stu_gender == 'female':
results['MF'] += 1
elif adv_gender == 'female':
if stu_gender == 'male':
results['FM'] += 1
elif stu_gender == 'female':
results['FF'] += 1
return results
def analyze_advisor_student_genders_by_topic():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
print "Recovering genders..."
genders = {}
cursor.execute("SELECT person.id, person.gender FROM person")
for person in cursor:
genders[person[0]] = person[1]
topic_genders = json.load(open('advisor_gender_by_topic.json','r'))
topic_gender_pairs = {}
for topic in first_level_topic_list:
print "Topic:", topic
print "Recovering advisor-student pairs..."
query = """ SELECT thesis.author_id, advisor.person_id
FROM thesis, advisor, descriptor, association_thesis_description
WHERE descriptor.id = association_thesis_description.descriptor_id
AND thesis.id = advisor.thesis_id
AND thesis.id = association_thesis_description.thesis_id
AND descriptor.code DIV 10000 = """ + str(topic)
cursor.execute(query)
adv_stu = []
for advisor in cursor:
adv_stu.append([advisor[1], advisor[0]])
if len(adv_stu) > 0:
print "Counting..."
results = {}
results["MM"] = 0
results["FF"] = 0
results["FM"] = 0
results["MF"] = 0
for pair in adv_stu:
try:
adv_gender = genders[pair[0]]
stu_gender = genders[pair[1]]
except:
adv_gender = 'none'
stu_gender = 'none'
if adv_gender == 'male':
if stu_gender == 'male':
results['MM'] += 1
elif stu_gender == 'female':
results['MF'] += 1
elif adv_gender == 'female':
if stu_gender == 'male':
results['FM'] += 1
elif stu_gender == 'female':
results['FF'] += 1
results["MM_norm"] = results["MM"] * 1.0 / topic_genders[str(topic)]['male']
results["FF_norm"] = results["FF"] * 1.0 / topic_genders[str(topic)]['female']
results["FM_norm"] = results["FM"] * 1.0 / topic_genders[str(topic)]['female']
results["MF_norm"] = results["MF"] * 1.0 / topic_genders[str(topic)]['male']
topic_gender_pairs[first_level_topic_list[topic]] = results
cursor.close()
cnx.close()
print "Saving json"
json.dump(topic_gender_pairs, open('gender_pairs_by_topic.json','w'))
print "Saving csv"
df = DataFrame(topic_gender_pairs)
df.to_csv("gender_pairs_by_topic.csv")
return topic_gender_pairs
def count_persons_with_multiple_thesis():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
persons_id = []
cursor.execute("SELECT person.id FROM person")
for person in cursor:
persons_id.append(person[0])
results = {}
histogram = {}
for i, p_id in enumerate(persons_id):
if i % 2000 == 0:
print i, 'of', len(persons_id)
cursor.execute("SELECT COUNT(thesis.id) FROM thesis WHERE thesis.author_id = " + str(p_id))
for r in cursor:
if r[0] > 1:
results[p_id] = r[0]
if histogram.has_key(r[0]):
histogram[r[0]] += 1
else:
histogram[r[0]] = 1
cursor.close()
cnx.close()
print "Writing json..."
json.dump(results, open('multiple_thesis.json','w'))
json.dump(histogram, open('multiple_thesis_hist.json','w'))
return results, histogram
def count_panel_members():
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
print "Getting thesis ids..."
cursor.execute("SELECT id FROM thesis")
thesis_ids = []
for r in cursor:
thesis_ids.append(r[0])
results = {}
print "Counting panel members"
for i, t_id in enumerate(thesis_ids):
if i % 2000 == 0:
print i, 'of', len(thesis_ids)
cursor.execute("SELECT count(panel_member.person_id) FROM panel_member WHERE panel_member.thesis_id = " + str(t_id))
for r in cursor:
if results.has_key(r[0]):
results[r[0]] += 1
else:
results[r[0]] = 1
cursor.close()
cnx.close()
return results
def create_gender_pie():
male = 221579.0
female = 80363.0
none = 21428.0
total = male + female + none
labels = ['Male', 'Female', 'Unknown']
sizes = [male/total*100, female/total*100, none/total*100]
colors = ['lightblue', 'pink', 'gold']
plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%')
plt.axis('equal')
plt.show()
def create_advisor_gender_pie():
male = 165506.0
female = 37012.0
none = 11229.0
total = male + female + none
labels = ['Male', 'Female', 'Unknown']
sizes = [male/total*100, female/total*100, none/total*100]
colors = ['lightblue', 'pink', 'gold']
plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%')
plt.axis('equal')
plt.show()
def create_student_gender_pie():
male = 115423.0
female = 52184.0
none = 9742.0
total = male + female + none
labels = ['Male', 'Female', 'Unknown']
sizes = [male/total*100, female/total*100, none/total*100]
colors = ['lightblue', 'pink', 'gold']
plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%')
plt.axis('equal')
plt.show()
def create_panel_gender_pie():
male = 674748.0
female = 139170.0
none = 44765.0
total = male + female + none
labels = ['Male', 'Female', 'Unknown']
sizes = [male/total*100, female/total*100, none/total*100]
colors = ['lightblue', 'pink', 'gold']
plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%')
plt.axis('equal')
plt.show()
def create_number_of_thesis_bar():
values = [1552, 126, 33, 7, 2]
fig, ax = plt.subplots()
index = np.arange(len(values))
width = 0.30
plt.bar(index, values)
plt.xlabel('Number of thesis')
plt.ylabel('Total persons')
plt.title('Number of thesis by person (> 2)')
plt.xticks(index + width, ('2', '3', '4', '5', '6'))
plt.legend()
plt.tight_layout()
plt.show()
if __name__=='__main__':
print "starting"
print create_number_of_thesis_bar()
print "fin" | apache-2.0 |
lzyeasyboy/tushare | tushare/datayes/fundamental.py | 16 | 18026 | # -*- coding:utf-8 -*-
"""
DataYes (通联数据)
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Fundamental():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def FdmtBS(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the consolidated balance sheet template defined under the 2007 new accounting standards, collects the balance sheet data of every accounting period from the periodic reports of Shanghai- and Shenzhen-listed companies since 2007;
2. Only consolidated statement data are collected, including closing (end-of-period) and opening (beginning-of-period) figures;
3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
4. Amounts in this table are in RMB yuan;
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBS%(reportType, secID, ticker,
beginDate, endDate, publishDateBegin,
publishDateEnd, field))
return _ret_data(code, result)
def FdmtBSBank(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the banking-industry balance sheet template defined under the 2007 new accounting standards, collects all balance sheet data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed banks);
2. Only consolidated statement data are collected, including closing and opening figures;
3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
4. Amounts in this table are in RMB yuan;
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBSBANK%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtBSSecu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the securities-industry balance sheet template defined under the 2007 new accounting standards, collects all balance sheet data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed securities firms);
2. Only consolidated statement data are collected, including closing and opening figures;
3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
4. Amounts in this table are in RMB yuan;
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBSSECU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin,
publishDateEnd, field))
return _ret_data(code, result)
def FdmtBSIndu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the general industrial and commercial balance sheet template defined under the 2007 new accounting standards, collects all balance sheet data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly general industrial and commercial listed companies);
2. Only consolidated statement data are collected, including closing and opening figures;
3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
4. Amounts in this table are in RMB yuan;
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBSINDU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtBSInsu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the insurance-industry balance sheet template defined under the 2007 new accounting standards, collects all balance sheet data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed insurance companies);
2. Only consolidated statement data are collected, including closing and opening figures;
3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
4. Amounts in this table are in RMB yuan.
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTBSINSU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCF(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the consolidated cash flow statement template defined under the 2007 new accounting standards, collects the cash flow statement data of every accounting period from the periodic reports of Shanghai- and Shenzhen-listed companies since 2007;
2. Only consolidated statement data are collected, including current-period and prior-period figures;
3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
4. Amounts in this table are in RMB yuan;
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCF%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCFBank(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the banking-industry cash flow statement template defined under the 2007 new accounting standards, collects all cash flow statement data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed banks); 2. Only consolidated statement data are collected, including current-period and prior-period figures; 3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed; 4. Amounts in this table are in RMB yuan; 5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCFBANK%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCFSecu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the securities-industry cash flow statement template defined under the 2007 new accounting standards, collects all cash flow statement data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed securities firms);
2. Only consolidated statement data are collected, including current-period and prior-period figures;
3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
4. Amounts in this table are in RMB yuan;
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCFSECU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCFIndu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the general industrial and commercial cash flow statement template defined under the 2007 new accounting standards, collects all cash flow statement data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly general industrial and commercial listed companies);
2. Only consolidated statement data are collected, including current-period and prior-period figures;
3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
4. Amounts in this table are in RMB yuan;
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCFINDU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtCFInsu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the insurance-industry cash flow statement template defined under the 2007 new accounting standards, collects all cash flow statement data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed insurance companies);
2. Only consolidated statement data are collected, including current-period and prior-period figures;
3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
4. Amounts in this table are in RMB yuan;
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTCFINSU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtIS(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the consolidated income statement template defined under the 2007 new accounting standards, collects the income statement data of every accounting period from the periodic reports of Shanghai- and Shenzhen-listed companies since 2007;
2. Only consolidated statement data are collected, including current-period and prior-period figures;
3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
4. Amounts in this table are in RMB yuan;
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTIS%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtISBank(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the banking-industry income statement template defined under the 2007 new accounting standards, collects all income statement data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed banks);
2. Only consolidated statement data are collected, including current-period and prior-period figures;
3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
4. Amounts in this table are in RMB yuan;
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISBANK%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtISSecu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the securities-industry income statement template defined under the 2007 new accounting standards, collects all income statement data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed securities firms);
2. Only consolidated statement data are collected, including current-period and prior-period figures;
3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
4. Amounts in this table are in RMB yuan;
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISSECU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtISIndu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the general industrial and commercial income statement template defined under the 2007 new accounting standards, collects all income statement data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly general industrial and commercial listed companies);
2. Only consolidated statement data are collected, including current-period and prior-period figures;
3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
4. Amounts in this table are in RMB yuan;
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISINDU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtISInsu(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
1. Based on the insurance-industry income statement template defined under the 2007 new accounting standards, collects all income statement data disclosed with this template in the periodic reports of Shanghai- and Shenzhen-listed companies since 2007 (mainly listed insurance companies);
2. Only consolidated statement data are collected, including current-period and prior-period figures;
3. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
4. Amounts in this table are in RMB yuan;
5. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISINSU%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtEe(self, reportType='', secID='', ticker='', beginDate='', endDate='',
publishDateBegin='', publishDateEnd='', field=''):
"""
Retrieves the key financial indicators and related data from the earnings flash reports disclosed by listed companies for fiscal year 2007 and later,
including current-period figures, the same period of the previous year, and the change of the current period relative to the beginning of the period. Data are updated each quarter when the stock exchanges publish the related announcements,
and also when a company releases this information at IPO. Exchange-disclosed data are updated before 9:00 each day; announcements published at midday are updated before 12:45 each day.
"""
code, result = self.client.getData(vs.FDMTEE%(reportType, secID, ticker,
beginDate, endDate,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtEf(self, reportType='', secID='', ticker='', beginDate='', endDate='',
forecastType='', publishDateBegin='', publishDateEnd='', field=''):
"""
1. Retrieves the forecasts for the next reporting period disclosed in company announcements for fiscal year 2007 and later: revenue, net profit, net profit attributable to the parent company, basic earnings per share, and their expected ranges of change.
2. Forecasts of operating results are generally given as an upper and a lower bound; the upper bound is the disclosed value with the larger absolute value for that item, and the lower bound is the disclosed value with the smaller absolute value.
3. A positive value means the company expects a profit, a negative value an expected loss; if the upper and lower bounds have different signs, whether the company will be profitable is still uncertain.
4. The forecast type follows the type stated in the announcement text; if the announcement does not state a type, it is inferred from the data.
5. Data are updated each quarter when the stock exchanges publish the related announcements, and also when a company releases this information at IPO. Exchange-disclosed data are updated before 9:00 each day; announcements published at midday are updated before 12:45 each day.
"""
code, result = self.client.getData(vs.FDMTEF%(reportType, secID, ticker,
beginDate, endDate, forecastType,
publishDateBegin, publishDateEnd, field))
return _ret_data(code, result)
def FdmtISLately(self, field=''):
"""
1. Retrieves the most recently disclosed data of listed companies, based on the consolidated income statement template defined under the 2007 new accounting standards; only consolidated statement data are collected;
2. Corrections and adjustments that listed companies publish to their financial statements are also collected and displayed;
3. Amounts in this table are in RMB yuan;
4. Updated quarterly.
"""
code, result = self.client.getData(vs.FDMTISLATELY%(field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
| bsd-3-clause |
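Every wrapper above interpolates its arguments into a URL template taken from tushare.util.vars, fetches the payload through Client.getData, and passes the raw CSV text to _ret_data. A minimal sketch of that last step, with a hard-coded string standing in for the API response (no DataYes token or network access assumed; the column names here are made up for illustration):

import pandas as pd
from io import StringIO   # the original Python 2 code uses pandas.compat.StringIO

payload = "ticker,endDate,value\n600848,2015-06-30,123456789.0\n"   # pretend API response
code = 200

df = pd.read_csv(StringIO(payload)) if code == 200 else None
print(df)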
yhalpern/anchorExplorer | Backend.py | 2 | 27593 | from Tkinter import *
import os
from Anchors import Anchor
import random
from copy import deepcopy
import tkFileDialog
import itertools
from multiprocessing import Pool
import ttk
import shelve
from collections import defaultdict
import time
import scipy.sparse as sparse
import xml.etree.ElementTree as ET
from Logging import LogElement
from collections import namedtuple
from copy import *
import cPickle as pickle
import string
import numpy as np
from helpers import *
from scipy.sparse import csr_matrix
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.grid_search import GridSearchCV
import sklearn.metrics as metrics
def getPatient(v):
visitShelf = shelve.open('visitShelf')
pat = visitShelf[v]
pat['anchors'] = set()
return v, pat
def randomString(length=16):
return "".join([random.choice(string.letters) for _ in xrange(length)])
def noPunctuation(w):
return len(set("{}()") & set(w[0]))==0
def readLabels(filename):
labels = {}
f = file(filename)
for l in f:
id, label = l.split()
labels[id] = int(label)
return labels
def readAnchors(filename, parent, max_anchors=-1):
Tree = ET.parse(filename)
root = Tree.getroot()
anchors = {}
for concept in root.findall('.//concept'):
name = concept.attrib['name']
print 'initializing concept', name
saveloc = concept.attrib['saveloc']+'/'+name+'.pk'
label_saveloc = concept.attrib['saveloc']+'/'+name+'.labels.pk'
flags_saveloc = concept.attrib['saveloc']+'/'+name+'.flags.pk'
labels = None
try:
anch = pickle.load(file(saveloc))
except:
nodes = concept.text.strip().split('|')
edges = []
anch = [Anchor(n, [n], []) for n in nodes if len(n)]
try:
labels = pickle.load(file(label_saveloc))
except:
labels = {}
try:
flags = pickle.load(file(flags_saveloc))
except:
print 'could not load flags from ', flags_saveloc
flags = {}
anchors[name] = Concept(name, anch, parent=parent, saveloc=saveloc, labels=labels, flags=flags)
try:
os.makedirs(parent.saveloc)
except:
pass
for loc in os.listdir(parent.saveloc):
if '.labels' in loc:
continue
if '.svn' in loc:
continue
if '.flags' in loc:
continue
if 'elkan' in loc:
continue
if '.eval' in loc:
continue
if '.weights' in loc:
continue
name = loc.split('/')[-1].replace('.pk', '')
if not name in anchors:
anch = pickle.load(file(parent.saveloc+'/'+loc))
try:
labels = pickle.load(file(parent.saveloc+'/'+loc.replace('.pk', '.labels.pk')))
except:
labels = {}
anchors[name] = Concept(name, anch, parent=parent, saveloc=parent.saveloc+'/'+name+'.pk', labels=labels)
print 'anchors initialized', anchors
return anchors
def update_sparse_X(X):
return csr_matrix(X)
class Concept:
def __init__(self, name, anchors, parent=None, description="", saveloc='', labels=None, flags=None):
self.anchors = set(anchors)
self.evaluators = set()
self.name = name
self.id = randomString(16)
self.anchoredPatients = {}
self.evaluatorPatients = {}
self.recall = 0.8
self.pos_patients = []
self.description=description
self.human_labels = {}
self.evaluations = []
self.online=True
if labels is None:
self.labels = {}
else:
self.labels = labels
for pid,label in self.labels.items():
self.human_labels[pid] = label
if flags is None:
self.flagged_patients = {}
else:
self.flagged_patients = flags
self.sparse_X = {}
self.sparse_X_csr = None
self.masked_elements = defaultdict(set)
self.Y = []
self.Y_counts = []
self.Y_negCounts = []
self.log = []
self.vocab = None
self.inv_vocab = None
self.display_vocab = None
self.patient_index = None
self.patient_list = None
self.estimator=None
self.ranking=None
self.recentPatients = set()
self.initialized = False
#state that is not preserved
self.pool=Pool(2)
self.wordShelf=None
self.backend = parent
self.saveloc=saveloc
self.label_saveloc = saveloc.replace('.pk', '.labels.pk')
self.flags_saveloc = saveloc.replace('.pk', '.flags.pk')
self.eval_saveloc = saveloc.replace('.pk', '.eval.pk')
try:
self.evaluators = pickle.load(file(self.eval_saveloc))
except:
pass
self.dumpAnchors()
self.dumpLabels()
def set_name(self, new_name):
self.saveloc = self.saveloc.replace(self.name, new_name)
self.label_saveloc = self.label_saveloc.replace(self.name, new_name)
for pid in union(self.anchoredPatients.values()):
if pid in self.backend.patients:
self.backend.patients[pid]['anchors'].remove(self.name)
self.backend.patients[pid]['anchors'].add(new_name)
self.name = new_name
def dumpAnchors(self):
try:
pickle.dump(self.anchors, file(self.saveloc, 'w'))
except:
print 'warning could not save to ', self.saveloc
def dumpLabels(self):
try:
pickle.dump(self.human_labels, file(self.label_saveloc, 'w'))
except:
print 'warning could not save to ', self.saveloc
def dumpFlags(self):
try:
print 'dumping flags', self.flagged_patients.items()
pickle.dump(self.flagged_patients, file(self.flags_saveloc, 'w'))
except:
print 'warning could not save to ', self.flags_saveloc
def dumpEvaluators(self):
try:
print 'dumping evaluators', self.evaluators
pickle.dump(self.evaluators, file(self.eval_saveloc, 'w'))
except:
print 'warning could not save to ', self.eval_saveloc
def dumpDecisionRule(self):
loc = self.saveloc.replace('.pk', '.weights.pk')
try:
pickle.dump(zip(self.vocab, self.estimator.coef_), file(loc, 'w'))
except Exception, e:
print 'could not dump rule to %s: %s' % (loc, e)
pass
def saveState(self):
print 'saving state'
f = file(self.saveloc, 'w')
state = [self.anchors,
self.name,
self.id,
self.anchoredPatients,
self.description,
self.human_labels,
self.sparse_X,
self.sparse_X_csr,
self.masked_elements,
self.Y,
self.Y_counts,
self.log,
self.patient_index,
self.patient_list,
self.estimator,
self.ranking,
self.recentPatients,
]
pickle.dump(state, f)
f.close()
def loadState(self, parent, wordshelf):
try:
assert 0
f = file(self.saveloc)
except:
print 'could not load from pickle'
return False
[self.anchors,
self.name,
self.id,
self.anchoredPatients,
self.description,
self.human_labels,
self.sparse_X,
self.sparse_X_csr,
self.masked_elements,
self.Y,
self.Y_counts,
self.log,
self.vocab,
self.inv_vocab,
self.display_vocab,
self.patient_index,
self.patient_list,
self.estimator,
self.ranking,
self.recentPatients,
] = pickle.load(f)
f.close()
self.parent = parent
self.wordshelf = wordshelf
return True
def initPatients(self, patients, wordShelf, vocab, inv_vocab, display_vocab):
print 'concept initialize patients'
self.vocab, self.inv_vocab, self.display_vocab = vocab, inv_vocab, display_vocab
self.wordShelf = wordShelf
for anchor in self.anchors:
for a in anchor.getMembers():
a = a.lstrip('!')
if a in wordShelf:
self.anchoredPatients[a] = wordShelf[a]
else:
print "warning: word", a, "not indexed!"
for pid in union(self.anchoredPatients.values()):
if pid in patients:
patients[pid]['anchors'].add(self.name)
def initLog(self):
self.log.append(LogElement('init'))
def done_updating(self, result):
self.sparse_X_csr = result
print "done updating"
def configureLearnButton(self, state):
self.backend.parent.anchorDisplay.learnButton.configure({'state':state})
def initRepresentation(self, patients, sparse_X):
print 'init representation'
print >> self.backend.parent.logfile, str(time.time())+' init representation', self.name
self.patient_index = dict(zip([pat['index'] for pat in patients], xrange(len(patients))))
self.patient_list = patients
print len(patients)
self.sparse_X = sparse_X.copy()
self.sparse_X_csr = None
self.sparse_X_csr_eval = None
if self.online:
self.pool.apply_async(update_sparse_X, args=[self.sparse_X], callback=self.done_updating)
self.sparse_X_csr_eval = csr_matrix(self.backend.sparse_X_validate)
else:
self.done_updating(update_sparse_X(self.sparse_X))
self.Y = [0]*len(patients)
self.Y_counts = [0]*len(patients)
self.Y_negCounts = [0]*len(patients)
for anchor in self.anchors:
self.addAnchor(anchor)
for evaluator in self.evaluators:
self.addEvaluator(evaluator)
def addAnchor(self, new_anchor):
if new_anchor.id == '':
return
print >> self.backend.parent.logfile, str(time.time())+' added anchor', new_anchor.id, self.name
print 'new anchor', new_anchor.id
self.backend.parent.logfile.flush()
self.backend.doIndexing(new_anchor)
assert type(new_anchor) == Anchor, type(new_anchor)
self.anchors.add(new_anchor)
newly_anchored_patients = set()
for a in new_anchor.getMembers():
a = a.lstrip('!')
if a in self.wordShelf:
newly_anchored_patients |= set(self.wordShelf[a])
print 'anchor component', a, len(set(self.wordShelf[a])), 'total', len(newly_anchored_patients)
else:
print "anchor", a, "not indexed!"
sys.exit()
self.anchoredPatients[new_anchor.id] = newly_anchored_patients
self.recentPatients = newly_anchored_patients
print new_anchor in self.vocab
print new_anchor in self.inv_vocab
for pid in self.recentPatients:
try:
i = self.patient_index[pid]
except:
continue
self.Y[i]=1
self.Y_counts[i] += 1
if new_anchor.id[0] == '!':
self.Y_negCounts[i] += 1
self.Y[i] = 0
continue
for a in new_anchor.getExclusions():
if not a in self.inv_vocab:
continue
j = self.inv_vocab[a]
if self.sparse_X[i,j] > 0:
self.masked_elements[j].add(i)
self.sparse_X[i,j] = 0
#self.configureLearnButton('disabled')
if self.online:
self.pool.apply_async(update_sparse_X, args=[self.sparse_X], callback=self.done_updating)
else:
self.done_updating(update_sparse_X(self.sparse_X))
self.dumpAnchors()
def addEvaluator(self, new_anchor):
print >> self.backend.parent.logfile, str(time.time())+' added evaluator', new_anchor.id, self.name
print 'new evaluator', new_anchor.id
self.backend.parent.logfile.flush()
self.backend.doIndexing(new_anchor)
assert type(new_anchor) == Anchor, type(new_anchor)
self.evaluators.add(new_anchor)
newly_anchored_patients = set()
for a in new_anchor.getMembers():
if a in self.wordShelf:
newly_anchored_patients |= (set(self.wordShelf[a.lstrip('!')]) & self.backend.validate_patient_set)
print len(set(self.wordShelf[a])), 'intersect', len(self.backend.validate_patient_set), '=', len(newly_anchored_patients)
#print set(self.wordShelf[a])
#print self.backend.validate_patient_set
else:
print "anchor", a, "not indexed!"
sys.exit()
self.evaluatorPatients[new_anchor.id] = newly_anchored_patients
self.dumpEvaluators()
try:
self.do_evaluation()
except:
pass
def removeEvaluator(self, anchorid):
print >> self.backend.parent.logfile, str(time.time())+' removed evaluator', anchorid, self.name
self.backend.parent.logfile.flush()
#find an anchor with the same name
print 'removing id', anchorid
for anchor in [a for a in self.evaluators if a.id == anchorid]:
print 'removing anchor', anchorid
self.evaluators.remove(anchor)
self.evaluatorPatients[anchorid] = set()
self.dumpEvaluators()
def do_evaluation(self):
patients = []
print 'there are', len(union(self.evaluatorPatients.values())), "evaluator patients"
for pid in union(self.evaluatorPatients.values()):
print pid, 'is a positive case'
patients.append((self.ranking[pid], pid))
patients.sort()
self.threshold = patients[int((1-self.recall)*len(patients))][0]
self.pos_patients = patients[int((1-self.recall)*len(patients)):]
random.shuffle(self.pos_patients)
self.targets = [p[1] for p in self.pos_patients[:10]]
print "evaluated!"
#print 'precision:', self.prec
print 'recall:', self.recall
print 'threshold', self.threshold
def get_precision(self):
total = 0
pos = 0
print 'getting precision'
for r,pid in self.pos_patients:
if pid in self.human_labels:
if self.human_labels[pid] > 0:
pos += 1
total += 1
elif self.human_labels[pid] < 0:
total += 1
if total == 0:
return '?'
if all([pid in self.human_labels for pid in self.targets]):
print "complete evaluation!"
self.evaluations.append(pos/float(total))
return str(pos) + '/' + str(total)
def get_recall(self):
return self.recall
def removeAnchor(self, anchorid):
print >> self.backend.parent.logfile, str(time.time())+' removed anchor', anchorid, self.name
self.backend.parent.logfile.flush()
#find an anchor with the same name
print 'removing id', anchorid
print 'here are ids', [a.id for a in self.anchors]
for anchor in [a for a in self.anchors if a.id == anchorid]:
print 'removing anchor', anchorid
self.anchors.remove(anchor)
anchored_patients = self.anchoredPatients[anchor.id]
for pid in anchored_patients:
try:
i = self.patient_index[pid]
except:
continue
self.Y_counts[i] -= 1
if anchor.id[0] == '!':
self.Y_negCounts[i] -= 1
assert self.Y_counts[i] >= 0, "Y_counts negative?"
assert self.Y_negCounts[i] >= 0, "Y_negCounts negative?"
self.Y[i]= int(self.Y_counts[i] > 0 and self.Y_negCounts[i] == 0)
for a in anchor.getExclusions():
if not a in self.inv_vocab:
continue
j = self.inv_vocab[a]
if i in self.masked_elements[j]:
self.sparse_X[i,j] = 1
self.masked_elements[j].remove(i)
#self.configureLearnButton('disabled')
self.pool.apply_async(update_sparse_X, args=[self.sparse_X], callback=self.done_updating)
self.dumpAnchors()
def doLearning(self):
C = [10**(k) for k in xrange(-4,4)]
params = [{'C':C, 'penalty':['l1'],}]
print "learning!"
print >> self.backend.parent.logfile, str(time.time())+' learning' , self.name
self.backend.parent.logfile.flush()
s = time.time()
if self.online:
#Learner=SGDClassifier(loss='log', alpha=0.0001)
Learner=GridSearchCV(LogisticRegression(), params, cv=3, scoring='log_loss')
else:
Learner=GridSearchCV(LogisticRegression(), params, cv=3, scoring='log_loss')
X = self.sparse_X_csr
while X is None:
time.sleep(1)
print 'waiting for sparse csr'
X = self.sparse_X_csr
print 'transform', time.time() -s
print 'pos examples', sum(self.Y)
print 'pos features', X.sum()
try:
Learner.fit(X, self.Y)
print 'best params', Learner.best_params_
print 'grid scores', Learner.grid_scores_
Learner = Learner.best_estimator_
except:
print "could not learn!"
self.estimator = Learner
self.dumpDecisionRule()
print 'fit', time.time() -s
self.predictions = self.sparse_X_csr * Learner.coef_.T + Learner.intercept_
print 'predict', time.time() -s
self.predictions = np.exp(self.predictions) / (1+np.exp(self.predictions))
print 'scale', time.time() -s
self.eval_predictions = self.sparse_X_csr_eval * Learner.coef_.T + Learner.intercept_
self.eval_predictions = np.exp(self.eval_predictions) / (1+np.exp(self.eval_predictions))
self.ranking = zip([pat['index'] for pat in self.patient_list], np.ravel(self.predictions).tolist())
self.ranking += zip(self.backend.validate_patient_ids, np.ravel(self.eval_predictions).tolist())
self.ranking = dict(self.ranking)
print 'rank', time.time() -s
print "done"
try:
self.do_evaluation()
print 'evaluating new model'
except:
pass
def getSuggestions(self):
suggestions = []
try:
return filter(noPunctuation, sorted(zip(self.vocab, self.estimator.coef_[0]), key=lambda e: e[1], reverse=True))
except:
return []
def tag_patient(self, patid, tagval):
if tagval == 0:
if patid in self.human_labels:
del self.human_labels[patid]
else:
self.human_labels[patid] = tagval
print >> self.backend.parent.logfile, str(time.time())+' tagged patient', self.name, patid, tagval
self.backend.parent.logfile.flush()
self.dumpLabels()
class Backend:
def __init__(self, parent, loadfile):
self.settings = parent.settings
self.parent = parent
self.concepts = {}
self.patients = {}
self.validate_patients = set()
self.visitShelf = None
self.wordShelf = None
self.patientList = []
self.patientIndex = {}
self.visitIDs = None
self.sparse_X = None
self.saveloc = self.settings.find('anchors').attrib['loc']
print "loading vocab"
self.vocab, self.inv_vocab, self.display_vocab = pickle.load(file(self.settings.find('./vocab').attrib['src']))
print "done"
if not loadfile:
print 'init patients'
self.initPatients()
print 'init anchors'
self.initAnchors()
print 'done'
else:
print "loading file", loadfile
self.doLoad(loadfile)
def doIndexing(self, anchor):
for a in anchor.getMembers():
a = a.lstrip('!')
if not a in self.wordShelf:
print 'indexing', a
split_a = a.split()
split_a_set = set(split_a)
indexed = set()
if len(split_a) > 1: #only index compound words
print a, 'is a compound word'
for p in self.patientList + self.validate_patient_list:
if split_a_set.issubset(set(p['Text'].split())):
print "it is!"
for f in p.keys():
if not 'parsed' in f:
continue
for i in xrange(len(p[f])-len(split_a)):
match = True
for j in xrange(len(split_a)):
print 'compare', split_a[j], p[f][i+j]['repr']
if not split_a[j] in p[f][i+j]['repr']:
match = False
break
if match:
print 'match!', p['index']
indexed.add(p['index'])
for j in xrange(len(split_a)):
p[f][i+j]['repr'].append(a)
self.visitShelf[p['index']] = p
print 'indexed!', p['index']
self.wordShelf[a] = indexed
self.visitShelf.sync()
self.wordShelf.sync()
print 'done'
def getActiveConcept(self):
return self.concepts[self.parent.currentConcept]
def initPatients(self, patientSet="train"):
visitIDs = file(self.settings.find('./patients').attrib['src'])
self.visitShelf = shelve.open(self.settings.find('./patients').attrib['shelf'])
self.wordShelf = shelve.open(self.settings.find('./vocab').attrib['shelf'])
start = int(filter(lambda s: s.attrib['name'] == "train", self.settings.findall('./patientSets/set'))[0].attrib['start'])
end = int(filter(lambda s: s.attrib['name'] == "train", self.settings.findall('./patientSets/set'))[0].attrib['end'])
visit_ids = [z.strip() for z in visitIDs.readlines()[start:end]]
self.visitIDs = visit_ids
print "reading in patients", len(visit_ids)
print 'from shelve'
sparse_X = []
s = time.time()
for i,v in enumerate(self.visitIDs):
if i%1000 == 0:
print i, time.time() - s
if i > end:
break
pat = self.visitShelf[v]
pat['anchors'] = set()
self.patients[v] = pat
sparse_X.append(pat['sparse_X'])
#print self.patients.keys()
self.sparse_X = sparse.vstack(sparse_X, 'lil')
self.train_patient_ids = visit_ids
self.patientList = [self.patients[v] for v in self.visitIDs]
self.patientIndex = dict(zip([pat['index'] for pat in self.patientList], xrange(len(self.patientList))))
visitIDs.seek(0)
start = int(filter(lambda s: s.attrib['name'] == "validate", self.settings.findall('./patientSets/set'))[0].attrib['start'])
end = int(filter(lambda s: s.attrib['name'] == "validate", self.settings.findall('./patientSets/set'))[0].attrib['end'])
visit_ids = [z.strip() for z in visitIDs.readlines()[start:end]]
self.validate_patient_set = set(visit_ids)
self.validate_patient_ids = visit_ids
self.validate_patient_list = []
print "reading in validate patients", len(visit_ids)
print 'from shelve'
sparse_X_validate = []
s = time.time()
for i,v in enumerate(visit_ids):
if i%1000 == 0:
print i, time.time() - s
if i > end:
break
pat = self.visitShelf[v]
pat['anchors'] = set()
self.patients[v] = pat
self.validate_patient_list.append(pat)
sparse_X_validate.append(pat['sparse_X'])
self.sparse_X_validate = sparse.vstack(sparse_X_validate, 'lil')
def initAnchors(self):
conceptList = self.parent.conceptListbox
anchorfilename = self.settings.find('./anchors').attrib['src']
self.concepts = readAnchors(anchorfilename, self, 0)
for concept in sorted(self.concepts.values(), key=lambda c: c.name):
conceptList.insertConcept(concept.name, concept.id)
def initConcept(self, concept, online=True):
concept = self.concepts[concept]
concept.online = online
if concept.initialized == True:
return True
if not concept.loadState(self, self.wordShelf):
for a in concept.anchors:
self.doIndexing(a)
concept.initPatients(self.patients, self.wordShelf, self.vocab, self.inv_vocab, self.display_vocab)
concept.initLog()
concept.initRepresentation(self.patientList, self.sparse_X)
try:
concept.doLearning()
except:
print "could not learn concept"
#concept.saveState()
else:
print 'loaded from pickle'
concept.initialized=True
self.parent.conceptListbox.activateConcept(concept.name)
return True
def newConcept(self, name):
concept = Concept(name, [], parent=self, saveloc=self.saveloc+'/'+name+'.pk')
self.concepts[name] = concept
self.parent.conceptListbox.insertConcept(concept.name, concept.id)
def delete_concept(self, name):
try:
del self.concepts[name]
print >> self.parent.logfile, str(time.time())+' deleted concept', name
except:
print 'could not delete concept', name
try:
os.remove(self.saveloc +'/'+name+'.pk')
os.remove(self.saveloc +'/'+name+'.labels.pk')
except:
print 'could not delete files for concept', name
def rename_concept(self, oldname, newname):
print 'renaming', oldname, 'as', newname
self.concepts[newname] = self.concepts[oldname]
self.concepts[oldname] = None
self.concepts[newname].set_name(newname)
print >> self.parent.logfile, str(time.time())+' renamed concept', oldname, newname
try:
os.rename(self.saveloc +'/'+oldname+'.pk', self.saveloc+'/'+newname+'.pk')
os.rename(self.saveloc +'/'+oldname+'.labels.pk', self.saveloc+'/'+newname+'.labels.pk')
except:
print 'could not move files for concept', oldname, 'to', newname
| bsd-2-clause |
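A condensed sketch of the learning step in Concept.doLearning above: a sparse bag-of-words matrix, noisy anchor-derived labels, and an L1-penalised logistic regression tuned by grid search over C. The data here are toy random draws, the scoring choice of the real code is omitted, and sklearn.grid_search has since moved to sklearn.model_selection:

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

rng = np.random.RandomState(0)
X = csr_matrix(rng.binomial(1, 0.05, size=(200, 50)))   # toy binary features
y = rng.binomial(1, 0.3, size=200)                      # toy anchor labels

params = {'C': [10 ** k for k in range(-4, 4)], 'penalty': ['l1']}
search = GridSearchCV(LogisticRegression(solver='liblinear'), params, cv=3)
search.fit(X, y)

best = search.best_estimator_
scores = X @ best.coef_.T + best.intercept_             # decision values, as in doLearning
probs = 1.0 / (1.0 + np.exp(-scores))                   # logistic link used for the ranking
print(search.best_params_, probs.shape)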
PatrickOReilly/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 94 | 10801 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
# Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
def test_same_multiple_output_sparse_dense():
for normalize in [True, False]:
l = ElasticNet(normalize=normalize)
X = [[0, 1, 2, 3, 4],
[0, 2, 5, 8, 11],
[9, 10, 11, 12, 13],
[10, 11, 12, 13, 14]]
y = [[1, 2, 3, 4, 5],
[1, 3, 6, 9, 12],
[10, 11, 12, 13, 14],
[11, 12, 13, 14, 15]]
ignore_warnings(l.fit)(X, y)
sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1)
predict_dense = l.predict(sample)
l_sp = ElasticNet(normalize=normalize)
X_sp = sp.coo_matrix(X)
ignore_warnings(l_sp.fit)(X_sp, y)
sample_sparse = sp.coo_matrix(sample)
predict_sparse = l_sp.predict(sample_sparse)
assert_array_almost_equal(predict_sparse, predict_dense)
| bsd-3-clause |
Unidata/MetPy | dev/userguide/startingguide-1.py | 4 | 1432 | import matplotlib.pyplot as plt
import numpy as np
import metpy.calc as mpcalc
from metpy.plots import SkewT
from metpy.units import units
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig)
# Create arrays of pressure, temperature, dewpoint, and wind components
p = [902, 897, 893, 889, 883, 874, 866, 857, 849, 841, 833, 824, 812, 796, 776, 751,
727, 704, 680, 656, 629, 597, 565, 533, 501, 468, 435, 401, 366, 331, 295, 258,
220, 182, 144, 106] * units.hPa
t = [-3, -3.7, -4.1, -4.5, -5.1, -5.8, -6.5, -7.2, -7.9, -8.6, -8.9, -7.6, -6, -5.1,
-5.2, -5.6, -5.4, -4.9, -5.2, -6.3, -8.4, -11.5, -14.9, -18.4, -21.9, -25.4,
-28, -32, -37, -43, -49, -54, -56, -57, -58, -60] * units.degC
td = [-22, -22.1, -22.2, -22.3, -22.4, -22.5, -22.6, -22.7, -22.8, -22.9, -22.4,
-21.6, -21.6, -21.9, -23.6, -27.1, -31, -38, -44, -46, -43, -37, -34, -36,
-42, -46, -49, -48, -47, -49, -55, -63, -72, -88, -93, -92] * units.degC
# Calculate parcel profile
prof = mpcalc.parcel_profile(p, t[0], td[0]).to('degC')
u = np.linspace(-10, 10, len(p)) * units.knots
v = np.linspace(-20, 20, len(p)) * units.knots
skew.plot(p, t, 'r')
skew.plot(p, td, 'g')
skew.plot(p, prof, 'k') # Plot parcel profile
skew.plot_barbs(p[::5], u[::5], v[::5])
skew.ax.set_xlim(-50, 15)
skew.ax.set_ylim(1000, 100)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
plt.show() | bsd-3-clause |
EmreAtes/spack | var/spack/repos/builtin/packages/py-seaborn/package.py | 5 | 2086 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PySeaborn(PythonPackage):
"""Seaborn: statistical data visualization.
Seaborn is a library for making attractive and informative statistical
graphics in Python. It is built on top of matplotlib and tightly
integrated with the PyData stack, including support for numpy and pandas
data structures and statistical routines from scipy and statsmodels."""
homepage = "http://seaborn.pydata.org/"
url = "https://pypi.io/packages/source/s/seaborn/seaborn-0.7.1.tar.gz"
version('0.7.1', 'ef07e29e0f8a1f2726abe506c1a36e93')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-pandas', type=('build', 'run'))
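    # Usage sketch (an assumption, not part of the original recipe): with this
    # recipe available to a Spack instance, the package would typically be
    # built and installed with:
    #
    #   spack install py-seaborn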
| lgpl-2.1 |
jaeilepp/mne-python | examples/time_frequency/plot_time_frequency_simulated.py | 1 | 8415 | """
======================================================================
Time-frequency on simulated data (Multitaper vs. Morlet vs. Stockwell)
======================================================================
This example demonstrates the different time-frequency estimation methods
on simulated data. It shows the time-frequency resolution trade-off
and the problem of estimation variance. In addition it highlights
alternative functions for generating TFRs without averaging across
trials, or by operating on numpy arrays.
"""
# Authors: Hari Bharadwaj <[email protected]>
# Denis Engemann <[email protected]>
# Chris Holdgraf <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from matplotlib import pyplot as plt
from mne import create_info, EpochsArray
from mne.baseline import rescale
from mne.time_frequency import (tfr_multitaper, tfr_stockwell, tfr_morlet,
tfr_array_morlet)
print(__doc__)
###############################################################################
# Simulate data
# -------------
#
# We'll simulate data with a known spectro-temporal structure.
sfreq = 1000.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = int(sfreq) # 1 second long epochs
n_epochs = 40
seed = 42
rng = np.random.RandomState(seed)
noise = rng.randn(n_epochs, len(ch_names), n_times)
# Add a 50 Hz sinusoidal burst to the noise and ramp it.
t = np.arange(n_times, dtype=np.float) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
data = noise + signal
reject = dict(grad=4000)
events = np.empty((n_epochs, 3), dtype=int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=data, info=info, events=events, event_id=event_id,
reject=reject)
###############################################################################
# Calculate a time-frequency representation (TFR)
# -----------------------------------------------
#
# Below we'll demonstrate the output of several TFR functions in MNE:
#
# * :func:`mne.time_frequency.tfr_multitaper`
# * :func:`mne.time_frequency.tfr_stockwell`
# * :func:`mne.time_frequency.tfr_morlet`
#
# Multitaper transform
# ====================
# First we'll use the multitaper method for calculating the TFR.
# This creates several orthogonal tapering windows in the TFR estimation,
# which reduces variance. We'll also show some of the parameters that can be
# tweaked (e.g., ``time_bandwidth``) that will result in different multitaper
# properties, and thus a different TFR. You can trade time resolution or
# frequency resolution or both in order to get a reduction in variance.
freqs = np.arange(5., 100., 3.)
vmin, vmax = -3., 3. # Define our color limits.
###############################################################################
# **(1) Least smoothing (most variance/background fluctuations).**
n_cycles = freqs / 2.
time_bandwidth = 2.0 # Least possible frequency-smoothing (1 taper)
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Least smoothing, most variance')
###############################################################################
# **(2) Less frequency smoothing, more time smoothing.**
n_cycles = freqs # Increase time-window length to 1 second.
time_bandwidth = 4.0 # Same frequency-smoothing as (1) 3 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Less frequency smoothing, more time smoothing')
###############################################################################
# **(3) Less time smoothing, more frequency smoothing.**
n_cycles = freqs / 2.
time_bandwidth = 8.0 # Same time-smoothing as (1), 7 tapers.
power = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=False)
# Plot results. Baseline correct based on first 100 ms.
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Sim: Less time smoothing, more frequency smoothing')
##############################################################################
# Stockwell (S) transform
# =======================
#
# Stockwell uses a Gaussian window to balance temporal and spectral resolution.
# Importantly, frequency bands are phase-normalized, hence strictly comparable
# with regard to timing, and the input signal can be recovered from the
# transform in a lossless way if we disregard numerical errors. In this case,
# we control the spectral / temporal resolution by specifying different widths
# of the Gaussian window using the ``width`` parameter.
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
fmin, fmax = freqs[[0, -1]]
for width, ax in zip((0.2, .7, 3.0), axs):
power = tfr_stockwell(epochs, fmin=fmin, fmax=fmax, width=width)
power.plot([0], baseline=(0., 0.1), mode='mean', axes=ax, show=False,
colorbar=False)
ax.set_title('Sim: Using S transform, width = {:0.1f}'.format(width))
plt.tight_layout()
###############################################################################
# Morlet Wavelets
# ===============
#
# Finally, show the TFR using Morlet wavelets, which are sinusoidal waves
# with a Gaussian envelope. We can control the balance between spectral and
# temporal resolution with the ``n_cycles`` parameter, which defines the
# number of cycles to include in the window.
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
all_n_cycles = [1, 3, freqs / 2.]
for n_cycles, ax in zip(all_n_cycles, axs):
power = tfr_morlet(epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=False)
power.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
axes=ax, show=False, colorbar=False)
n_cycles = 'scaled by freqs' if not isinstance(n_cycles, int) else n_cycles
ax.set_title('Sim: Using Morlet wavelet, n_cycles = %s' % n_cycles)
plt.tight_layout()
###############################################################################
# Calculating a TFR without averaging over epochs
# -----------------------------------------------
#
# It is also possible to calculate a TFR without averaging across trials.
# We can do this by using ``average=False``. In this case, an instance of
# :class:`mne.time_frequency.EpochsTFR` is returned.
n_cycles = freqs / 2.
power = tfr_morlet(epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=False, average=False)
print(type(power))
avgpower = power.average()
avgpower.plot([0], baseline=(0., 0.1), mode='mean', vmin=vmin, vmax=vmax,
title='Using Morlet wavelets and EpochsTFR', show=False)
###############################################################################
# Operating on arrays
# -------------------
#
# MNE also has versions of the functions above which operate on numpy arrays
# instead of MNE objects. They expect inputs of the shape
# ``(n_epochs, n_channels, n_times)``. They will also return a numpy array
# of shape ``(n_epochs, n_channels, n_frequencies, n_times)`` (with an
# averaged output such as ``output='avg_power'``, as used below, the epochs
# axis is collapsed, giving ``(n_channels, n_frequencies, n_times)``).
power = tfr_array_morlet(epochs.get_data(), sfreq=epochs.info['sfreq'],
frequencies=freqs, n_cycles=n_cycles,
output='avg_power')
# Baseline the output
rescale(power, epochs.times, (0., 0.1), mode='mean', copy=False)
fig, ax = plt.subplots()
mesh = ax.pcolormesh(epochs.times * 1000, freqs, power[0],
cmap='RdBu_r', vmin=vmin, vmax=vmax)
ax.set_title('TFR calculated on a numpy array')
ax.set(ylim=freqs[[0, -1]], xlabel='Time (ms)')
fig.colorbar(mesh)
plt.tight_layout()
plt.show()
| bsd-3-clause |
adijo/local-search | abstract_nqueens.py | 1 | 4184 | from abc import ABCMeta, abstractmethod
import random
import math
from ggplot import *
import pandas as pd
from board_base import BoardBase
class Board(BoardBase):
def __init__(self):
self.limit = 8
self.board = [[None for y in xrange(self.limit)] for x in xrange(self.limit)]
for row in self.board:
row[random.randrange(self.limit)] = 1
self.moves = [
lambda x, y : (x, y + 1),
lambda x, y : (x + 1, y + 1),
lambda x, y : (x + 1, y),
lambda x, y : (x + 1, y - 1),
lambda x, y : (x, y - 1),
lambda x, y : (x - 1, y - 1),
lambda x, y : (x - 1, y),
lambda x, y : (x - 1, y + 1)
]
def __str__(self):
return self._print_board(self.board)
def get_board(self):
return self.board
def next_moves(self, board):
for i in xrange(len(board)):
for j in xrange(len(board)):
if board[i][j] == 1:
ctr = 0
for move in self.moves:
x, y = move(i, j)
ctr += 1
if self._is_valid(x, y) and board[x][y] == None:
board[i][j] = None
board[x][y] = 1
yield (board, (i, j), (x, y), self.eval(board))
board[x][y] = None
board[i][j] = 1
def _print_board(self, board):
answer = [" ".join(map(lambda x : "Q" if x else ".", row)) for row in board]
return "\n".join(answer)
def _randomize(self, board):
board = [[None for y in xrange(self.limit)] for x in xrange(self.limit)]
for row in board:
row[random.randrange(self.limit)] = 1
return board
def improvise(self, board, limit = 100, random_restart = False, restart_probability = 0.5, show = False):
costs = []
board = self._randomize(board)
heuristic_cost = self.eval(board)
iterations = 0
costs.append(heuristic_cost)
while iterations < limit:
if random_restart:
random_value = random.random()
if random_value <= restart_probability:
board = self._randomize(board)
heuristic_cost = self.eval(board)
iterations += 1
if heuristic_cost == 0:
least_board = board
return 0
curr_cost = heuristic_cost
initial_f = None
future_f = None
least_board = self._print_board(board)
for next_config in self.next_moves(board):
future_board, initial, future, new_cost = next_config
if new_cost < curr_cost:
curr_cost = new_cost
initial_f = initial
future_f = future
least_board = self._print_board(board)
if initial_f != None:
board[initial_f[0]][initial_f[1]] = None
board[future_f[0]][future_f[1]] = 1
heuristic_cost = curr_cost
costs.append(heuristic_cost)
if show:
print "Min heuristic cost attained:", min(costs)
data = pd.DataFrame({'Indices' : range(len(costs)), "Cost" : costs})
plt = ggplot(aes(x = 'Indices',y = 'Cost'), data = data) + \
geom_point()
plt.__repr__()
return min(costs)
def _is_valid(self, x, y):
return 0 <= x < self.limit and 0 <= y < self.limit
def _iter(self, x, y, move, board):
x, y = move(x, y)
while self._is_valid(x, y):
if board[x][y] == 1:
return 1
x, y = move(x, y)
return 0
def eval(self, board):
total = 0
for i in xrange(len(board)):
for j in xrange(len(board)):
if board[i][j] == 1:
# there is a queen here.
for move in self.moves:
total += self._iter(i, j, move, board)
return total / 2
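# Minimal usage sketch (not part of the original module): run hill climbing
# with random restarts on an 8-queens board and report the best heuristic
# cost reached (0 means a conflict-free placement was found). Only the Board
# API defined above is used; the parameter values are illustrative guesses.
if __name__ == "__main__":
    b = Board()
    best_cost = b.improvise(b.get_board(), limit=200, random_restart=True,
                            restart_probability=0.1, show=False)
    print "Best heuristic cost reached:", best_cost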
| apache-2.0 |
brdfdr/trading-with-python | lib/csvDatabase.py | 77 | 6045 | # -*- coding: utf-8 -*-
"""
intraday data handlers in csv format.
@author: jev
"""
from __future__ import division
import pandas as pd
import datetime as dt
import os
from extra import ProgressBar
dateFormat = "%Y%m%d" # date format for converting filenames to dates
dateTimeFormat = "%Y%m%d %H:%M:%S"
def fileName2date(fName):
'''convert filename to date'''
name = os.path.splitext(fName)[0]
return dt.datetime.strptime(name.split('_')[1],dateFormat).date()
def parseDateTime(dateTimeStr):
return dt.datetime.strptime(dateTimeStr,dateTimeFormat)
def loadCsv(fName):
''' load DataFrame from csv file '''
with open(fName,'r') as f:
lines = f.readlines()
dates= []
header = [h.strip() for h in lines[0].strip().split(',')[1:]]
data = [[] for i in range(len(header))]
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(parseDateTime(fields[0]))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
return pd.DataFrame(data=dict(zip(header,data)),index=pd.Index(dates))
class HistDataCsv(object):
'''class for working with historic database in .csv format'''
def __init__(self,symbol,dbDir,autoCreateDir=False):
self.symbol = symbol
self.dbDir = os.path.normpath(os.path.join(dbDir,symbol))
if not os.path.exists(self.dbDir) and autoCreateDir:
print 'Creating data directory ', self.dbDir
os.mkdir(self.dbDir)
self.dates = []
for fName in os.listdir(self.dbDir):
self.dates.append(fileName2date(fName))
def saveData(self,date, df,lowerCaseColumns=True):
''' add data to database'''
        if lowerCaseColumns: # force column names to lowercase for consistency
df.columns = [ c.lower() for c in df.columns]
s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
dest = os.path.join(self.dbDir,s) # full path destination
print 'Saving data to: ', dest
df.to_csv(dest)
def loadDate(self,date):
''' load data '''
s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
df = pd.DataFrame.from_csv(os.path.join(self.dbDir,s))
cols = [col.strip() for col in df.columns.tolist()]
df.columns = cols
#df = loadCsv(os.path.join(self.dbDir,s))
return df
def loadDates(self,dates):
        ''' load multiple dates, concatenating to one DataFrame '''
tmp =[]
print 'Loading multiple dates for ' , self.symbol
p = ProgressBar(len(dates))
for i,date in enumerate(dates):
tmp.append(self.loadDate(date))
p.animate(i+1)
print ''
return pd.concat(tmp)
def createOHLC(self):
''' create ohlc from intraday data'''
ohlc = pd.DataFrame(index=self.dates, columns=['open','high','low','close'])
for date in self.dates:
print 'Processing', date
try:
df = self.loadDate(date)
ohlc.set_value(date,'open',df['open'][0])
ohlc.set_value(date,'high',df['wap'].max())
ohlc.set_value(date,'low', df['wap'].min())
ohlc.set_value(date,'close',df['close'][-1])
except Exception as e:
print 'Could not convert:', e
return ohlc
def __repr__(self):
return '{symbol} dataset with {nrDates} days of data'.format(symbol=self.symbol, nrDates=len(self.dates))
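# Example sketch (not part of the original module): working with a single
# symbol whose data lives in <dbDir>/<SYMBOL>/<SYMBOL>_YYYYMMDD.csv files.
# The directory path below is purely illustrative.
#
#   spy = HistDataCsv('SPY', 'D:/data/30sec')
#   bars = spy.loadDate(spy.dates[0]) # intraday bars for the first stored day
#   daily = spy.createOHLC() # daily OHLC built from the intraday data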
class HistDatabase(object):
''' class working with multiple symbols at once '''
def __init__(self, dataDir):
# get symbols from directory names
symbols = []
for l in os.listdir(dataDir):
if os.path.isdir(os.path.join(dataDir,l)):
symbols.append(l)
#build dataset
        self.csv = {} # dict of HistDataCsv handlers
for symbol in symbols:
self.csv[symbol] = HistDataCsv(symbol,dataDir)
def loadDates(self,dates=None):
'''
get data for all symbols as wide panel
provide a dates list. If no dates list is provided, common dates are used.
'''
if dates is None: dates=self.commonDates
tmp = {}
for k,v in self.csv.iteritems():
tmp[k] = v.loadDates(dates)
return pd.WidePanel(tmp)
def toHDF(self,dataFile,dates=None):
''' write wide panel data to a hdfstore file '''
if dates is None: dates=self.commonDates
store = pd.HDFStore(dataFile)
wp = self.loadDates(dates)
store['data'] = wp
store.close()
@property
def commonDates(self):
''' return dates common for all symbols '''
t = [v.dates for v in self.csv.itervalues()] # get all dates in a list
d = list(set(t[0]).intersection(*t[1:]))
return sorted(d)
def __repr__(self):
s = '-----Hist CSV Database-----\n'
for k,v in self.csv.iteritems():
s+= (str(v)+'\n')
return s
#--------------------
if __name__=='__main__':
dbDir =os.path.normpath('D:/data/30sec')
vxx = HistDataCsv('VXX',dbDir)
spy = HistDataCsv('SPY',dbDir)
#
date = dt.date(2012,8,31)
print date
#
pair = pd.DataFrame({'SPY':spy.loadDate(date)['close'],'VXX':vxx.loadDate(date)['close']})
print pair.tail() | bsd-3-clause |
vaisaghvt/gameAnalyzer | python plots/plotCumulativeFrequencyTime.py | 1 | 2066 |
import os
import math
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import rc
import numpy as np
import csv
with open('2DormCorr-Limited.csv', 'r') as csvfile:
fileReader = csv.reader(csvfile, delimiter=',')
nameToVisitTimeMap = {}
currentName= ""
startValue=0
for row in fileReader:
for num,word in enumerate(row):
if num == 0:
nameToVisitTimeMap[word] = []
currentName= word
startValue=0
else:
                value = long(word)/1000.0  # convert milliseconds to seconds
                if startValue == 0:
                    startValue = value  # first timestamp becomes the reference point
                nameToVisitTimeMap[currentName].append(value-startValue)
fig = plt.figure(num=None, figsize=(50, 12), dpi=80)
for name in nameToVisitTimeMap.keys():
plt.plot(nameToVisitTimeMap.get(name),range(0,len(nameToVisitTimeMap[name]),1))
plt.xlim(0,2100)
plt.yticks(np.arange(0, 8, 1))
plt.xticks(np.arange(0, 2100, 100))
# # for tl in plt.get_yticklabels():
# # tl.set_color('r')
# font = {'size': 30}
# rc('font', **font)
# # for tick in mpl.axis.Axis.get_major_ticks():
# # tick.label.set_fontsize(30);
# # for tick in mpl.axis.YAxis.get_major_ticks():
# # tick.label.set_fontsize(30);
# plt.tick_params(axis='both', which='major', labelsize=30)
# # handles, labels = plt.get_legend_handles_labels()
# # # reverse the order
# # plt.legend(handles[::-1], labels[::-1])
# # or sort them by labels
# plt.ylabel("Total Number of Hotspots",
# fontsize=30,
# verticalalignment='center',
# horizontalalignment='right',
# rotation='vertical' )
# plt.xlabel("Percentage Trusting Device",
# fontsize=30)
# # print labels2
# # plt.legend(handles2, labels2, loc="upper right")
# # plt.legend(loc="upper right")
plt.show()
# plt.savefig("TrustProb-Scenario{0}".format(scenarioNumber), pad_inches=0)
# plt.close() | mit |
Ized06/GID_Internal | client/examples/faster-rcnn.py | 2 | 3577 | # Config of faster rcnn
import sys, os, logging
sys.path.append('..')
from unrealcv import client
# RCNN config
rcnn_path = '/home/qiuwch/workspace/py-faster-rcnn'
sys.path.append(os.path.join(rcnn_path, 'tools'))
import demo as D # Use demo.py provided in faster-rcnn
import numpy as np
import matplotlib.pyplot as plt
net = None
def init_caffe(): # TODO: parse args into here
global net
prototxt = os.path.join(D.cfg.MODELS_DIR, D.NETS['vgg16'][0],
'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
caffemodel = os.path.join(D.cfg.DATA_DIR, 'faster_rcnn_models'
, D.NETS['vgg16'][1])
gpu_id = 0
D.caffe.set_mode_gpu()
D.caffe.set_device(gpu_id)
D.cfg.GPU_ID = gpu_id
D.cfg.TEST.HAS_RPN = True
net = D.caffe.Net(prototxt, caffemodel, D.caffe.TEST)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype = np.uint8)
for _ in xrange(2):
_, _ = D.im_detect(net, im)
def plot_image(image, boxes=None, scores=None):
ax.cla() # Clear axis
ax.imshow(image, aspect='equal')
ax.axis('off')
    if boxes is not None and scores is not None:
CONF_THRESH = 0.8
NMS_THRESH = 0.3
for cls_ind, cls in enumerate(D.CLASSES[1:]):
cls_ind += 1 # Skip background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind+1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes, cls_scores[:,np.newaxis])).astype(np.float32)
keep = D.nms(dets, NMS_THRESH)
dets = dets[keep, :]
plot_bb(cls, dets, thresh=CONF_THRESH)
fig.canvas.draw()
def plot_bb(class_name, dets, thresh=0.5):
    inds = np.where(dets[:, -1] >= thresh)[0]  # detections above the confidence threshold
if len(inds) == 0:
return
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
patch = plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0]
, bbox[3] - bbox[1], fill=False, edgecolor='red', linewidth=3.5)
ax.add_patch(patch)
text = '{:s} {:.3f}'.format(class_name, score)
ax.text(bbox[0], bbox[1] - 2, text, bbox=dict(facecolor='blue', alpha=0.5)
, fontsize=14, color='white')
def process_image(filename):
if not net:
init_caffe() # Caffe needs to be started in this thread, otherwise GIL will make it very slow
print 'Process image: %s' % filename
if not os.path.isfile(filename):
print 'Image file %s not exist' % filename
return
image = D.cv2.imread(filename)
timer = D.Timer()
timer.tic()
scores, boxes = D.im_detect(net, image)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
show_img = image[:,:, (2,1,0)] # Reorder to RGB
plot_image(show_img, boxes, scores)
# plot_image(show_img)
def message_handler(message):
print 'Got server message %s' % repr(message)
if message == 'clicked':
image = client.request('vget /camera/0/lit')
process_image(image)
if __name__ == '__main__':
_L = logging.getLogger('unrealcv')
_L.setLevel(logging.ERROR)
client.message_handler = message_handler
client.connect()
if not client.isconnected():
print 'UnrealCV server is not running. Run the game downloaded from http://unrealcv.github.io first.'
else:
# Initialize the matplotlib
fig, ax = plt.subplots()
# Show an empty image
image = np.zeros((300, 300))
ax.imshow(image)
plt.axis('off')
plt.tight_layout()
plt.show()
client.disconnect()
| mit |
cauchycui/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we vary the model complexity through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
rkwitt/quicksilver | 3rd_party_software/pyca/Testing/InterpUnitTest.py | 1 | 4179 | #
# This file contains tests in which PyCA results are compared to
# results from numpy. All tests can be run from the command line by:
#
# > python -m unittest discover -v -p '*UnitTest.py'
#
# To run an individual test with graphical output from ipython:
#
# import InterpUnitTest as cgtest
# cgtc = cgtest.InterpTestCase()
# cgtc.test_ResampleInterp(disp=True)
#
import sys
import unittest
import PyCATest
from PyCA.Core import *
import PyCA.Common as common
reload(common)
import numpy as np
# try:
# import matplotlib.pyplot as plt
# plt.ion()
# except ImportError:
# print "Warning: matplotlib.pyplot not found, some functionality disabled"
#
# Test Class
#
class InterpTestCase(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(InterpTestCase, self).__init__(methodName)
self.cudaEnabled = (GetNumberOfCUDADevices() > 0)
def skipIfNoCUDA(self):
if not self.cudaEnabled:
self.skipTest('Cannot run test, no CUDA device found or CUDA support not compiled')
################################################################
#
# Begin Tests
#
################################################################
#
# Check exception wrapping
#
@PyCATest.AddSetUp(skipIfNoCUDA)
def test_ResampleInterp(self, disp=False):
# generate small integer-valued image
initMax = 5
randArrSmall = (np.random.rand(10,10)*initMax).astype(int)
randImSmall = common.ImFromNPArr(randArrSmall)
imLarge = Image3D(50,50,1)
Resample(imLarge, randImSmall,
BACKGROUND_STRATEGY_CLAMP,
INTERP_NN)
nUnique = len(np.unique(imLarge.asnp()))
self.assertEqual(nUnique,initMax)
# runTest is only added so that the class can be instantiated
# directly in order to call individual tests
def runTest():
print 'No tests to run directly, all are member functions'
if __name__ == '__main__':
"""
Run a test showing different interpolation methods used for
upsampling and deformation.
"""
import PyCA.Core as ca
import PyCA.Common as common
import PyCA.Display as display
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
initMax = 5
randArrSmall = (np.random.rand(10,10)*initMax).astype(int)
imSmall = common.ImFromNPArr(randArrSmall)
imLargeNN = ca.Image3D(50,50,1)
imLargeLinear = ca.Image3D(50,50,1)
imLargeCubic = ca.Image3D(50,50,1)
ca.Resample(imLargeNN, imSmall,
ca.BACKGROUND_STRATEGY_CLAMP,
ca.INTERP_NN)
ca.Resample(imLargeLinear, imSmall,
ca.BACKGROUND_STRATEGY_CLAMP,
ca.INTERP_LINEAR)
ca.Resample(imLargeCubic, imSmall,
ca.BACKGROUND_STRATEGY_CLAMP,
ca.INTERP_CUBIC)
plt.figure('interp test')
plt.subplot(2,3,1)
display.DispImage(imLargeNN, 'NN', newFig=False)
plt.subplot(2,3,2)
display.DispImage(imLargeLinear, 'Linear', newFig=False)
plt.subplot(2,3,3)
display.DispImage(imLargeCubic, 'Cubic', newFig=False)
plt.subplot(2,3,5)
display.DispImage(imSmall, 'small', newFig=False)
plt.show()
h = common.WavyDef([50,50], nWaves=1, waveAmp=10, waveDim=0,
mType=ca.MEM_HOST, deformation=True)
imDefNN = imLargeNN.copy()
ca.ApplyH(imDefNN, imLargeNN, h, ca.BACKGROUND_STRATEGY_CLAMP, ca.INTERP_NN)
imDefLinear = imLargeNN.copy()
ca.ApplyH(imDefLinear, imLargeNN, h, ca.BACKGROUND_STRATEGY_CLAMP, ca.INTERP_LINEAR)
imDefCubic = imLargeNN.copy()
ca.ApplyH(imDefCubic, imLargeNN, h, ca.BACKGROUND_STRATEGY_CLAMP, ca.INTERP_CUBIC)
plt.figure('interp def test')
plt.subplot(2,3,1)
display.DispImage(imDefNN, 'NN', newFig=False)
plt.subplot(2,3,2)
display.DispImage(imDefLinear, 'Linear', newFig=False)
plt.subplot(2,3,3)
display.DispImage(imDefCubic, 'Cubic', newFig=False)
plt.subplot(2,3,5)
display.DispImage(imLargeNN, 'orig', newFig=False)
plt.show()
# make sure plots don't close on exit
plt.ioff()
plt.show()
| apache-2.0 |
MechCoder/scikit-learn | sklearn/metrics/tests/test_classification.py | 5 | 61465 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import MockDataFrame
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.exceptions import UndefinedMetricWarning
from scipy.spatial.distance import hamming as sp_hamming
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
# Test handling of explicit additional (not in input) labels to PRF
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
# Test a subset of labels may be requested for PRF
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
    # Duplicate values with precision-recall require different
    # processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
# The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
# Here if we go from left to right in y_true, the 0 values are
# are separated from the 1 values, so it appears that we've
# Correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='binary')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_precision_recall_f_unused_pos_label():
    # Check warning that pos_label is unused when set to non-default value
# but average != 'binary'; even if data is binary.
assert_warns_message(UserWarning,
"Note that pos_label (set to 2) is "
"ignored when average != 'binary' (got 'macro'). You "
"may use labels=[pos_label] to specify a single "
"positive class.", precision_recall_fscore_support,
[1, 2, 1], [1, 2, 2], pos_label=2, average='macro')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
# Weighting example: none, linear, quadratic.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 50 + [1] * 40 + [2] * 10)
assert_almost_equal(cohen_kappa_score(y1, y2), .9315, decimal=4)
assert_almost_equal(cohen_kappa_score(y1, y2, weights="linear"), .9412, decimal=4)
assert_almost_equal(cohen_kappa_score(y1, y2, weights="quadratic"), .9541, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_matthews_corrcoef_against_numpy_corrcoef():
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=20)
y_pred = rng.randint(0, 2, size=20)
assert_almost_equal(matthews_corrcoef(y_true, y_pred),
np.corrcoef(y_true, y_pred)[0, 1], 10)
def test_matthews_corrcoef_against_jurman():
# Check that the multiclass matthews_corrcoef agrees with the definition
# presented in Jurman, Riccadonna, Furlanello, (2012). A Comparison of MCC
# and CEN Error Measures in MultiClass Prediction
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=20)
y_pred = rng.randint(0, 2, size=20)
sample_weight = rng.rand(20)
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
N = len(C)
cov_ytyp = sum([
C[k, k] * C[m, l] - C[l, k] * C[k, m]
for k in range(N) for m in range(N) for l in range(N)
])
cov_ytyt = sum([
C[:, k].sum() *
np.sum([C[g, f] for f in range(N) for g in range(N) if f != k])
for k in range(N)
])
cov_ypyp = np.sum([
C[k, :].sum() *
np.sum([C[f, g] for f in range(N) for g in range(N) if f != k])
for k in range(N)
])
mcc_jurman = cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)
mcc_ours = matthews_corrcoef(y_true, y_pred, sample_weight)
assert_almost_equal(mcc_ours, mcc_jurman, 10)
def test_matthews_corrcoef():
rng = np.random.RandomState(0)
y_true = ["a" if i == 0 else "b" for i in rng.randint(0, 2, size=20)]
# corrcoef of same vectors must be 1
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
# corrcoef, when the two vectors are opposites of each other, should be -1
y_true_inv = ["b" if i == "a" else "a" for i in y_true]
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv), -1)
y_true_inv2 = label_binarize(y_true, ["a", "b"])
y_true_inv2 = np.where(y_true_inv2, 'a', 'b')
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv2), -1)
# For the zero vector case, the corrcoef cannot be calculated and should
# result in a RuntimeWarning
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, [0, 0, 0, 0], [0, 0, 0, 0])
# But will output 0
assert_almost_equal(mcc, 0.)
# And also for any other vector with 0 variance
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true, ['a'] * len(y_true))
# But will output 0
assert_almost_equal(mcc, 0.)
# These two vectors have 0 correlation and hence mcc should be 0
y_1 = [1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1]
y_2 = [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1]
assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.)
    # Check that sample weight is able to selectively exclude samples
mask = [1] * 10 + [0] * 10
    # Now only the first half of the vector elements are given a weight of 1
    # and hence the mcc will not be a perfect 0 as in the previous case
assert_raises(AssertionError, assert_almost_equal,
matthews_corrcoef(y_1, y_2, sample_weight=mask), 0.)
def test_matthews_corrcoef_multiclass():
rng = np.random.RandomState(0)
ord_a = ord('a')
n_classes = 4
y_true = [chr(ord_a + i) for i in rng.randint(0, n_classes, size=20)]
# corrcoef of same vectors must be 1
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
# with multiclass > 2 it is not possible to achieve -1
y_true = [0, 0, 1, 1, 2, 2]
y_pred_bad = [2, 2, 0, 0, 1, 1]
assert_almost_equal(matthews_corrcoef(y_true, y_pred_bad), -.5)
# Maximizing false positives and negatives minimizes the MCC
    # The minimum will be different depending on the input
y_true = [0, 0, 1, 1, 2, 2]
y_pred_min = [1, 1, 0, 0, 0, 0]
assert_almost_equal(matthews_corrcoef(y_true, y_pred_min),
-12 / np.sqrt(24 * 16))
# Zero variance will result in an mcc of zero and a Runtime Warning
y_true = [0, 1, 2]
y_pred = [3, 3, 3]
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true, y_pred)
assert_almost_equal(mcc, 0.0)
# These two vectors have 0 correlation and hence mcc should be 0
y_1 = [0, 1, 2, 0, 1, 2, 0, 1, 2]
y_2 = [1, 1, 1, 2, 2, 2, 0, 0, 0]
assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.)
# We can test that binary assumptions hold using the multiclass computation
# by masking the weight of samples not in the first two classes
# Masking the last label should let us get an MCC of -1
y_true = [0, 0, 1, 1, 2]
y_pred = [1, 1, 0, 0, 2]
sample_weight = [1, 1, 1, 1, 0]
assert_almost_equal(matthews_corrcoef(y_true, y_pred, sample_weight), -1)
# For the zero vector case, the corrcoef cannot be calculated and should
# result in a RuntimeWarning
y_true = [0, 0, 1, 2]
y_pred = [0, 0, 1, 2]
sample_weight = [1, 1, 0, 0]
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true, y_pred,
sample_weight)
# But will output 0
assert_almost_equal(mcc, 0.)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
# same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_binary_averaged():
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
ps, rs, fs, _ = precision_recall_fscore_support(y_true, y_pred,
average=None)
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
average='macro')
assert_equal(p, np.mean(ps))
assert_equal(r, np.mean(rs))
assert_equal(f, np.mean(fs))
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
average='weighted')
support = np.bincount(y_true)
assert_equal(p, np.average(ps, weights=support))
assert_equal(r, np.average(rs, weights=support))
assert_equal(f, np.average(fs, weights=support))
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='macro'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='macro'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='macro'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_sample_weight():
"""Test confusion matrix - case with sample_weight"""
y_true, y_pred, _ = make_prediction(binary=False)
weights = [.1] * 25 + [.2] * 25 + [.3] * 25
cm = confusion_matrix(y_true, y_pred, sample_weight=weights)
true_cm = (.1 * confusion_matrix(y_true[:25], y_pred[:25]) +
.2 * confusion_matrix(y_true[25:50], y_pred[25:50]) +
.3 * confusion_matrix(y_true[50:], y_pred[50:]))
assert_array_almost_equal(cm, true_cm)
assert_raises(
ValueError, confusion_matrix, y_true, y_pred,
sample_weight=weights[:-1])
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
# a label not in y_true should result in zeros for that row/column
extra_label = np.max(y_true) + 1
cm = confusion_matrix(y_true, y_pred, labels=[2, extra_label])
assert_array_equal(cm, [[18, 0],
[0, 0]])
# check for exception when none of the specified labels are in y_true
assert_raises(ValueError, confusion_matrix, y_true, y_pred,
labels=[extra_label, extra_label + 1])
def test_confusion_matrix_dtype():
y = [0, 1, 1]
weight = np.ones(len(y))
# confusion_matrix returns int64 by default
cm = confusion_matrix(y, y)
assert_equal(cm.dtype, np.int64)
# The dtype of confusion_matrix is always 64 bit
for dtype in [np.bool_, np.int32, np.uint64]:
cm = confusion_matrix(y, y, sample_weight=weight.astype(dtype))
assert_equal(cm.dtype, np.int64)
for dtype in [np.float32, np.float64, None, object]:
cm = confusion_matrix(y, y, sample_weight=weight.astype(dtype))
assert_equal(cm.dtype, np.float64)
# np.iinfo(np.uint32).max should be accumulated correctly
weight = np.ones(len(y), dtype=np.uint32) * 4294967295
cm = confusion_matrix(y, y, sample_weight=weight)
assert_equal(cm[0, 0], 4294967295)
assert_equal(cm[1, 1], 8589934590)
# np.iinfo(np.int64).max should cause an overflow
weight = np.ones(len(y), dtype=np.int64) * 9223372036854775807
cm = confusion_matrix(y, y, sample_weight=weight)
assert_equal(cm[0, 0], 9223372036854775807)
assert_equal(cm[1, 1], -2)
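# (2 * int64 max wraps around to -2 in int64 arithmetic, hence the -2 above.)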
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_long_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array(["blue", "green"*5, "red"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
greengreengreengreengreen 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_labels_target_names_unequal_length():
y_true = [0, 0, 2, 0, 0]
y_pred = [0, 2, 2, 0, 0]
target_names = ['class 0', 'class 1', 'class 2']
assert_warns_message(UserWarning,
"labels size, 2, does not "
"match size of target_names, 3",
classification_report,
y_true, y_pred, target_names=target_names)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
w = np.array([1, 3])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, 1 - y2), 1)
assert_equal(hamming_loss(y1, 1 - y1), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
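# With sample_weight, hamming_loss is the weighted average fraction of
# mismatched labels: sum(w_i * n_mismatches_i) / (n_labels * sum(w)).
# Here y1 and y2 differ in [1, 0] positions per row, so the weighted loss is
# (1*1 + 3*0) / (3 * 4) = 1/12; against 1 - y2 the mismatches are [2, 3],
# giving (1*2 + 3*3) / 12 = 11/12.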
assert_equal(hamming_loss(y1, y2, sample_weight=w), 1. / 12)
assert_equal(hamming_loss(y1, 1-y2, sample_weight=w), 11. / 12)
assert_equal(hamming_loss(y1, np.zeros_like(y1), sample_weight=w), 2. / 3)
# sp_hamming only works with 1-D arrays
assert_equal(hamming_loss(y1[0], y2[0]), sp_hamming(y1[0], y2[0]))
assert_warns(DeprecationWarning, hamming_loss, y1, y2, classes=[0, 1])
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
# single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='binary')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='binary')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_binary_data_non_binary():
# Error if user does not explicitly set non-binary average mode
y_true_mc = [1, 2, 3, 3]
y_pred_mc = [1, 2, 3, 1]
y_true_ind = np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])
y_pred_ind = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
for y_true, y_pred, y_type in [
(y_true_mc, y_pred_mc, 'multiclass'),
(y_true_ind, y_pred_ind, 'multilabel-indicator'),
]:
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
assert_raise_message(ValueError,
"Target is %s but average='binary'. Please "
"choose another average setting." % y_type,
metric, y_true, y_pred)
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test__check_targets_multiclass_with_both_y_true_and_y_pred_binary():
# https://github.com/scikit-learn/scikit-learn/issues/8098
y_true = [0, 1]
y_pred = [0, -1]
assert_equal(_check_targets(y_true, y_pred)[0], 'multiclass')
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[+1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, +0.24],
[-2.36, -0.79, -0.27, +0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
# decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
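# (log_loss is -mean(log p(true class)); with "no" mapped to column 0 and
#  "yes" to column 1, that is -mean(log([.5, .1, .01, .1, .25, .999])) ~= 1.88.)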
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise an error if the numbers of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
# test labels option
y_true = [2, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5]]
y_score = np.array([[0.1, 0.9], [0.1, 0.9]])
error_str = ('y_true contains only one label (2). Please provide '
'the true labels explicitly through the labels argument.')
assert_raise_message(ValueError, error_str, log_loss, y_true, y_pred)
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.2, 0.3]]
error_str = ('Found input variables with inconsistent numbers of samples: '
'[3, 2]')
assert_raise_message(ValueError, error_str, log_loss, y_true, y_pred)
# works when the labels argument is used
true_log_loss = -np.mean(np.log(y_score[:, 1]))
calculated_log_loss = log_loss(y_true, y_score, labels=[1, 2])
assert_almost_equal(calculated_log_loss, true_log_loss)
# ensure labels work when len(np.unique(y_true)) != y_pred.shape[1]
y_true = [1, 2, 2]
y_score2 = [[0.2, 0.7, 0.3], [0.6, 0.5, 0.3], [0.3, 0.9, 0.1]]
loss = log_loss(y_true, y_score2, labels=[1, 2, 3])
assert_almost_equal(loss, 1.0630345, decimal=6)
def test_log_loss_pandas_input():
# case when input is a pandas series and dataframe gh-5715
y_tr = np.array(["ham", "spam", "spam", "ham"])
y_pr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]])
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TrueInputType, PredInputType in types:
# y_pred dataframe, y_true series
y_true, y_pred = TrueInputType(y_tr), PredInputType(y_pr)
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
# calculate even if only single class in y_true (#6980)
assert_almost_equal(brier_score_loss([0], [0.5]), 0.25)
assert_almost_equal(brier_score_loss([1], [0.5]), 0.25)
| bsd-3-clause |
gergopokol/renate-od | crm_solver/beamlet.py | 1 | 6344 | import utility
from utility.constants import Constants
import pandas
from lxml import etree
from crm_solver.coefficientmatrix import CoefficientMatrix
from crm_solver.ode import Ode
from crm_solver.atomic_db import AtomicDB
class Beamlet:
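"""
Beamlet evolution driver: reads the beamlet parameters (XML) and the plasma
profiles/components (HDF5) unless they are passed in, builds the rate
coefficient matrix from the atomic database, solves the atomic level
population ODE along the beamlet, and provides helpers to compute linear
emission density, beam attenuation and relative level populations.
"""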
def __init__(self, param=None, profiles=None, components=None, atomic_db=None,
solver='numerical', data_path="beamlet/testimp0001.xml"):
self.param = param
if not isinstance(self.param, etree._ElementTree):
self.__read_beamlet_param(data_path)
self.profiles = profiles
self.components = components
self.atomic_db = atomic_db
if not (isinstance(self.components, pandas.DataFrame) and isinstance(self.profiles, pandas.DataFrame)):
self.__read_beamlet_profiles()
if atomic_db is None:
self.atomic_db = AtomicDB(param=self.param, components=self.components)
self.const = Constants()
self.coefficient_matrix = None
self.initial_condition = None
self.calculate_beamevolution(solver)
def __read_beamlet_param(self, data_path):
self.param = utility.getdata.GetData(data_path_name=data_path).data
assert isinstance(self.param, etree._ElementTree)
print('Beamlet.param read from file: ' + data_path)
def __read_beamlet_profiles(self):
hdf5_path = self.param.getroot().find('body').find('beamlet_source').text
self.components = utility.getdata.GetData(data_path_name=hdf5_path, data_key=['components']).data
assert isinstance(self.components, pandas.DataFrame)
print('Beamlet.imp_components read from file: ' + hdf5_path)
self.profiles = utility.getdata.GetData(data_path_name=hdf5_path, data_key=['profiles']).data
assert isinstance(self.profiles, pandas.DataFrame)
print('Beamlet.imp_profiles read from file: ' + hdf5_path)
def __initialize_ode(self):
self.coefficient_matrix = CoefficientMatrix(self.param, self.profiles, self.components, self.atomic_db)
self.initial_condition = [self.__get_linear_density()] + [0] * (self.atomic_db.atomic_levels - 1)
def __get_linear_density(self):
current = float(self.param.getroot().find('body').find('beamlet_current').text)
return current / (self.atomic_db.velocity * self.const.charge_electron)
def __solve_numerically(self):
if self.coefficient_matrix is None or self.initial_condition is None:
self.__initialize_ode()
ode = Ode(coeff_matrix=self.coefficient_matrix.matrix, init_condition=self.initial_condition)
numerical = ode.calculate_numerical_solution(self.profiles['beamlet grid']['distance']['m'])
for level in range(self.atomic_db.atomic_levels):
label = 'level ' + self.atomic_db.inv_atomic_dict[level]
self.profiles[label] = numerical[:, level]
return
def calculate_beamevolution(self, solver):
assert isinstance(solver, str)
if solver == 'numerical':
self.__solve_numerically()
elif solver == 'analytical':
# TODO: Implement analytical solver
pass
elif solver == 'disregard':
print('Beam evolution not calculated.')
return
else:
raise Exception('The numerical solver: ' + solver + ' is not supported. '
'Supported solvers are: numerical, analytical, disregard.')
def __was_beamevolution_performed(self):
try:
dummy = self.profiles['level ' + self.atomic_db.set_default_atomic_levels()[2]]
return True
except KeyError:
return False
def compute_linear_emission_density(self, to_level=None, from_level=None):
if to_level is None or from_level is None:
from_level, to_level, ground_level, transition_label = self.atomic_db.set_default_atomic_levels()
if isinstance(to_level, str) and isinstance(from_level, str):
if self.atomic_db.atomic_dict[to_level] >= self.atomic_db.atomic_dict[from_level]:
raise Exception('Dude! Please stop screwing around. '
'Electrons spontaneously transit from higher to lower atomic states.')
else:
raise Exception('The expected input for atomic transitions are strings. '
'Bundled-n for H,D,T beam species ex:[1, 2, ... 6]. '
'l-n resolved labels for Li ex: [2s, 2p, ... 4f] and Na ex: [3s, 3p, ... 5s]')
if self.__was_beamevolution_performed():
transition_label = from_level + '-' + to_level
self.profiles[transition_label] = \
self.profiles['level '+from_level] * self.atomic_db.spontaneous_trans[self.atomic_db.atomic_dict
[to_level], self.atomic_db.atomic_dict[from_level]]
else:
print('Beam evolution calculations were not performed. Execute solver first.')
def compute_linear_density_attenuation(self):
if self.__was_beamevolution_performed():
self.profiles['linear_density_attenuation'] = self.profiles['level ' + self.atomic_db.inv_atomic_dict[0]]
for level in range(1, self.atomic_db.atomic_levels):
self.profiles['linear_density_attenuation'] += self.profiles['level ' +
self.atomic_db.inv_atomic_dict[level]]
else:
print('Beam evolution calculations were not performed. Execute solver first.')
def compute_relative_populations(self, reference_level=None):
if self.__was_beamevolution_performed():
if reference_level is None:
reference_level = self.atomic_db.set_default_atomic_levels()[2]
assert isinstance(reference_level, str)
for level in range(self.atomic_db.atomic_levels):
self.profiles['rel.pop ' + self.atomic_db.inv_atomic_dict[level]] = \
self.profiles['level ' + self.atomic_db.inv_atomic_dict[level]] / \
self.profiles['level ' + reference_level]
else:
print('Beam evolution calculations were not performed. Execute solver first.')
| lgpl-3.0 |
colinoflynn/T-962-improvements | serial-control.py | 3 | 5926 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Log the temperatures reported by the oven in a live plot and
# in a CSV file.
#
# Requires
# python 2.7
# - pyserial (python-serial in ubuntu, pip install pyserial)
# - matplotlib (python-matplotlib in ubuntu, pip install matplotlib)
#
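# Usage (see the __main__ block at the bottom of this script):
#
#   python serial-control.py        # default action 'log': log reflow sessions
#   python serial-control.py log
#   python serial-control.py test   # loop the oven through all profiles, logging each
#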
import csv
import datetime
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import serial
import sys
from time import time
# settings
#
FIELD_NAMES = 'Time,Temp0,Temp1,Temp2,Temp3,Set,Actual,Heat,Fan,ColdJ,Mode'
TTYs = ('/dev/ttyUSB0', '/dev/ttyUSB1', '/dev/ttyUSB2')
BAUD_RATE = 115200
logdir = 'logs/'
MAX_X = 470
MAX_Y_temperature = 300
MAX_Y_pwm = 260
#
# end of settings
def timestamp(dt=None):
if dt is None:
dt = datetime.datetime.now()
return dt.strftime('%Y-%m-%d-%H%M%S')
def logname(filetype, profile):
return '%s%s-%s.%s' % (
logdir,
timestamp(),
profile.replace(' ', '_').replace('/', '_'),
filetype
)
def get_tty():
for devname in TTYs:
try:
port = serial.Serial(devname, baudrate=BAUD_RATE)
print 'Using serial port %s' % port.name
return port
except:
print 'Tried serial port %s, but failed.' % str(devname)
pass
return None
class Line(object):
def __init__(self, axis, key, label=None):
self.xvalues = []
self.yvalues = []
self._key = key
self._line, = axis.plot(self.xvalues, self.yvalues, label=label or key)
def add(self, log):
self.xvalues.append(log['Time'])
self.yvalues.append(log[self._key])
self.update()
def update(self):
self._line.set_data(self.xvalues, self.yvalues)
def clear(self):
self.xvalues = []
self.yvalues = []
self.update()
class Log(object):
profile = ''
last_action = None
def __init__(self):
self.init_plot()
self.clear_logs()
def clear_logs(self):
self.raw_log = []
map(Line.clear, self.lines)
self.mode = ''
def init_plot(self):
plt.ion()
gs = gridspec.GridSpec(2, 1, height_ratios=(4, 1))
fig = plt.figure(figsize=(14, 10))
axis_upper = fig.add_subplot(gs[0])
axis_lower = fig.add_subplot(gs[1])
plt.subplots_adjust(hspace=0.05, top=0.95, bottom=0.05, left=0.05, right=0.95)
# setup axis for upper graph (temperature values)
axis_upper.set_ylabel(u'Temperature [°C]')
axis_upper.set_xlim(0, MAX_X)
axis_upper.set_xticklabels([])
axis_upper.set_ylim(0, MAX_Y_temperature)
# setup axis for lower graph (PWM values)
axis_lower.set_xlim(0, MAX_X)
axis_lower.set_ylim(0, MAX_Y_pwm)
axis_lower.set_ylabel('PWM value')
axis_lower.set_xlabel('Time [s]')
# select values to be plotted
self.lines = [
Line(axis_upper, 'Actual'),
Line(axis_upper, 'Temp0'),
Line(axis_upper, 'Temp1'),
Line(axis_upper, 'Set', u'Setpoint'),
Line(axis_upper, 'ColdJ', u'Coldjunction'),
# Line(axis_upper, 'Temp2'),
# Line(axis_upper, 'Temp3'),
Line(axis_lower, 'Fan'),
Line(axis_lower, 'Heat', 'Heater')
]
axis_upper.legend()
axis_lower.legend()
plt.draw()
self.axis_upper = axis_upper
self.axis_lower = axis_lower
def save_logfiles(self):
print 'Saved log in %s ' % logname('csv', self.profile)
plt.savefig(logname('png', self.profile))
plt.savefig(logname('pdf', self.profile))
with open(logname('csv', self.profile), 'w+') as csvout:
writer = csv.DictWriter(csvout, FIELD_NAMES.split(','))
writer.writeheader()
for l in self.raw_log:
writer.writerow(l)
def parse(self, line):
values = map(str.strip, line.split(','))
# Convert all values to float, except the mode
values = map(float, values[0:-1]) + [values[-1], ]
fields = FIELD_NAMES.split(',')
if len(values) != len(fields):
raise ValueError('Expected %d fields, found %d' % (len(fields), len(values)))
return dict(zip(fields, values))
def process_log(self, logline):
# ignore 'comments'
if logline.startswith('#'):
print logline
return
# parse Profile name
if logline.startswith('Starting reflow with profile: '):
self.profile = logline[30:].strip()
return
if logline.startswith('Selected profile'):
self.profile = logline[20:].strip()
return
try:
log = self.parse(logline)
except ValueError, e:
if len(logline) > 0:
print '!!', logline
return
if 'Mode' in log:
# clean up log before starting reflow
if self.mode == 'STANDBY' and log['Mode'] in ('BAKE', 'REFLOW'):
self.clear_logs()
# save png graph an csv file when bake or reflow ends.
if self.mode in ('BAKE', 'REFLOW') and log['Mode'] == 'STANDBY':
self.save_logfiles()
self.mode = log['Mode']
if log['Mode'] == 'BAKE':
self.profile = 'bake'
if log['Mode'] in ('REFLOW', 'BAKE'):
self.last_action = time()
self.axis_upper.set_title('Profile: %s Mode: %s ' % (self.profile, self.mode))
if 'Time' in log and log['Time'] != 0.0:
if 'Actual' not in log:
return
# update all lines
map(lambda x: x.add(log), self.lines)
self.raw_log.append(log)
# update view
plt.draw()
def isdone(self):
return (
self.last_action is not None and
time() - self.last_action > 5
)
def loop_all_profiles(num_profiles=6):
log = Log()
with get_tty() as port:
profile = 0
def select_profile(profile):
port.write('stop\n')
port.write('select profile %d\n' % profile)
port.write('reflow\n')
select_profile(profile)
while True:
logline = port.readline().strip()
if log.isdone():
log.last_action = None
profile += 1
if profile > 6:
print 'Done.'
sys.exit()
select_profile(profile)
log.process_log(logline)
def logging_only():
log = Log()
with get_tty() as port:
while True:
log.process_log(port.readline().strip())
if __name__ == '__main__':
action = sys.argv[1] if len(sys.argv) > 1 else 'log'
if action == 'log':
print 'Logging reflow sessions...'
logging_only()
elif action == 'test':
print 'Looping over all profiles'
loop_all_profiles()
else:
print 'Unknown action', action
| gpl-3.0 |
bsipocz/pyspeckit | pyspeckit/wrappers/fith2co.py | 2 | 6619 | """
===================
H2CO fitter wrapper
===================
Wrapper to fit formaldehyde spectra.
"""
import pyspeckit
from matplotlib import pyplot
import copy
title_dict = {'oneone':'H$_2$CO 1$_{11}$-1$_{10}$',
'twotwo':'H$_2$CO 2$_{12}$-2$_{11}$',
'threethree':'H$_2$CO 3$_{23}$-3$_{22}$'
}
def plot_h2co(spdict, spectra, fignum=1, show_components=False,
show_hyperfine_components=False, residfignum=None, annotate=None,
clear=True, residkwargs={}, plot_fit_kwargs={}, residclear=True,
resid_overlay=False, resid_yoffsets=None,
**plotkwargs):
"""
Plot the results from a multi-h2co fit
"""
spectra.plotter.figure = pyplot.figure(fignum)
spectra.plotter.axis = spectra.plotter.figure.gca()
if clear:
spectra.plotter.figure.clf()
splist = spdict.values()
for sp in splist:
sp.xarr.convert_to_unit('km/s',quiet=True)
if hasattr(spectra.specfit,'fitter'):
sp.specfit.fitter = copy.copy(spectra.specfit.fitter)
sp.specfit.modelpars = spectra.specfit.modelpars
sp.specfit.npeaks = spectra.specfit.npeaks
sp.specfit.fitter.npeaks = spectra.specfit.npeaks
if spectra.specfit.modelpars is not None:
mf = sp.specfit.fitter.n_modelfunc
kw = spectra.specfit.fitter.modelfunc_kwargs
sp.specfit.model = mf(pars=spectra.specfit.modelpars,
**kw)(sp.xarr)
if len(splist) == 2:
axdict = {'oneone':pyplot.subplot(211),
'twotwo':pyplot.subplot(212)}
elif len(splist) == 3:
axdict = {'oneone':pyplot.subplot(211),
'twotwo':pyplot.subplot(223),
'threethree':pyplot.subplot(224)}
elif len(splist) == 4:
axdict = {'oneone':pyplot.subplot(221),
'twotwo':pyplot.subplot(222),
'threethree':pyplot.subplot(223),
'fourfour':pyplot.subplot(224)}
for linename,sp in spdict.iteritems():
sp.plotter.axis=axdict[linename] # permanent
sp.plotter(axis=axdict[linename],
title=title_dict[linename],
clear=clear,
**plotkwargs)
sp.specfit.Spectrum.plotter = sp.plotter
#sp.specfit.selectregion(reset=True)
if sp.specfit.modelpars is not None:
sp.specfit.plot_fit(annotate=False,
show_components=show_components,
show_hyperfine_components=show_hyperfine_components,
**plot_fit_kwargs)
sp.plotter.reset_limits()
if spdict['oneone'].specfit.modelpars is not None and annotate:
spdict['oneone'].specfit.annotate(labelspacing=0.05,
prop={'size':'small',
'stretch':'extra-condensed'},
frameon=False)
residaxdict = None
if residfignum is not None:
pyplot.figure(residfignum)
if residclear:
pyplot.clf()
if len(splist) == 2:
residaxdict = {'oneone':pyplot.subplot(211),
'twotwo':pyplot.subplot(212)}
elif len(splist) == 3:
residaxdict = {'oneone':pyplot.subplot(211),
'twotwo':pyplot.subplot(223),
'threethree':pyplot.subplot(224),
'fourfour':pyplot.subplot(224)}
elif len(splist) == 4:
residaxdict = {'oneone':pyplot.subplot(221),
'twotwo':pyplot.subplot(222),
'threethree':pyplot.subplot(223),
'fourfour':pyplot.subplot(224)}
elif resid_overlay:
residaxdict = axdict
residclear = False # override defaults...
residfignum = fignum
if residaxdict is not None:
for linename,sp in spdict.iteritems():
sp.specfit.Spectrum.plotter = sp.plotter
try:
yoffset = resid_yoffsets[linename]
except TypeError:
yoffset = 0.0
sp.specfit.plotresiduals(axis=residaxdict[linename],
figure=residfignum,
clear=residclear,
set_limits=False,
label=False,
yoffset=yoffset,
**residkwargs)
spectra.residaxdict = residaxdict
spectra.axisdict = axdict
spectra.plotter.axis = axdict['oneone']
spectra.specfit.fitleg = spdict['oneone'].specfit.fitleg
def BigSpectrum_to_H2COdict(sp, vrange=None):
"""
A rather complicated way to make the spdicts above given a spectrum...
"""
spdict = {}
for linename,freq in pyspeckit.spectrum.models.formaldehyde.central_freq_dict.iteritems():
if vrange is not None:
freq_test_low = freq - freq * vrange[0]/pyspeckit.units.speedoflight_kms
freq_test_high = freq - freq * vrange[1]/pyspeckit.units.speedoflight_kms
else:
freq_test_low = freq_test_high = freq
if (sp.xarr.as_unit('Hz').in_range(freq_test_low) or
sp.xarr.as_unit('Hz').in_range(freq_test_high)):
spdict[linename] = sp.copy()
spdict[linename].xarr.convert_to_unit('GHz')
spdict[linename].xarr.refX = freq
spdict[linename].xarr.refX_unit = 'Hz'
#spdict[linename].baseline = copy.copy(sp.baseline)
#spdict[linename].baseline.Spectrum = spdict[linename]
spdict[linename].specfit = sp.specfit.copy(parent=spdict[linename])
spdict[linename].xarr.convert_to_unit('km/s')
if vrange is not None:
try:
spdict[linename].crop(*vrange, units='km/s')
except IndexError:
# if the freq in range, but there's no data in range, remove
spdict.pop(linename)
return spdict
def plotter_override(sp, vrange=None, **kwargs):
"""
Do plot_h2co with syntax similar to plotter()
"""
spdict = BigSpectrum_to_H2COdict(sp, vrange=vrange)
if len(spdict) not in (2,3,4):
raise ValueError("Not enough lines; don't need to use the H2CO plot wrapper")
plot_h2co(spdict, sp, **kwargs)
return spdict
| mit |
Srisai85/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
rajat1994/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
aleksandr-bakanov/astropy | astropy/visualization/wcsaxes/ticks.py | 12 | 6569 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.lines import Path, Line2D
from matplotlib.transforms import Affine2D
from matplotlib import rcParams
class Ticks(Line2D):
"""
Ticks are derived from Line2D, and note that ticks themselves
are markers. Thus, you should use set_mec, set_mew, etc.
To change the tick size (length), you need to use
set_ticksize. To change the direction of the ticks (ticks are
in opposite direction of ticklabels by default), use
set_tick_out(False).
Note that Matplotlib's defaults dictionary :data:`~matplotlib.rcParams`
contains default settings (color, size, width) of the form `xtick.*` and
`ytick.*`. In a WCS projection, there may not be a clear relationship
between axes of the projection and 'x' or 'y' axes. For this reason,
we read defaults from `xtick.*`. The following settings affect the
default appearance of ticks:
* `xtick.direction`
* `xtick.major.size`
* `xtick.major.width`
* `xtick.minor.size`
* `xtick.color`
"""
def __init__(self, ticksize=None, tick_out=None, **kwargs):
if ticksize is None:
ticksize = rcParams['xtick.major.size']
self.set_ticksize(ticksize)
self.set_minor_ticksize(rcParams['xtick.minor.size'])
self.set_tick_out(rcParams['xtick.direction'] == 'out')
self.clear()
line2d_kwargs = {'color': rcParams['xtick.color'],
'linewidth': rcParams['xtick.major.width']}
line2d_kwargs.update(kwargs)
Line2D.__init__(self, [0.], [0.], **line2d_kwargs)
self.set_visible_axes('all')
self._display_minor_ticks = False
def display_minor_ticks(self, display_minor_ticks):
self._display_minor_ticks = display_minor_ticks
def get_display_minor_ticks(self):
return self._display_minor_ticks
def set_tick_out(self, tick_out):
"""
Set to True if the ticks need to be rotated by 180 degrees.
"""
self._tick_out = tick_out
def get_tick_out(self):
"""
Return True if the ticks will be rotated by 180 degrees.
"""
return self._tick_out
def set_ticksize(self, ticksize):
"""
set length of the ticks in points.
"""
self._ticksize = ticksize
def get_ticksize(self):
"""
Return length of the ticks in points.
"""
return self._ticksize
def set_minor_ticksize(self, ticksize):
"""
set length of the minor ticks in points.
"""
self._minor_ticksize = ticksize
def get_minor_ticksize(self):
"""
Return length of the minor ticks in points.
"""
return self._minor_ticksize
@property
def out_size(self):
if self._tick_out:
return self._ticksize
else:
return 0.
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == 'all':
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def clear(self):
self.world = {}
self.pixel = {}
self.angle = {}
self.disp = {}
self.minor_world = {}
self.minor_pixel = {}
self.minor_angle = {}
self.minor_disp = {}
def add(self, axis, world, pixel, angle, axis_displacement):
if axis not in self.world:
self.world[axis] = [world]
self.pixel[axis] = [pixel]
self.angle[axis] = [angle]
self.disp[axis] = [axis_displacement]
else:
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.disp[axis].append(axis_displacement)
def get_minor_world(self):
return self.minor_world
def add_minor(self, minor_axis, minor_world, minor_pixel, minor_angle,
minor_axis_displacement):
if minor_axis not in self.minor_world:
self.minor_world[minor_axis] = [minor_world]
self.minor_pixel[minor_axis] = [minor_pixel]
self.minor_angle[minor_axis] = [minor_angle]
self.minor_disp[minor_axis] = [minor_axis_displacement]
else:
self.minor_world[minor_axis].append(minor_world)
self.minor_pixel[minor_axis].append(minor_pixel)
self.minor_angle[minor_axis].append(minor_angle)
self.minor_disp[minor_axis].append(minor_axis_displacement)
def __len__(self):
return len(self.world)
_tickvert_path = Path([[0., 0.], [1., 0.]])
def draw(self, renderer, ticks_locs):
"""
Draw the ticks.
"""
if not self.get_visible():
return
offset = renderer.points_to_pixels(self.get_ticksize())
self._draw_ticks(renderer, self.pixel, self.angle, offset, ticks_locs)
if self._display_minor_ticks:
offset = renderer.points_to_pixels(self.get_minor_ticksize())
self._draw_ticks(renderer, self.minor_pixel, self.minor_angle, offset, ticks_locs)
def _draw_ticks(self, renderer, pixel_array, angle_array, offset, ticks_locs):
"""
        Draw a set of ticks (used for both major and minor ticks).
"""
path_trans = self.get_transform()
gc = renderer.new_gc()
gc.set_foreground(self.get_color())
gc.set_alpha(self.get_alpha())
gc.set_linewidth(self.get_linewidth())
marker_scale = Affine2D().scale(offset, offset)
marker_rotation = Affine2D()
marker_transform = marker_scale + marker_rotation
initial_angle = 180. if self.get_tick_out() else 0.
for axis in self.get_visible_axes():
if axis not in pixel_array:
continue
for loc, angle in zip(pixel_array[axis], angle_array[axis]):
# Set the rotation for this tick
marker_rotation.rotate_deg(initial_angle + angle)
# Draw the markers
locs = path_trans.transform_non_affine(np.array([loc, loc]))
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
Path(locs), path_trans.get_affine())
# Reset the tick rotation before moving to the next tick
marker_rotation.clear()
ticks_locs[axis].append(locs)
gc.restore()
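# --- Usage sketch (added for illustration; not part of the original file) ---
# The class docstring above describes the public API.  A minimal, hedged
# example of driving a Ticks artist directly (outside of a full WCSAxes
# figure) might look like this; the variable names are illustrative only.
if __name__ == '__main__':
    ticks = Ticks(ticksize=8.0, color='red')  # extra kwargs go to Line2D
    ticks.set_tick_out(True)                  # draw ticks pointing outward
    ticks.set_minor_ticksize(4.0)
    ticks.display_minor_ticks(True)
    print(ticks.get_ticksize(), ticks.get_tick_out())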
| bsd-3-clause |
JohnGriffiths/ConWhAt | conwhat/utils/readers.py | 1 | 8702 | """
Utils for reading ConWhAt and external data files
"""
# Author: John Griffiths
# License: simplified BSD
import os,sys,yaml,h5py
import numpy as np,networkx as nx, pandas as pd
import nibabel as nib, nilearn as nl
from nibabel.affines import apply_affine
from dipy.io import Dpy
import indexed_gzip as igzip
# Atlas base dir
abd = os.path.split(__file__)[0] + '/../data'
def load_vol_file_mappings(atlas_name=None,atlas_dir=None):
print 'loading file mapping'
if atlas_dir == None: atlas_dir = os.path.join(abd,atlas_name)
mappings = pd.read_csv(atlas_dir + '/mappings.txt', sep=',')
return mappings,atlas_dir
def load_vol_bboxes(atlas_name=None,atlas_dir=None):
print 'loading vol bbox'
if atlas_dir == None: atlas_dir = os.path.join(abd,atlas_name)
bbox = pd.read_csv(atlas_dir + '/bounding_boxes.txt', sep=',')
return bbox
def load_connectivity(atlas_name=None,atlas_dir=None,weights_name='weights'):
print 'loading connectivity'
if not atlas_dir: atlas_dir = os.path.join(abd,atlas_name)
# Mandatory files
ws_file = '%s/%s.txt' %(atlas_dir,weights_name)
rls_file = '%s/region_labels.txt' % atlas_dir
ws = np.loadtxt(ws_file)
rls = [l[:-1] for l in open(rls_file, 'r').readlines()]
# Optional files
tls_file = '%s/tract_lengths.txt' % atlas_dir
rxyzs_file = '%s/region_xyzs.txt' % atlas_dir
rnii_file = '%s/region_masks.nii.gz' % atlas_dir
hs_file = '%s/hemispheres.txt' % atlas_dir
ctx_file = '%s/cortex.txt' % atlas_dir
rmfslh_file = '%s/region_mapping_fsav_lh.txt' % atlas_dir
rmfsrh_file = '%s/region_mapping_fsav_rh.txt' % atlas_dir
    tls,rxyzs,rnii,ctx,hs,rmfslh,rmfsrh = None,None,None,None,None,None,None
    if os.path.isfile(tls_file): tls = np.loadtxt(tls_file)
if os.path.isfile(rxyzs_file): rxyzs = np.loadtxt(rxyzs_file)
if os.path.isfile(rnii_file): rnii = nib.load(rnii_file)
if os.path.isfile(hs_file): hs = np.loadtxt(hs_file)
if os.path.isfile(ctx_file): ctx = np.loadtxt(ctx_file)
if os.path.isfile(rmfslh_file): rmfslh =np.loadtxt(rmfslh_file)
if os.path.isfile(rmfsrh_file): rmfsrh = np.loadtxt(rmfsrh_file)
return ws,rls,tls,rxyzs,rnii,ctx,hs,rmfslh,rmfsrh
def make_nx_graph(vfms,bboxes,weights,region_labels,hemis,cortex):
G = nx.Graph()
# add node info
for node_it,node in enumerate(region_labels):
rl = region_labels[node_it]
hemi = hemis[node_it]
ctx = cortex[node_it]
G.add_node(node_it, **{'region_label': rl,
'hemisphere': hemi,
'cortex': ctx})
# add edge info
for idx in vfms.index:
vfm = vfms.ix[idx]
name = vfm['name']
# Allow either '33-55' or '33_to_55' naming conventions
if '_to_' in name:
roi1,roi2 = name.split('_to_')
else:
roi1,roi2 = name.split('-')
roi1 = int(roi1); roi2 = int(roi2)
ad = vfm.to_dict()
ad.update(bboxes.ix[idx])
ad['idx'] = idx
ad['weight'] = weights[roi1,roi2]
n1,n2 = G.node[roi1],G.node[roi2]
# (ibid...)
if '_to_' in name:
fullname = n1['region_label'] + '_to_' + n2['region_label']
else:
fullname = n1['region_label'] + '-' + n2['region_label']
ad['fullname'] = fullname
G.add_edge(roi1,roi2,**ad)
return G
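# --- Usage sketch (added for illustration; the atlas name is a placeholder) --
# The loaders above are meant to be chained: read the volumetric file
# mappings, bounding boxes and connectivity for an atlas, then assemble the
# networkx graph with make_nx_graph.
def _example_build_vol_graph(atlas_name='SomeVolumetricAtlas'):
    vfms, atlas_dir = load_vol_file_mappings(atlas_name=atlas_name)
    bboxes = load_vol_bboxes(atlas_name=atlas_name)
    ws, rls, tls, rxyzs, rnii, ctx, hs, rmfslh, rmfsrh = \
        load_connectivity(atlas_name=atlas_name)
    return make_nx_graph(vfms, bboxes, ws, rls, hs, ctx)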
def load_stream_file_mappings(atlas_name=None,atlas_dir=None):
print 'loading streamline file mappings'
if not atlas_dir: atlas_dir = os.path.join(abd,atlas_name)
F = h5py.File(atlas_dir + '/mappings.h5', 'r')
KVs = {k: v.value.astype(int) for k,v in F.items()}
F.close()
mappings = pd.DataFrame(np.array(KVs.values()),
index=KVs.keys())
mappings.columns = ['idxlist']
mappings.index.names = ['name']
mappings = mappings.reset_index()
return mappings,atlas_dir
def load_stream_file_mappings_multifile(atlas_name=None,atlas_dir=None):
print 'loading mult-file streamline file mappings'
if not atlas_dir: atlas_dir = os.path.join(abd,atlas_name)
# Difference from above is that the keys are now (sub,cnxn), rather than cnxn
F = h5py.File(atlas_dir + '/mappings_multifile.h5', 'r')
KVs = {k: v.value for k,v in F.items()}
F.close()
mappings = pd.DataFrame(np.array(KVs.values()),
index=KVs.keys())
mappings.columns = ['idxlist']
mappings.index.names = ['sub','name']
mappings = mappings.reset_index()
return mappings,atlas_dir
# (this is identical to load vox bboxes. Remove both
# and replace with single func?)
def load_stream_bboxes(atlas_name=None,atlas_dir=None):
print 'loading stream bbox'
if not atlas_dir: atlas_dir = os.path.join(abd,atlas_name)
bbox = pd.read_csv(atlas_dir + '/bounding_boxes.txt', sep=',',
names=['xmin', 'xmax', 'ymin', 'ymax', 'zmin', 'zmax'])
return bbox
def make_streams_nx_graph(sfms,bboxes,weights,region_labels,hemis,cortex):
# I THINK THIS CAN BE THE SAME FUNC FOR BOTH \
# VOLUMETRIC AND STREAMLINETRIC
# ...just writing one for streamlines first to get clear...
G = nx.Graph()
# add node info
for node_it,node in enumerate(region_labels):
rl = region_labels[node_it]
hemi = hemis[node_it]
ctx = cortex[node_it]
G.add_node(node_it, **{'region_label': rl,
'hemisphere': hemi,
'cortex': ctx})
# add edge info
for idx in sfms.index:
sfm = sfms.ix[idx]
name = sfm['name']
# Allow either '33-55' or '33_to_55' naming conventions
if '_to_' in name:
roi1,roi2 = name.split('_to_')
else:
roi1,roi2 = name.split('-')
roi1 = int(roi1); roi2 = int(roi2)
ad = sfm.to_dict()
ad.update(bboxes.ix[idx])
ad['idx'] = idx
ad['weight'] = weights[roi1,roi2]
n1,n2 = G.node[roi1],G.node[roi2]
# (ibid...)
if '_to_' in name:
fullname = n1['region_label'] + '_to_' + n2['region_label']
else:
fullname = n1['region_label'] + '-' + n2['region_label']
ad['fullname'] = fullname
        G.add_edge(roi1,roi2,**ad)
    return G
def igzip4dnii(fname,inds3d,
inds0d='all',inds1d='all',inds2d='all',
atlas_name=None,
atlas_dir=None):
# If atlas dir given, file is assumed to be in there
if atlas_dir:
fname = '%s/%s' %(atlas_dir,fname)
else:
# If atlas dir not given but atlas name is given, assumes path is relative
# to local conwhat atlas dir
if atlas_name:
fname = '%s/%s/%s' %(abd,atlas_name,fname)
    # Here we are using 4MB spacing between
# seek points, and using a larger read
# buffer (than the default size of 16KB).
fobj = igzip.IndexedGzipFile(
filename=fname,#'big_image.nii.gz',
spacing=4194304,
readbuf_size=131072)
# Create a nibabel image using
# the existing file handle.
fmap = nib.Nifti1Image.make_file_map()
fmap['image'].fileobj = fobj
image = nib.Nifti1Image.from_file_map(fmap)
if inds3d == 'N/A' or np.isnan(inds3d):
dims0,dims1,dims2 = image.shape
dat = np.squeeze(image.dataobj[:,:,:])
else:
# Use the image ArrayProxy to access the
# data - the index will automatically be
# built as data is accessed.
dims0,dims1,dims2,dims3 = image.shape
#if inds0d == 'all': inds0d = range(dims0)
#if inds1d == 'all': inds1d = range(dims1)
#if inds2d == 'all': inds2d = range(dims2)
dat = np.squeeze(image.dataobj[:,:,:,int(inds3d)])
#dat = np.squeeze(image.dataobj[inds0d,inds1d,inds2d,int(inds3d)])
#if type(inds3d) == int: # len(inds3d) == 1:
# dat = np.squeeze(image.dataobj[inds0d,inds1d,inds2d,int(inds3d)])
#else:
# dat = np.array([(image.dataobj[inds0d,inds1d,inds2d,int(i3)]) for i3 in inds3d])
# dat = dat.reshape([dims[1],dims[2],dims[3],dims[0]])
return dat
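# --- Usage sketch (added for illustration; file names are hypothetical) -----
# igzip4dnii gives random access to a single 3D volume of a large 4D .nii.gz
# file without decompressing the whole archive, via indexed_gzip.
def _example_igzip4dnii():
    # Pull out volume 42 of a (hypothetical) 4D atlas image.
    vol = igzip4dnii('my_atlas_vols_4d.nii.gz', 42,
                     atlas_dir='/path/to/atlas')
    return vol.shape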
def dpy_to_trk(dpy_file,ref,outfile,inds='all'):
if os.path.isfile(ref):
ref_img = nib.load(ref)
else:
ref_img = ref
# Make trackvis header
hdr = nib.trackvis.empty_header()
hdr['voxel_size'] = ref_img.get_header().get_zooms()
hdr['dim'] = ref_img.shape
hdr['voxel_order'] = "LAS"#"RAS"
    hdr['vox_to_ras'] = ref_img.affine
zooms = ref_img.header.get_zooms()
# Load streamlines
D = Dpy(dpy_file, 'r')
if inds == 'all':
dpy_streams = D.read_tracks()
else:
dpy_streams = D.read_tracksi(inds)
D.close()
# Convert to trackvis space + format
    dpy_streams = [apply_affine(hdr['vox_to_ras'], s*zooms) for s in dpy_streams]
trk_streams = [(s,None,None) for s in dpy_streams]
nib.trackvis.write(outfile,trk_streams,hdr)
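# --- Usage sketch (added for illustration; paths are placeholders) ----------
# Convert the first 100 streamlines of a .dpy file to TrackVis .trk format,
# using a reference volume for the header geometry.
def _example_dpy_to_trk():
    dpy_to_trk('/path/to/streams.dpy', '/path/to/ref_T1.nii.gz',
               '/tmp/streams_subset.trk', inds=range(100))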
| bsd-3-clause |
quheng/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
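# --- Usage sketch (added for illustration; not part of the original file) ---
# Each set of biclusters is a (rows, columns) pair of boolean indicator
# arrays of shape (n_biclusters, n_rows) and (n_biclusters, n_columns).
# Comparing a set of biclusters with itself gives a consensus score of 1.0.
if __name__ == '__main__':
    rows = np.array([[True, True, False, False],
                     [False, False, True, True]])
    cols = np.array([[True, False, False],
                     [False, True, True]])
    print(consensus_score((rows, cols), (rows, cols)))  # expected: 1.0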
| bsd-3-clause |
puruckertom/ubertool | ubertool/therps/tests/therps_process_qaqc.py | 1 | 1380 | from __future__ import division #brings in Python 3.0 mixed type calculation rules
import os
# needs to be run whenever the qaqc csv is updated
csv_path = os.path.join(os.path.dirname(__file__),"therps_qaqc.csv")
csv_in = os.path.join(os.path.dirname(__file__),"therps_qaqc_in_transpose.csv")
csv_exp = os.path.join(os.path.dirname(__file__),"therps_qaqc_exp_transpose.csv")
import pandas as pd
#skiprows 0-indexed (supposedly, but does not seem to be the case)
#skipfooter- number of rows at bottom to skip
try:
pd_obj_inputs = pd.read_csv(csv_path, index_col=0, header=None, skiprows=1, skipfooter=162, engine='python')
pd_obj_inputs = pd_obj_inputs.drop(labels=pd_obj_inputs.columns[range(5)], axis=1)
pd_obj_inputs.index.name = None
pd_obj_inputs.columns -= 6
pd_obj_inputs_transposed = pd_obj_inputs.transpose()
print(pd_obj_inputs_transposed)
pd_obj_inputs_transposed.to_csv(csv_in)
pd_obj_exp_out = pd.read_csv(csv_path, index_col=0, header=None, skiprows=111, engine='python', na_values='')
pd_obj_exp_out = pd_obj_exp_out.drop(labels=pd_obj_exp_out.columns[range(5)], axis=1)
pd_obj_exp_out.index.name = None
pd_obj_exp_out.columns -= 6
pd_obj_exp_out_transposed = pd_obj_exp_out.transpose()
print(pd_obj_exp_out_transposed)
pd_obj_exp_out_transposed.to_csv(csv_exp)
except Exception as e:
    print(e) | unlicense |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/matplotlib/testing/compare.py | 10 | 13454 | """
Provides a collection of utilities for comparing (image) results.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import hashlib
import os
import shutil
import numpy as np
import matplotlib
from matplotlib.compat import subprocess
from matplotlib.testing.exceptions import ImageComparisonFailure
from matplotlib import _png
from matplotlib import _get_cachedir
from matplotlib import cbook
from distutils import version
__all__ = ['compare_float', 'compare_images', 'comparable_formats']
def make_test_filename(fname, purpose):
"""
Make a new filename by inserting `purpose` before the file's
extension.
"""
base, ext = os.path.splitext(fname)
return '%s-%s%s' % (base, purpose, ext)
def compare_float(expected, actual, relTol=None, absTol=None):
"""
    Return None if the floating point values are close enough; otherwise
    return a message describing the mismatch.
You can specify a relative tolerance, absolute tolerance, or both.
"""
if relTol is None and absTol is None:
raise ValueError("You haven't specified a 'relTol' relative "
"tolerance or a 'absTol' absolute tolerance "
"function argument. You must specify one.")
msg = ""
if absTol is not None:
absDiff = abs(expected - actual)
if absTol < absDiff:
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Abs diff: {absDiff}',
'Abs tol: {absTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
if relTol is not None:
# The relative difference of the two values. If the expected value is
# zero, then return the absolute value of the difference.
relDiff = abs(expected - actual)
if expected:
relDiff = relDiff / abs(expected)
if relTol < relDiff:
# The relative difference is a ratio, so it's always unit-less.
template = ['',
'Expected: {expected}',
'Actual: {actual}',
'Rel diff: {relDiff}',
'Rel tol: {relTol}']
msg += '\n '.join([line.format(**locals()) for line in template])
return msg or None
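# --- Usage sketch (added for illustration; not part of the original file) ---
# compare_float returns None when the two values agree to within the given
# relative and/or absolute tolerance, and a human-readable message otherwise.
def _example_compare_float():
    assert compare_float(1.0, 1.0005, relTol=1e-2) is None  # close enough
    return compare_float(1.0, 1.5, absTol=1e-3)  # returns a mismatch message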
def get_cache_dir():
cachedir = _get_cachedir()
if cachedir is None:
raise RuntimeError('Could not find a suitable configuration directory')
cache_dir = os.path.join(cachedir, 'test_cache')
if not os.path.exists(cache_dir):
try:
cbook.mkdirs(cache_dir)
except IOError:
return None
if not os.access(cache_dir, os.W_OK):
return None
return cache_dir
def get_file_hash(path, block_size=2 ** 20):
md5 = hashlib.md5()
with open(path, 'rb') as fd:
while True:
data = fd.read(block_size)
if not data:
break
md5.update(data)
if path.endswith('.pdf'):
from matplotlib import checkdep_ghostscript
md5.update(checkdep_ghostscript()[1].encode('utf-8'))
elif path.endswith('.svg'):
from matplotlib import checkdep_inkscape
md5.update(checkdep_inkscape().encode('utf-8'))
return md5.hexdigest()
def make_external_conversion_command(cmd):
def convert(old, new):
cmdline = cmd(old, new)
pipe = subprocess.Popen(
cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if not os.path.exists(new) or errcode:
msg = "Conversion command failed:\n%s\n" % ' '.join(cmdline)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
return convert
def _update_converter():
gs, gs_v = matplotlib.checkdep_ghostscript()
if gs_v is not None:
def cmd(old, new):
return [str(gs), '-q', '-sDEVICE=png16m', '-dNOPAUSE', '-dBATCH',
'-sOutputFile=' + new, old]
converter['pdf'] = make_external_conversion_command(cmd)
converter['eps'] = make_external_conversion_command(cmd)
if matplotlib.checkdep_inkscape() is not None:
def cmd(old, new):
return [str('inkscape'), '-z', old, '--export-png', new]
converter['svg'] = make_external_conversion_command(cmd)
#: A dictionary that maps filename extensions to functions which
#: themselves map arguments `old` and `new` (filenames) to a list of strings.
#: The list can then be passed to Popen to convert files with that
#: extension to png format.
converter = {}
_update_converter()
def comparable_formats():
"""
Returns the list of file formats that compare_images can compare
on this system.
"""
return ['png'] + list(six.iterkeys(converter))
def convert(filename, cache):
"""
Convert the named file into a png file. Returns the name of the
created file.
If *cache* is True, the result of the conversion is cached in
`matplotlib._get_cachedir() + '/test_cache/'`. The caching is based
    on a hash of the exact contents of the input file. There is no limit
on the size of the cache, so it may need to be manually cleared
periodically.
"""
base, extension = filename.rsplit('.', 1)
if extension not in converter:
raise ImageComparisonFailure(
"Don't know how to convert %s files to png" % extension)
newname = base + '_' + extension + '.png'
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
# Only convert the file if the destination doesn't already exist or
# is out of date.
if (not os.path.exists(newname) or
os.stat(newname).st_mtime < os.stat(filename).st_mtime):
if cache:
cache_dir = get_cache_dir()
else:
cache_dir = None
if cache_dir is not None:
hash_value = get_file_hash(filename)
new_ext = os.path.splitext(newname)[1]
cached_file = os.path.join(cache_dir, hash_value + new_ext)
if os.path.exists(cached_file):
shutil.copyfile(cached_file, newname)
return newname
converter[extension](filename, newname)
if cache_dir is not None:
shutil.copyfile(newname, cached_file)
return newname
#: Maps file extensions to a function which takes a filename as its
#: only argument to return a list suitable for execution with Popen.
#: The purpose of this is so that the result file (with the given
#: extension) can be verified with tools such as xmllint for svg.
verifiers = {}
# Turning this off, because it seems to cause multiprocessing issues
if matplotlib.checkdep_xmllint() and False:
verifiers['svg'] = lambda filename: [
'xmllint', '--valid', '--nowarning', '--noout', filename]
def verify(filename):
"""Verify the file through some sort of verification tool."""
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
base, extension = filename.rsplit('.', 1)
verifier = verifiers.get(extension, None)
if verifier is not None:
cmd = verifier(filename)
pipe = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if errcode != 0:
msg = "File verification command failed:\n%s\n" % ' '.join(cmd)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
def crop_to_same(actual_path, actual_image, expected_path, expected_image):
# clip the images to the same size -- this is useful only when
# comparing eps to pdf
if actual_path[-7:-4] == 'eps' and expected_path[-7:-4] == 'pdf':
aw, ah = actual_image.shape
ew, eh = expected_image.shape
actual_image = actual_image[int(aw / 2 - ew / 2):int(
aw / 2 + ew / 2), int(ah / 2 - eh / 2):int(ah / 2 + eh / 2)]
return actual_image, expected_image
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"image sizes do not match expected size: {0} "
"actual size {1}".format(expectedImage.shape, actualImage.shape))
num_values = np.prod(expectedImage.shape)
abs_diff_image = abs(expectedImage - actualImage)
# On Numpy 1.6, we can use bincount with minlength, which is much
# faster than using histogram
expected_version = version.LooseVersion("1.6")
found_version = version.LooseVersion(np.__version__)
if found_version >= expected_version:
histogram = np.bincount(abs_diff_image.ravel(), minlength=256)
else:
histogram = np.histogram(abs_diff_image, bins=np.arange(257))[0]
sum_of_squares = np.sum(histogram * np.arange(len(histogram)) ** 2)
rms = np.sqrt(float(sum_of_squares) / num_values)
return rms
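# --- Usage sketch (added for illustration; not part of the original file) ---
# For two 1x2 "images" that differ by 3 in a single pixel, the per-pixel
# errors are [0, 3], so the RMS error is sqrt((0**2 + 3**2) / 2) ~= 2.12.
def _example_calculate_rms():
    a = np.array([[10, 20]], dtype=np.int16)
    b = np.array([[10, 23]], dtype=np.int16)
    return calculate_rms(a, b)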
def compare_images(expected, actual, tol, in_decorator=False):
"""
Compare two "image" files checking differences within a tolerance.
The two given filenames may point to files which are convertible to
PNG via the `.converter` dictionary. The underlying RMS is calculated
with the `.calculate_rms` function.
Parameters
----------
expected : str
The filename of the expected image.
    actual : str
The filename of the actual image.
tol : float
The tolerance (a color value difference, where 255 is the
maximal difference). The test fails if the average pixel
difference is greater than this value.
in_decorator : bool
If called from image_comparison decorator, this should be
True. (default=False)
Example
-------
img1 = "./baseline/plot.png"
img2 = "./output/plot.png"
    compare_images(img1, img2, 0.001)
"""
if not os.path.exists(actual):
msg = "Output image %s does not exist." % actual
raise Exception(msg)
if os.stat(actual).st_size == 0:
msg = "Output image file %s is empty." % actual
raise Exception(msg)
verify(actual)
# Convert the image to png
extension = expected.split('.')[-1]
if not os.path.exists(expected):
raise IOError('Baseline image %r does not exist.' % expected)
if extension != 'png':
actual = convert(actual, False)
expected = convert(expected, True)
# open the image files and remove the alpha channel (if it exists)
expectedImage = _png.read_png_int(expected)
actualImage = _png.read_png_int(actual)
expectedImage = expectedImage[:, :, :3]
actualImage = actualImage[:, :, :3]
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
diff_image = make_test_filename(actual, 'failed-diff')
if tol <= 0.0:
if np.array_equal(expectedImage, actualImage):
return None
# convert to signed integers, so that the images can be subtracted without
# overflow
expectedImage = expectedImage.astype(np.int16)
actualImage = actualImage.astype(np.int16)
rms = calculate_rms(expectedImage, actualImage)
if rms <= tol:
return None
save_diff_image(expected, actual, diff_image)
results = dict(rms=rms, expected=str(expected),
actual=str(actual), diff=str(diff_image), tol=tol)
if not in_decorator:
# Then the results should be a string suitable for stdout.
template = ['Error: Image files did not match.',
'RMS Value: {rms}',
'Expected: \n {expected}',
'Actual: \n {actual}',
'Difference:\n {diff}',
'Tolerance: \n {tol}', ]
results = '\n '.join([line.format(**results) for line in template])
return results
def save_diff_image(expected, actual, output):
expectedImage = _png.read_png(expected)
actualImage = _png.read_png(actual)
actualImage, expectedImage = crop_to_same(
actual, actualImage, expected, expectedImage)
expectedImage = np.array(expectedImage).astype(np.float)
actualImage = np.array(actualImage).astype(np.float)
assert expectedImage.ndim == actualImage.ndim
assert expectedImage.shape == actualImage.shape
absDiffImage = abs(expectedImage - actualImage)
# expand differences in luminance domain
absDiffImage *= 255 * 10
save_image_np = np.clip(absDiffImage, 0, 255).astype(np.uint8)
height, width, depth = save_image_np.shape
# The PDF renderer doesn't produce an alpha channel, but the
# matplotlib PNG writer requires one, so expand the array
if depth == 3:
with_alpha = np.empty((height, width, 4), dtype=np.uint8)
with_alpha[:, :, 0:3] = save_image_np
save_image_np = with_alpha
# Hard-code the alpha channel to fully solid
save_image_np[:, :, 3] = 255
_png.write_png(save_image_np, output)
| mit |
cainiaocome/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
NSLS-II/docs | source/_cookbook/csv_writer.py | 2 | 3004 | # -*- coding: utf-8 -*-
"""
========================================
A Minimal CSV writer for data collection
========================================
Problem
-------
Write (a subset of) the data to a CSV file during data collection.
Approach
--------
Write a callback function that integrates Python's built-in csv module with
bluesky.
Example Solution
----------------
"""
###############################################################################
# Boiler plate imports and configuration
import path
import os
import bluesky as bs
import bluesky.plans as bp
import bluesky.callbacks as bc
import csv
from ophyd.sim import motor, det
import matplotlib.pyplot as plt
# Do this if running the example interactively;
# skip it when building the documentation.
import os
if 'BUILDING_DOCS' not in os.environ:
from bluesky.utils import install_qt_kicker # for notebooks, qt -> nb
install_qt_kicker()
plt.ion()
det.exposure_time = .1 # simulate detector exposure time
RE = bs.RunEngine({})
###############################################################################
# Define a callback class which writes out a CSV file
class CSVWriter(bc.CallbackBase):
def __init__(self, fields, fname_format, fpath):
self._path = path.Path(fpath)
os.makedirs(self._path, exist_ok=True)
        self._fname_format = fname_format
self._fields = fields
self._writer = None
self._fout = None
def close(self):
if self._fout is not None:
self._fout.close()
self._fout = None
self._writer = None
def start(self, doc):
self.close()
        fname = self._path / self._fname_format.format(**doc)
self._fout = open(fname, 'xt')
self._writer = csv.writer(self._fout)
def descriptor(self, doc):
if self._writer is not None:
self._writer.writerow(self._fields)
def event(self, doc):
data = doc['data']
if self._writer is not None:
self._writer.writerow(data[k] for k in self._fields)
def stop(self, doc):
self.close()
###############################################################################
# Set up some callbacks
def create_cbs():
return [bc.LiveTable([motor, det]), bc.LivePlot('det', 'motor')]
fmt = '{user}_{uid:.6s}.csv'
export_path = '/tmp/export_demo'
csv_writer = CSVWriter(('motor', 'det'), fmt, export_path)
# send all documents to the CSV writer
RE.subscribe('all', csv_writer)
###############################################################################
# run the scan
uid, = RE(bp.scan([det], motor, -5, 5, 11),
create_cbs(), user='tcaswell')
###############################################################################
# check file
fname = os.path.join(export_path,
'{user}_{uid:.6s}.csv'.format(user='tcaswell', uid=uid))
print("--- {} ---".format(fname))
with open(fname, 'r') as fin:
for ln in fin:
print(ln.strip())
| bsd-2-clause |
sserrot/champion_relationships | venv/Lib/site-packages/jupyter_console/app.py | 1 | 5353 | """ A minimal application using the ZMQ-based terminal IPython frontend.
This is not a complete console app, as subprocess will not be able to receive
input, there is no real readline support, among other limitations.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import logging
import signal
import sys
from traitlets import (
Dict, Any
)
from traitlets.config import catch_config_error, boolean_flag
from jupyter_core.application import JupyterApp, base_aliases, base_flags, NoStart
from jupyter_client.consoleapp import (
JupyterConsoleApp, app_aliases, app_flags,
)
from jupyter_console.ptshell import ZMQTerminalInteractiveShell
from jupyter_console import __version__
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
_examples = """
jupyter console # start the ZMQ-based console
jupyter console --existing # connect to an existing ipython session
"""
#-----------------------------------------------------------------------------
# Flags and Aliases
#-----------------------------------------------------------------------------
# copy flags from mixin:
flags = dict(base_flags)
# start with mixin frontend flags:
frontend_flags = dict(app_flags)
# update full dict with frontend flags:
flags.update(frontend_flags)
flags.update(boolean_flag(
'simple-prompt', 'ZMQTerminalInteractiveShell.simple_prompt',
"Force simple minimal prompt using `raw_input`",
"Use a rich interactive prompt with prompt_toolkit"
))
# copy flags from mixin
aliases = dict(base_aliases)
# start with mixin frontend flags
frontend_aliases = dict(app_aliases)
# load updated frontend flags into full dict
aliases.update(frontend_aliases)
# get flags&aliases into sets, and remove a couple that
# shouldn't be scrubbed from backend flags:
frontend_aliases = set(frontend_aliases.keys())
frontend_flags = set(frontend_flags.keys())
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class ZMQTerminalIPythonApp(JupyterApp, JupyterConsoleApp):
name = "jupyter-console"
version = __version__
"""Start a terminal frontend to the IPython zmq kernel."""
description = """
The Jupyter terminal-based Console.
This launches a Console application inside a terminal.
The Console supports various extra features beyond the traditional
single-process Terminal IPython shell, such as connecting to an
existing ipython session, via:
jupyter console --existing
where the previous session could have been created by another ipython
console, an ipython qtconsole, or by opening an ipython notebook.
"""
examples = _examples
classes = [ZMQTerminalInteractiveShell] + JupyterConsoleApp.classes
flags = Dict(flags)
aliases = Dict(aliases)
frontend_aliases = Any(frontend_aliases)
frontend_flags = Any(frontend_flags)
subcommands = Dict()
force_interact = True
def parse_command_line(self, argv=None):
super(ZMQTerminalIPythonApp, self).parse_command_line(argv)
self.build_kernel_argv(self.extra_args)
def init_shell(self):
JupyterConsoleApp.initialize(self)
# relay sigint to kernel
signal.signal(signal.SIGINT, self.handle_sigint)
self.shell = ZMQTerminalInteractiveShell.instance(parent=self,
manager=self.kernel_manager,
client=self.kernel_client,
confirm_exit=self.confirm_exit,
)
self.shell.own_kernel = not self.existing
def init_gui_pylab(self):
# no-op, because we don't want to import matplotlib in the frontend.
pass
def handle_sigint(self, *args):
if self.shell._executing:
if self.kernel_manager:
self.kernel_manager.interrupt_kernel()
else:
print("ERROR: Cannot interrupt kernels we didn't start.",
file = sys.stderr)
else:
# raise the KeyboardInterrupt if we aren't waiting for execution,
# so that the interact loop advances, and prompt is redrawn, etc.
raise KeyboardInterrupt
@catch_config_error
def initialize(self, argv=None):
"""Do actions after construct, but before starting the app."""
super(ZMQTerminalIPythonApp, self).initialize(argv)
if self._dispatching:
return
# create the shell
self.init_shell()
# and draw the banner
self.init_banner()
def init_banner(self):
"""optionally display the banner"""
self.shell.show_banner()
# Make sure there is a space below the banner.
#if self.log_level <= logging.INFO: print()
def start(self):
# JupyterApp.start dispatches on NoStart
super(ZMQTerminalIPythonApp, self).start()
self.log.debug("Starting the jupyter console mainloop...")
self.shell.mainloop()
main = launch_new_instance = ZMQTerminalIPythonApp.launch_instance
if __name__ == '__main__':
main()
| mit |
PyRsw/PyRsw | testing/test_1d_accuracy/test_linear_waves.py | 1 | 1449 | import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../src')
import Steppers as Step
import Fluxes as Flux
from PyRsw import Simulation
from constants import minute, hour, day
def test():
sim = Simulation()
# Geometry and Model Equations
sim.geomy = 'periodic'
sim.stepper = Step.AB3
sim.method = 'Spectral'
sim.dynamics = 'Linear'
sim.flux_method = Flux.spectral_sw
# Specify paramters
sim.Ly = 4000e3
sim.Ny = 256
sim.f0 = 0.
sim.Hs = [100.]
sim.rho = [1025.]
sim.end_time = sim.Ly/(np.sqrt(sim.Hs[0]*sim.g))
# Plotting parameters
sim.animate = 'None'
sim.output = False
sim.diagnose = False
# Initialize the grid and zero solutions
sim.initialize()
for ii in range(sim.Nz):
sim.soln.h[:,:,ii] = sim.Hs[ii]
# Gaussian initial conditions
x0 = 1.*sim.Lx/2.
W = 200.e3
amp = 1.
sim.soln.h[:,:,0] += amp*np.exp(-(sim.Y)**2/(W**2))
IC = sim.soln.h[:,:,0].copy()
sim.run()
# Compare final state to initial conditions
# error_h is normalized using the triangle inequality
error_h = np.linalg.norm(IC - sim.soln.h[:,:,0])/(np.linalg.norm(IC) + np.linalg.norm(sim.soln.h[:,:,0]))
error_v = np.linalg.norm(sim.soln.v[:,:,0])
assert (error_h < 1e-6) and (error_v < 5e-5)
test()
| mit |
florian-f/sklearn | sklearn/linear_model/tests/test_logistic.py | 16 | 5067 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.linear_model import logistic
from sklearn import datasets
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = datasets.load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
"""Simple sanity check on a 2 classes dataset
Make sure it predicts the correct result on simple datasets.
"""
check_predictions(logistic.LogisticRegression(random_state=0), X, Y1)
check_predictions(logistic.LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(logistic.LogisticRegression(C=100, random_state=0),
X, Y1)
check_predictions(logistic.LogisticRegression(C=100, random_state=0),
X_sp, Y1)
check_predictions(logistic.LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(logistic.LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
"""Test for appropriate exception on errors"""
assert_raises(ValueError, logistic.LogisticRegression(C=-1).fit, X, Y1)
def test_predict_3_classes():
check_predictions(logistic.LogisticRegression(C=10), X, Y2)
check_predictions(logistic.LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
"""Test logistic regression with the iris dataset"""
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = logistic.LogisticRegression(C=len(iris.data)).fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_sparsify():
"""Test sparsify and densify members."""
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = logistic.LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
"""Test that an exception is raised on inconsistent input"""
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = logistic.LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
"""Test that we can write to coef_ and intercept_"""
#rng = np.random.RandomState(0)
#X = rng.random_sample((5, 10))
#y = np.ones(X.shape[0])
clf = logistic.LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
"""Test proper NaN handling.
Regression test for Issue #252: fit used to go into an infinite loop.
"""
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
logistic.LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_liblinear_random_state():
X, y = datasets.make_classification(n_samples=20)
lr1 = logistic.LogisticRegression(random_state=0)
lr1.fit(X, y)
lr2 = logistic.LogisticRegression(random_state=0)
lr2.fit(X, y)
assert_array_almost_equal(lr1.coef_, lr2.coef_)
| bsd-3-clause |
yanlend/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Show below is a logistic-regression classifiers decision boundaries on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of Neighbours Classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
Mistobaan/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 130 | 9577 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.ops import histogram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class Strict1dCumsumTest(test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = constant_op.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = constant_op.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = constant_op.constant([3], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)
scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)
score_range = [0, 1.]
auc, update_op = histogram_ops.auc_using_histogram(labels, scores,
score_range)
variables.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(
nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(
nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(
nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(
nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(
nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]
with self.test_session():
labels = array_ops.placeholder(dtypes.bool, shape=[num_records])
scores = array_ops.placeholder(dtypes.float32, shape=[num_records])
auc, update_op = histogram_ops.auc_using_histogram(
labels, scores, score_range, nbins=nbins)
variables.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
# also checked this against sklearn.metrics.roc_auc_curve.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
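  # Worked example (added for clarity): desired_auc = 0.75 gives
  # x = 2 * 0.75 - 1 = 0.5, so True scores are drawn from U[0.5, 1] while
  # False scores remain U[0, 1], and indeed P[T > F] = 0.5 + 0.5 * 0.5 = 0.75.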
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
if __name__ == '__main__':
test.main()
| apache-2.0 |
michaelpacer/scikit-image | doc/examples/plot_equalize.py | 18 | 2786 | """
======================
Histogram Equalization
======================
This examples enhances an image with low contrast, using a method called
*histogram equalization*, which "spreads out the most frequent intensity
values" in an image [1]_. The equalized image has a roughly linear cumulative
distribution function.
While histogram equalization has the advantage that it requires no parameters,
it sometimes yields unnatural looking images. An alternative method is
*contrast stretching*, where the image is rescaled to include all intensities
that fall within the 2nd and 98th percentiles [2]_.
.. [1] http://en.wikipedia.org/wiki/Histogram_equalization
.. [2] http://homepages.inf.ed.ac.uk/rbf/HIPR2/stretch.htm
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, img_as_float
from skimage import exposure
matplotlib.rcParams['font.size'] = 8
def plot_img_and_hist(img, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
img = img_as_float(img)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(img, cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(img, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
# Load an example image
img = data.moon()
# Contrast stretching
p2, p98 = np.percentile(img, (2, 98))
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
# Equalization
img_eq = exposure.equalize_hist(img)
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# Display results
fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(8, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Histogram equalization')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
ax_img.set_title('Adaptive equalization')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
fig.subplots_adjust(wspace=0.4)
plt.show()
| bsd-3-clause |
chubbymaggie/datasketch | benchmark/weighted_minhash_benchmark.py | 2 | 2417 | '''
Benchmarking the performance and accuracy of WeightedMinHash.
'''
import time, logging, random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from datasketch import WeightedMinHashGenerator
logging.basicConfig(level=logging.INFO)
def run_perf(dim, num_rep, sample_size):
wmg = WeightedMinHashGenerator(dim, sample_size=sample_size)
logging.info("WeightedMinHash using %d samples" % sample_size)
data = np.random.uniform(0, dim, (num_rep, dim))
durs = []
for i in range(num_rep):
start = time.clock()
wmg.minhash(data[i])
duration = (time.clock() - start) * 1000
durs.append(duration)
ave = np.mean(durs)
logging.info("Generated %d minhashes, average time %.4f ms" % (num_rep, ave))
return ave
def jaccard(v1, v2):
min_sum = np.sum(np.minimum(v1, v2))
max_sum = np.sum(np.maximum(v1, v2))
return float(min_sum) / float(max_sum)
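# Worked example of the weighted Jaccard above (added for illustration):
# jaccard([1, 2], [2, 1]) = (min(1,2) + min(2,1)) / (max(1,2) + max(2,1))
#                         = (1 + 1) / (2 + 2) = 0.5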
def run_acc(dim, num_rep, sample_size):
logging.info("WeightedMinHash using %d samples" % sample_size)
wmg = WeightedMinHashGenerator(dim, sample_size=sample_size)
data1 = np.random.uniform(0, dim, (num_rep, dim))
data2 = np.random.uniform(0, dim, (num_rep, dim))
errs = []
for i in range(num_rep):
wm1 = wmg.minhash(data1[i])
wm2 = wmg.minhash(data2[i])
j_e = wm1.jaccard(wm2)
j = jaccard(data1[i], data2[i])
errs.append(abs(j - j_e))
ave = np.mean(errs)
logging.info("%d runs, mean error %.4f" % (num_rep, ave))
return ave
sample_sizes = range(10, 160, 10)
num_rep = 100
dim = 5000
output = "weighted_minhash_benchmark.png"
logging.info("> Running performance tests")
run_times = [run_perf(dim, num_rep, n) for n in sample_sizes]
logging.info("> Running accuracy tests")
errs = [run_acc(dim, num_rep, n) for n in sample_sizes]
logging.info("> Plotting result")
fig, axe = plt.subplots(1, 2, sharex=True, figsize=(10, 4))
ax = axe[1]
ax.plot(sample_sizes, run_times, marker='+')
ax.set_xlabel("Number of samples")
ax.set_ylabel("Running time (ms)")
ax.set_title("WeightedMinHash performance")
ax.grid()
ax = axe[0]
ax.plot(sample_sizes, errs, marker='+')
ax.set_xlabel("Number of samples")
ax.set_ylabel("Absolute error in Jaccard estimation")
ax.set_title("WeightedMinHash accuracy")
ax.grid()
fig.savefig(output, bbox_inches="tight")
logging.info("Plot saved to %s" % output)
| mit |
jakobworldpeace/scikit-learn | benchmarks/bench_lof.py | 49 | 3548 | """
============================
LocalOutlierFactor benchmark
============================
A test of LocalOutlierFactor on classical anomaly detection datasets.
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
print(__doc__)
np.random.seed(2)
# datasets available: ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
datasets = ['shuttle']
novelty_detection = True # if False, training set polluted by outliers
for dataset_name in datasets:
# loading and vectorization
print('loading data')
if dataset_name in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dataset_name, shuffle=True,
percent10=False)
X = dataset.data
y = dataset.target
if dataset_name == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
X, y = sh(X, y)
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dataset_name == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dataset_name == 'SF':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != 'normal.').astype(int)
if dataset_name == 'SA':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
lb.fit(X[:, 2])
x2 = lb.transform(X[:, 2])
lb.fit(X[:, 3])
x3 = lb.transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != 'normal.').astype(int)
if dataset_name == 'http' or dataset_name == 'smtp':
y = (y != 'normal.').astype(int)
n_samples, n_features = np.shape(X)
n_samples_train = n_samples // 2
n_samples_test = n_samples - n_samples_train
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
if novelty_detection:
X_train = X_train[y_train == 0]
y_train = y_train[y_train == 0]
print('LocalOutlierFactor processing...')
model = LocalOutlierFactor(n_neighbors=20)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
scoring = -model.decision_function(X_test) # the lower, the more normal
predict_time = time() - tstart
fpr, tpr, thresholds = roc_curve(y_test, scoring)
AUC = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1,
label=('ROC for %s (area = %0.3f, train-time: %0.2fs,'
'test-time: %0.2fs)' % (dataset_name, AUC, fit_time,
predict_time)))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
dallascard/guac | core/preprocessing/label_reader.py | 1 | 1622 | import os
import json
import codecs
from optparse import OptionParser
from collections import defaultdict
import numpy as np
import pandas as pd
from ..util import file_handling as fh
from ..util import dirs
def get_labels(label_file, target, weight_col=None, combine_by_voting=False):
input_dir = dirs.data_raw_labels_dir
input_filename = fh.make_filename(input_dir, fh.get_basename_wo_ext(label_file), 'csv')
label_df = pd.read_csv(input_filename, header=0, index_col=0)
n, p = label_df.shape
    if weight_col is not None and weight_col >= 0:
col = label_df.columns[int(weight_col)]
weights = label_df[col].ravel()
else:
weights = None
if type(target) == int:
label_name = label_df.columns[target]
else:
label_name = target
true_dict = defaultdict(set)
for index, item in enumerate(label_df.index.tolist()):
true_dict[item].add(label_df.ix[index, target])
unanimous_pairs = {i: v.pop() for i, v in true_dict.items() if len(v) == 1}
if combine_by_voting:
label_df['temp'] = 1
grouped = label_df.groupby(label_df.index).sum()
counts = grouped[label_name].ravel()
totals = grouped['temp'].ravel()
        # use int(round(t)) rather than np.round, because np.round(0.5) == 0 (round-half-to-even)
grouped['labels'] = [int(round(t)) for t in counts/totals]
labels = grouped['labels'].ravel()
index = grouped.index.tolist()
weights = np.ones(len(index))
else:
labels = label_df[label_name].ravel()
index = label_df.index.tolist()
return index, label_name, labels, weights, unanimous_pairs
| apache-2.0 |
hainm/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
lbishal/scikit-learn | sklearn/gaussian_process/tests/test_kernels.py | 23 | 11813 | """Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
from collections import Hashable
from sklearn.externals.funcsigs import signature
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
X = np.random.RandomState(0).normal(0, 1, (10, 2))
Y = np.random.RandomState(0).normal(0, 1, (11, 2))
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
def test_kernel_gradient():
""" Compare analytic and numeric gradient of kernels. """
for kernel in kernels:
K, K_gradient = kernel(X, eval_gradient=True)
assert_equal(K_gradient.shape[0], X.shape[0])
assert_equal(K_gradient.shape[1], X.shape[0])
assert_equal(K_gradient.shape[2], kernel.theta.shape[0])
K_gradient_approx = np.empty_like(K_gradient)
for i in range(K.shape[0]):
for j in range(K.shape[1]):
def eval_kernel_ij_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K[i, j]
K_gradient_approx[i, j] = \
approx_fprime(kernel.theta, eval_kernel_ij_for_theta,
1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
def test_kernel_theta():
""" Check that parameter vector theta of kernel is set correctly. """
for kernel in kernels:
if isinstance(kernel, KernelOperator) \
or isinstance(kernel, Exponentiation): # skip non-basic kernels
continue
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s.rstrip("_bounds"),
filter(lambda s: s.endswith("_bounds"), args))
assert_equal(
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters),
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert_equal(theta[i],
np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
if i > 0:
assert_equal(theta[:i], new_kernel.theta[:i])
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert_equal(theta[i+1:], new_kernel.theta[i:])
assert_array_equal(K_gradient[..., i+1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
""" Auto-correlation and cross-correlation should be consistent. """
for kernel in kernels:
if kernel == kernel_white:
continue # Identity is not satisfied on diagonal
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
def test_kernel_diag():
""" Test that diag method of kernel returns consistent results. """
for kernel in kernels:
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
""" Adding kernels and multiplying kernels should be commutative. """
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
""" Anisotropic kernel should be consistent with isotropic kernels."""
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
""" Test stationarity of kernels."""
for kernel in kernels:
if not kernel.is_stationary():
continue
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
def test_kernel_clone():
""" Test that sklearn's clone works correctly on kernels. """
for kernel in kernels:
kernel_cloned = clone(kernel)
assert_equal(kernel, kernel_cloned)
assert_not_equal(id(kernel), id(kernel_cloned))
for attr in kernel.__dict__.keys():
attr_value = getattr(kernel, attr)
attr_value_cloned = getattr(kernel_cloned, attr)
if attr.startswith("hyperparameter_"):
assert_equal(attr_value.name, attr_value_cloned.name)
assert_equal(attr_value.value_type,
attr_value_cloned.value_type)
assert_array_equal(attr_value.bounds,
attr_value_cloned.bounds)
assert_equal(attr_value.n_elements,
attr_value_cloned.n_elements)
elif np.iterable(attr_value):
for i in range(len(attr_value)):
if np.iterable(attr_value[i]):
assert_array_equal(attr_value[i],
attr_value_cloned[i])
else:
assert_equal(attr_value[i], attr_value_cloned[i])
else:
assert_equal(attr_value, attr_value_cloned)
if not isinstance(attr_value, Hashable):
# modifiable attributes must not be identical
assert_not_equal(id(attr_value), id(attr_value_cloned))
def test_matern_kernel():
""" Test consistency of Matern kernel for special values of nu. """
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
# matern kernel for coef0==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
# test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
# result in nearly identical results as the general case for coef0 in
# [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
"""Check that GP kernels can also be used as pairwise kernels."""
for kernel in kernels:
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
def test_set_get_params():
"""Check that set_params()/get_params() is consistent with kernel.theta."""
for kernel in kernels:
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
            if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index+size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
            if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value]*size})
assert_almost_equal(np.exp(kernel.theta[index:index+size]),
[value]*size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
| bsd-3-clause |
Ziqi-Li/bknqgis | pandas/pandas/io/formats/common.py | 16 | 1094 | # -*- coding: utf-8 -*-
"""
Common helper methods used in different submodules of pandas.io.formats
"""
def get_level_lengths(levels, sentinel=''):
"""For each index in each level the function returns lengths of indexes.
Parameters
----------
levels : list of lists
List of values on for level.
sentinel : string, optional
Value which states that no new index starts on there.
Returns
----------
Returns list of maps. For each level returns map of indexes (key is index
in row and value is length of index).
"""
if len(levels) == 0:
return []
control = [True for x in levels[0]]
result = []
for level in levels:
last_index = 0
lengths = {}
for i, key in enumerate(level):
if control[i] and key == sentinel:
pass
else:
control[i] = False
lengths[last_index] = i - last_index
last_index = i
lengths[last_index] = len(level) - last_index
result.append(lengths)
return result
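if __name__ == '__main__':
    # Illustrative check (not part of pandas): with a two-level header whose
    # outer level uses the sentinel '' for a continued span, the outer level
    # covers both columns and the inner level covers one column each.
    _levels = [['a', ''], ['x', 'y']]
    print(get_level_lengths(_levels))  # [{0: 2}, {0: 1, 1: 1}]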
| gpl-2.0 |
NelisVerhoef/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
daanwierstra/pybrain | pybrain/rl/environments/cartpole/cartpole.py | 1 | 4784 | __author__ = 'Thomas Rueckstiess, [email protected]'
from matplotlib.mlab import rk4
from math import sin, cos
import time
from scipy import eye, matrix, random
from pybrain.rl.environments.graphical import GraphicalEnvironment
class CartPoleEnvironment(GraphicalEnvironment):
""" This environment implements the cart pole balancing benchmark, as stated in:
Riedmiller, Peters, Schaal: "Evaluation of Policy Gradient Methods and
Variants on the Cart-Pole Benchmark". ADPRL 2007.
It implements a set of differential equations, solved with a 4th order
Runge-Kutta method.
"""
indim = 1
outdim = 4
# some physical constants
g = 9.81
l = 0.5
mp = 0.1
mc = 1.0
dt = 0.02
def __init__(self, polelength = None):
GraphicalEnvironment.__init__(self)
if polelength != None:
self.l = polelength
# initialize the environment (randomly)
self.reset()
self.action = 0.0
self.delay = False
def getSensors(self):
""" returns the state one step (dt) ahead in the future. stores the state in
self.sensors because it is needed for the next calculation. The sensor return
vector has 4 elements: theta, theta', s, s' (s being the distance from the
origin).
"""
return self.sensors
def performAction(self, action):
""" stores the desired action for the next runge-kutta step.
"""
self.action = action
self.step()
def step(self):
self.sensors = rk4(self._derivs, self.sensors, [0, self.dt])
self.sensors = self.sensors[-1]
if self.hasRenderer():
self.getRenderer().updateData(self.sensors)
if self.delay:
time.sleep(0.05)
def reset(self):
""" re-initializes the environment, setting the cart back in a random position.
"""
angle = random.uniform(-0.2, 0.2)
pos = random.uniform(-0.5, 0.5)
self.sensors = (angle, 0.0, pos, 0.0)
def _derivs(self, x, t):
""" This function is needed for the Runge-Kutta integration approximation method. It calculates the
derivatives of the state variables given in x. for each variable in x, it returns the first order
derivative at time t.
"""
F = self.action
(theta, theta_, _s, s_) = x
u = theta_
sin_theta = sin(theta)
cos_theta = cos(theta)
mp = self.mp
mc = self.mc
l = self.l
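        # equations of motion of the pole/cart system (theta measured from upright):
        #   theta'' = (g*sin(theta)*(mc+mp) - (F + mp*l*theta'^2*sin(theta))*cos(theta))
        #             / (4/3*l*(mc+mp) - mp*l*cos(theta)^2)
        #   s''     = (F - mp*l*(theta''*cos(theta) - theta'^2*sin(theta))) / (mc+mp)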
        u_ = (self.g * sin_theta * (mc + mp) - (F + mp * l * theta_ ** 2 * sin_theta) * cos_theta) / (4.0 / 3.0 * l * (mc + mp) - mp * l * cos_theta ** 2)
v = s_
        v_ = (F - mp * l * (u_ * cos_theta - (theta_ ** 2 * sin_theta))) / (mc + mp)
return (u, u_, v, v_)
def getPoleAngles(self):
""" auxiliary access to just the pole angle(s), to be used by BalanceTask """
return [self.sensors[0]]
def getCartPosition(self):
""" auxiliary access to just the cart position, to be used by BalanceTask """
return self.sensors[2]
class CartPoleLinEnvironment(CartPoleEnvironment):
""" This is a linearized implementation of the cart-pole system, as described in
Peters J, Vijayakumar S, Schaal S (2003) Reinforcement learning for humanoid robotics.
Polelength is fixed, the order of sensors has been changed to the above."""
tau = 1./60. # sec
def __init__(self, **kwargs):
CartPoleEnvironment.__init__(self,**kwargs)
nu = 13.2 # sec^-2
tau = self.tau
# linearized movement equations
self.A = matrix(eye(4))
self.A[0,1] = tau
self.A[2,3] = tau
self.A[1,0] = nu*tau
self.b = matrix([0.0, nu*tau/9.80665, 0.0, tau])
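        # together these give the discrete-time update used in step():
        #   sensors_{t+1} = sensors_t * A + action * b  (plus gaussian noise),
        # with the state ordered as (theta, theta', s, s')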
def step(self):
self.sensors = random.normal(loc=self.sensors*self.A + self.action*self.b, scale=0.001).flatten()
if self.hasRenderer():
self.getRenderer().updateData(self.sensors)
if self.delay:
time.sleep(self.tau)
def reset(self):
""" re-initializes the environment, setting the cart back in a random position.
"""
self.sensors = random.normal(scale=0.1,size=4)
def getSensors(self):
return self.sensors.flatten()
def getPoleAngles(self):
""" auxiliary access to just the pole angle(s), to be used by BalanceTask """
return [self.sensors[0]]
def getCartPosition(self):
""" auxiliary access to just the cart position, to be used by BalanceTask """
return self.sensors[2]
| bsd-3-clause |
oreilly-japan/deep-learning-from-scratch | ch08/misclassified_mnist.py | 5 | 1646 | # coding: utf-8
import sys, os
sys.path.append(os.pardir)  # make files in the parent directory importable
import numpy as np
import matplotlib.pyplot as plt
from deep_convnet import DeepConvNet
from dataset.mnist import load_mnist
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)
network = DeepConvNet()
network.load_params("deep_convnet_params.pkl")
print("calculating test accuracy ... ")
#sampled = 1000
#x_test = x_test[:sampled]
#t_test = t_test[:sampled]
classified_ids = []
acc = 0.0
batch_size = 100
for i in range(int(x_test.shape[0] / batch_size)):
tx = x_test[i*batch_size:(i+1)*batch_size]
tt = t_test[i*batch_size:(i+1)*batch_size]
y = network.predict(tx, train_flg=False)
y = np.argmax(y, axis=1)
classified_ids.append(y)
acc += np.sum(y == tt)
acc = acc / x_test.shape[0]
print("test accuracy:" + str(acc))
classified_ids = np.array(classified_ids)
classified_ids = classified_ids.flatten()
max_view = 20
current_view = 1
fig = plt.figure()
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.2, wspace=0.2)
mis_pairs = {}
for i, val in enumerate(classified_ids == t_test):
if not val:
ax = fig.add_subplot(4, 5, current_view, xticks=[], yticks=[])
ax.imshow(x_test[i].reshape(28, 28), cmap=plt.cm.gray_r, interpolation='nearest')
mis_pairs[current_view] = (t_test[i], classified_ids[i])
current_view += 1
if current_view > max_view:
break
print("======= misclassified result =======")
print("{view index: (label, inference), ...}")
print(mis_pairs)
plt.show()
| mit |
pinkavaj/gnuradio | gr-digital/examples/example_costas.py | 49 | 5316 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_costas(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.cst = digital.costas_loop_cc(bw, 2)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_cst = blocks.vector_sink_c()
self.vsnk_frq = blocks.vector_sink_f()
self.connect(self.src, self.rrc, self.chn, self.cst, self.vsnk_cst)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.cst,1), self.vsnk_frq)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.707,
help="Set the simulation's phase offset [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_costas(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset)
put.run()
data_src = scipy.array(put.vsnk_src.data())
    # Convert the Costas loop's LO frequency from rads/sec to Hz
data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)
# adjust this to align with the data.
data_cst = scipy.array(3*[0,]+list(put.vsnk_cst.data()))
# Plot the Costas loop's LO frequency
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("Costas LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,2)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_cst.real, data_cst.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
s3.set_xlim([-2, 2])
s3.set_ylim([-2, 2])
# Plot the symbols in time
s4 = f1.add_subplot(2,2,3)
s4.set_position([0.125, 0.05, 0.775, 0.4])
s4.plot(data_src.real, "o-")
s4.plot(data_cst.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
maxalbert/bokeh | examples/plotting/file/elements.py | 2 | 1491 | import pandas as pd
from bokeh.plotting import figure, show, output_file
from bokeh.sampledata import periodic_table
elements = periodic_table.elements
elements = elements[elements["atomic number"] <= 82]
elements = elements[~pd.isnull(elements["melting point"])]
mass = [float(x.strip("[]")) for x in elements["atomic mass"]]
elements["atomic mass"] = mass
palette = list(reversed([
"#67001f","#b2182b","#d6604d","#f4a582","#fddbc7","#f7f7f7","#d1e5f0","#92c5de","#4393c3","#2166ac","#053061"
]))
melting_points = elements["melting point"]
low = min(melting_points)
high= max(melting_points)
melting_point_inds = [int(10*(x-low)/(high-low)) for x in melting_points] #gives items in colors a value from 0-10
meltingpointcolors = [palette[i] for i in melting_point_inds]
output_file("elements.html", title="elements.py example")
TOOLS = "pan,wheel_zoom,box_zoom,reset,resize,save"
p = figure(tools=TOOLS, toolbar_location="left", logo="grey", plot_width=1200)
p.title = "Density vs Atomic Weight of Elements (colored by melting point)"
p.background_fill_color= "#cccccc"
p.circle(elements["atomic mass"], elements["density"], size=12,
color=meltingpointcolors, line_color="black", fill_alpha=0.8)
p.text(elements["atomic mass"], elements["density"]+0.3,
text=elements["symbol"],text_color="#333333",
text_align="center", text_font_size="10pt")
p.xaxis.axis_label="atomic weight (amu)"
p.yaxis.axis_label="density (g/cm^3)"
p.grid.grid_line_color="white"
show(p)
| bsd-3-clause |
MaayanLab/clustergrammer-widget | clustergrammer_widget/clustergrammer/normalize_fun.py | 2 | 3555 | import pandas as pd
import numpy as np
from copy import deepcopy
def run_norm(net, df=None, norm_type='zscore', axis='row', keep_orig=False):
'''
A dataframe (more accurately a dictionary of dataframes, e.g. mat,
mat_up...) can be passed to run_norm and a normalization will be run (
e.g. zscore) on either the rows or columns
'''
# df here is actually a dictionary of several dataframes, 'mat', 'mat_orig',
# etc
if df is None:
df = net.dat_to_df()
if norm_type == 'zscore':
df = zscore_df(df, axis, keep_orig)
if norm_type == 'qn':
df = qn_df(df, axis, keep_orig)
net.df_to_dat(df)
def qn_df(df, axis='row', keep_orig=False):
'''
do quantile normalization of a dataframe dictionary, does not write to net
'''
df_qn = {}
for mat_type in df:
inst_df = df[mat_type]
# using transpose to do row qn
if axis == 'row':
inst_df = inst_df.transpose()
missing_values = inst_df.isnull().values.any()
# make mask of missing values
if missing_values:
# get nan mask
missing_mask = pd.isnull(inst_df)
# tmp fill in na with zero, will not affect qn
inst_df = inst_df.fillna(value=0)
# calc common distribution
common_dist = calc_common_dist(inst_df)
# swap in common distribution
inst_df = swap_in_common_dist(inst_df, common_dist)
# swap back in missing values
if missing_values:
inst_df = inst_df.mask(missing_mask, other=np.nan)
# using transpose to do row qn
if axis == 'row':
inst_df = inst_df.transpose()
df_qn[mat_type] = inst_df
return df_qn
def swap_in_common_dist(df, common_dist):
col_names = df.columns.tolist()
qn_arr = np.array([])
orig_rows = df.index.tolist()
# loop through each column
for inst_col in col_names:
# get the sorted list of row names for the given column
tmp_series = deepcopy(df[inst_col])
tmp_series = tmp_series.sort_values(ascending=False)
sorted_names = tmp_series.index.tolist()
qn_vect = np.array([])
for inst_row in orig_rows:
inst_index = sorted_names.index(inst_row)
inst_val = common_dist[inst_index]
qn_vect = np.hstack((qn_vect, inst_val))
if qn_arr.shape[0] == 0:
qn_arr = qn_vect
else:
qn_arr = np.vstack((qn_arr, qn_vect))
# transpose (because of vstacking)
qn_arr = qn_arr.transpose()
qn_df = pd.DataFrame(data=qn_arr, columns=col_names, index=orig_rows)
return qn_df
def calc_common_dist(df):
'''
calculate a common distribution (for col qn only) that will be used to qn
'''
# axis is col
tmp_arr = np.array([])
col_names = df.columns.tolist()
for inst_col in col_names:
# sort column
tmp_vect = df[inst_col].sort_values(ascending=False).values
# stacking rows vertically (will transpose)
if tmp_arr.shape[0] == 0:
tmp_arr = tmp_vect
else:
tmp_arr = np.vstack((tmp_arr, tmp_vect))
tmp_arr = tmp_arr.transpose()
common_dist = tmp_arr.mean(axis=1)
return common_dist
def zscore_df(df, axis='row', keep_orig=False):
'''
take the zscore of a dataframe dictionary, does not write to net (self)
'''
df_z = {}
for mat_type in df:
if keep_orig and mat_type == 'mat':
mat_orig = deepcopy(df[mat_type])
inst_df = df[mat_type]
if axis == 'row':
inst_df = inst_df.transpose()
df_z[mat_type] = (inst_df - inst_df.mean())/inst_df.std()
if axis == 'row':
df_z[mat_type] = df_z[mat_type].transpose()
if keep_orig:
df_z['mat_orig'] = mat_orig
return df_z
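if __name__ == '__main__':
    # Illustrative check (not part of the original module): after quantile
    # normalization every column shares the same sorted value distribution.
    # The column/row names and values below are made up.
    _df = {'mat': pd.DataFrame([[1.0, 40.0], [2.0, 30.0], [5.0, 60.0]],
                               columns=['c1', 'c2'], index=['r1', 'r2', 'r3'])}
    print(qn_df(_df, axis='col')['mat'])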
| mit |
lrei/twitter_annotator | sgd.py | 1 | 15829 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# SGD Text Classifier
Luis Rei <[email protected]> @lmrei
25 Aug 2015
## Running
### Running as a pipe:
```
chmod +x sgd.py
cat test.txt | ./sgd.py --load models/model_file --preprocess > result.txt
```
Where test.txt is line-delimited text
### Running as a zmq socket
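For example (the port number is arbitrary):
```
./sgd.py --load models/model_file --zmq 5555 --preprocess
```
A ZMQ REQ client can then send one tweet per request and read the predicted
class back as a string.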
### Library
```
clf = sgd.load('model_file')
sgd.classify(text, clf, preprocess=twokenize.preprocess)
```
### Train
To train and Test, files should be headerless TSV files with
col[0] = tokenized text
col[1] = class value
"""
from __future__ import print_function
import sys
import argparse
import zmq
import numpy as np
import pandas as pd
from sklearn.linear_model import SGDClassifier
from sklearn.feature_extraction.text import CountVectorizer, HashingVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import Normalizer
from sklearn.pipeline import Pipeline
from sklearn.externals import joblib
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix
from sklearn.grid_search import GridSearchCV
import nltk
from nltk.corpus import stopwords
from collections import Counter
from sklearn.metrics import make_scorer
import twokenize
import undersampler
default_class = 1
def f1_class(pred, truth, class_val):
'''Calculates f1 score for a single class
'''
n = len(truth)
truth_class = 0
pred_class = 0
tp = 0
for ii in range(0, n):
if truth[ii] == class_val:
truth_class += 1
if truth[ii] == pred[ii]:
tp += 1
pred_class += 1
continue
if pred[ii] == class_val:
pred_class += 1
precision = tp / float(pred_class)
recall = tp / float(truth_class)
return (2.0 * precision * recall) / (precision + recall)
def semeval_senti_f1(pred, truth, pos='POSITIVE', neg='NEGATIVE'):
'''Calculates Semaval Sentiment F1 score: ignores neutral class
'''
pos_label = np.asarray([pos], dtype="|S8")[0]
neg_label = np.asarray([neg], dtype="|S8")[0]
f1_pos = f1_class(pred, truth, pos_label)
f1_neg = f1_class(pred, truth, neg_label)
return (f1_pos + f1_neg) / 2.0
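# Note: with labels POSITIVE/NEGATIVE/NEUTRAL this averages only the per-class
# F1 of POSITIVE and NEGATIVE, so NEUTRAL tweets affect the score only through
# the precision of the other two classes.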
def train(train_file, undersample=False, ngram=(1, 4), min_df=1, max_df=1.0,
dim_reduction=None, n_dims=0, n_iter=200, class_weight='auto',
n_jobs=1, verbose=False):
'''Train a classifier
'''
if verbose:
print('loading...')
train = pd.read_csv(train_file, delimiter='\t', encoding='utf-8', header=0,
names=['text', 'label'])
if undersample != 0:
if verbose:
print('undersampling (n={})...'.format(undersample))
train = undersampler.undersample(train, 'label', undersample)
X = train['text']
Y = np.asarray(train['label'], dtype="|S8")
del train
if verbose:
count = Counter()
count.update(Y)
print('num of labels:')
print(count)
del count
# create pipeline
clf = None
# basic parameters
params = {'vect__token_pattern': r"\S+",
'vect__ngram_range': ngram,
'vect__min_df': min_df,
'vect__max_df': max_df,
'vect__binary': True,
'sgd__n_iter': n_iter,
'sgd__shuffle': True,
'sgd__class_weight': class_weight,
'sgd__n_jobs': n_jobs
}
# No dimensionality reduction
if dim_reduction is None:
clf = Pipeline([('vect', CountVectorizer()), ('sgd', SGDClassifier())])
# TruncatedSVD (LSA)
elif dim_reduction == 'svd':
clf = Pipeline([('vect', CountVectorizer()), ('svd', TruncatedSVD()),
('norm', Normalizer()), ('sgd', SGDClassifier())])
params['svd__n_components'] = n_dims
params['norm__copy'] = False
# Hashing Vectorizer
else:
clf = Pipeline([('vect', HashingVectorizer()),
('sgd', SGDClassifier())])
params['vect__n_features'] = n_dims
del params['vect__max_df']
del params['vect__min_df']
clf.set_params(**params)
if verbose:
print('fitting...')
clf.fit(X, Y)
return clf
def tune(train_file, n_jobs, verbose, class_weight, stop_words):
'''Used for GridSearchCV based parameter tuning
'''
if verbose:
print('loading...')
data = pd.read_csv(train_file, delimiter='\t', encoding='utf-8', header=0,
names=['text', 'label'])
X = data['text']
Y = np.asarray(data['label'], dtype="|S8")
del data
# create pipeline
if stop_words:
pipeline = Pipeline([('vect', CountVectorizer(stop_words=stop_words)),
('sgd', SGDClassifier())])
else:
pipeline = Pipeline([('vect', CountVectorizer()),
('sgd', SGDClassifier())])
params = {
'vect__token_pattern': [r"\S+"],
'vect__ngram_range': [(1, 2), (1, 3), (2, 3), (1, 4)],
'vect__min_df': [1, 10, 50, 100],
'vect__max_df': [1.0, 0.9, 0.8, 0.6],
'vect__binary': [True],
'sgd__shuffle': [True],
'sgd__class_weight': [class_weight]
}
verbose_gv = 0
if verbose:
verbose_gv = 3
scorer_f1 = make_scorer(f1_score, greater_is_better=True)
grid_search = GridSearchCV(pipeline, params, n_jobs=n_jobs,
verbose=verbose_gv, scoring=scorer_f1)
if verbose:
count = Counter()
count.update(Y)
print('num of labels:')
print(count)
del count
if verbose:
print('fitting...')
grid_search.fit(X, Y)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(params.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
return grid_search.best_estimator_
def save(clf, save_path):
'''Saves a classifier to disk
'''
joblib.dump(clf, save_path)
def load(load_path):
'''Load a saved classifier from disk
'''
return joblib.load(load_path)
def run(clf, preprocess=False):
'''Classify data from stdin
'''
for line in sys.stdin:
if not line.strip():
return
if preprocess:
line = twokenize.preprocess(line)
if not line:
print(str(default_class))
else:
print(clf.predict([line.strip()])[0])
def classify(tweet, clf, preprocess=None):
'''Classify a single tweet/line/sentence
'''
if preprocess:
tweet = preprocess(tweet)
else:
tweet = tweet.strip()
if not tweet:
return default_class
line = [tweet]
return clf.predict(line)[0]
def classify_file(clf, test_file):
'''Classify data stored in a file
'''
test_lines = None
with open(test_file) as test:
test_lines = test.readlines()
test_lines = [x.strip() for x in test_lines]
return clf.predict(test_lines)
def classify_output(pred, output_file=None):
'''Write classification result to a file
'''
if output_file is None:
out = sys.stdout
else:
out = open(output_file, 'w')
for r in pred:
out.write(str(r) + '\n')
if out != sys.stdout:
out.close()
def run_zmp(clf, port, preprocess=False, verbose=False):
'''Classify data coming from a ZMQ socket, reply to each request with the
result.
'''
context = zmq.Context()
socket = context.socket(zmq.REP)
address = 'tcp://*:' + str(port)
socket.bind(address)
if verbose:
print('ZMQ Service Running: on %s' % (address,))
while True:
# Wait for next request from client
message = socket.recv()
# preprocess
if preprocess:
message = twokenize.tokenize(message)
message = twokenize.preprocess(message)
# check for empty message
if not message:
socket.send(str(default_class))
# classify and reply
else:
socket.send(str(clf.predict([message])[0]))
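# Illustrative client sketch (not part of this module): a REQ socket on the
# other end sends one tweet per request and receives the predicted class.
#   import zmq
#   ctx = zmq.Context()
#   sock = ctx.socket(zmq.REQ)
#   sock.connect('tcp://localhost:5555')   # port must match the --zmq option
#   sock.send('some tweet text')
#   print(sock.recv())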
def plot_cm(cm, labels, destfile='confusion.png'):
import matplotlib.pyplot as plt
plt.figure()
plt.imshow(cm, interpolation='nearest')
plt.title('Confusion Matrix')
plt.colorbar()
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels, rotation=45)
plt.yticks(tick_marks, labels)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig(destfile)
plt.close()
def evaluate(clf, test_file, undersample=False, calc_semeval_f1=True,
export_cm_file=None, verbose=False):
'''Evaluate classifier on a given test set.
'''
if verbose:
print('evaluating...')
test = pd.read_csv(test_file, delimiter='\t', encoding='utf-8', header=0,
names=['text', 'label'])
if undersample:
test = undersampler.undersample(test, 'label')
Y = np.asarray(test['label'], dtype="|S8")
# labels and their counts
count = Counter()
count.update(Y)
labels = count.keys()
if verbose:
print('num of labels:')
print(count)
del count
# predictions
pred = clf.predict(test['text'])
# calculate accuracy
acc = accuracy_score(Y, pred)
# calculate f1 score
f1 = f1_score(Y, pred, average='micro')
# calculate semeval f1
semeval_f1 = 0.0
if calc_semeval_f1:
try:
semeval_f1 = semeval_senti_f1(pred, Y)
except:
semeval_f1 = 0.0
else:
semeval_f1 = 0.0
# display
print('SGD:')
print('\tacc=%f\n\tsemeval_f1=%f\n\tmicro_f1=%f\n' % (acc, semeval_f1, f1))
# confusion matrix
cm = confusion_matrix(Y, pred)
print(cm)
if export_cm_file:
if verbose:
print('Saving confusion matrix to %s' % export_cm_file)
plot_cm(cm, labels, export_cm_file)
def main():
'''Read command line arguments and call the appropriate functions
'''
parser = argparse.ArgumentParser(description='Run SGD.')
# train, load, save
parser.add_argument('--train', help='path of the train tsv')
parser.add_argument('--save', help='save this model to path')
parser.add_argument('--load', help='path of the model to load')
# tune
parser.add_argument('--tune', action='store_true', help='path of the dev '
'tsv')
parser.add_argument('--language', type=str, default=None,
help='Use stopwords for this language (nltk only).')
# eval, classify file, run, zmq run
parser.add_argument('--eval', help='path of the test tsv')
parser.add_argument('--eval-undersample', action='store_true',
default=False,
help='Rebalance test set by undersampling')
parser.add_argument('--eval-cm', help='path to save confusion matrix to')
'''
parser.add_argument('--classify', help='path of the test tsv')
parser.add_argument('--classify-output', default=None,
help='path of the result for --classify')
'''
parser.add_argument('--run', dest='run', action='store_true',
default=False,
help='read lines stdin output to stdout e.g '
'cat test.txt | python sgd.py --load model --run')
parser.add_argument('--zmq', type=int, default=0,
help='read/write to zmq socket at specified port')
parser.add_argument('--preprocess', action='store_true',
help='preprocess text (applies to run, classify, zmq)')
# training parameters
parser.add_argument('--undersample', default=0, type=int,
help='''Rebalance training set by undersampling:
default: 0 - do not rebalance.
-1 to rebalance to smallest class
n [int] to rebalance to at most n examples''')
parser.add_argument('--ngrams', default='1,4',
help='N-grams considered e.g. 1,3 is uni+bi+tri-grams')
parser.add_argument('--dim_reduction', type=str, default=None,
help='''default: None
'svd' for TruncatedSVD (LSA)
'hash' for Hashing Trick
''')
parser.add_argument('--n_dims', type=int, default=20,
help='Number of dimensions (2**n) for SVD or Hashing')
parser.add_argument('--min_df', type=int, default=1,
help='Minimum document frequency')
parser.add_argument('--max_df', type=float, default=1.0,
help='Maximum document frequency')
parser.add_argument('--n_iter', default=200, type=int,
help='SGD iteratios')
parser.add_argument('--no-auto', action='store_true',
default=False)
# common options
parser.add_argument('--n_jobs', type=int, default=-1,
help='number of cores to use in parallel')
parser.add_argument('--verbose', action='store_true',
help='outputs status messages')
# Parse
args = parser.parse_args()
verbose = args.verbose
clf = None
if not args.train and not args.load and not args.tune:
print('No model loaded')
parser.print_help()
sys.exit(1)
# Language
stop_words = None
if args.language:
try:
stop_words = stopwords.words(args.language)
except:
nltk.download()
try:
stop_words = stopwords.words(args.language)
except Exception as e:
print(str(e))
sys.exit()
# class weight for sgd train/tune
class_weight = 'auto'
if args.no_auto:
class_weight = None
# dimensionality reduction
n_dims = 2 ** args.n_dims
# Train
if args.train and not args.tune:
ngram = tuple([int(x) for x in args.ngrams.split(',')])
clf = train(args.train, args.undersample, ngram=ngram,
min_df=args.min_df, max_df=args.max_df,
dim_reduction=args.dim_reduction,
n_dims=n_dims, n_iter=args.n_iter,
class_weight=class_weight, n_jobs=args.n_jobs,
verbose=verbose)
if verbose:
print('ngrams: {}'.format(str(ngram)))
# Tune
if args.tune and not args.train:
print('No train file.')
sys.exit(1)
if args.tune:
clf = tune(args.train, n_jobs=args.n_jobs,
verbose=verbose, class_weight=class_weight,
stop_words=stop_words)
# Load
if args.load:
if verbose:
print('loading...')
clf = load(args.load)
# Save
if args.save:
if clf is None:
print('No model to save')
else:
save(clf, args.save)
# Eval
if args.eval:
if clf is None:
print('No model to evaluate')
else:
evaluate(clf, args.eval, args.eval_undersample,
True, args.eval_cm, verbose=verbose)
# Run
if args.run:
if clf is None:
print('No model to evaluate')
else:
run(clf, args.preprocess)
if args.zmq:
if clf is None:
print('No model to evaluate')
else:
run_zmp(clf, args.zmq, preprocess=args.preprocess,
verbose=args.verbose)
if __name__ == "__main__":
main()
| mit |
yoavram/GrowthRatesPy | tests/test_growthratespy.py | 1 | 2089 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of GrowthRatesPy.
# https://github.com/yoavram/GrowthRatesPy
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, Yoav Ram <[email protected]>
from unittest import TestCase
import os
import shutil
import tempfile
import pkg_resources
import pandas as pd
import growthratespy
class Test(TestCase):
def setUp(self):
self.filename_tsv = pkg_resources.resource_filename("growthratespy", "data/Tecan_210115.tsv")
self.filename_results = pkg_resources.resource_filename("growthratespy", "data/Tecan_210115.results")
self.filename_summary = pkg_resources.resource_filename("growthratespy", "data/Tecan_210115.summary")
self.folder = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.folder)
def test_read_summary(self):
data = growthratespy.read_summary(self.filename_summary)
self.assertEquals(data.shape, (96, 5))
self.assertEquals(data.Well.tolist()[-1], 'H12')
def test_read_results(self):
data = growthratespy.read_results(self.filename_results)
self.assertEquals(len([line for line in data if 'Well' in line]), 96)
def test_growthrates_from_file(self):
shutil.copy(self.filename_tsv, self.folder)
just_filename = os.path.split(self.filename_tsv)[-1]
filename = os.path.join(self.folder, just_filename)
growthratespy.growthrates(filename, blank_well=96)
base_filename = os.path.splitext(filename)[0]
self.assertTrue(os.path.exists(base_filename + '.summary'))
self.assertTrue(os.path.exists(base_filename + '.results'))
def test_growthrates_from_data(self):
data = pd.read_csv(self.filename_tsv, sep='\t')
ret_val = growthratespy.growthrates(data=data, blank_well=96)
self.assertIsNotNone(ret_val)
def test_find_growthrates(self):
path = growthratespy.find_growthrates()
self.assertIsNotNone(path)
self.assertTrue(os.path.exists(path), path)
| mit |
PennyQ/astro-vispy | glue_vispy_viewers/common/vispy_widget.py | 3 | 6264 | import sys
import numpy as np
from vispy import scene
from .axes import AxesVisual3D
from ..utils import NestedSTTransform
from matplotlib.colors import ColorConverter
from glue.config import settings
rgb = ColorConverter().to_rgb
LIMITS_PROPS = [coord + attribute for coord in 'xyz' for attribute in ['_min', '_max', '_stretch']]
class VispyWidgetHelper(object):
def __init__(self, parent=None, viewer_state=None):
# Prepare Vispy canvas. We set the depth_size to 24 to avoid issues
# with isosurfaces on MacOS X
self.canvas = scene.SceneCanvas(keys=None, show=False,
config={'depth_size': 24},
bgcolor=rgb(settings.BACKGROUND_COLOR))
# Set up a viewbox
self.view = self.canvas.central_widget.add_view()
self.view.parent = self.canvas.scene
# Set whether we are emulating a 3D texture. This needs to be enabled
# as a workaround on Windows otherwise VisPy crashes.
self.emulate_texture = (sys.platform == 'win32' and
sys.version_info[0] < 3)
self.scene_transform = scene.STTransform()
self.limit_transforms = {}
fc = rgb(settings.FOREGROUND_COLOR)
self.axis = AxesVisual3D(axis_color=fc, tick_color=fc, text_color=fc,
tick_width=1, minor_tick_length=2,
major_tick_length=4, axis_width=0,
tick_label_margin=10, axis_label_margin=25,
tick_font_size=6, axis_font_size=8,
view=self.view,
transform=self.scene_transform)
# Create a turntable camera. For now, this is the only camerate type
# we support, but if we support more in future, we should implement
# that here
# Orthographic perspective view as default
self.view.camera = scene.cameras.TurntableCamera(parent=self.view.scene,
fov=0., distance=4.0)
        # Calling canvas.render() here used to be needed to avoid an OpenGL
        # program validation error later on; the call is currently disabled.
        # self.canvas.render()
self.viewer_state = viewer_state
try:
self.viewer_state.add_callback('*', self._update_from_state, as_kwargs=True)
except TypeError: # glue-core >= 0.11
self.viewer_state.add_global_callback(self._update_from_state)
self._update_from_state(force=True)
def _update_appearance_from_settings(self):
self.canvas.bgcolor = rgb(settings.BACKGROUND_COLOR)
self.axis.axis_color = rgb(settings.FOREGROUND_COLOR)
self.axis.tick_color = rgb(settings.FOREGROUND_COLOR)
self.axis.label_color = rgb(settings.FOREGROUND_COLOR)
def add_data_visual(self, visual):
self.limit_transforms[visual] = NestedSTTransform()
self._update_limits()
visual.transform = self.limit_transforms[visual]
self.view.add(visual)
def _update_from_state(self, force=False, **props):
if force or 'visible_axes' in props:
self._toggle_axes()
if force or 'perspective_view' in props:
self._toggle_perspective()
if force or any(key in props for key in ('x_att', 'y_att', 'z_att')):
self._update_attributes()
if force or any(key in props for key in ('x_stretch', 'y_stretch',
'z_stretch', 'native_aspect')):
self._update_stretch()
if force or any(p in props for p in LIMITS_PROPS) or 'native_aspect' in props:
self._update_limits()
self.canvas.update()
def _toggle_axes(self):
if self.viewer_state.visible_axes:
self.axis.parent = self.view.scene
else:
self.axis.parent = None
def _toggle_perspective(self):
if self.viewer_state.perspective_view:
self.view.camera.fov = 30
self.axis.tick_font_size = 28
self.axis.axis_font_size = 35
else:
self.view.camera.fov = 0
self.axis.tick_font_size = 6
self.axis.axis_font_size = 8
def _update_attributes(self):
if self.viewer_state.x_att is not None:
self.axis.xlabel = self.viewer_state.x_att.label
if self.viewer_state.y_att is not None:
self.axis.ylabel = self.viewer_state.y_att.label
if self.viewer_state.z_att is not None:
self.axis.zlabel = self.viewer_state.z_att.label
def _update_stretch(self):
self.scene_transform.scale = (self.viewer_state.x_stretch * self.viewer_state.aspect[0],
self.viewer_state.y_stretch * self.viewer_state.aspect[1],
self.viewer_state.z_stretch * self.viewer_state.aspect[2])
def _update_limits(self):
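        # Map each axis from [data min, data max] onto [-1, 1] (times the
        # stretch/aspect factors) and translate so the data are centred on the
        # origin; a zero-extent axis gets an infinite scale.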
dx = self.viewer_state.x_max - self.viewer_state.x_min
sx = (np.inf if dx == 0 else 2. / dx *
self.viewer_state.x_stretch * self.viewer_state.aspect[0])
dy = self.viewer_state.y_max - self.viewer_state.y_min
sy = (np.inf if dy == 0 else 2. / dy *
self.viewer_state.y_stretch * self.viewer_state.aspect[1])
dz = self.viewer_state.z_max - self.viewer_state.z_min
sz = (np.inf if dz == 0 else 2. / dz *
self.viewer_state.z_stretch * self.viewer_state.aspect[2])
scale = [sx, sy, sz]
translate = [-0.5 * (self.viewer_state.x_min + self.viewer_state.x_max) * scale[0],
-0.5 * (self.viewer_state.y_min + self.viewer_state.y_max) * scale[1],
-0.5 * (self.viewer_state.z_min + self.viewer_state.z_max) * scale[2]]
for visual in self.limit_transforms:
self.limit_transforms[visual].scale = scale
self.limit_transforms[visual].translate = translate
self.axis.xlim = self.viewer_state.x_min, self.viewer_state.x_max
self.axis.ylim = self.viewer_state.y_min, self.viewer_state.y_max
self.axis.zlim = self.viewer_state.z_min, self.viewer_state.z_max
| bsd-2-clause |
466152112/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 206 | 7643 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consicutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
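    # Note (added for clarity, not in the original test): in this version the
    # adjustment uses the max-entropy normalization, so the expected value is
    # (mi - emi) / (max(H(labels_a), H(labels_b)) - emi)
    #   = (0.41022 - 0.15042) / (1.09508 - 0.15042) ~= 0.27502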
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
great-expectations/great_expectations | tests/data_context/test_data_context_datasource_non_sql_methods.py | 1 | 9120 | import os
import shutil
from typing import List, Union
import pandas as pd
from ruamel.yaml import YAML
from great_expectations.core.batch import Batch, BatchRequest
from great_expectations.data_context.util import file_relative_path
from ..test_utils import create_files_in_directory
yaml = YAML()
def test_get_batch_list_from_new_style_datasource_with_file_system_datasource_inferred_assets(
empty_data_context, tmp_path_factory
):
context = empty_data_context
base_directory = str(
tmp_path_factory.mktemp(
"test_get_batch_list_from_new_style_datasource_with_file_system_datasource_inferred_assets"
)
)
create_files_in_directory(
directory=base_directory,
file_name_list=[
"path/A-100.csv",
"path/A-101.csv",
"directory/B-1.csv",
"directory/B-2.csv",
],
file_content_fn=lambda: "x,y,z\n1,2,3\n2,3,5",
)
config = yaml.load(
f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_data_connector:
class_name: InferredAssetFilesystemDataConnector
base_directory: {base_directory}
glob_directive: "*/*.csv"
default_regex:
pattern: (.+)/(.+)-(\\d+)\\.csv
group_names:
- data_asset_name
- letter
- number
""",
)
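    # Note (added for clarity, not part of the original test): with the regex
    # above, a file such as "path/A-101.csv" is parsed into
    # data_asset_name="path", letter="A", number="101", which is exactly the
    # combination requested via batch_filter_parameters below.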
context.add_datasource(
"my_datasource",
**config,
)
batch_request: Union[dict, BatchRequest] = {
"datasource_name": "my_datasource",
"data_connector_name": "my_data_connector",
"data_asset_name": "path",
"data_connector_query": {
"batch_filter_parameters": {
# "data_asset_name": "path",
"letter": "A",
"number": "101",
}
},
}
batch_list: List[Batch] = context.get_batch_list(**batch_request)
assert len(batch_list) == 1
batch: Batch = batch_list[0]
assert batch.batch_spec is not None
assert batch.batch_definition["data_asset_name"] == "path"
assert batch.batch_definition["batch_identifiers"] == {
"letter": "A",
"number": "101",
}
assert isinstance(batch.data.dataframe, pd.DataFrame)
assert batch.data.dataframe.shape == (2, 3)
def test_get_batch_list_from_new_style_datasource_with_file_system_datasource_configured_assets(
empty_data_context, tmp_path_factory
):
context = empty_data_context
base_directory = str(
tmp_path_factory.mktemp(
"test_get_batch_list_from_new_style_datasource_with_file_system_datasource_configured_assets"
)
)
titanic_asset_base_directory_path: str = os.path.join(base_directory, "data")
os.makedirs(titanic_asset_base_directory_path)
titanic_csv_source_file_path: str = file_relative_path(
__file__, "../test_sets/Titanic.csv"
)
titanic_csv_destination_file_path: str = str(
os.path.join(base_directory, "data/Titanic_19120414_1313.csv")
)
shutil.copy(titanic_csv_source_file_path, titanic_csv_destination_file_path)
config = yaml.load(
f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_data_connector:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: {base_directory}
glob_directive: "*.csv"
default_regex:
pattern: (.+)\\.csv
group_names:
- name
assets:
Titanic:
base_directory: {titanic_asset_base_directory_path}
pattern: (.+)_(\\d+)_(\\d+)\\.csv
group_names:
- name
- timestamp
- size
""",
)
context.add_datasource(
"my_datasource",
**config,
)
batch_request: Union[dict, BatchRequest] = {
"datasource_name": "my_datasource",
"data_connector_name": "my_data_connector",
"data_asset_name": "Titanic",
"data_connector_query": {
"batch_filter_parameters": {
"name": "Titanic",
"timestamp": "19120414",
"size": "1313",
}
},
}
batch_list: List[Batch] = context.get_batch_list(**batch_request)
assert len(batch_list) == 1
batch: Batch = batch_list[0]
assert batch.batch_spec is not None
assert batch.batch_definition["data_asset_name"] == "Titanic"
assert batch.batch_definition["batch_identifiers"] == {
"name": "Titanic",
"timestamp": "19120414",
"size": "1313",
}
assert isinstance(batch.data.dataframe, pd.DataFrame)
assert batch.data.dataframe.shape == (1313, 7)
def test_get_batch_list_from_new_style_datasource_with_runtime_data_connector(
empty_data_context, tmp_path_factory
):
context = empty_data_context
config = yaml.load(
f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
test_runtime_data_connector:
module_name: great_expectations.datasource.data_connector
class_name: RuntimeDataConnector
batch_identifiers:
- airflow_run_id
""",
)
context.add_datasource(
"my_datasource",
**config,
)
test_df: pd.DataFrame = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
data_connector_name: str = "test_runtime_data_connector"
data_asset_name: str = "test_asset_1"
batch_request: dict = {
"datasource_name": "my_datasource",
"data_connector_name": data_connector_name,
"data_asset_name": data_asset_name,
"runtime_parameters": {
"batch_data": test_df,
},
"batch_identifiers": {
"airflow_run_id": 1234567890,
},
}
batch_list = context.get_batch_list(**batch_request)
assert len(batch_list) == 1
batch: Batch = batch_list[0]
assert batch.batch_spec is not None
assert batch.batch_definition["data_asset_name"] == "test_asset_1"
assert batch.batch_definition["batch_identifiers"] == {
"airflow_run_id": 1234567890,
}
assert isinstance(batch.data.dataframe, pd.DataFrame)
assert batch.data.dataframe.shape == (2, 2)
def test_get_batch_list_from_new_style_datasource_with_file_system_datasource_configured_assets_testing_query(
empty_data_context, tmp_path_factory
):
context = empty_data_context
base_directory = str(
tmp_path_factory.mktemp(
"test_get_batch_list_from_new_style_datasource_with_file_system_datasource_configured_assets_queries"
)
)
create_files_in_directory(
directory=base_directory,
file_name_list=[
"Test_1998.csv",
"Test_1999.csv",
"Test_2000.csv",
"Test_2010.csv",
"Test_2021.csv",
],
file_content_fn=lambda: "x,y,z\n1,2,3\n2,3,5",
)
config = yaml.load(
f"""
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
my_data_connector:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: {base_directory}
glob_directive: "*.csv"
default_regex:
pattern: (.+)_(\\d.*)\\.csv
group_names:
- name
- year
sorters:
- orderby: desc
class_name: NumericSorter
name: year
assets:
YearTest:
base_directory: {base_directory}
pattern: (.+)_(\\d.*)\\.csv
group_names:
- name
- year
""",
)
context.add_datasource(
"my_datasource",
**config,
)
# only select files from after 2000
def my_custom_batch_selector(batch_identifiers: dict) -> bool:
return int(batch_identifiers["year"]) > 2000
batch_request: Union[dict, BatchRequest] = {
"datasource_name": "my_datasource",
"data_connector_name": "my_data_connector",
"data_asset_name": "YearTest",
"data_connector_query": {
"custom_filter_function": my_custom_batch_selector,
},
}
batch_list: List[Batch] = context.get_batch_list(**batch_request)
assert len(batch_list) == 2
# first batch
batch: Batch = batch_list[0]
assert batch.batch_spec is not None
assert batch.batch_definition["data_asset_name"] == "YearTest"
assert batch.batch_definition["batch_identifiers"] == {
"name": "Test",
"year": "2021",
}
# second batch
batch: Batch = batch_list[1]
assert batch.batch_spec is not None
assert batch.batch_definition["data_asset_name"] == "YearTest"
assert batch.batch_definition["batch_identifiers"] == {
"name": "Test",
"year": "2010",
}
| apache-2.0 |
edouardpoitras/NowTrade | nowtrade/report.py | 1 | 20017 | """
Contains the Report class, which keeps track of a strategy's various
performance metrics and trading history.
"""
import numpy as np
import pandas as pd
from nowtrade import logger
from nowtrade.trade import Trade
from nowtrade.action import LONG, LONG_EXIT, SHORT, SHORT_EXIT
class InvalidExit(Exception):
"""
Exception that occurs when an exit (LongExit or ShortExit) occurs
without a corresponding entry trade.
"""
pass
class Report(object):
"""
The Report class is used to generate stratetgy metrics and reports.
"""
def __init__(self, strategy, trading_profile):
self.strategy = strategy
self.trading_profile = trading_profile
self.trade_history = {}
self.available_money = self.trading_profile.capital
self.capital = self.trading_profile.capital
self.available_money_history = pd.Series(index=self.strategy.dataset.data_frame.index)
self.available_capital_history = pd.Series(index=self.strategy.dataset.data_frame.index)
self.ongoing_trades = {}
self.trades = 0
self.average_gain = 0.0
self.sharpe_ratio = 0.0
self.average_trading_amount = 0.0
self.average_bars = 0.0
self.total_fees = 0.0
self.average_fees = 0.0
self.total_slippage = 0.0
self.average_slippage = 0.0
self.winning_trades = 0
self.losing_trades = 0
self.average_winning_gain = 0.0
self.average_losing_gain = 0.0
self.percent_profitable = 0.0
self.gross_profit = 0.0
self.gross_loss = 0.0
self.net_profit = 0.0
self.lacking_capital = 0
self._require_finalize_calculations = False
self.logger = logger.Logger(self.__class__.__name__)
self.logger.info('strategy: %s trading_profile: %s' %(strategy, trading_profile))
def add_preprocess_metrics(self, symbol, data_frame):
"""
Adds required metrics for certain criteria that can't be easily added
as technical indicators. IE: P/L, ACTIONS, STATUS, etc
This only updates the last data entry.
"""
if symbol not in self.ongoing_trades:
self.ongoing_trades[symbol] = None
if 'PL_%s' %symbol not in data_frame:
data_frame['PL_%s' %symbol] = pd.Series(index=data_frame.index)
if 'CHANGE_VALUE_%s' %symbol not in data_frame:
data_frame['CHANGE_VALUE_%s' %symbol] = pd.Series(index=data_frame.index)
if 'CHANGE_PERCENT_%s' %symbol not in data_frame:
data_frame['CHANGE_PERCENT_%s' %symbol] = pd.Series(index=data_frame.index)
trade = self.ongoing_trades[symbol]
action = data_frame['ACTIONS_%s' %symbol][-1]
exiting = action == LONG_EXIT or action == SHORT_EXIT
if trade and exiting: # Exiting Ongoing Trade
_exiting_ongoing_trade(data_frame, trade, action)
elif trade: # Ongoing Trade
_ongoing_trade(data_frame, trade)
elif data_frame['ACTIONS_%s' %symbol][-1] != 0: # New Enter/Exit Trade
_new_trade(data_frame, symbol, self.trading_profile, self.available_money)
else: # Not in trade
data_frame['PL_%s' %symbol][-1] = np.nan
data_frame['CHANGE_VALUE_%s' %symbol][-1] = np.nan
data_frame['CHANGE_PERCENT_%s' %symbol][-1] = np.nan
self._require_finalize_calculations = True
def handle_action(self, symbol, data_frame):
"""
Perform all necessary operations to keep track of a new action in
the market.
"""
datetime = data_frame.index[-1]
data = data_frame.iloc[-1]
action = data['ACTIONS_%s' %symbol]
if action == LONG:
self.long(data, datetime, symbol)
elif action == SHORT:
self.short(data, datetime, symbol)
elif action == LONG_EXIT:
self.long_exit(data, datetime, symbol)
elif action == SHORT_EXIT:
self.short_exit(data, datetime, symbol)
def long(self, data, datetime, symbol):
"""
Perform LONG action in the market.
"""
price = data['%s_Open' %symbol]
shares = self.trading_profile.trading_amount.get_shares(price, self.available_money)
fee = self.trading_profile.trading_fee.get_fee(price, shares)
money = price * shares
slippage = money * self.trading_profile.slippage / 100
self.total_fees += fee
self.total_slippage += slippage
self.average_trading_amount += money
self.available_money -= (money + fee + slippage)
trade = Trade(datetime, 'LONG', symbol, price, shares, money, fee, slippage)
self.ongoing_trades[symbol] = trade
if symbol not in self.trade_history:
self.trade_history[symbol] = []
self.trade_history[symbol].append(Trade(datetime,
'LONG',
symbol,
price,
shares,
money,
fee,
slippage))
self.available_money_history[datetime] = self.available_money
self.available_capital_history[datetime] = self.capital
def short(self, data, datetime, symbol):
"""
Perform SHORT action in the market.
"""
price = data['%s_Open' %symbol]
shares = self.trading_profile.trading_amount.get_shares(price, self.available_money)
fee = self.trading_profile.trading_fee.get_fee(price, shares)
money = price * shares
slippage = money * self.trading_profile.slippage / 100
self.total_fees += fee
self.total_slippage += slippage
self.average_trading_amount += money
self.available_money -= (money + fee + slippage)
self.ongoing_trades[symbol] = Trade(datetime,
'SHORT',
symbol,
price,
shares,
money,
fee,
slippage)
if symbol not in self.trade_history:
self.trade_history[symbol] = []
self.trade_history[symbol].append(Trade(datetime,
'SHORT',
symbol,
price,
shares,
money,
fee,
slippage))
self.available_money_history[datetime] = self.available_money
self.available_capital_history[datetime] = self.capital
def long_exit(self, data, datetime, symbol):
"""
Perform LONG_EXIT action in the market.
"""
if not self.ongoing_trades[symbol]:
raise InvalidExit('Could not simulate LongExit for %s on %s, \
no corresponding action' %(symbol, datetime))
trade = self.ongoing_trades[symbol]
self.ongoing_trades[symbol] = None
price = data['%s_Open' %symbol]
fee = self.trading_profile.trading_fee.get_fee(price, trade.shares)
money = price * trade.shares
slippage = money * self.trading_profile.slippage / 100
self.total_fees += fee
self.total_slippage += slippage
profit_loss = money - trade.money # Made this much off the trade
profit_loss -= trade.fee # Minus Fees In
profit_loss -= fee # Minus Fees Out
profit_loss -= trade.slippage # Minus Slippage In
profit_loss -= slippage # Minus Slippage Out
self.available_money += (money - fee - slippage)
gains = profit_loss / (money - fee - slippage)
self.average_gain += gains
self.capital += profit_loss
self.trade_history[symbol].append(Trade(datetime,
'LONG_EXIT',
symbol,
price,
trade.shares,
money,
fee,
slippage))
self.available_money_history[datetime] = self.available_money
self.available_capital_history[datetime] = self.capital
if gains > 0:
self.winning_trades += 1
self.average_winning_gain += gains
self.gross_profit += profit_loss
else:
self.losing_trades += 1
self.average_losing_gain += gains
self.gross_loss += profit_loss
self.trades += 1
def short_exit(self, data, datetime, symbol):
"""
Perform SHORT_EXIT action in the market.
"""
if not self.ongoing_trades[symbol]:
raise InvalidExit('Did not simulate ShortExit for %s on %s \
- probably not enough capital' %(symbol, datetime))
trade = self.ongoing_trades[symbol]
self.ongoing_trades[symbol] = None
price = data['%s_Open' %symbol]
fee = self.trading_profile.trading_fee.get_fee(price, trade.shares)
money = price * trade.shares
slippage = money * self.trading_profile.slippage / 100
self.total_fees += fee
self.total_slippage += slippage
profit_loss = trade.money - money # Made this much off the trade
profit_loss -= trade.fee # Minus Fees In
profit_loss -= fee # Minus Fees Out
profit_loss -= trade.slippage # Minus Slippage In
profit_loss -= slippage # Minus Slippage Out
self.available_money += (money - fee - slippage)
gains = profit_loss / (money - fee - slippage)
self.average_gain += gains
self.capital += profit_loss
self.trade_history[symbol].append(Trade(datetime,
'SHORT_EXIT',
symbol,
price,
trade.shares,
money,
fee,
slippage))
self.available_money_history[datetime] = self.available_money
self.available_capital_history[datetime] = self.capital
if gains > 0:
self.winning_trades += 1
self.average_winning_gain += gains
self.gross_profit += profit_loss
else:
self.losing_trades += 1
self.average_losing_gain += gains
self.gross_loss += profit_loss
self.trades += 1
def finalize_calculations(self):
"""
Finalizes all report metric calculations, such as average gains,
average trading amount, average fees, average slippage, etc.
"""
if self.trades == 0:
# No trades for this time period
return
self.average_gain = self.average_gain / self.trades
self.average_trading_amount = self.average_trading_amount / self.trades
self.average_fees = self.total_fees / self.trades
self.average_slippage = self.total_slippage / self.trades
if self.winning_trades != 0:
self.average_winning_gain = self.average_winning_gain / self.winning_trades
if self.losing_trades != 0:
self.average_losing_gain = self.average_losing_gain / self.losing_trades
self.percent_profitable = self.winning_trades * 1.0 / self.trades * 100
self.net_profit = self.gross_profit + self.gross_loss
# Capital history should start at trading_profile.capital, not at nan/0
if np.isnan(self.available_capital_history[0]):
self.available_capital_history[0] = self.trading_profile.capital
self.available_capital_history = self.available_capital_history.fillna(method='ffill')
self.sharpe_ratio = self.get_sharpe_ratio()
self._require_finalize_calculations = False
def get_sharpe_ratio(self, periods=252, benchmark=None):
"""
Benchmark can be an int representing the annualized return (5 for 5%)
or another time series.
"""
returns = self.available_capital_history.pct_change()
if benchmark is None:
return np.sqrt(periods) * returns.mean() / returns.std()
elif isinstance(benchmark, (int, float, long)):
            excess_returns = returns - benchmark / 100.0 / periods
return np.sqrt(periods) * excess_returns.mean() / excess_returns.std()
else: # Assume a Series for the benchmark
bench_returns = benchmark.pct_change()
excess_returns = returns - bench_returns
return np.sqrt(periods) * excess_returns.mean() / excess_returns.std()
def get_average_bars(self, data_frame):
"""
Helper method that calculates the average number of bars in the
market for a strategy.
"""
bars = 0.0
for symbol in self.trade_history:
last_trade = None
for trade in self.trade_history[symbol]:
if trade.action == 'SHORT_EXIT' or trade.action == 'LONG_EXIT':
# -1 because we don't count the exit bar where we exit on the OPEN
length = len(data_frame[last_trade.datetime:trade.datetime])
bars += length - 1
last_trade = None
else: last_trade = trade
return bars / self.trades
def overview(self):
"""
Generates an overview dict containing all report metrics.
"""
if self._require_finalize_calculations:
self.finalize_calculations()
overview = {}
if self.trades > 0:
overview['average_trading_amount'] = self.average_trading_amount
overview['average_fees'] = self.average_fees
overview['average_slippage'] = self.average_slippage
overview['average_gains'] = self.average_gain*100
overview['average_winner'] = self.average_winning_gain*100
overview['average_loser'] = self.average_losing_gain*100
overview['average_bars'] = self.get_average_bars(self.strategy.dataset.data_frame)
overview['profitability'] = self.percent_profitable
overview['gross_profit'] = self.gross_profit
overview['gross_loss'] = self.gross_loss
overview['net_profit'] = self.net_profit
overview['winning_trades'] = self.winning_trades
overview['losing_trades'] = self.losing_trades
overview['sharpe_ratio'] = self.sharpe_ratio
overview['total_fees'] = self.total_fees
overview['total_slippage'] = self.total_slippage
overview['trades'] = self.trades
overview['lacking_capital'] = self.lacking_capital
ongoing_trades = [symbol for symbol in self.ongoing_trades if self.ongoing_trades[symbol]]
overview['ongoing_trades'] = len(ongoing_trades)
overview['trade_history'] = self.trade_history
overview['available_money_history'] = self.available_money_history
overview['available_capital_history'] = self.available_capital_history
return overview
def pretty_overview(self):
"""
Similar to overview() except returns a string that can be printed
to the console.
"""
overview = self.overview()
if overview['trades'] == 0:
ret = 'No trades\n'
ret += 'Ongoing Trades: %s\n' %overview['ongoing_trades']
ret += 'Trades Lacking Capital: %s' %overview['lacking_capital']
return ret
ret = 'Trades:\n'
for symbol in overview['trade_history']:
ret += '%s\n' %symbol
for trade in overview['trade_history'][symbol]:
ret += '%s\n' %str(trade)
ret += 'Profitability: %s\n' %overview['profitability']
ret += '# Trades: %s\n' %overview['trades']
ret += 'Net Profit: %s\n' %overview['net_profit']
ret += 'Gross Profit: %s\n' %overview['gross_profit']
ret += 'Gross Loss: %s\n' %overview['gross_loss']
ret += 'Winning Trades: %s\n' %overview['winning_trades']
ret += 'Losing Trades: %s\n' %overview['losing_trades']
ret += 'Sharpe Ratio: %s\n' %overview['sharpe_ratio']
ret += 'Avg. Trading Amount: %s\n' %overview['average_trading_amount']
ret += 'Avg. Fees: %s\n' %overview['average_fees']
ret += 'Avg. Slippage: %s\n' %overview['average_slippage']
ret += 'Avg. Gains: %s\n' %overview['average_gains']
ret += 'Avg. Winner: %s\n' %overview['average_winner']
ret += 'Avg. Loser: %s\n' %overview['average_loser']
ret += 'Avg. Bars: %s\n' %overview['average_bars']
ret += 'Total Fees: %s\n' %overview['total_fees']
ret += 'Total Slippage: %s\n' %overview['total_slippage']
ret += 'Trades Lacking Capital: %s\n' %overview['lacking_capital']
ret += 'Ongoing Trades: %s' %overview['ongoing_trades']
return ret
def _new_trade(data_frame, symbol, trading_profile, available_money):
"""
Helper function that populates the dataframe with trade information
based on a new trade in the market.
"""
status = data_frame['STATUS_%s' %symbol][-1]
open_value = data_frame['%s_Open' %symbol][-1]
close_value = data_frame['%s_Close' %symbol][-1]
change = close_value - open_value
shares = trading_profile.trading_amount.get_shares(open_value, available_money)
fee = trading_profile.trading_fee.get_fee(open_value, shares)
money = open_value * shares
profit_loss = shares * close_value - money
percent_change = profit_loss / money
if status < 0: # Shorting
profit_loss = profit_loss * -1
data_frame['PL_%s' %symbol][-1] = profit_loss - fee
data_frame['CHANGE_VALUE_%s' %symbol][-1] = change
data_frame['CHANGE_PERCENT_%s' %symbol][-1] = percent_change
def _ongoing_trade(data_frame, trade):
"""
Helper function that populates the dataframe with trade information
from an ongoing trade in the market.
"""
status = data_frame['STATUS_%s' %trade.symbol][-1]
close_value = data_frame['%s_Close' %trade.symbol][-1]
enter_change = close_value - trade.price
percent_change = enter_change / trade.price
profit_loss = percent_change * trade.money
if status < 0: # Shorting
profit_loss = profit_loss * -1
data_frame['PL_%s' %trade.symbol][-1] = profit_loss - trade.fee
data_frame['CHANGE_VALUE_%s' %trade.symbol][-1] = enter_change
data_frame['CHANGE_PERCENT_%s' %trade.symbol][-1] = percent_change
def _exiting_ongoing_trade(data_frame, trade, action):
"""
Helper function that populates the dataframe with trade information
based on the exit action executed in the market.
"""
open_value = data_frame['%s_Open' %trade.symbol][-1]
enter_change = open_value - trade.price
percent_change = enter_change / trade.price
profit_loss = percent_change * trade.money
if action == SHORT_EXIT: # Shorting
profit_loss = profit_loss * -1
# We need to account for fees on both the enter and exit.
fee = trade.fee * 2
data_frame['PL_%s' %trade.symbol][-1] = profit_loss - fee
data_frame['CHANGE_VALUE_%s' %trade.symbol][-1] = enter_change
data_frame['CHANGE_PERCENT_%s' %trade.symbol][-1] = percent_change
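# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original nowtrade module): the Sharpe
# ratio returned by Report.get_sharpe_ratio() without a benchmark is simply
# sqrt(periods) * mean(period returns) / std(period returns). The
# self-contained example below reproduces that arithmetic on a synthetic
# capital history so the formula can be checked without building a full
# Strategy/TradingProfile. The equity curve is made-up data.
def _example_sharpe_ratio(periods=252):
    """Annualized Sharpe ratio of a synthetic daily capital history."""
    rng = np.random.RandomState(0)
    # 252 daily observations drifting slightly upwards.
    capital = pd.Series(10000.0 * np.cumprod(1 + rng.normal(0.0005, 0.01, periods)))
    returns = capital.pct_change()
    return np.sqrt(periods) * returns.mean() / returns.std()

if __name__ == '__main__':
    print('Example annualized Sharpe ratio: %s' % _example_sharpe_ratio())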
| mit |
kod3r/neon | examples/conv_autoencoder.py | 12 | 3300 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Convolutional autoencoder example network for MNIST data set
"""
import numpy as np
from neon.backends import gen_backend
from neon.data import DataIterator, load_mnist
from neon.initializers import Uniform
from neon.layers import Conv, Pooling, GeneralizedCost, Deconv
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, SumSquared
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
batch_size = 128
num_epochs = args.epochs
# setup backend
be = gen_backend(backend=args.backend,
batch_size=batch_size,
rng_seed=args.rng_seed,
device_id=args.device_id,
default_dtype=args.datatype)
# Load dataset
(X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir)
# Set input and target to X_train
train = DataIterator(X_train, lshape=(1, 28, 28))
# Initialize the weights and the learning rule
init_uni = Uniform(low=-0.1, high=0.1)
opt_gdm = GradientDescentMomentum(learning_rate=0.001, momentum_coef=0.9)
# Define the layers
layers = []
layers.append(Conv((4, 4, 8), init=init_uni, activation=Rectlin()))
layers.append(Pooling(2))
layers.append(Conv((4, 4, 32), init=init_uni, activation=Rectlin()))
layers.append(Pooling(2))
layers.append(Deconv(fshape=(4, 4, 8), init=init_uni))
layers.append(Deconv(fshape=(2, 2, 8), init=init_uni, strides=2))
layers.append(Deconv(fshape=(2, 2, 1), init=init_uni, strides=2))
# Define the cost
cost = GeneralizedCost(costfunc=SumSquared())
mlp = Model(layers=layers)
# Fit the model
# configure callbacks
callbacks = Callbacks(mlp, train, output_file=args.output_file,
progress_bar=args.progress_bar)
mlp.fit(train, optimizer=opt_gdm, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
print mlp.layers[-1]
# Plot the reconstructed digits
try:
from matplotlib import pyplot, cm
fi = 0
nrows = 10
ncols = 12
test = np.zeros((28*nrows, 28*ncols))
idxs = [(row, col) for row in range(nrows) for col in range(ncols)]
for row, col in idxs:
im = mlp.layers[-1].outputs.get()[:, fi].reshape((28, 28))
test[28*row:28*(row+1):, 28*col:28*(col+1)] = im
fi = fi + 1
pyplot.matshow(test, cmap=cm.gray)
pyplot.savefig('Reconstructed.png')
except ImportError:
print 'matplotlib needs to be manually installed to generate plots'
| apache-2.0 |
hainm/scikit-learn | sklearn/feature_selection/tests/test_base.py | 170 | 3666 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
jskDr/jamespy | jsklearn/binary_model.py | 3 | 11245 | """
Binary_model - binary regression model including MBR
Developer: (James) Sung-Jin Kim, [email protected]
Creation Date: July 11, 2015
Update Date: July 11, 2015
Version: ver 0.1 rev 0
"""
from sklearn import linear_model
import numpy as np
import jpyx
class BIKE_A_Ridge( linear_model.Ridge): # Later on, Viking will be built
"""
BIKE - BInary Kernel Ensemble (BIKE) method
"""
def __init__(self, A, alpha = 0.5):
"""
A is precomputed similarity matrix of xM(all)
Depending on k-fold indexing, the associated A[train] and A[test] matrices will be selected.
"""
self.A = A
super(BIKE_A_Ridge, self).__init__(alpha = alpha)
def _fit( self, xM_train_idx, yV):
self.train_idx = xM_train_idx[:,0]
A_train = self.A[ np.ix_(xM_train_idx[:,0], self.train_idx)]
		super(BIKE_A_Ridge, self).fit( A_train, yV)
def fit( self, xM_train_idx, yV):
self.train_idx = xM_train_idx.T
A_train = self.A[ [xM_train_idx, self.train_idx]]
super(BIKE_A_Ridge, self).fit( A_train, yV)
def predict( self, xM_test_idx):
"""
The index vector of a train sequence will be used to pick up
testing similarity matrix (or precomputed kernel output matrix).
"""
A_test = self.A[ [xM_test_idx, self.train_idx]]
return super(BIKE_A_Ridge, self).predict( A_test)
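# Note (added for clarity): in the BIKE classes the full similarity matrix A is
# computed once over all samples before any train/test split. fit() stores the
# training index vector, and predict() slices A with [test_idx, train_idx]
# (broadcast fancy indexing), i.e. the (n_test x n_train) block of precomputed
# kernel values that the underlying Ridge model then consumes as its features.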
class BIKE_Ridge( linear_model.Ridge): # Later on, Viking will be built
"""
BIKE - BInary Kernel Ensemble (BIKE) method
"""
def __init__(self, A_list = [], X = None, alpha = 0.5):
"""
A is precomputed similarity matrix of xM(all)
Depending on k-fold indexing, the associated A[train] and A[test] matrices will be selected.
"""
self.A_list = A_list
self.X = X
super(BIKE_Ridge, self).__init__(alpha = alpha)
def gen_AX( self, xM_idx):
AX_list = list()
for A in self.A_list:
AX_list.append( A[ [xM_idx, self.xM_train_idx_T]])
# Now X will be added as well since it is also a part of descriptor set.
if self.X is not None:
#print 'xM_idx[:,0] =', xM_idx[:,0]
xM_con = self.X[ xM_idx[:,0], :]
#print 'xM_con.shape = ', xM_con.shape
AX_list.append( xM_con)
# All kernel outputs and linear descriptors will be used as an input matrix.
return np.concatenate( AX_list, axis = 1)
def fit( self, xM_train_idx, yV):
"""
		The common part of fit() and predict() is factored out into the helper method gen_AX.
"""
self.xM_train_idx_T = xM_train_idx.T
AX = self.gen_AX( xM_train_idx)
super(BIKE_Ridge, self).fit( AX, yV)
def predict( self, xM_test_idx):
"""
The index vector of a train sequence will be used to pick up
testing similarity matrix (or precomputed kernel output matrix).
"""
AX = self.gen_AX( xM_test_idx)
return super(BIKE_Ridge, self).predict( AX)
"""
MBR Ensemble
- Since 'direct' is included as one of the ensemble modes,
  the performance of the direct (MLR) case can also be evaluated with this MBR-Ensemble method.
"""
class MBR_Ensemble_Ridge( linear_model.Ridge):
def __init__(self, alpha = 0.5, fsp_l = [], fpm_l = []):
"""
		fsp_l: feature split points for splitting different descriptors
		- refer to np.split()
		fpm_l: feature preprocessing mode
		- 'tanimoto', 'direct' (supported), 'rbf', 'tm-rbf' (under development)
		Note: len( fsp_l) == len( fpm_l) - 1, so that a preprocessing mode is specified for each feature group
"""
self.fsp_l = fsp_l
if len(fpm_l) == 0:
			self.fpm_l = ['tanimoto'] * (len( fsp_l) + 1)
else:
if len( fsp_l) == len( fpm_l) - 1:
self.fpm_l = fpm_l
else:
raise ValueError( "Check to be: len( fsp_l) == len( fpm_l) - 1")
super(MBR_Ensemble_Ridge, self).__init__(alpha = alpha)
def fit( self, xM_train_con, yV):
self.xM_train_l = np.split( xM_train_con, self.fsp_l, axis = 1)
#A_train_l = map(jpyx.calc_tm_sim_M, self.xM_train_l)
A_train_l = list()
for xM_train, fpm in zip( self.xM_train_l, self.fpm_l):
# print 'fpm, xM_train.shape', '->', fpm, xM_train.shape
if fpm == 'tanimoto':
# Since tanimoto is applied, xM must be binary but
# it is non binary type because it is combined with other type (float)
A_train_l.append( jpyx.calc_tm_sim_M( xM_train.astype( int)))
elif fpm == 'direct':
A_train_l.append( xM_train)
else:
raise ValueError("For fpm, the given mode is not supported:" + fpm)
A_train_ensemble = np.concatenate( A_train_l, axis = 1)
super(MBR_Ensemble_Ridge, self).fit( A_train_ensemble, yV)
def predict( self, xM_test_con):
xM_test_l = np.split( xM_test_con, self.fsp_l, axis = 1)
A_test_l = list()
for xM_train, xM_test, fpm in zip(self.xM_train_l, xM_test_l, self.fpm_l):
if fpm == 'tanimoto':
xM_all = np.concatenate( (xM_train, xM_test), axis = 0)
A_all = jpyx.calc_tm_sim_M( xM_all.astype( int))
A_test = A_all[ xM_train.shape[0]:, :xM_train.shape[0]]
A_test_l.append( A_test)
elif fpm == 'direct':
A_test_l.append( xM_test)
else:
raise ValueError("For fpm, the given mode is not supported:" + fpm)
A_test_ensemble = np.concatenate( A_test_l, axis = 1)
return super(MBR_Ensemble_Ridge, self).predict( A_test_ensemble)
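# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the fsp_l split
# points are assumed to carve one concatenated feature matrix into descriptor
# groups inside MBR_Ensemble_Ridge.fit(). With fsp_l = [4], columns 0-3 form a
# binary fingerprint block ('tanimoto') and the remaining columns a
# real-valued block ('direct'). All numbers below are made up for the demo.
def _demo_feature_split():
    xM_con = np.hstack([np.random.randint(0, 2, size=(5, 4)),  # binary block
                        np.random.randn(5, 2)])                # float block
    xM_bin, xM_float = np.split(xM_con, [4], axis=1)
    return xM_bin.shape, xM_float.shape  # -> (5, 4) and (5, 2)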
"""
MBR EnsembleBin
- if MBR_Ensemble is meta class inherented from six.with_metaclass(ABCMeta, LinearModel),
MBR_Ensemble_Ridge and MBR_Ensemble_Lasso can be more compact such as
describing only __init__ by inhereting both MBR_Ensemble and either
linear_model.Ridge or linear_model.Lasso depending on the mode.
- Now it is implemnted more simply. Later, such deep implementation will be applied.
"""
class _MBR_EnsembleBin_Ridge_r0( linear_model.Ridge):
def __init__(self, alpha = 0.5):
		super(_MBR_EnsembleBin_Ridge_r0, self).__init__(alpha = alpha)
def fit( self, xM_train_l, yV):
self.xM_train_l = xM_train_l
A_train_l = map(jpyx.calc_tm_sim_M, xM_train_l)
A_train_ensemble = np.concatenate( A_train_l, axis = 1)
		super(_MBR_EnsembleBin_Ridge_r0, self).fit( A_train_ensemble, yV)
def predict( self, xM_test_l):
xM_all_l = [np.concatenate( (xM_train, xM_test), axis = 0)
for xM_train, xM_test in zip( self.xM_train_l, xM_test_l)]
A_all_l = map( jpyx.calc_tm_sim_M, xM_all_l)
A_test_l = [A_all[ xM_train.shape[0]:, :xM_train.shape[0]]
for A_all, xM_train in zip( A_all_l, self.xM_train_l)]
A_test_ensemble = np.concatenate( A_test_l, axis = 1)
		return super(_MBR_EnsembleBin_Ridge_r0, self).predict( A_test_ensemble)
class MBR_EnsembleBin_Ridge( linear_model.Ridge):
def __init__(self, alpha = 0.5, fsp_l = []):
"""
fsp_l = feature split points for spliting differnt descriptors
"""
self.fsp_l = fsp_l
super(MBR_EnsembleBin_Ridge, self).__init__(alpha = alpha)
def fit( self, xM_train_con, yV):
self.xM_train_l = np.split( xM_train_con, self.fsp_l, axis = 1)
#self.xM_train_l = xM_train_l
A_train_l = map(jpyx.calc_tm_sim_M, self.xM_train_l)
A_train_ensemble = np.concatenate( A_train_l, axis = 1)
super(MBR_EnsembleBin_Ridge, self).fit( A_train_ensemble, yV)
def predict( self, xM_test_con):
xM_test_l = np.split( xM_test_con, self.fsp_l, axis = 1)
xM_all_l = [np.concatenate( (xM_train, xM_test), axis = 0)
for xM_train, xM_test in zip( self.xM_train_l, xM_test_l)]
A_all_l = map( jpyx.calc_tm_sim_M, xM_all_l)
A_test_l = [A_all[ xM_train.shape[0]:, :xM_train.shape[0]]
for A_all, xM_train in zip( A_all_l, self.xM_train_l)]
A_test_ensemble = np.concatenate( A_test_l, axis = 1)
return super(MBR_EnsembleBin_Ridge, self).predict( A_test_ensemble)
"""
MBR TM
- Gamma is not considered.
"""
class MBR_TM_Ridge( linear_model.Ridge):
def __init__(self, alpha = 0.5):
super(MBR_TM_Ridge, self).__init__(alpha = alpha)
def fit( self, xM_train, yV):
self.xM_train = xM_train
A_train = jpyx.calc_tm_sim_M( xM_train)
super(MBR_TM_Ridge, self).fit( A_train, yV)
def predict( self, xM_test):
#A = jpyx.calc_bin_sim_M( xM_test.astype(int), gamma = self.gamma)
xM_all = np.concatenate( (self.xM_train, xM_test), axis = 0)
A_all = jpyx.calc_tm_sim_M( xM_all)
A_test = A_all[ self.xM_train.shape[0]:, :self.xM_train.shape[0]]
return super(MBR_TM_Ridge, self).predict( A_test)
class MBR_TM_Lasso( linear_model.Lasso):
def __init__(self, alpha = 1.0, gamma = 1):
		super(MBR_TM_Lasso, self).__init__(alpha = alpha)
def fit( self, xM_train, yV):
self.xM_train = xM_train
A_train = jpyx.calc_tm_sim_M( xM_train)
super(MBR_TM_Lasso, self).fit( A_train, yV)
def predict( self, xM_test):
xM_all = np.concatenate( (self.xM_train, xM_test), axis = 0)
A_all = jpyx.calc_tm_sim_M( xM_all)
A_test = A_all[ self.xM_train.shape[0]:, :self.xM_train.shape[0]]
return super(MBR_TM_Lasso, self).predict( A_test)
"""
MBR Sim
Similarity control MBR
Original MBR does not have the functionality to change gamma,
although SVM has it. It will be considered later on.
"""
class MBR_Ridge( linear_model.Ridge):
def __init__(self, alpha = 0.5, gamma = 1):
self.alpha = alpha
self.gamma = gamma
#self.clf = linear_model.Ridge( alpha = self.alpha)
#linear_model.Ridge( self, alpha = self.alpha)
super(MBR_Ridge, self).__init__(alpha = self.alpha)
def fit( self, xM_train, yV):
self.xM_train = xM_train
A_train = jpyx.calc_bin_sim_M( xM_train, gamma = self.gamma)
super(MBR_Ridge, self).fit( A_train, yV)
def predict( self, xM_test):
#A = jpyx.calc_bin_sim_M( xM_test.astype(int), gamma = self.gamma)
xM_all = np.concatenate( (self.xM_train, xM_test), axis = 0)
A_all = jpyx.calc_bin_sim_M( xM_all, gamma = self.gamma)
A_test = A_all[ self.xM_train.shape[0]:, :self.xM_train.shape[0]]
return super(MBR_Ridge, self).predict( A_test)
class MBR_Lasso( linear_model.Lasso):
def __init__(self, alpha = 1.0, gamma = 1):
self.alpha = alpha
self.gamma = gamma
#self.clf = linear_model.Ridge( alpha = self.alpha)
#linear_model.Ridge( self, alpha = self.alpha)
super(MBR_Lasso, self).__init__(alpha = self.alpha)
def fit( self, xM_train, yV):
self.xM_train = xM_train
A_train = jpyx.calc_bin_sim_M( xM_train, gamma = self.gamma)
super(MBR_Lasso, self).fit( A_train, yV)
def predict( self, xM_test):
#A = jpyx.calc_bin_sim_M( xM_test.astype(int), gamma = self.gamma)
xM_all = np.concatenate( (self.xM_train, xM_test), axis = 0)
A_all = jpyx.calc_bin_sim_M( xM_all, gamma = self.gamma)
A_test = A_all[ self.xM_train.shape[0]:, :self.xM_train.shape[0]]
return super(MBR_Lasso, self).predict( A_test)
"""
MBR_Dist
"""
def sim_to_dist( A):
	"""
	Map a similarity matrix A (1.0 = identical) to a distance-like kernel by
	computing exp((1 - A)**2) element-wise, without modifying the caller's array.
	"""
	D = np.power( np.abs( 1.0 - A), 2)
	return np.exp( D)
class MBR_Dist_Lasso( linear_model.Lasso):
def __init__(self, alpha = 1.0, gamma = 1):
self.alpha = alpha
self.gamma = gamma
#self.clf = linear_model.Ridge( alpha = self.alpha)
#linear_model.Ridge( self, alpha = self.alpha)
super(MBR_Dist_Lasso, self).__init__(alpha = self.alpha)
def fit( self, xM_train, yV):
self.xM_train = xM_train
A_train = jpyx.calc_bin_sim_M( xM_train, gamma = self.gamma)
super(MBR_Dist_Lasso, self).fit( sim_to_dist( A_train), yV)
def predict( self, xM_test):
#A = jpyx.calc_bin_sim_M( xM_test.astype(int), gamma = self.gamma)
xM_all = np.concatenate( (self.xM_train, xM_test), axis = 0)
A_all = jpyx.calc_bin_sim_M( xM_all, gamma = self.gamma)
A_test = A_all[ self.xM_train.shape[0]:, :self.xM_train.shape[0]]
return super(MBR_Dist_Lasso, self).predict( sim_to_dist(A_test))
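# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption, not part of the compiled jpyx module):
# the classes above rely on jpyx.calc_tm_sim_M() to build a Tanimoto similarity
# matrix from binary fingerprints. The plain-numpy reference below shows the
# standard Tanimoto definition T(a, b) = |a AND b| / (|a| + |b| - |a AND b|),
# so the kernel construction can be followed without the extension module.
def _tanimoto_similarity_reference(xM):
    """Pairwise Tanimoto similarity of binary (0/1) row vectors."""
    X = np.asarray(xM, dtype=float)
    inner = X.dot(X.T)               # |a AND b| for 0/1 vectors
    counts = X.sum(axis=1)           # |a| for each row
    denom = counts[:, None] + counts[None, :] - inner
    denom[denom == 0] = 1.0          # two all-zero rows: define similarity as 0
    return inner / denom

if __name__ == '__main__':
    xM_demo = np.array([[1, 1, 0, 0],
                        [1, 0, 1, 0],
                        [1, 1, 0, 0]])
    print(_tanimoto_similarity_reference(xM_demo))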
| mit |
paalge/scikit-image | viewer_examples/plugins/watershed_demo.py | 35 | 1277 | import matplotlib.pyplot as plt
from skimage import data
from skimage import filters
from skimage import morphology
from skimage.viewer import ImageViewer
from skimage.viewer.widgets import history
from skimage.viewer.plugins.labelplugin import LabelPainter
class OKCancelButtons(history.OKCancelButtons):
def update_original_image(self):
# OKCancelButtons updates the original image with the filtered image
# by default. Override this method to update the overlay.
self.plugin._show_watershed()
self.plugin.close()
class WatershedPlugin(LabelPainter):
def help(self):
helpstr = ("Watershed plugin",
"----------------",
"Use mouse to paint each region with a different label.",
"Press OK to display segmented image.")
return '\n'.join(helpstr)
def _show_watershed(self):
viewer = self.image_viewer
        edge_image = filters.sobel(viewer.image)
labels = morphology.watershed(edge_image, self.paint_tool.overlay)
viewer.ax.imshow(labels, cmap=plt.cm.jet, alpha=0.5)
viewer.redraw()
image = data.coins()
plugin = WatershedPlugin()
plugin += OKCancelButtons()
viewer = ImageViewer(image)
viewer += plugin
viewer.show()
| bsd-3-clause |
ElDeveloper/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
ZenDevelopmentSystems/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
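# Note (added for clarity): the decision boundary satisfies
# w[0]*x + w[1]*y + intercept = 0, which rearranges to
# y = -(w[0]/w[1])*x - intercept/w[1]; that is exactly the slope `a` and the
# line `yy` computed below. The dashed lines are the margins through the
# support vectors, obtained by shifting this line to pass through them.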
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
andrewnc/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5
and 6 are a bad pick for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
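# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original example): for a sample i the
# silhouette coefficient is s(i) = (b(i) - a(i)) / max(a(i), b(i)), where a(i)
# is the mean distance to the other members of its own cluster and b(i) is the
# mean distance to the members of the nearest other cluster. The tiny check
# below recomputes s for one point of a made-up 4-point data set by hand and
# compares it with silhouette_samples.
toy_X = np.array([[0.0, 0.0], [0.0, 1.0], [10.0, 0.0], [10.0, 1.0]])
toy_labels = np.array([0, 0, 1, 1])
a0 = 1.0                              # distance to the other cluster-0 point
b0 = np.mean([10.0, np.sqrt(101.0)])  # mean distance to the two cluster-1 points
assert np.isclose((b0 - a0) / max(a0, b0),
                  silhouette_samples(toy_X, toy_labels)[0])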
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for the average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
rs2/pandas | pandas/tests/indexes/period/test_ops.py | 1 | 13288 | import numpy as np
import pytest
import pandas as pd
from pandas import Index, NaT, PeriodIndex, Series
import pandas._testing as tm
class TestPeriodIndexOps:
@pytest.mark.parametrize(
"freq,expected",
[
("A", "year"),
("Q", "quarter"),
("M", "month"),
("D", "day"),
("H", "hour"),
("T", "minute"),
("S", "second"),
("L", "millisecond"),
("U", "microsecond"),
],
)
def test_resolution(self, freq, expected):
idx = pd.period_range(start="2013-04-01", periods=30, freq=freq)
assert idx.resolution == expected
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range("2011-01-01 09:00", freq="H", periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx._values, range(1, len(idx) + 1)), freq="H")
exp_idx = PeriodIndex(
[
"2011-01-01 18:00",
"2011-01-01 17:00",
"2011-01-01 16:00",
"2011-01-01 15:00",
"2011-01-01 14:00",
"2011-01-01 13:00",
"2011-01-01 12:00",
"2011-01-01 11:00",
"2011-01-01 10:00",
"2011-01-01 09:00",
],
freq="H",
)
expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range("2011-01-01 09:00", freq="H", periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(
[
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 08:00",
"2013-01-01 08:00",
NaT,
],
freq="H",
)
exp_idx = PeriodIndex(["2013-01-01 09:00", "2013-01-01 08:00"], freq="H")
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(["2013-01-01 09:00", "2013-01-01 08:00", NaT], freq="H")
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
@pytest.mark.parametrize("freq", ["D", "3D", "H", "2H", "T", "2T", "S", "3S"])
def test_drop_duplicates_metadata(self, freq):
# GH 10115
idx = pd.period_range("2011-01-01", periods=10, freq=freq, name="idx")
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
@pytest.mark.parametrize("freq", ["D", "3D", "H", "2H", "T", "2T", "S", "3S"])
@pytest.mark.parametrize(
"keep, expected, index",
[
("first", np.concatenate(([False] * 10, [True] * 5)), np.arange(0, 10)),
("last", np.concatenate(([True] * 5, [False] * 10)), np.arange(5, 15)),
(
False,
np.concatenate(([True] * 5, [False] * 5, [True] * 5)),
np.arange(5, 10),
),
],
)
def test_drop_duplicates(self, freq, keep, expected, index):
# to check Index/Series compat
idx = pd.period_range("2011-01-01", periods=10, freq=freq, name="idx")
idx = idx.append(idx[:5])
tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected)
expected = idx[~expected]
result = idx.drop_duplicates(keep=keep)
tm.assert_index_equal(result, expected)
result = Series(idx).drop_duplicates(keep=keep)
tm.assert_series_equal(result, Series(expected, index=index))
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
assert index.freq == expected_index.freq
pidx = PeriodIndex(["2011", "2012", "2013"], name="pidx", freq="A")
# for compatibility check
iidx = Index([2011, 2012, 2013], name="idx")
for idx in [pidx, iidx]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, idx[::-1])
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(
["2011", "2013", "2015", "2012", "2011"], name="pidx", freq="A"
)
pexpected = PeriodIndex(
["2011", "2011", "2012", "2013", "2015"], name="pidx", freq="A"
)
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name="idx")
iexpected = Index([2011, 2011, 2012, 2013, 2015], name="idx")
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
pidx = PeriodIndex(["2011", "2013", "NaT", "2011"], name="pidx", freq="D")
result = pidx.sort_values()
expected = PeriodIndex(["NaT", "2011", "2011", "2013"], name="pidx", freq="D")
tm.assert_index_equal(result, expected)
assert result.freq == "D"
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(["2013", "2011", "2011", "NaT"], name="pidx", freq="D")
tm.assert_index_equal(result, expected)
assert result.freq == "D"
def test_order(self):
for freq in ["D", "2D", "4D"]:
idx = PeriodIndex(
["2011-01-01", "2011-01-02", "2011-01-03"], freq=freq, name="idx"
)
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq == freq
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
assert ordered.freq == idx.freq
assert ordered.freq == freq
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq == freq
idx1 = PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
freq="D",
name="idx1",
)
exp1 = PeriodIndex(
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
freq="D",
name="idx1",
)
idx2 = PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
freq="D",
name="idx2",
)
exp2 = PeriodIndex(
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
freq="D",
name="idx2",
)
idx3 = PeriodIndex(
[NaT, "2011-01-03", "2011-01-05", "2011-01-02", NaT], freq="D", name="idx3"
)
exp3 = PeriodIndex(
[NaT, NaT, "2011-01-02", "2011-01-03", "2011-01-05"], freq="D", name="idx3"
)
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq == "D"
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq == "D"
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq == "D"
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq == "D"
def test_nat(self):
assert pd.PeriodIndex._na_value is NaT
assert pd.PeriodIndex([], freq="M")._na_value is NaT
idx = pd.PeriodIndex(["2011-01-01", "2011-01-02"], freq="D")
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp))
idx = pd.PeriodIndex(["2011-01-01", "NaT"], freq="D")
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp))
@pytest.mark.parametrize("freq", ["D", "M"])
def test_equals(self, freq):
# GH#13107
idx = pd.PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq=freq)
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq="H")
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.astype(object))
assert not idx.astype(object).equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# same internal, different tz
idx3 = pd.PeriodIndex._simple_new(
idx._values._simple_new(idx._values.asi8, freq="H")
)
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
assert not idx.equals(idx3.astype(object))
assert not idx.astype(object).equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
def test_freq_setter_deprecated(self):
# GH 20678
idx = pd.period_range("2018Q1", periods=4, freq="Q")
# no warning for getter
with tm.assert_produces_warning(None):
idx.freq
# warning for setter
with pytest.raises(AttributeError, match="can't set attribute"):
idx.freq = pd.offsets.Day()
@pytest.mark.xfail(reason="Datetime-like sort_values currently unstable (GH 35922)")
def test_order_stability_compat():
# GH 35584. The new implementation of sort_values for Index.sort_values
# is stable when sorting in descending order. Datetime-like sort_values
# currently aren't stable. xfail should be removed after
# the implementations' behavior is synchronized (xref GH 35922)
pidx = PeriodIndex(["2011", "2013", "2015", "2012", "2011"], name="pidx", freq="A")
iidx = Index([2011, 2013, 2015, 2012, 2011], name="idx")
ordered1, indexer1 = pidx.sort_values(return_indexer=True, ascending=False)
ordered2, indexer2 = iidx.sort_values(return_indexer=True, ascending=False)
tm.assert_numpy_array_equal(indexer1, indexer2)
| bsd-3-clause |
lin-credible/scikit-learn | sklearn/neighbors/base.py | 115 | 29783 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
    ----------
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
    -------
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
            # A tree approach is better for a small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
            # Corner case: When the number of duplicates is more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
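        # Each query row has exactly n_neighbors stored entries, so the CSR
        # row pointer is a simple arithmetic progression.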
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([1., 1., 1.])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
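        # Rows have variable numbers of neighbors here, so the CSR row pointer
        # is the cumulative sum of the per-row neighbor counts.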
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
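        # Encode each output column as integer indices into its sorted unique
        # class values; classes_ keeps the original labels for decoding later.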
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| bsd-3-clause |
DLTK/DLTK | examples/applications/MRBrainS13_tissue_segmentation/train.py | 1 | 8610 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import argparse
import os
import pandas as pd
import tensorflow as tf
import numpy as np
from dltk.core.metrics import dice
from dltk.networks.segmentation.unet import residual_unet_3d
from dltk.io.abstract_reader import Reader
from reader import read_fn
EVAL_EVERY_N_STEPS = 100
EVAL_STEPS = 1
NUM_CLASSES = 9
NUM_CHANNELS = 3
NUM_FEATURES_IN_SUMMARIES = min(4, NUM_CHANNELS)
BATCH_SIZE = 16
SHUFFLE_CACHE_SIZE = 64
MAX_STEPS = 50000
def model_fn(features, labels, mode, params):
"""Model function to construct a tf.estimator.EstimatorSpec. It creates a
network given input features (e.g. from a dltk.io.abstract_reader) and
training targets (labels). Further, loss, optimiser, evaluation ops and
custom tensorboard summary ops can be added. For additional information,
please refer to https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator#model_fn.
Args:
features (tf.Tensor): Tensor of input features to train from. Required
rank and dimensions are determined by the subsequent ops
(i.e. the network).
labels (tf.Tensor): Tensor of training targets or labels. Required rank
and dimensions are determined by the network output.
mode (str): One of the tf.estimator.ModeKeys: TRAIN, EVAL or PREDICT
params (dict, optional): A dictionary to parameterise the model_fn
(e.g. learning_rate)
Returns:
tf.estimator.EstimatorSpec: A custom EstimatorSpec for this experiment
"""
# 1. create a model and its outputs
net_output_ops = residual_unet_3d(
inputs=features['x'],
num_classes=NUM_CLASSES,
num_res_units=2,
filters=(16, 32, 64, 128),
strides=((1, 1, 1), (1, 2, 2), (1, 2, 2), (1, 2, 2)),
mode=mode,
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-4))
# 1.1 Generate predictions only (for `ModeKeys.PREDICT`)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=net_output_ops,
export_outputs={'out': tf.estimator.export.PredictOutput(net_output_ops)})
# 2. set up a loss function
ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=net_output_ops['logits'],
labels=labels['y'])
loss = tf.reduce_mean(ce)
# 3. define a training op and ops for updating moving averages
# (i.e. for batch normalisation)
global_step = tf.train.get_global_step()
optimiser = tf.train.MomentumOptimizer(
learning_rate=params["learning_rate"],
momentum=0.9)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimiser.minimize(loss, global_step=global_step)
# 4.1 (optional) create custom image summaries for tensorboard
my_image_summaries = {}
my_image_summaries['feat_t1'] = features['x'][0, 0, :, :, 0]
my_image_summaries['feat_t1_ir'] = features['x'][0, 0, :, :, 1]
my_image_summaries['feat_t2_flair'] = features['x'][0, 0, :, :, 2]
my_image_summaries['labels'] = tf.cast(labels['y'], tf.float32)[0, 0, :, :]
my_image_summaries['predictions'] = tf.cast(net_output_ops['y_'], tf.float32)[0, 0, :, :]
expected_output_size = [1, 128, 128, 1] # [B, W, H, C]
[tf.summary.image(name, tf.reshape(image, expected_output_size))
for name, image in my_image_summaries.items()]
# 4.2 (optional) create custom metric summaries for tensorboard
dice_tensor = tf.py_func(dice, [net_output_ops['y_'],
labels['y'],
tf.constant(NUM_CLASSES)], tf.float32)
[tf.summary.scalar('dsc_l{}'.format(i), dice_tensor[i])
for i in range(NUM_CLASSES)]
# 5. Return EstimatorSpec object
return tf.estimator.EstimatorSpec(mode=mode,
predictions=net_output_ops,
loss=loss,
train_op=train_op,
eval_metric_ops=None)
def train(args):
np.random.seed(42)
tf.set_random_seed(42)
print('Setting up...')
# Parse csv files for file names
all_filenames = pd.read_csv(
args.train_csv,
dtype=object,
keep_default_na=False,
na_values=[]).as_matrix()
train_filenames = all_filenames[1:4]
val_filenames = all_filenames[4:5]
# Set up a data reader to handle the file i/o.
reader_params = {'n_examples': 18,
'example_size': [4, 128, 128],
'extract_examples': True}
reader_example_shapes = {'features': {'x': reader_params['example_size'] + [NUM_CHANNELS, ]},
'labels': {'y': reader_params['example_size']}}
reader = Reader(read_fn,
{'features': {'x': tf.float32},
'labels': {'y': tf.int32}})
# Get input functions and queue initialisation hooks for training and
# validation data
train_input_fn, train_qinit_hook = reader.get_inputs(
file_references=train_filenames,
mode=tf.estimator.ModeKeys.TRAIN,
example_shapes=reader_example_shapes,
batch_size=BATCH_SIZE,
shuffle_cache_size=SHUFFLE_CACHE_SIZE,
params=reader_params)
val_input_fn, val_qinit_hook = reader.get_inputs(
file_references=val_filenames,
mode=tf.estimator.ModeKeys.EVAL,
example_shapes=reader_example_shapes,
batch_size=BATCH_SIZE,
shuffle_cache_size=SHUFFLE_CACHE_SIZE,
params=reader_params)
# Instantiate the neural network estimator
nn = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=args.model_path,
params={"learning_rate": 0.001},
config=tf.estimator.RunConfig())
# Hooks for validation summaries
val_summary_hook = tf.contrib.training.SummaryAtEndHook(
os.path.join(args.model_path, 'eval'))
step_cnt_hook = tf.train.StepCounterHook(
every_n_steps=EVAL_EVERY_N_STEPS,
output_dir=args.model_path)
print('Starting training...')
try:
for _ in range(MAX_STEPS // EVAL_EVERY_N_STEPS):
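            # Alternate short training runs with optional validation passes so
            # that summaries are written every EVAL_EVERY_N_STEPS steps.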
nn.train(
input_fn=train_input_fn,
hooks=[train_qinit_hook, step_cnt_hook],
steps=EVAL_EVERY_N_STEPS)
if args.run_validation:
results_val = nn.evaluate(
input_fn=val_input_fn,
hooks=[val_qinit_hook, val_summary_hook],
steps=EVAL_STEPS)
print('Step = {}; val loss = {:.5f};'.format(
results_val['global_step'], results_val['loss']))
except KeyboardInterrupt:
pass
print('Stopping now.')
export_dir = nn.export_savedmodel(
export_dir_base=args.model_path,
serving_input_receiver_fn=reader.serving_input_receiver_fn(reader_example_shapes))
print('Model saved to {}.'.format(export_dir))
if __name__ == '__main__':
# Set up argument parser
parser = argparse.ArgumentParser(
description='Example: MRBrainS13 example segmentation training script')
parser.add_argument('--run_validation', default=True)
parser.add_argument('--restart', default=False, action='store_true')
parser.add_argument('--verbose', default=False, action='store_true')
parser.add_argument('--cuda_devices', '-c', default='0')
parser.add_argument('--model_path', '-p', default='/tmp/mrbrains_segmentation/')
parser.add_argument('--train_csv', default='mrbrains.csv')
args = parser.parse_args()
# Set verbosity
if args.verbose:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.logging.set_verbosity(tf.logging.INFO)
else:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.ERROR)
# GPU allocation options
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_devices
# Handle restarting and resuming training
if args.restart:
print('Restarting training from scratch.')
os.system('rm -rf {}'.format(args.model_path))
if not os.path.isdir(args.model_path):
os.system('mkdir -p {}'.format(args.model_path))
else:
print('Resuming training on model_path {}'.format(args.model_path))
# Call training
train(args)
| apache-2.0 |
nesterione/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes
that features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
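# Fit an independent Lasso per task (column of Y) and a joint MultiTaskLasso
# on the same data, keeping only the estimated coefficients for comparison.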
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
cjermain/numpy | numpy/lib/twodim_base.py | 83 | 26903 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
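    # Walk the k-th diagonal through the flattened array: a stride of M+1
    # moves one row down and one column right; limiting the view to the
    # first M-k rows keeps the stride from wrapping past the last column.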
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
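    # fi holds the flat (row-major) indices of the k-th diagonal: element i of
    # the diagonal sits at row i, column i+k above the main diagonal, or at
    # row i-k, column i below it.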
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
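    # Outer comparison builds the boolean mask directly: entry (i, j) is
    # True exactly when i >= j - k, i.e. when (i, j) lies on or below the
    # k-th diagonal.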
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
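        # Cumulative product along each row turns the repeated x column into
        # x**1, x**2, ..., x**(N-1); the [:, ::-1] view above reverses the
        # column order when increasing=False.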
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
    >>> print(H[::-1]) # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
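        # A single sequence of bin edges was passed; share it between the two
        # dimensions before delegating to histogramdd.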
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| bsd-3-clause |
PrashntS/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
Funtimezzhou/TradeBuildTools | lib/hft_portfolio.py | 3 | 9070 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# portfolio.py
from __future__ import print_function
import datetime
from math import floor
try:
import Queue as queue
except ImportError:
import queue
import numpy as np
import pandas as pd
from event import FillEvent, OrderEvent
from performance import create_sharpe_ratio, create_drawdowns
class PortfolioHFT(object):
"""
The Portfolio class handles the positions and market
value of all instruments at a resolution of one
minutely bar. It is almost identical to the standard
Portfolio class, except that the Sharpe Ratio
calculation is modified and the correct call is made
to the HFT Data object for the 'close' price with
DTN IQFeed data.
The positions DataFrame stores a time-index of the
quantity of positions held.
The holdings DataFrame stores the cash and total market
holdings value of each symbol for a particular
time-index, as well as the percentage change in
portfolio total across bars.
"""
def __init__(self, bars, events, start_date, initial_capital=100000.0):
"""
Initialises the portfolio with bars and an event queue.
Also includes a starting datetime index and initial capital
(USD unless otherwise stated).
Parameters:
bars - The DataHandler object with current market data.
events - The Event Queue object.
start_date - The start date (bar) of the portfolio.
initial_capital - The starting capital in USD.
"""
self.bars = bars
self.events = events
self.symbol_list = self.bars.symbol_list
self.start_date = start_date
self.initial_capital = initial_capital
self.all_positions = self.construct_all_positions()
self.current_positions = dict( (k,v) for k, v in [(s, 0) for s in self.symbol_list] )
self.all_holdings = self.construct_all_holdings()
self.current_holdings = self.construct_current_holdings()
def construct_all_positions(self):
"""
Constructs the positions list using the start_date
to determine when the time index will begin.
"""
d = dict( (k,v) for k, v in [(s, 0) for s in self.symbol_list] )
d['datetime'] = self.start_date
return [d]
def construct_all_holdings(self):
"""
Constructs the holdings list using the start_date
to determine when the time index will begin.
"""
d = dict( (k,v) for k, v in [(s, 0.0) for s in self.symbol_list] )
d['datetime'] = self.start_date
d['cash'] = self.initial_capital
d['commission'] = 0.0
d['total'] = self.initial_capital
return [d]
def construct_current_holdings(self):
"""
This constructs the dictionary which will hold the instantaneous
value of the portfolio across all symbols.
"""
d = dict( (k,v) for k, v in [(s, 0.0) for s in self.symbol_list] )
d['cash'] = self.initial_capital
d['commission'] = 0.0
d['total'] = self.initial_capital
return d
def update_timeindex(self, event):
"""
Adds a new record to the positions matrix for the current
market data bar. This reflects the PREVIOUS bar, i.e. all
current market data at this stage is known (OHLCV).
Makes use of a MarketEvent from the events queue.
"""
latest_datetime = self.bars.get_latest_bar_datetime(self.symbol_list[0])
# Update positions
# ================
dp = dict( (k,v) for k, v in [(s, 0) for s in self.symbol_list] )
dp['datetime'] = latest_datetime
for s in self.symbol_list:
dp[s] = self.current_positions[s]
# Append the current positions
self.all_positions.append(dp)
# Update holdings
# ===============
dh = dict( (k,v) for k, v in [(s, 0) for s in self.symbol_list] )
dh['datetime'] = latest_datetime
dh['cash'] = self.current_holdings['cash']
dh['commission'] = self.current_holdings['commission']
dh['total'] = self.current_holdings['cash']
for s in self.symbol_list:
# Approximation to the real value
market_value = self.current_positions[s] * \
self.bars.get_latest_bar_value(s, "close")
dh[s] = market_value
dh['total'] += market_value
# Append the current holdings
self.all_holdings.append(dh)
# ======================
# FILL/POSITION HANDLING
# ======================
def update_positions_from_fill(self, fill):
"""
Takes a Fill object and updates the position matrix to
reflect the new position.
Parameters:
fill - The Fill object to update the positions with.
"""
# Check whether the fill is a buy or sell
fill_dir = 0
if fill.direction == 'BUY':
fill_dir = 1
if fill.direction == 'SELL':
fill_dir = -1
# Update positions list with new quantities
self.current_positions[fill.symbol] += fill_dir*fill.quantity
def update_holdings_from_fill(self, fill):
"""
Takes a Fill object and updates the holdings matrix to
reflect the holdings value.
Parameters:
fill - The Fill object to update the holdings with.
"""
# Check whether the fill is a buy or sell
fill_dir = 0
if fill.direction == 'BUY':
fill_dir = 1
if fill.direction == 'SELL':
fill_dir = -1
# Update holdings list with new quantities
fill_cost = self.bars.get_latest_bar_value(
fill.symbol, "close"
)
cost = fill_dir * fill_cost * fill.quantity
self.current_holdings[fill.symbol] += cost
self.current_holdings['commission'] += fill.commission
self.current_holdings['cash'] -= (cost + fill.commission)
self.current_holdings['total'] -= (cost + fill.commission)
def update_fill(self, event):
"""
Updates the portfolio current positions and holdings
from a FillEvent.
"""
if event.type == 'FILL':
self.update_positions_from_fill(event)
self.update_holdings_from_fill(event)
def generate_naive_order(self, signal):
"""
Simply files an Order object as a constant quantity
sizing of the signal object, without risk management or
position sizing considerations.
Parameters:
signal - The tuple containing Signal information.
"""
order = None
symbol = signal.symbol
direction = signal.signal_type
strength = signal.strength
mkt_quantity = 100
cur_quantity = self.current_positions[symbol]
order_type = 'MKT'
if direction == 'LONG' and cur_quantity == 0:
order = OrderEvent(symbol, order_type, mkt_quantity, 'BUY')
if direction == 'SHORT' and cur_quantity == 0:
order = OrderEvent(symbol, order_type, mkt_quantity, 'SELL')
if direction == 'EXIT' and cur_quantity > 0:
order = OrderEvent(symbol, order_type, abs(cur_quantity), 'SELL')
if direction == 'EXIT' and cur_quantity < 0:
order = OrderEvent(symbol, order_type, abs(cur_quantity), 'BUY')
return order
def update_signal(self, event):
"""
Acts on a SignalEvent to generate new orders
based on the portfolio logic.
"""
if event.type == 'SIGNAL':
order_event = self.generate_naive_order(event)
self.events.put(order_event)
# ========================
# POST-BACKTEST STATISTICS
# ========================
def create_equity_curve_dataframe(self):
"""
Creates a pandas DataFrame from the all_holdings
list of dictionaries.
"""
curve = pd.DataFrame(self.all_holdings)
curve.set_index('datetime', inplace=True)
curve['returns'] = curve['total'].pct_change()
curve['equity_curve'] = (1.0+curve['returns']).cumprod()
self.equity_curve = curve
def output_summary_stats(self):
"""
Creates a list of summary statistics for the portfolio.
"""
total_return = self.equity_curve['equity_curve'][-1]
returns = self.equity_curve['returns']
pnl = self.equity_curve['equity_curve']
sharpe_ratio = create_sharpe_ratio(returns, periods=252*6.5*60)
drawdown, max_dd, dd_duration = create_drawdowns(pnl)
self.equity_curve['drawdown'] = drawdown
stats = [("Total Return", "%0.2f%%" % ((total_return - 1.0) * 100.0)),
("Sharpe Ratio", "%0.2f" % sharpe_ratio),
("Max Drawdown", "%0.2f%%" % (max_dd * 100.0)),
("Drawdown Duration", "%d" % dd_duration)]
self.equity_curve.to_csv('equity.csv')
return stats
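# A minimal usage sketch of the event loop that PortfolioHFT is built for
# (see the class docstring above). The data handler and methods named below
# (`HistoricCSVDataHandlerHFT`, `update_bars`, `continue_backtest`) are
# assumed for illustration only -- any DataHandler-like object exposing
# `get_latest_bar_datetime` / `get_latest_bar_value` plus a shared event
# queue would do:
#
#     events = queue.Queue()
#     bars = HistoricCSVDataHandlerHFT(events, csv_dir, ['AAPL'])
#     port = PortfolioHFT(bars, events, start_date, initial_capital=100000.0)
#     while bars.continue_backtest:
#         bars.update_bars()                 # pushes a MarketEvent
#         event = events.get()
#         if event.type == 'MARKET':
#             port.update_timeindex(event)
#         elif event.type == 'SIGNAL':
#             port.update_signal(event)      # emits an OrderEvent
#         elif event.type == 'FILL':
#             port.update_fill(event)
#     port.create_equity_curve_dataframe()
#     print(port.output_summary_stats())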
| gpl-3.0 |
robcarver17/pysystemtrade | sysinit/futures/multipleprices_from_arcticprices_and_csv_calendars_to_arctic.py | 1 | 6580 | """
We create multiple prices using:
- roll calendars, stored in csv
- individual futures contract prices, stored in arctic
We then store those multiple prices in: (depending on options)
- arctic
- .csv
"""
from syscore.objects import arg_not_supplied
from sysobjects.dict_of_futures_per_contract_prices import dictFuturesContractFinalPrices
import datetime
import pandas as pd
from sysdata.arctic.arctic_futures_per_contract_prices import (
arcticFuturesContractPriceData,
)
from sysobjects.rolls import rollParameters, contractDateWithRollParameters
from sysobjects.contract_dates_and_expiries import contractDate
from sysdata.csv.csv_roll_calendars import csvRollCalendarData
from sysdata.csv.csv_multiple_prices import csvFuturesMultiplePricesData
from sysdata.arctic.arctic_multiple_prices import arcticFuturesMultiplePricesData
from sysdata.arctic.arctic_futures_per_contract_prices import arcticFuturesContractPriceData
from sysinit.futures.build_roll_calendars import adjust_to_price_series
from sysobjects.multiple_prices import futuresMultiplePrices
from sysdata.mongodb.mongo_roll_data import mongoRollParametersData
def _get_data_inputs(csv_roll_data_path, csv_multiple_data_path):
csv_roll_calendars = csvRollCalendarData(csv_roll_data_path)
arctic_individual_futures_prices = arcticFuturesContractPriceData()
arctic_multiple_prices = arcticFuturesMultiplePricesData()
csv_multiple_prices = csvFuturesMultiplePricesData(csv_multiple_data_path)
return (
csv_roll_calendars,
arctic_individual_futures_prices,
arctic_multiple_prices,
csv_multiple_prices,
)
def process_multiple_prices_all_instruments(
csv_multiple_data_path=arg_not_supplied,
csv_roll_data_path=arg_not_supplied,
ADD_TO_ARCTIC=True,
ADD_TO_CSV=False,
):
(
_not_used1,
arctic_individual_futures_prices,
_not_used2,
_not_used3,
) = _get_data_inputs(csv_roll_data_path, csv_multiple_data_path)
instrument_list = arctic_individual_futures_prices.get_list_of_instrument_codes_with_price_data()
for instrument_code in instrument_list:
print(instrument_code)
process_multiple_prices_single_instrument(
instrument_code,
csv_multiple_data_path=csv_multiple_data_path,
csv_roll_data_path=csv_roll_data_path,
ADD_TO_ARCTIC=ADD_TO_ARCTIC,
ADD_TO_CSV=ADD_TO_CSV,
)
def process_multiple_prices_single_instrument(
instrument_code,
adjust_calendar_to_prices = True,
csv_multiple_data_path=arg_not_supplied,
csv_roll_data_path=arg_not_supplied,
ADD_TO_ARCTIC=True,
ADD_TO_CSV=False,
):
(
csv_roll_calendars,
arctic_individual_futures_prices,
arctic_multiple_prices,
csv_multiple_prices,
) = _get_data_inputs(csv_roll_data_path, csv_multiple_data_path)
dict_of_futures_contract_prices = (
arctic_individual_futures_prices.get_all_prices_for_instrument(instrument_code))
dict_of_futures_contract_closing_prices = (
dict_of_futures_contract_prices.final_prices()
)
roll_calendar = csv_roll_calendars.get_roll_calendar(instrument_code)
# Add first phantom row so that the last calendar entry won't be consumed by adjust_roll_calendar()
m = mongoRollParametersData()
roll_parameters = m.get_roll_parameters(instrument_code)
roll_calendar = add_phantom_row(roll_calendar, dict_of_futures_contract_closing_prices, roll_parameters)
if adjust_calendar_to_prices:
roll_calendar = adjust_roll_calendar(instrument_code, roll_calendar)
# Second phantom row is needed in order to process the whole set of closing prices (and not stop after the last roll-over)
roll_calendar = add_phantom_row(roll_calendar, dict_of_futures_contract_closing_prices, roll_parameters)
multiple_prices = futuresMultiplePrices.create_from_raw_data(
roll_calendar, dict_of_futures_contract_closing_prices
)
print(multiple_prices)
if ADD_TO_ARCTIC:
arctic_multiple_prices.add_multiple_prices(
instrument_code, multiple_prices, ignore_duplication=True
)
if ADD_TO_CSV:
csv_multiple_prices.add_multiple_prices(
instrument_code, multiple_prices, ignore_duplication=True
)
return multiple_prices
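# A minimal single-instrument run might look like the following ('EDOLLAR'
# is purely an illustrative instrument code; any code present in both the
# Arctic per-contract price store and the csv roll-calendar directory works):
#
#     prices = process_multiple_prices_single_instrument(
#         "EDOLLAR", ADD_TO_ARCTIC=False, ADD_TO_CSV=True)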
def adjust_roll_calendar(instrument_code, roll_calendar):
arctic_prices_per_contract = arcticFuturesContractPriceData()
print("Getting prices to adjust roll calendar")
dict_of_prices = arctic_prices_per_contract.get_all_prices_for_instrument(instrument_code)
dict_of_futures_contract_prices = dict_of_prices.final_prices()
roll_calendar = adjust_to_price_series(roll_calendar, dict_of_futures_contract_prices)
return roll_calendar
def add_phantom_row(roll_calendar, dict_of_futures_contract_prices: dictFuturesContractFinalPrices,
roll_parameters: rollParameters):
final_row = roll_calendar.iloc[-1]
if datetime.datetime.now()<final_row.name:
return roll_calendar
virtual_datetime = datetime.datetime.now() + datetime.timedelta(days=5)
current_contract_date_str = str(final_row.next_contract)
current_contract = contractDateWithRollParameters(contractDate(current_contract_date_str),
roll_parameters)
next_contract = current_contract.next_held_contract()
carry_contract = current_contract.carry_contract()
list_of_contract_names = dict_of_futures_contract_prices.keys()
try:
assert current_contract.date_str in list_of_contract_names
except:
print("Can't add extra row as data missing")
return roll_calendar
new_row = pd.DataFrame(dict(current_contract = current_contract_date_str,
next_contract = next_contract.date_str,
carry_contract = carry_contract.date_str), index=[virtual_datetime])
roll_calendar = pd.concat([roll_calendar, new_row], axis=0)
return roll_calendar
if __name__ == "__main__":
input("Will overwrite existing prices are you sure?! CTL-C to abort")
# change if you want to write elsewhere
csv_multiple_data_path=arg_not_supplied
# only change if you have written the files elsewhere
csv_roll_data_path=arg_not_supplied
# modify flags as required
process_multiple_prices_all_instruments(csv_multiple_data_path= csv_multiple_data_path,
                                            csv_roll_data_path = csv_roll_data_path)
| gpl-3.0 |
datachand/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_bernoulli_synthetic_data_mediumGBM.py | 6 | 2598 | import sys
sys.path.insert(1, "../../../")
import h2o, tests
from h2o import H2OFrame
import numpy as np
import numpy.random
import scipy.stats
from sklearn import ensemble
from sklearn.metrics import roc_auc_score
def bernoulli_synthetic_data_mediumGBM():
# Generate training dataset (adaptation of http://www.stat.missouri.edu/~speckman/stat461/boost.R)
train_rows = 10000
train_cols = 10
# Generate variables V1, ... V10
X_train = np.random.randn(train_rows, train_cols)
# y = +1 if sum_i x_{ij}^2 > chisq median on 10 df
y_train = np.asarray([1 if rs > scipy.stats.chi2.ppf(0.5, 10) else -1 for rs in [sum(r) for r in
np.multiply(X_train,X_train).tolist()]])
# Train scikit gbm
# TODO: grid-search
distribution = "bernoulli"
ntrees = 150
min_rows = 1
max_depth = 2
learn_rate = .01
nbins = 20
gbm_sci = ensemble.GradientBoostingClassifier(learning_rate=learn_rate, n_estimators=ntrees, max_depth=max_depth,
min_samples_leaf=min_rows, max_features=None)
gbm_sci.fit(X_train,y_train)
# Generate testing dataset
test_rows = 2000
test_cols = 10
# Generate variables V1, ... V10
X_test = np.random.randn(test_rows, test_cols)
# y = +1 if sum_i x_{ij}^2 > chisq median on 10 df
y_test = np.asarray([1 if rs > scipy.stats.chi2.ppf(0.5, 10) else -1 for rs in [sum(r) for r in
np.multiply(X_test,X_test).tolist()]])
# Score (AUC) the scikit gbm model on the test data
auc_sci = roc_auc_score(y_test, gbm_sci.predict_proba(X_test)[:,1])
# Compare this result to H2O
train_h2o = H2OFrame(np.column_stack((y_train, X_train)).tolist())
test_h2o = H2OFrame(np.column_stack((y_test, X_test)).tolist())
gbm_h2o = h2o.gbm(x=train_h2o[1:], y=train_h2o["C1"].asfactor(), distribution=distribution, ntrees=ntrees,
min_rows=min_rows, max_depth=max_depth, learn_rate=learn_rate, nbins=nbins)
gbm_perf = gbm_h2o.model_performance(test_h2o)
auc_h2o = gbm_perf.auc()
#Log.info(paste("scikit AUC:", auc_sci, "\tH2O AUC:", auc_h2o))
assert abs(auc_h2o - auc_sci) < 1e-2, "h2o (auc) performance degradation, with respect to scikit. h2o auc: {0} " \
"scickit auc: {1}".format(auc_h2o, auc_sci)
if __name__ == "__main__":
tests.run_test(sys.argv, bernoulli_synthetic_data_mediumGBM)
| apache-2.0 |
emd/random_data | random_data/spectra/nonparametric.py | 1 | 40561 | '''This module implements a routines for analyzing spectra.
'''
# Standard library imports
import numpy as np
from matplotlib import mlab
from matplotlib.colors import LogNorm, Colormap
import matplotlib.pyplot as plt
from matplotlib.ticker import LogFormatter
# Related 3rd-party imports
from ..ensemble import Ensemble
class AutoSpectralDensity(object):
'''A class for autospectral density characterization.
For stationary signal `x`, the autospectral density `Sxx`
is defined as
Sxx(f) = lim_{T \\rightarrow \infty} (1 / T) E[ |X(f, T)| ^ 2 ]
where X(f, T) is the finite-time Fourier transform of `x` and
E[...] denotes the expectation value operator. The autopower
within a spectral band fmin < f < fmax is given as
Pxx(fmin < f < fmax) = \int_{fmin}^{fmax} Sxx(f) df
The total autopower is obtained when fmin = -f_Ny and fmax = f_Ny,
where f_Ny is the Nyquist frequency of the signals.
For real-valued signal `x`, X(-f, T) = X*(f, T). Thus,
only one side of the spectral density is uniquely determined.
This class assumes that `x` is a real-valued signal such that
a one-sided spectral density (f >= 0) is returned.
The one sided spectral density is denoted `Gxx`. As `Gxx`
is only defined for f >= 0, conservation of signal power requires
Gxx(f) = 2 Sxx(f), f >= 0, and
Gxx(f) = Sxx(f), f == 0
If `x` is a stationary signal, the entire sample record
is referred to as an "ensemble". The spectral density is then
*estimated* by splitting the ensemble into a number of
(potentially overlapping) smaller segments, referred to as
"realizations". These realizations are detrended, windowed, and
FFT'd; the resulting FFTs are then averaged to obtain an
estimate of the spectral density. The ensemble averaging is
required for a statistically consistent definition of the
spectral density: that is, as T -> \infty, the estimated
spectral density only converges to the true spectral density
when the ensemble average is computed. In particular, the
random error in the estimate decreases as the number of
realizations increases.
This class allows for analysis of *nonstationary* signals.
`x` is first split into a number of ensembles, where
`x` is approximately stationary over the ensemble time.
The estimation of the spectral density during each ensemble
then proceeds as above.
Attributes:
-----------
Below, `x` refers to the signal for which the autospectral
density is computed, and `Fs` is the signal sampling rate.
Gxx - array_like, (`L`, `M`)
The one-sided (f >= 0) spectral density estimate.
[Gxx] = [x]^2 / [Fs]
f - array_like, (`L`,)
The frequencies at which the spectral density has been estimated.
[f] = [Fs]
t - array_like, (`M`,)
The temporal midpoint of each ensemble.
[t] = 1 / [Fs]
Fs - float
The signal sampling rate, as specified at object initialization.
[Fs] = arbitrary units
Nreal_per_ens - int
The number of realizations per ensemble used in the computation
of the spectral density estimate `Gxx`. The random error in
the spectral density estimate decreases ~ 1 / sqrt(`Nreal_per_ens`).
Npts_per_real - int
The number of sample points per realization used in the computation
of the spectral density estimate `Gxx`.
Npts_overlap - int
The number of overlapping points between adjacent realizations
in the computation of the spectral density estimate `Gxx`.
detrend - string
The function applied to each realization before taking the FFT.
window - callable or ndarray
The window applied to each realization before taking the FFT.
Methods:
--------
    Type `help(AutoSpectralDensity)` in the IPython console for a listing.
'''
def __init__(self, x, Fs=1.0, t0=0.,
Tens=40960., Nreal_per_ens=10, fraction_overlap=0.5,
Npts_per_real=None, Npts_overlap=None,
detrend=None, window=mlab.window_hanning,
print_params=True, print_status=True):
        '''Create an instance of the `AutoSpectralDensity` class.
Input Parameters:
-----------------
x - array_like, (`N`,)
The signal for which the autospectral density will be computed.
[x] = arbitrary units
Fs - float
The sampling rate of `x`.
If not specified, `Fs` is assigned a value of unity such that
all frequencies are *normalized* to the sampling rate.
[Fs] = arbitrary units
t0 - float
The initial time corresponding to `x[0]`.
[t0] = 1 / [Fs]
Tens - float
The time window defining an ensemble. `Tens` determines the
time resolution of the spectral density calculations,
with larger `Tens` corresponding to reduced time resolution
and increased frequency resolution.
[Tens] = 1 / [Fs]
Nreal_per_ens - int
The number of realizations per ensemble. The random error in the
spectral density estimate decreases as ~ 1 / sqrt(Nreal_per_ens).
The frequency resolution `df` of the spectral density estimate
is linearly related to the number of realizations.
A ValueError is raised if not a positive integer.
fraction_overlap - float
The fractional overlap between adjacent realizations.
0 =< `fraction_overlap` < 1, otherwise a ValueError is raised.
Npts_per_real - int
The number of sample points per realization. If None,
`Tens` is used to compute `Npts_per_real` that is compatible
with `Nreal_per_ens` and efficient FFT computation.
If not None, `Tens` is ignored. A ValueError is raised
if not a positive integer.
Npts_overlap - int
The number of overlapping sample points between adjacent
realizations. If None, `fraction_overlap` sets the
number of overlapping sample points. If not None,
`fraction_overlap` is ignored. A ValueError is raised
if not a positive integer or if greater than or equal to
the number of points per realization.
detrend - string
The function applied to each realization before taking FFT.
May be [ 'default' | 'constant' | 'mean' | 'linear' | 'none']
or callable, as specified in :py:func: `csd <matplotlib.mlab.csd>`.
*Warning*: Naively detrending (even with something as simple as
`mean` or `linear` detrending) can introduce detrimental artifacts
into the computed spectrum, so *no* detrending is the default.
window - callable or ndarray
The window applied to each realization before taking FFT,
as specified in :py:func: `csd <matplotlib.mlab.csd>`.
print_params - bool
If True, print relevant spectral parameters to screen.
print_status - bool
If True, print percentage of ensembles whose spectra
have been computed.
'''
# Only real-valued signals are expected/supported at the moment
if np.iscomplexobj(x):
raise ValueError('`x` must be a real-valued signal!')
# Determine properties for ensemble averaging
ens = Ensemble(
x, Fs=Fs, t0=t0, Tens=Tens,
Nreal_per_ens=Nreal_per_ens, fraction_overlap=fraction_overlap,
Npts_per_real=Npts_per_real, Npts_overlap=Npts_overlap)
# Record important aspects of computation
self.Fs = ens.Fs
self.Npts_per_real = ens.Npts_per_real
self.Nreal_per_ens = ens.Nreal_per_ens
self.Npts_overlap = ens.Npts_overlap
self.Npts_per_ens = ens.Npts_per_ens
self.detrend = detrend
self.window = window
self.f = ens.f
self.df = ens.df
self.t = ens.t
self.dt = ens.dt
if print_params:
self.printSpectralParams()
# Perform spectral calculations
self.Gxx = self.getSpectralDensity(x, print_status=print_status)
def printSpectralParams(self):
        print('\ndt: %.6g' % self.dt)
        print('df: %.6g' % self.df)
        print('Npts_per_real: %i' % self.Npts_per_real)
        print('overlap: %.2f'
              % (float(self.Npts_overlap) / self.Npts_per_real))
        print('Nreal_per_ens: %i' % self.Nreal_per_ens)
        print('detrend: %s' % self.detrend)
        print('window: %s' % getattr(self.window, '__name__', self.window))
return
def getSpectralDensity(self, x, print_status=False):
'Get spectral density of provided signal.'
return _spectral_density(
x, x, self.Fs, len(self.f), len(self.t),
self.Npts_per_real, self.Npts_overlap, self.Npts_per_ens,
self.detrend, self.window,
print_status=print_status, status_label='Gxx')
def plotSpectralDensity(self, tlim=None, flim=None, vlim=None,
AC_coupled=True,
cmap='viridis', interpolation='none', fontsize=16,
title=None, xlabel='$t$', ylabel='$f$',
cblabel='$|G_{xx}(f)|$',
cborientation='horizontal',
ax=None, fig=None, geometry=111):
'Plot magnitude of spectral density on log scale.'
if flim is None and AC_coupled:
# Don't allow DC signal to influence color mapping
flim = [self.f[1], self.f[-1]]
ax = _plot_image(
self.t, self.f, np.abs(self.Gxx),
xlim=tlim, ylim=flim, vlim=vlim,
norm='log', cmap=cmap, interpolation=interpolation,
title=title, xlabel=xlabel, ylabel=ylabel,
cblabel=cblabel, cborientation=cborientation,
fontsize=fontsize,
ax=ax, fig=fig, geometry=geometry)
return ax
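# A minimal usage sketch for `AutoSpectralDensity` (all numerical values are
# illustrative only): estimate and plot the autospectral density of a noisy
# 10-kHz tone sampled at 100 kHz.
#
#     Fs = 100e3
#     t = np.arange(0, 1, 1. / Fs)
#     x = np.cos(2 * np.pi * 10e3 * t) + np.random.randn(len(t))
#     asd = AutoSpectralDensity(x, Fs=Fs, t0=t[0],
#                               Tens=0.1, Nreal_per_ens=10)
#     asd.plotSpectralDensity()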
class CrossSpectralDensity(object):
'''A class for cross-spectral density characterization.
For stationary signals `x` and `y`, the cross-spectral density `Sxy`
is defined as
Sxy(f) = lim_{T \\rightarrow \infty} (1 / T) E[ X*(f, T) Y(f, T)]
where X(f, T) and Y(f, T) are the finite-time Fourier transforms
of `x` and `y`, respectively, * denotes complex conjugation, and
E[...] denotes the expectation value operator. The crosspower
within a spectral band fmin < f < fmax is given as
Pxy(fmin < f < fmax) = \int_{fmin}^{fmax} Sxy(f) df
The total crosspower is obtained when fmin = -f_Ny and fmax = f_Ny,
where f_Ny is the Nyquist frequency of the signals.
For real-valued signal `x`, X(-f, T) = X*(f, T). Thus, if both
`x` and `y` are real-valued, Sxy(-f) = [Syx(f)]*, and only
one side of the spectral density is uniquely determined.
This class assumes that `x` and `y` are real-valued signals
such that a one-sided spectral density (f >= 0) is returned.
The one sided spectral density is denoted `Gxy`. As `Gxy`
is only defined for f >= 0, conservation of signal power requires
Gxy(f) = 2 Sxy(f), f >= 0, and
Gxy(f) = Sxy(f), f == 0
If `x` and `y` are stationary signals, the entire sample record
is referred to as an "ensemble". The spectral density is then
*estimated* by splitting the ensemble into a number of
(potentially overlapping) smaller segments, referred to as
"realizations". These realizations are detrended, windowed, and
FFT'd; the resulting FFTs are then averaged to obtain an
estimate of the spectral density. The ensemble averaging is
required for a statistically consistent definition of the
spectral density: that is, as T -> \infty, the estimated
spectral density only converges to the true spectral density
when the ensemble average is computed. In particular, the
random error in the estimate decreases as the number of
realizations increases.
This class allows for analysis of *nonstationary* signals.
`x` and `y` are first split into a number of ensembles, where
`x` and `y` are approximately stationary over the ensemble time.
The estimation of the spectral density during each ensemble
then proceeds as above.
Attributes:
-----------
Below, `x` and `y` refer to the signals for which the cross-spectral
density is computed, and `Fs` is the signal sampling rate.
Gxy - array_like, (`L`, `M`)
The one-sided (f >= 0) spectral density estimate.
[Gxy] = [x] [y] / [Fs]
f - array_like, (`L`,)
The frequencies at which the spectral density has been estimated.
[f] = [Fs]
t - array_like, (`M`,)
The temporal midpoint of each ensemble.
[t] = 1 / [Fs]
Fs - float
The signal sampling rate, as specified at object initialization.
[Fs] = arbitrary units
Nreal_per_ens - int
The number of realizations per ensemble used in the computation
of the spectral density estimate `Gxy`. The random error in
the spectral density estimate decreases ~ 1 / sqrt(`Nreal_per_ens`).
Npts_per_real - int
The number of sample points per realization used in the computation
of the spectral density estimate `Gxy`.
Npts_overlap - int
The number of overlapping points between adjacent realizations
in the computation of the spectral density estimate `Gxy`.
detrend - string
The function applied to each realization before taking the FFT.
window - callable or ndarray
The window applied to each realization before taking the FFT.
Methods:
--------
    Type `help(CrossSpectralDensity)` in the IPython console for a listing.
'''
def __init__(self, x, y, Fs=1.0, t0=0.,
Tens=40960., Nreal_per_ens=10, fraction_overlap=0.5,
Npts_per_real=None, Npts_overlap=None,
detrend=None, window=mlab.window_hanning,
print_params=True, print_status=True):
        '''Create an instance of the `CrossSpectralDensity` class.
Input Parameters:
-----------------
x, y - array_like, (`N`,)
The signals for which the cross-spectral density will be computed.
A ValueError is raised if `x` and `y` contain a different number
of samples. Note that `x` and `y` must be sampled at the *same*
rate, `Fs`.
[x] = arbitrary units
[y] = arbitrary units, potentially different than [x]
Fs - float
The sampling rate of `x` and `y`.
If not specified, `Fs` is assigned a value of unity such that
all frequencies are *normalized* to the sampling rate.
[Fs] = arbitrary units
t0 - float
The initial time corresponding to `x[0]` (and `y[0]`).
[t0] = 1 / [Fs]
Tens - float
The time window defining an ensemble. `Tens` determines the
time resolution of the spectral density calculations,
with larger `Tens` corresponding to reduced time resolution
and increased frequency resolution.
[Tens] = 1 / [Fs]
Nreal_per_ens - int
The number of realizations per ensemble. The random error in the
spectral density estimate decreases as ~ 1 / sqrt(Nreal_per_ens).
The frequency resolution `df` of the spectral density estimate
is linearly related to the number of realizations.
A ValueError is raised if not a positive integer.
fraction_overlap - float
The fractional overlap between adjacent realizations.
0 =< `fraction_overlap` < 1, otherwise a ValueError is raised.
Npts_per_real - int
The number of sample points per realization. If None,
`Tens` is used to compute `Npts_per_real` that is compatible
with `Nreal_per_ens` and efficient FFT computation.
If not None, `Tens` is ignored. A ValueError is raised
if not a positive integer.
Npts_overlap - int
The number of overlapping sample points between adjacent
realizations. If None, `fraction_overlap` sets the
number of overlapping sample points. If not None,
`fraction_overlap` is ignored. A ValueError is raised
if not a positive integer or if greater than or equal to
the number of points per realization.
detrend - string
The function applied to each realization before taking FFT.
May be [ 'default' | 'constant' | 'mean' | 'linear' | 'none']
or callable, as specified in :py:func: `csd <matplotlib.mlab.csd>`.
*Warning*: Naively detrending (even with something as simple as
`mean` or `linear` detrending) can introduce detrimental artifacts
into the computed spectrum, so *no* detrending is the default.
window - callable or ndarray
The window applied to each realization before taking FFT,
as specified in :py:func: `csd <matplotlib.mlab.csd>`.
print_params - bool
If True, print relevant spectral parameters to screen.
print_status - bool
If True, print percentage of ensembles whose spectra
have been computed.
'''
# Only real-valued signals are expected/supported at the moment
if np.iscomplexobj(x) or np.iscomplexobj(y):
raise ValueError('`x` and `y` must be real-valued signals!')
if len(x) != len(y):
raise ValueError('`x` and `y` must have the same length!')
# Determine properties for ensemble averaging
ens = Ensemble(
x, Fs=Fs, t0=t0, Tens=Tens,
Nreal_per_ens=Nreal_per_ens, fraction_overlap=fraction_overlap,
Npts_per_real=Npts_per_real, Npts_overlap=Npts_overlap)
# Record important aspects of computation
self.Fs = ens.Fs
self.Npts_per_real = ens.Npts_per_real
self.Nreal_per_ens = ens.Nreal_per_ens
self.Npts_overlap = ens.Npts_overlap
self.Npts_per_ens = ens.Npts_per_ens
self.detrend = detrend
self.window = window
self.f = ens.f
self.df = ens.df
self.t = ens.t
self.dt = ens.dt
if print_params:
self.printSpectralParams()
# Perform spectral calculations
self.Gxy = self.getSpectralDensity(x, y, print_status=print_status)
self.gamma2xy = self.getCoherence(x, y, print_status=print_status)
self.theta_xy = self.getPhaseAngle()
def printSpectralParams(self):
        print('\ndt: %.6g' % self.dt)
        print('df: %.6g' % self.df)
        print('Npts_per_real: %i' % self.Npts_per_real)
        print('overlap: %.2f'
              % (float(self.Npts_overlap) / self.Npts_per_real))
        print('Nreal_per_ens: %i' % self.Nreal_per_ens)
        print('detrend: %s' % self.detrend)
        print('window: %s' % getattr(self.window, '__name__', self.window))
return
def getSpectralDensity(self, x, y, print_status=False):
'Get spectral density of provided signals.'
return _spectral_density(
x, y, self.Fs, len(self.f), len(self.t),
self.Npts_per_real, self.Npts_overlap, self.Npts_per_ens,
self.detrend, self.window,
print_status=print_status, status_label='Gxy')
def getCoherence(self, x, y, print_status=False):
'''Get (magnitude-squared) coherence of signals `x` and `y`.
The magnitude-squared coherence function (gamma_{xy})^2
        of signals `x` and `y` is defined as
(gamma_{xy})^2 = [ | G_{xy}(f) |^2 ] / [ G_{xx}(f) G_{yy}(f) ]
where `G_{xy}(f)` is the cross-spectral density of `x` and `y` and
`G_{xx}(f)` (`G_{yy}(f)`) is the autospectral density of `x` (`y`).
For real-valued `x` and `y`, `G_{xx}(f)` and `G_{yy}(f)` are
also real-valued. Thus, the magnitude-squared coherence function
is real-valued. Further, for all `f`,
0 <= (gamma_{xy})^2 <= 1
'''
Gxx = _spectral_density(
x, x, self.Fs, len(self.f), len(self.t),
self.Npts_per_real, self.Npts_overlap, self.Npts_per_ens,
self.detrend, self.window,
print_status=print_status, status_label='Gxx')
Gyy = _spectral_density(
y, y, self.Fs, len(self.f), len(self.t),
self.Npts_per_real, self.Npts_overlap, self.Npts_per_ens,
self.detrend, self.window,
print_status=print_status, status_label='Gyy')
num = (np.abs(self.Gxy) ** 2)
den = Gxx * Gyy
return num / den
def getPhaseAngle(self, unwrap=False):
'''Get phase angle `theta_xy` (in radians) of spectral density `Gxy`.
If `unwrap` is False, the returned angle will be between [-pi, pi].
'''
if unwrap:
self.theta_xy = np.unwrap(np.angle(self.Gxy), axis=-1)
else:
self.theta_xy = np.angle(self.Gxy)
return np.copy(self.theta_xy)
def plotSpectralDensity(self, tlim=None, flim=None, vlim=None,
AC_coupled=True,
cmap='viridis', interpolation='none', fontsize=16,
title=None, xlabel='$t$', ylabel='$f$',
cblabel='$|G_{xy}(f)|$',
cborientation='horizontal',
ax=None, fig=None, geometry=111):
'Plot magnitude of spectral density on log scale.'
if flim is None and AC_coupled:
# Don't allow DC signal to influence color mapping
flim = [self.f[1], self.f[-1]]
ax = _plot_image(
self.t, self.f, np.abs(self.Gxy),
xlim=tlim, ylim=flim, vlim=vlim,
norm='log', cmap=cmap, interpolation=interpolation,
title=title, xlabel=xlabel, ylabel=ylabel,
cblabel=cblabel, cborientation=cborientation,
fontsize=fontsize,
ax=ax, fig=fig, geometry=geometry)
return ax
def plotCoherence(self, tlim=None, flim=None, vlim=None,
cmap='viridis', interpolation='none', fontsize=16,
title=None, xlabel='$t$', ylabel='$f$',
cborientation='horizontal',
ax=None, fig=None, geometry=111):
'Plot magnitude squared coherence on linear scale.'
ax = _plot_image(
self.t, self.f, self.gamma2xy,
xlim=tlim, ylim=flim, vlim=vlim,
norm=None, cmap=cmap, interpolation=interpolation,
title=title, xlabel=xlabel, ylabel=ylabel,
cblabel='$\gamma_{xy}^2$', cborientation=cborientation,
fontsize=fontsize,
ax=ax, fig=fig, geometry=geometry)
return ax
def plotPhaseAngle(self, gamma2xy_threshold=0.5, Gxy_threshold=0.,
theta_min=-np.pi, dtheta=(np.pi / 4),
tlim=None, flim=None,
cmap='RdBu', interpolation='none', fontsize=16,
title=None, xlabel='$t$', ylabel='$f$',
cborientation='horizontal',
mode_number=False,
ax=None, fig=None, geometry=111):
'''Plot phase angle `theta` if magnitude-squared coherence is
greater than or equal to `gamma2xy_threshold`, cross-spectral
density amplitude is greater than or equal to `Gxy_threshold`,
and `theta` satisfies
theta_min <= theta < [theta_min + (2 * pi)]
If `dtheta` divides (2 * pi) into an *integer* number of bins,
the plotted phase angles will be displayed with resolution `dtheta`;
that is, plotted phase angles will fall within bins of width `dtheta`.
Note that the phase angle equivalent to zero will always occupy
one of the bin center points.
If `dtheta` does *not* divide (2 * pi) into an integer number
of bins, `dtheta` will be redefined as the closest value
that does divide (2 * pi) into an integer number of bins;
the above discussion about the bin width and centering for the
plotted phase angles then applies with this re-defined `dtheta`.
If `mode_number` is True, the plotted phase angles will be
normalized to `dtheta`, producing a plot of mode number `n`
rather than phase angle.
'''
cbticks, dtheta = phase_angle_bins(dtheta, theta_min)
# Get "discrete" colormap, with a distinct color corresponding
# to each value in `cbticks`
cmap = plt.get_cmap(cmap, len(cbticks))
# However, the bins also have width `dtheta` such that
# the colorbar boundaries should correspond to
#
# lower bound (0): cbticks[0] - (0.5 * dtheta)
# (1): cbticks[0] + (0.5 * dtheta)
# (2): cbticks[0] + (1.5 * dtheta)
# ...
# upper bound (N + 1): cbticks[-1] + (0.5 * dtheta)
#
# This is easily accomplished by setting the minimum and maximum
# values to represent in the image as follows:
vlim = np.array([
cbticks[0] - (0.5 * dtheta),
cbticks[-1] + (0.5 * dtheta)])
# Now, "wrap" the phase angles onto the specified domain
theta_xy = wrap(self.theta_xy, vlim[0], vlim[1])
# Normalize phase angle to `dtheta` to obtain plot of mode number `n`
if mode_number:
theta_xy /= dtheta
vlim /= dtheta
cbticks = (cbticks / dtheta).astype('int')
cblabel = '$n$'
else:
cblabel = '$\\theta_{xy}$'
# Finally, only consider phase angles from regions whose
# (a) magnitude-squared coherence and (b) cross-spectral-density
# amplitude are greater-than-or-equal-to the specified thresholds
theta_xy = np.ma.masked_where(
np.logical_or(
self.gamma2xy < gamma2xy_threshold,
np.abs(self.Gxy) < Gxy_threshold),
theta_xy)
ax = _plot_image(
self.t, self.f, theta_xy,
xlim=tlim, ylim=flim, vlim=vlim,
norm=None, cmap=cmap, interpolation=interpolation,
title=title, xlabel=xlabel, ylabel=ylabel,
cblabel=cblabel, cborientation=cborientation, cbticks=cbticks,
fontsize=fontsize,
ax=ax, fig=fig, geometry=geometry)
return ax
def _spectral_density(x, y, Fs, Nf, Nens,
Npts_per_real, Npts_overlap, Npts_per_ens,
detrend, window,
print_status=False, status_label=''):
'Get spectral density of provided signals.'
same_data = x is y
# Initialize spectral density array
if not same_data:
# Cross-spectral density is intrinsically complex-valued, so
# we must initialize the spectral density as a complex-valued
# array to avoid loss of information
Gxy = np.zeros([Nf, Nens], dtype=np.complex128)
else:
# Autospectral density is intrinsically real-valued
# (assuming `x` is real-valued), so we don't need the
# overhead of a complex-valued array
Gxy = np.zeros([Nf, Nens])
if print_status:
        print('')
# Loop over successive ensembles
for ens in np.arange(Nens):
# Create a slice corresponding to current ensemble
ens_start = ens * Npts_per_ens
ens_stop = (ens + 1) * Npts_per_ens
sl = slice(ens_start, ens_stop)
if same_data:
Gxy[:, ens] = mlab.psd(
x[sl], Fs=Fs,
NFFT=Npts_per_real, noverlap=Npts_overlap,
detrend=detrend, window=window)[0]
else:
Gxy[:, ens] = mlab.csd(
x[sl], y[sl], Fs=Fs,
NFFT=Npts_per_real, noverlap=Npts_overlap,
detrend=detrend, window=window)[0]
        if print_status:
            print('%s percent complete: %.1f \r'
                  % (status_label, (100 * float(ens + 1) / Nens)),
                  end='')
    if print_status:
        print('')
return Gxy
def _plot_image(x, y, z,
xlim=None, ylim=None, vlim=None,
norm=None, cmap='viridis', interpolation='none',
title=None, xlabel=None, ylabel=None, fontsize=16,
cblabel=None, cbticks=None, cborientation='horizontal',
ax=None, fig=None, geometry=111):
'''Create an image of z(y, x).
Parameters:
-----------
x - array_like, (`M`,)
The x-axis of image. It is assumed that the x-values correspond
to the midpoints of bins in the x-dimension (e.g. the midpoint
of the ensembles in
:py:class:`SpectralDensity <random_data.spectra.SpectralDensity>`)
y - array_like, (`N`,)
The y-axis of image. It is assumed that the y-values correspond
to discrete samples of a function, such as the discrete frequencies
of a discrete Fourier transform.
z - array_like, (`N`, `M`)
The array containing the image values.
xlim - array_like, (2,)
The minimum and maximum values of `x` to display.
ylim - array_like, (2,)
The minimum and maximum values of `y` to display.
vlim - array_like, (2,)
The minimum and maximum values of `z` to display.
norm - string or None
If `log`, display image on logarithmic scale;
otherwise, display image on linear scale.
cmap - string
Colormap used for image. Default matplotlib colormaps
are found in :py:module:`cm <matplotlib.cm>`.
interpolation - string
Interpolation method to be used by :py:function `imshow.
<matplotlib.pyplot.imshow>`. Examples of each interpolation
scheme are displayed here:
http://matplotlib.org/examples/images_contours_and_fields/interpolation_methods.html
and the difference between 'none' and 'nearest' is demonstrated here:
http://matplotlib.org/examples/images_contours_and_fields/interpolation_none_vs_nearest.html
title, xlabel, ylabel, cblabel - string
Titles of respective objects in image.
cborientation - string
Orientation of colorbar; may be in {'horizontal', 'vertical'}.
fontsize - int
Size of font in titles, labels, etc.
ax - :py:class:`AxesSubplot <matplotlib.axes._subplots.AxesSubplot>`
instance corresponding to the axis (i.e. "subplot") where
the image will be drawn. `ax` will (obviously) be
modified by this method. If an axis instance is not provided,
an axis will automatically be created.
fig - :py:class:`Figure <matplotlib.figure.Figure>` instance
If an axis instance is *not* provided, one can provide
a figure instance (and an axis `geometry`, describing the
location of the axis instance in the figure) to control
which window is plotted in. If a figure instance is not
provided (and axis instance is also not provided),
a figure instance will be created with the next available
window number.
geometry - int, or tuple
If an axis instance is *not* provided, `geometry` determines
the location of the axis instance in the provided or created
figure. The standard matplotlib subplot geometry indexing is
used (see `<matplotlib.pyplot.subplot>` for more information).
Returns:
--------
ax - :py:class:`AxesSubplot <matplotlib.axes._subplots.AxesSubplot>`
instance corresponding to the axis (i.e. "subplot") where
the spectrogram will be drawn. This is either identical to
the axis instance used during the call or, if an axis instance
was not provided, the axis instance created during the call.
'''
# Determine (x, y) extent of image
if xlim is not None:
xlim = np.sort(xlim)
xind = np.where(np.logical_and(x >= xlim[0], x <= xlim[1]))[0]
else:
xind = np.arange(len(x))
if ylim is not None:
ylim = np.sort(ylim)
yind = np.where(np.logical_and(y >= ylim[0], y <= ylim[1]))[0]
else:
yind = np.arange(len(y))
dx = x[1] - x[0]
dy = y[1] - y[0]
extent = (x[xind[0]] - (0.5 * dx),
x[xind[-1]] + (0.5 * dx),
y[yind[0]] - (0.5 * dy),
y[yind[-1]] + (0.5 * dy))
# If an axis instance is not provided, create one
if ax is None:
# If, in addition, a figure instance is not provided,
# create a new figure
if fig is None:
fig = plt.figure()
# Create axis with desired subplot geometry
ax = fig.add_subplot(geometry)
if vlim is not None:
vlim = np.sort(vlim)
else:
vlim = [np.nanmin(z[yind, :][:, xind]),
np.nanmax(z[yind, :][:, xind])]
if norm == 'log':
norm = LogNorm()
# Ensure that specified colormap is available
if not isinstance(cmap, Colormap) and (cmap not in plt.colormaps()):
cmap_backup = 'Purples'
print ("\nThe '%s' colormap is not available; falling back to '%s'\n"
% (cmap, cmap_backup))
cmap = cmap_backup
# Create plot
im = ax.imshow(np.flipud(z[yind, :][:, xind]),
extent=extent, aspect='auto',
vmin=vlim[0], vmax=vlim[1],
norm=norm, cmap=cmap, interpolation=interpolation)
# Colorbar
    if isinstance(norm, LogNorm):
format = LogFormatter(labelOnlyBase=True)
else:
format = None
cb = plt.colorbar(im, format=format, ticks=cbticks,
ax=ax, orientation=cborientation)
# Labeling
if title is not None:
ax.set_title(title, fontsize=fontsize)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=fontsize)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=fontsize)
if cblabel is not None:
cb.set_label(cblabel, fontsize=fontsize)
return ax
def wrap(theta, theta_min, theta_max):
'''Wrap array `theta` between `theta_min` and `theta_max`.
This is the inverse operation to :py:function: `unwrap <numpy.unwrap>`.
'''
full_cycle = theta_max - theta_min
return ((theta - theta_min) % full_cycle) + theta_min
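# For example, wrap(1.5 * np.pi, -np.pi, np.pi) returns -0.5 * np.pi,
# mapping the angle back onto the [-pi, pi) domain:
#
#     wrap(1.5 * np.pi, -np.pi, np.pi)    # -> -0.5 * pi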
def phase_angle_bins(dtheta0, theta_min):
'''Get center points for phase angle bins of (approximate) width `dtheta0`.
Parameters:
-----------
dtheta0 - float
The desired width of the phase angle bins in radians.
Our image processing algorithms are most easily implemented
if the bin widths divide (2 * pi) into an *integer* number
of bins. For this reason, the realized bin width `dtheta`
may differ slightly from the requested bin width `dtheta0`
(specifics discussed below under "Returns").
[dtheta0] = rad
    theta_min - float
The returned bin center points will lie between `theta_min` and
`theta_min` + (2 * pi), subject to the constraint that
the phase angle equivalent to zero (i.e. m * (2 * pi),
m any integer) lies at the center point of one of the bins.
[theta_min] = rad
Returns:
--------
(bins, dtheta) - tuple, where
bins - array_like, (`N`,)
The center points of the phase angle bins.
    dtheta - float
The realized phase-angle-bin width. If the requested bin width
`dtheta0` does *not* divide (2 * pi) into an integer number
of bins, the realized bin width `dtheta` is the bin width
closest to `dtheta0` that *does* divide (2 * pi) into
an integer number of bins.
'''
full_domain = 2 * np.pi
# Number of bins in full domain, rounded to the nearest integer value
    N = int(np.round(full_domain / dtheta0))
# Redefine the angular separation of the bins such that
# the bins *exactly* divide full domain into `N` bins
dtheta = full_domain / N
# Determine phase-angle bins on [0, `full_domain`) domain
bins = np.arange(0, full_domain, dtheta)
# Wrap phase-angle bins onto desired domain and sort
bins = wrap(bins, theta_min, full_domain + theta_min)
bins = np.sort(bins)
return bins, dtheta
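# A quick worked example of the above: requesting `dtheta0` = pi / 4 on the
# domain starting at `theta_min` = -pi divides (2 * pi) into exactly 8 bins,
# so the requested width is kept and the returned bin centers are
# [-pi, -3pi/4, -pi/2, -pi/4, 0, pi/4, pi/2, 3pi/4], with 0 at a bin center
# as promised in the docstring:
#
#     bins, dtheta = phase_angle_bins(np.pi / 4, -np.pi)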
def _next_largest_divisor_for_integer_quotient(dividend, divisor):
'''Return `divisor` or next largest value that yields an
integer quotient when dividing into `dividend`.
'''
    integer_quotient = int(float(dividend) / divisor)
return dividend / integer_quotient
def _test_phase_angle(
gamma2xy_threshold=0.5, Gxy_threshold=0.,
theta_min=-np.pi, dtheta=(np.pi / 4),
flim=[10e3, 100e3],
cmap='RdBu',
mode_number=False,
Tens=5e-3, Nreal_per_ens=10):
'''This routine plots the phase angle of several test cases
to ensure that the phase angle is correctly represented
by the methods in `CrossSpectralDensity`. Each test case
lies near the lower or upper boundary of a phase angle bin,
and good behavior at the bin's extrema implies good behavior
throughout the rest of the bin's interior. Note that several
figures are generated!
'''
# Create some uncorrelated noise
from ..signals import RandomSignal
sig1 = RandomSignal(4e6, 0, 0.1, fc=100e3, pole=2)
sig2 = RandomSignal(4e6, 0, 0.1, fc=100e3, pole=2)
# Coherent signal amplitude
A0 = 1e-3
# Signal will have a linearly *ramping* frequency
f0 = 50e3
f1 = 75e3
t = sig1.t()
m = (f1 - f0) / (2 * (t[-1] - t[0]))
f = f0 + (m * t)
# Check that plotted phase angle is correct for specified phase angles
bins, dtheta = phase_angle_bins(dtheta, theta_min)
# Check lower boundary for each phase angle
for i, th0 in enumerate(bins):
# Ideal lower boundary of phase angle is at `theta` - (0.5 * `dtheta`),
# but we select 0.45 to give a bit of head room due to noise etc.
th = th0 - (0.45 * dtheta)
y1 = sig1.x + (A0 * np.cos(2 * np.pi * f * t))
y2 = sig2.x + (A0 * np.cos((2 * np.pi * f * t) + th))
csd = CrossSpectralDensity(
y1, y2, Fs=sig1.Fs, t0=sig1.t0,
Tens=Tens, Nreal_per_ens=Nreal_per_ens,
print_params=False, print_status=False)
# Plot cross-spectral spectral density amplitude *once*
# so that it is easy to specify relevant alternative values
# for `Gxy_threshold`
if i == 0:
csd.plotSpectralDensity(flim=flim)
if mode_number:
title = 'Lower bound, n = %i' % np.round(th0 / dtheta)
else:
title = 'Lower bound, theta = %.3f' % th0
csd.plotPhaseAngle(
gamma2xy_threshold=gamma2xy_threshold,
Gxy_threshold=Gxy_threshold,
theta_min=theta_min, dtheta=dtheta,
flim=flim,
cmap=cmap,
title=title,
mode_number=mode_number)
# Check upper boundary for each phase angle
for i, th0 in enumerate(bins):
# Ideal upper boundary of phase angle is at `theta` + (0.5 * `dtheta`),
# but we select 0.45 to give a bit of head room due to noise etc.
th = th0 + (0.45 * dtheta)
y1 = sig1.x + (A0 * np.cos(2 * np.pi * f * t))
y2 = sig2.x + (A0 * np.cos((2 * np.pi * f * t) + th))
csd = CrossSpectralDensity(
y1, y2, Fs=sig1.Fs, t0=sig1.t0,
Tens=Tens, Nreal_per_ens=Nreal_per_ens,
print_params=False, print_status=False)
if mode_number:
title = 'Upper bound, n = %i' % np.round(th0 / dtheta)
else:
title = 'Upper bound, theta = %.3f' % th0
csd.plotPhaseAngle(
gamma2xy_threshold=gamma2xy_threshold,
Gxy_threshold=Gxy_threshold,
theta_min=theta_min, dtheta=dtheta,
flim=flim,
cmap=cmap,
title=title,
mode_number=mode_number)
return
| gpl-2.0 |
JohanComparat/pySU | spm/bin_SMF/smf_sdss_eboss.py | 1 | 15868 | import astropy.cosmology as co
aa=co.Planck15
import astropy.io.fits as fits
import matplotlib.pyplot as p
import numpy as n
import os
import sys
# global cosmo quantities
z_min = float(sys.argv[1])
z_max = float(sys.argv[2])
#imf = 'kroupa'
#lO2_min = float(sys.argv[3]) # 'salpeter'
SNlimit = 5
smf_ilbert13 = lambda M, M_star, phi_1s, alpha_1s, phi_2s, alpha_2s : ( phi_1s * (M/M_star) ** alpha_1s + phi_2s * (M/M_star) ** alpha_2s ) * n.e ** (-M/M_star) * (M/ M_star)
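# Note (added): this is the double-Schechter stellar mass function of
# Ilbert et al. (2013); the best-fit parameters (M_star, phi_1s, alpha_1s,
# phi_2s, alpha_2s) are read from ilbert_2013_mass_function_params.txt below
# and passed to this function to build smf01.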
ff_dir = os.path.join(os.environ['DATA_DIR'], 'spm', 'firefly')
ll_dir = os.path.join(os.environ['DATA_DIR'], 'spm', 'literature')
co_dir = os.path.join(os.environ['DATA_DIR'], 'COSMOS' )
sdss_dir = os.path.join(os.environ['DATA_DIR'], 'SDSS')
spiders_dir = os.path.join(os.environ['DATA_DIR'], 'spiders')
out_dir = os.path.join(os.environ['DATA_DIR'], 'spm', 'results')
path_2_cosmos_cat = os.path.join( co_dir, "photoz_vers2.0_010312.fits")
#path_2_sdss_cat = os.path.join( ff_dir, "FireflyGalaxySdssDR14.fits" )
#path_2_eboss_cat = os.path.join( ff_dir, "FireflyGalaxyEbossDR14.fits" )
path_2_spall_sdss_dr12_cat = os.path.join( sdss_dir, "specObj-SDSS-dr12.fits" )
path_2_spall_sdss_dr14_cat = os.path.join( sdss_dir, "specObj-SDSS-dr14.fits" )
path_2_spall_boss_dr12_cat = os.path.join( sdss_dir, "specObj-BOSS-dr12.fits" )
path_2_spall_boss_dr14_cat = os.path.join( sdss_dir, "specObj-BOSS-dr14.fits" )
path_2_spall_spiders_dr14_cat = os.path.join( spiders_dir, "cluster_statistics_2016-11-08-DR14_spm.fits" )
#print "SDSS spAll DR14", len(fits.open(path_2_spall_sdss_dr14_cat)[1].data)
#print "BOSS spAll DR14",len(fits.open(path_2_spall_boss_dr14_cat)[1].data)
path_2_cosmos_cat = os.path.join( co_dir, "photoz_vers2.0_010312.fits")
path_2_vvdsW_cat = os.path.join( ff_dir, "VVDS_WIDE_summary.v1.spm.fits" )
path_2_vipers_cat = os.path.join( ff_dir, "VIPERS_W14_summary_v2.1.linesFitted.spm.fits" )
path_2_vvdsD_cat = os.path.join( ff_dir, "VVDS_DEEP_summary.v1.spm.fits" )
path_2_deep2_cat = os.path.join( ff_dir, "zcat.deep2.dr4.v4.LFcatalogTC.Planck15.spm.v2.fits" )
cosmos = fits.open(path_2_cosmos_cat)[1].data
deep2 = fits.open(path_2_deep2_cat)[1].data
vvdsD = fits.open(path_2_vvdsD_cat)[1].data
vvdsW = fits.open(path_2_vvdsW_cat)[1].data
vipers = fits.open(path_2_vipers_cat)[1].data
spiders = fits.open(path_2_spall_spiders_dr14_cat)[1].data
path_2_sdss_cat = os.path.join( ff_dir, "FireflyGalaxySdss26.fits" )
path_2_eboss_cat = os.path.join( ff_dir, "FireflyGalaxyEbossDR14.fits" )
path_2_pS_salpeter_cat = os.path.join( ll_dir, "portsmouth_stellarmass_starforming_salp-26.fits.gz" )
path_2_pB_salpeter_cat = os.path.join( ll_dir, "portsmouth_stellarmass_starforming_salp-DR12-boss.fits.gz" )
path_2_pS_kroupa_cat = os.path.join( ll_dir, "portsmouth_stellarmass_starforming_krou-26.fits.gz" )
path_2_pB_kroupa_cat = os.path.join( ll_dir, "portsmouth_stellarmass_starforming_krou-DR12-boss.fits.gz" )
path_2_ppS_kroupa_cat = os.path.join( ll_dir, "portsmouth_stellarmass_passive_krou-26.fits")
path_2_ppB_kroupa_cat = os.path.join( ll_dir, "portsmouth_stellarmass_passive_krou-DR12.fits")
path_2_F16_cat = os.path.join( sdss_dir, "RA_DEC_z_w_fluxOII_Mstar_grcol_Mr_lumOII.dat" )
RA, DEC, z, weigth, O2flux, M_star, gr_color, Mr_5logh, O2luminosity = n.loadtxt(path_2_F16_cat, unpack=True)
cosmos = fits.open(path_2_cosmos_cat)[1].data
sdss = fits.open(path_2_sdss_cat)[1].data
boss = fits.open(path_2_eboss_cat)[1].data
sdss_12_portSF_kr = fits.open(path_2_pS_kroupa_cat)[1].data
boss_12_portSF_kr = fits.open(path_2_pB_kroupa_cat)[1].data
sdss_12_portPA_kr = fits.open(path_2_ppS_kroupa_cat)[1].data
boss_12_portPA_kr = fits.open(path_2_ppB_kroupa_cat)[1].data
sdss_12_portSF_sa = fits.open(path_2_pS_salpeter_cat)[1].data
boss_12_portSF_sa = fits.open(path_2_pB_salpeter_cat)[1].data
path_ilbert13_SMF = os.path.join(ll_dir, "ilbert_2013_mass_function_params.txt")
zmin, zmax, N, M_comp, M_star, phi_1s, alpha_1s, phi_2s, alpha_2s, log_rho_s = n.loadtxt(os.path.join( ll_dir, "ilbert_2013_mass_function_params.txt"), unpack=True)
#smfs_ilbert13 = n.array([lambda mass : smf_ilbert13( mass , 10**M_star[ii], phi_1s[ii]*10**(-3), alpha_1s[ii], phi_2s[ii]*10**(-3), alpha_2s[ii] ) for ii in range(len(M_star)) ])
smf01 = lambda mass : smf_ilbert13( mass , 10**M_star[0], phi_1s[0]*10**(-3), alpha_1s[0], phi_2s[0]*10**(-3), alpha_2s[0] )
print 10**M_star[0], phi_1s[0]*10**(-3), alpha_1s[0], phi_2s[0]*10**(-3), alpha_2s[0]
volume_per_deg2 = ( aa.comoving_volume(z_max) - aa.comoving_volume(z_min) ) * n.pi / 129600.
volume_per_deg2_val = volume_per_deg2.value
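# Note (added): comoving_volume() returns the full-sky comoving volume; the
# full sky covers 4*pi*(180/pi)**2 = 129600/pi ~= 41253 deg^2, so multiplying
# by pi/129600 converts it to a comoving volume per square degree.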
# global spm quantities
# stat functions
ld = lambda selection : len(selection.nonzero()[0])
area_sdss = 7900.
area_boss = 10000.
area_cosmos = 1.52
def get_basic_stat_anyCat(catalog_name, z_name, z_err_name, name, zflg_val):
catalog = fits.open(catalog_name)[1].data
catalog_zOk =(catalog[z_err_name] > 0.) & (catalog[z_name] > catalog[z_err_name])
catalog_stat = (catalog_zOk) #& (catalog[z_name] > z_min) & (catalog[z_name] < z_max)
catalog_sel = (catalog_stat) & (catalog['LOGMASS'] < 14. ) & (catalog['LOGMASS'] > 0 ) & (catalog['MAXLOGMASS'] - catalog['MINLOGMASS'] <0.4) & (catalog['LOGMASS'] < catalog['MAXLOGMASS'] ) & (catalog['LOGMASS'] > catalog['MINLOGMASS'] )
m_catalog = catalog['LOGMASS']
w_catalog = n.ones_like(catalog['LOGMASS'])
print catalog_name, "& - & $", ld(catalog_zOk),"$ & $", ld(catalog_sel),"$ \\\\"
#return catalog_sel, m_catalog, w_catalog
def get_basic_stat_DR12(catalog, z_name, z_err_name, name, zflg_val):
catalog_zOk =(catalog[z_err_name] > 0.) & (catalog[z_name] > catalog[z_err_name])
catalog_stat = (catalog_zOk) & (catalog[z_name] > z_min) & (catalog[z_name] < z_max)
catalog_sel = (catalog_stat) & (catalog['LOGMASS'] < 14. ) & (catalog['LOGMASS'] > 0 ) & (catalog['MAXLOGMASS'] - catalog['MINLOGMASS'] <0.4) & (catalog['LOGMASS'] < catalog['MAXLOGMASS'] ) & (catalog['LOGMASS'] > catalog['MINLOGMASS'] )
m_catalog = catalog['LOGMASS']
w_catalog = n.ones_like(catalog['LOGMASS'])
print name, "& - & $", ld(catalog_zOk),"$ & $", ld(catalog_sel),"$ \\\\"
return catalog_sel, m_catalog, w_catalog
def get_basic_stat_DR14(catalog, z_name, z_err_name, class_name, zwarning, name, zflg_val, prefix):
catalog_zOk =(catalog[z_err_name] > 0.) & (catalog[z_name] > catalog[z_err_name]) & (catalog[class_name]=='GALAXY') & (catalog[zwarning]==zflg_val)
catalog_stat = (catalog_zOk) & (catalog[z_name] > z_min) & (catalog[z_name] < z_max)
catalog_sel = (catalog_stat) & (catalog[prefix+'stellar_mass'] < 10**14. ) & (catalog[prefix+'stellar_mass'] > 0 ) & (catalog[prefix+'stellar_mass'] > catalog[prefix+'stellar_mass_low'] ) & (catalog[prefix+'stellar_mass'] < catalog[prefix+'stellar_mass_up'] ) & ( - n.log10(catalog[prefix+'stellar_mass_low']) + n.log10(catalog[prefix+'stellar_mass_up']) < 0.4 )
m_catalog = n.log10(catalog[prefix+'stellar_mass'])
w_catalog = n.ones_like(catalog[prefix+'stellar_mass'])
print name, '& $',len(catalog), "$ & $", ld(catalog_zOk),"$ & $", ld(catalog_sel),"$ \\\\"
return catalog_sel, m_catalog, w_catalog
def get_hist(masses, weights, mbins):
NN = n.histogram(masses, mbins)[0]
NW = n.histogram(masses, mbins, weights = weights)[0]
xx = (mbins[1:] + mbins[:-1])/2.
return xx, NW, NN**(-0.5)*NW
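# Note (added): the third value returned above is the weighted count scaled by
# 1/sqrt(N_raw), i.e. a Poisson counting uncertainty propagated onto the
# weighted histogram in each mass bin.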
dlog10m = 0.25
mbins = n.arange(8,12.5,dlog10m)
def plot_smf_b(IMF="Chabrier_ELODIE_"):
boss_sel, boss_m, boss_w = get_basic_stat_DR14(boss, 'Z_NOQSO', 'Z_ERR_NOQSO', 'CLASS_NOQSO', 'ZWARNING_NOQSO', IMF+' & BOSS & 14 ', 0., IMF)
x, y, ye = get_hist(boss_m[boss_sel], weights = boss_w[boss_sel]/(dlog10m*n.log(10)*area_boss*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y, yerr = ye, label=IMF+'BOSS', lw=1)
def plot_smf_s(IMF="Chabrier_ELODIE_"):
boss_sel, boss_m, boss_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', IMF+' & BOSS & 14 ', 0., IMF)
x, y, ye = get_hist(boss_m[boss_sel], weights = boss_w[boss_sel]/(dlog10m*n.log(10)*area_boss*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y, yerr = ye, label=IMF+'SDSS', lw=1)
def plot_smf_spiders(IMF="Chabrier_ELODIE_"):
boss_sel, boss_m, boss_w = get_basic_stat_DR14(spiders, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', IMF+' & BOSS & 14 ', 0., IMF)
x, y, ye = get_hist(boss_m[boss_sel], weights = boss_w[boss_sel]/(dlog10m*n.log(10)*area_boss*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y, yerr = ye, label=IMF+'SPIDERS', lw=1)
p.figure(1, (8,8))
p.plot(mbins, smf01(10**mbins), label='Ilbert 13, 0.2<z<0.5', ls='dashed')
plot_smf_b("Chabrier_ELODIE_")
plot_smf_b("Chabrier_MILES_")
plot_smf_b("Chabrier_STELIB_")
plot_smf_b("Kroupa_ELODIE_")
plot_smf_b("Kroupa_MILES_")
plot_smf_b("Kroupa_STELIB_")
plot_smf_b("Salpeter_ELODIE_")
plot_smf_b("Salpeter_MILES_")
plot_smf_b("Salpeter_STELIB_")
plot_smf_spiders("Chabrier_ELODIE_")
p.title(str(z_min)+'<z<'+str(z_max)+' BOSS+eBOSS')
p.xlabel(r"$\log_{10}$ (M / $M_\odot$ )")
p.ylabel(r'$\Phi(M)$ [Mpc$^{-3}$ dex$^{-1}$]')
p.yscale('log')
p.legend(loc=0, frameon = False)
p.ylim((1e-8, 1e-2))
p.xlim((9.5, 12.5))
p.grid()
p.savefig(os.path.join(out_dir, "firefly_SMFs_BOSS_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
p.clf()
p.figure(1, (8,8))
p.plot(mbins, smf01(10**mbins), label='Ilbert 13, 0.2<z<0.5', ls='dashed')
plot_smf_s("Chabrier_ELODIE_")
plot_smf_s("Chabrier_MILES_")
plot_smf_s("Chabrier_STELIB_")
plot_smf_s("Kroupa_ELODIE_")
plot_smf_s("Kroupa_MILES_")
plot_smf_s("Kroupa_STELIB_")
plot_smf_s("Salpeter_ELODIE_")
plot_smf_s("Salpeter_MILES_")
plot_smf_s("Salpeter_STELIB_")
plot_smf_spiders("Chabrier_ELODIE_")
p.title(str(z_min)+'<z<'+str(z_max)+' SDSS')
p.xlabel(r'$\log_{10}$(M / $M_\odot$ )')
p.ylabel(r'$\Phi(M)$ [Mpc$^{-3}$ dex$^{-1}$]')
p.yscale('log')
p.legend(loc=0, frameon = False)
p.ylim((1e-8, 1e-2))
p.xlim((9.5, 12.5))
p.grid()
p.savefig(os.path.join(out_dir, "firefly_SMFs_SDSS_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
p.clf()
sys.exit()
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR12(boss_12_portSF_kr, 'Z', 'Z_ERR', 'Portsmouth SF Kroupa & BOSS & 12 ', 0.)
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR12(boss_12_portPA_kr, 'Z', 'Z_ERR', 'Portsmouth Passive Kroupa & BOSS & 12 ', 0.)
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR12(boss_12_portSF_sa, 'Z', 'Z_ERR', 'Portsmouth SF Salpeter & BOSS & 12 ', 0.)
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Chabrier ELODIE & SDSS & 14 ', 0., "Chabrier_ELODIE_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Chabrier MILES & SDSS & 14 ', 0., "Chabrier_MILES_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Chabrier STELIB & SDSS & 14 ', 0., "Chabrier_STELIB_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Kroupa ELODIE & SDSS & 14 ', 0., "Kroupa_ELODIE_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Kroupa MILES & SDSS & 14 ', 0., "Kroupa_MILES_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Kroupa STELIB & SDSS & 14 ', 0., "Kroupa_STELIB_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Salpeter ELODIE & SDSS & 14 ', 0., "Salpeter_ELODIE_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Salpeter MILES & SDSS & 14 ', 0., "Salpeter_MILES_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR14(sdss, 'Z', 'Z_ERR', 'CLASS', 'ZWARNING', 'Salpeter STELIB & SDSS & 14 ', 0., "Salpeter_STELIB_")
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR12(sdss_12_portSF_kr, 'Z', 'Z_ERR', 'Portsmouth SF Kroupa & SDSS & 12 ', 0.)
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR12(sdss_12_portPA_kr, 'Z', 'Z_ERR', 'Portsmouth Passive Kroupa & SDSS & 12 ', 0.)
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR12(sdss_12_portSF_sa, 'Z', 'Z_ERR', 'Portsmouth SF Salpeter & SDSS & 12 ', 0.)
x, y, ye = get_hist(boss14_m[boss14_sel], weights = boss14_w[boss14_sel]/(dlog10m*n.log(10)*area_boss14*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y, yerr = ye, label='BOSS14', lw=0.5)
p.title(str(z_min)+'<z<'+str(z_max))
p.xlabel(r'$\log_{10}$ (stellar mass '+imf+r" / $M_\odot$ )")
p.ylabel(r'$\Phi(M)$ [Mpc$^{-3}$ dex$^{-1}$]')
p.yscale('log')
p.legend(loc=0, frameon = False)
p.ylim((1e-8, 1e-2))
p.xlim((9.5, 12.5))
p.grid()
p.savefig(os.path.join(out_dir, "SDSS_SMF_"+imf+"_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
p.clf()
sys.exit()
def plotMF_raw(imf='kroupa'):
sdss14_sel, sdss14_m, sdss14_w = get_basic_stat_FF(sdss14, 'Z', 'Z_ERR', 'ZWARNING', 'SDSS14', 0., imf=imf)
boss14_sel, boss14_m, boss14_w = get_basic_stat_FF(boss14, 'Z_NOQSO', 'Z_ERR_NOQSO', 'ZWARNING_NOQSO', 'BOSS14', 0., imf=imf)
sdss12_sel, sdss12_m, sdss12_w = get_basic_stat_DR12(sdss12, 'Z', 'Z_ERR', 'SDSS12', 0.)
boss12_sel, boss12_m, boss12_w = get_basic_stat_DR12(boss12, 'Z', 'Z_ERR', 'BOSS12', 0.)
dlog10m = 0.25
mbins = n.arange(8,12.5,dlog10m)
p.figure(1, (8,8))
p.plot(mbins, smf01(10**mbins), label='Ilbert 13, 0.2<z<0.5', ls='dashed')
x, y, ye = get_hist(sdss14_m[sdss14_sel], weights = sdss14_w[sdss14_sel]/(dlog10m*n.log(10)*area_sdss14*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y, yerr = ye, label='SDSS14', lw=1)
x, y, ye = get_hist(sdss12_m[sdss12_sel], weights = sdss12_w[sdss12_sel]/(dlog10m*n.log(10)*area_sdss12*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y, yerr = ye, label='SDSS12', lw=1)
x, y, ye = get_hist(boss12_m[boss12_sel], weights = boss12_w[boss12_sel]/(dlog10m*n.log(10)*area_boss12*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y, yerr = ye, label='BOSS12', lw=0.5)
x, y, ye = get_hist(boss14_m[boss14_sel], weights = boss14_w[boss14_sel]/(dlog10m*n.log(10)*area_boss14*volume_per_deg2_val), mbins = mbins)
p.errorbar(x, y, yerr = ye, label='BOSS14', lw=0.5)
p.title(str(z_min)+'<z<'+str(z_max))
p.xlabel(r'$\log_{10}$ (stellar mass '+imf+r" / $M_\odot$ )")
p.ylabel(r'$\Phi(M)$ [Mpc$^{-3}$ dex$^{-1}$]')
p.yscale('log')
p.legend(loc=0, frameon = False)
p.ylim((1e-8, 1e-2))
p.xlim((9.5, 12.5))
p.grid()
p.savefig(os.path.join(out_dir, "SDSS_SMF_"+imf+"_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
p.clf()
import time
t0 = time.time()
id14 = n.arange(len(sdss14))
id12_2_14 = n.array([ id14[(sdss14['PLATE'] == sdss12[id12]['PLATE'])&(sdss14['MJD'] == sdss12[id12]['MJD'])&(sdss14['FIBERID'] == sdss12[id12]['FIBERID'])][0] for id12 in n.arange(5000)]) # len(sdss12)) ])
print time.time() - t0
m14_i = sdss14['stellar_mass_'+imf][id12_2_14]
m12_i = sdss12['LOGMASS'][n.arange(5000)]
ok = (m12_i >8 )&(m12_i < 13 )&(m14_i >8 )&(m14_i < 13 )
m14 = m14_i[ok]
m12 = m12_i[ok]
mms = n.arange(8,13,0.02)
outP = n.polyfit(m12, m14-m12, deg=1)
p.plot(mms, n.polyval(outP, mms), 'm')
outP = n.polyfit(m12, m14-m12, deg=2)
p.plot(mms, n.polyval(outP, mms), 'm')
p.plot(m12, m14-m12, 'b,')
p.axhline(n.mean(m14-m12), color='k')
p.xlabel('log(mass) dr12')
p.ylabel(r'$\Delta \log$(mass) dr14-dr12')
p.xlim((8,13.))
p.ylim((-2, 2))
p.grid()
p.savefig(os.path.join(out_dir, "SDSS_mass_comparison"+imf+"_"+str(z_min)+'_z_'+str(z_max)+".jpg" ))
p.show()
# for each object in the catalog, assign the corresponding stellar mass from firefly
# match in plate mjd fiberid using the SDSS DR12 as a reference i.e. make a DR12+DR14 catalog.
# fraction of objects with a more accurate stellar mass estimates : error is smaller.
# match DR14 wiht DR12
# extract mean SN per pixel in a spectrum and select objects that should have a better nass estimate.
#
plotMF_raw(imf='salpeter')
| cc0-1.0 |
abdulbaqi/ThinkStats2 | code/hinc_soln.py | 67 | 4296 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import hinc
import thinkplot
import thinkstats2
"""This file contains a solution to an exercise in Think Stats:
The distributions of wealth and income are sometimes modeled using
lognormal and Pareto distributions. To see which is better, let's
look at some data.
The Current Population Survey (CPS) is a joint effort of the Bureau
of Labor Statistics and the Census Bureau to study income and related
variables. Data collected in 2013 is available from
http://www.census.gov/hhes/www/cpstables/032013/hhinc/toc.htm.
I downloaded hinc06.xls, which is an Excel spreadsheet with
information about household income, and converted it to hinc06.csv,
a CSV file you will find in the repository for this book. You
will also find hinc.py, which reads the CSV file.
Extract the distribution of incomes from this dataset. Are any of the
analytic distributions in this chapter a good model of the data? A
solution to this exercise is in hinc_soln.py.
My solution generates three figures:
1) The CDF of income on a linear scale.
2) The CCDF on a log-log scale along with a Pareto model intended
to match the tail behavior.
3) The CDF on a log-x scale along with a lognormal model chosen to
match the median and inter-quartile range.
My conclusions based on these figures are:
1) The Pareto model is probably a reasonable choice for the top
10-20% of incomes.
2) The lognormal model captures the shape of the distribution better,
but the data deviate substantially from the model. With different
choices for sigma, you could match the upper or lower tail, but not
both at the same time.
In summary I would say that neither model captures the whole distribution,
so you might have to
1) look for another analytic model,
2) choose one that captures the part of the distribution that is most
relevant, or
3) avoid using an analytic model altogether.
"""
class SmoothCdf(thinkstats2.Cdf):
"""Represents a CDF based on calculated quantiles.
"""
def Render(self):
"""Because this CDF was not computed from a sample, it
should not be rendered as a step function.
"""
return self.xs, self.ps
def Prob(self, x):
"""Compute CDF(x), interpolating between known values.
"""
return np.interp(x, self.xs, self.ps)
def Value(self, p):
"""Compute inverse CDF(x), interpolating between probabilities.
"""
return np.interp(p, self.ps, self.xs)
def MakeFigures(df):
"""Plots the CDF of income in several forms.
"""
xs, ps = df.income.values, df.ps.values
cdf = SmoothCdf(xs, ps, label='data')
cdf_log = SmoothCdf(np.log10(xs), ps, label='data')
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Save(root='hinc_linear',
xlabel='household income',
ylabel='CDF')
# pareto plot
# for the model I chose parameters by hand to fit the tail
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5,
low=0, high=250000)
thinkplot.Plot(xs, 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf, complement=True)
thinkplot.Save(root='hinc_pareto',
xlabel='log10 household income',
ylabel='CCDF',
xscale='log',
yscale='log')
# lognormal plot
# for the model I estimate mu and sigma using
# percentile-based statistics
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
std = iqr / 1.349
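    # Note (added): for a Gaussian the quartiles lie at mu +/- 0.6745*sigma,
    # so IQR = 1.349*sigma; dividing the IQR by 1.349 turns it into a robust,
    # percentile-based estimate of sigma.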
# choose std to match the upper tail
std = 0.35
print(median, std)
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Save(root='hinc_normal',
xlabel='log10 household income',
ylabel='CDF')
def main():
df = hinc.ReadData()
MakeFigures(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/stats/interface.py | 14 | 4723 | from pandas.core.api import Series, DataFrame, Panel, MultiIndex
from pandas.stats.ols import OLS, MovingOLS
from pandas.stats.plm import PanelOLS, MovingPanelOLS, NonPooledPanelOLS
import pandas.stats.common as common
def ols(**kwargs):
"""Returns the appropriate OLS object depending on whether you need
simple or panel OLS, and a full-sample or rolling/expanding OLS.
Will be a normal linear regression or a (pooled) panel regression depending
on the type of the inputs:
y : Series, x : DataFrame -> OLS
y : Series, x : dict of DataFrame -> OLS
y : DataFrame, x : DataFrame -> PanelOLS
y : DataFrame, x : dict of DataFrame/Panel -> PanelOLS
y : Series with MultiIndex, x : Panel/DataFrame + MultiIndex -> PanelOLS
Parameters
----------
y: Series or DataFrame
See above for types
x: Series, DataFrame, dict of Series, dict of DataFrame, Panel
weights : Series or ndarray
The weights are presumed to be (proportional to) the inverse of the
variance of the observations. That is, if the variables are to be
transformed by 1/sqrt(W) you must supply weights = 1/W
intercept: bool
True if you want an intercept. Defaults to True.
nw_lags: None or int
Number of Newey-West lags. Defaults to None.
nw_overlap: bool
Whether there are overlaps in the NW lags. Defaults to False.
window_type: {'full sample', 'rolling', 'expanding'}
'full sample' by default
window: int
size of window (for rolling/expanding OLS). If window passed and no
        explicit window_type, 'rolling' will be used as the window_type
Panel OLS options:
pool: bool
Whether to run pooled panel regression. Defaults to true.
entity_effects: bool
Whether to account for entity fixed effects. Defaults to false.
time_effects: bool
Whether to account for time fixed effects. Defaults to false.
x_effects: list
List of x's to account for fixed effects. Defaults to none.
dropped_dummies: dict
Key is the name of the variable for the fixed effect.
Value is the value of that variable for which we drop the dummy.
For entity fixed effects, key equals 'entity'.
By default, the first dummy is dropped if no dummy is specified.
cluster: {'time', 'entity'}
cluster variances
Examples
--------
# Run simple OLS.
result = ols(y=y, x=x)
# Run rolling simple OLS with window of size 10.
result = ols(y=y, x=x, window_type='rolling', window=10)
print(result.beta)
result = ols(y=y, x=x, nw_lags=1)
# Set up LHS and RHS for data across all items
y = A
x = {'B' : B, 'C' : C}
# Run panel OLS.
result = ols(y=y, x=x)
# Run expanding panel OLS with window 10 and entity clustering.
result = ols(y=y, x=x, cluster='entity', window_type='expanding', window=10)
Returns
-------
The appropriate OLS object, which allows you to obtain betas and various
statistics, such as std err, t-stat, etc.
"""
if (kwargs.get('cluster') is not None and
kwargs.get('nw_lags') is not None):
raise ValueError(
'Pandas OLS does not work with Newey-West correction '
'and clustering.')
pool = kwargs.get('pool')
if 'pool' in kwargs:
del kwargs['pool']
window_type = kwargs.get('window_type')
window = kwargs.get('window')
if window_type is None:
if window is None:
window_type = 'full_sample'
else:
window_type = 'rolling'
else:
window_type = common._get_window_type(window_type)
if window_type != 'full_sample':
kwargs['window_type'] = common._get_window_type(window_type)
y = kwargs.get('y')
x = kwargs.get('x')
panel = False
if isinstance(y, DataFrame) or (isinstance(y, Series) and
isinstance(y.index, MultiIndex)):
panel = True
if isinstance(x, Panel):
panel = True
if window_type == 'full_sample':
for rolling_field in ('window_type', 'window', 'min_periods'):
if rolling_field in kwargs:
del kwargs[rolling_field]
if panel:
if pool is False:
klass = NonPooledPanelOLS
else:
klass = PanelOLS
else:
klass = OLS
else:
if panel:
if pool is False:
klass = NonPooledPanelOLS
else:
klass = MovingPanelOLS
else:
klass = MovingOLS
return klass(**kwargs)
| mit |
brianlorenz/COSMOS_IMACS_Redshifts | Emission_Fitting/Find_bad_low.py | 1 | 6605 | #Find which objects are bad and low based on various cuts through the data. Output this as a dataframe containing True False for every object in every line
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
import collections
from astropy.stats import biweight_midvariance
from matplotlib.font_manager import FontProperties
#Folder to save the figures
figout = '/Users/blorenz/COSMOS/Reports/2018/Images/'
#The location with the file for all of our data
qualout = '/Users/blorenz/COSMOS/COSMOSData/dataqual.txt'
#The location with the file for all of our data
fluxdatapath = '/Users/blorenz/COSMOS/COSMOSData/lineflux.txt'
#The location to store the scale and its stddev of each line
scaledata = '/Users/blorenz/COSMOS/COSMOSData/scales.txt'
#Read in the scale of the lines
#scale_df = ascii.read(scaledata).to_pandas()
#Location of the equivalent width data
ewdata = '/Users/blorenz/COSMOS/COSMOSData/lineew.txt'
#Read in the ew of the lines
ew_df = ascii.read(ewdata).to_pandas()
#Read the datafile:
fluxdata = ascii.read(fluxdatapath).to_pandas()
#Division function
def divz(X,Y):
return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
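# Note (added): divz is a guarded division returning X/Y where Y != 0 and 0
# where Y == 0; np.where(Y, Y, Y+1) swaps zero denominators for 1 before the
# division and np.not_equal(Y, 0) zeroes those entries afterwards.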
sig = 5
lines = ['3727','4102','4340','4861','4959','5007','6548','6548_fix','6563','6563_fix','6583','6583_fix']
lines = np.sort(lines)
fig,axarr = plt.subplots(4,12,figsize=(100,30))
#Plotting parameters
mark='o'
ms=6
ls='--'
#Fontsizes for plotting
axisfont = 18
ticksize = 16
titlefont = 32
legendfont = 16
textfont = 16
med_bi_df = pd.DataFrame()
qualframe = pd.DataFrame()
cullnames = ['_scale','_rchi2','_shift','_stddev']
def readdata(cull,line):
#Special case for shift - we need to compute this, and not fit in log
if cull in ['_shift']:
if len(line)==8: lineval=int(line[0:4])
else: lineval = int(line)
#Compute the shift
culldata = (fluxdata[line+'_mean']-(1+fluxdata['zcc'])*lineval)/2
culldatapos=culldata
elif cull in ['_stddev']:
culldata = fluxdata[line+cull]
culldatapos = culldata
else:
#Set the data to cull
culldata = np.log10(fluxdata[line+cull])
culldatapos = np.log10(fluxdata[fluxdata[line+cull]>0][line+cull])
return culldata,culldatapos
#Loop over all lines
for line in lines:
#Read in the data that won't change
usig = fluxdata[line+'_usig']
err = np.sqrt(fluxdata[line+'_usig']**2+(0.25*ew_df[line+'_modelabs'])**2)
rchi2 = fluxdata[line+'_rchi2']
flux = fluxdata[line+'_flux']
usigrat = divz(flux,(err*np.sqrt(rchi2)))
#Find the low data at a cut of sig signal-to-noise
low = (usigrat < sig)
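    # Note (added): err above combines the flux uncertainty with 25% of the
    # model absorption EW in quadrature, and is further inflated by
    # sqrt(reduced chi^2), so this is a fairly conservative S/N estimate.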
badflag = (fluxdata[line+'_flag'] != 0)
qualframe[line+'_temp_low'] = low
#Loop over all culling methods
for cull in cullnames:
culldata, culldatapos = readdata(cull,line)
#We cull in log so that 0s are filtered out
#Find the median and biweight of the distribution of objects
med = np.median(culldatapos)
biwt = np.sqrt(biweight_midvariance(culldatapos))
#Cut the data 3 sigma from the median
bad = (np.abs(culldata-med) > (3*biwt))
bad = np.logical_or(bad,badflag)
#Set the good, bad, and low for this cut
low = np.logical_and(low,np.logical_not(bad))
good = np.logical_and(np.logical_not(low),np.logical_not(bad))
#Store the med, biwt in the dataframe
med_bi_df.at[0,line+cull] = med
med_bi_df.at[1,line+cull] = biwt
#Store the good, low, and bad into the frame
qualframe[line+cull+'_good'] = good
qualframe[line+cull+'_low'] = low
qualframe[line+cull+'_bad'] = bad
counter = 0
#Now that the frame is complete, combine all of the bads to flag the points
for line in lines:
cullcount = 0
#Read in the fluxes
flux = fluxdata[line+'_flux']
#Get the bad data across all cuts
bads = [qualframe[line+name+'_bad'] for name in cullnames]
#If it is bad in any of the cuts, it is bad data
badline = np.logical_or.reduce(bads)
#Find the low data that is not bad
lowline = np.logical_and(qualframe[line+'_temp_low'],np.logical_not(badline))
goodline = np.logical_and(np.logical_not(lowline),np.logical_not(badline))
#Store the low, bad and good for each line across all methods
qualframe[line+'_bad'] = badline
qualframe[line+'_low'] = lowline
qualframe[line+'_good'] = goodline
#Now, plot the culling for each line:
for cull in cullnames:
#Read the data
culldata, culldatapos = readdata(cull,line)
#Set the current plot
ax = axarr[cullcount,counter]
med = med_bi_df.iloc[0][line+cull]
biwt = med_bi_df.iloc[1][line+cull]
#Plot the cut
ax.plot(flux[goodline],culldata[goodline],color='blue',marker=mark,ms=ms,label='Detection',ls='None')
ax.plot(flux[lowline],culldata[lowline],color='orange',marker=mark,ms=ms,label='Low S/N',ls='None')
        ax.plot(flux[badline],culldata[badline],color='red',marker=mark,ms=ms,label='Not usable',ls='None')
ax.plot((-100,10000),(med,med),color='black',ls=ls,label='Median')
#ax.plot((-100,10000),(med+biwt,med+biwt),color='darkgrey',ls=ls,label='1 sigma')
#ax.plot((-100,10000),(med-biwt,med-biwt),color='darkgrey',ls=ls)
ax.plot((-100,10000),(med+3*biwt,med+3*biwt),color='grey',ls=ls,label='3 sigma')
ax.plot((-100,10000),(med-3*biwt,med-3*biwt),color='grey',ls=ls)
ax.legend(fontsize=legendfont)
font = FontProperties()
font.set_weight('bold')
ax.set_xlabel('H$\\alpha$ Flux ($10^{-17}$ erg/s/${cm}^2/\AA$)',fontsize=axisfont)
if cull == '_scale':
ax.set_title(line,fontsize=titlefont)
ax.set_ylabel('log(Scale)',fontsize=axisfont)
ax.set_ylim(-2,2)
if cull == '_rchi2':
ax.set_ylabel('log($\\chi_\\nu^2$)',fontsize=axisfont)
ax.set_ylim(-3,3)
if cull == '_shift':
ax.set_ylim(-8,8)
ax.set_ylabel('Shift' +' (pixels)',fontsize=axisfont)
if cull == '_stddev':
ax.set_ylim(0,10)
ax.set_ylabel('Sigma'+' ($\AA$)',fontsize=axisfont)
ax.tick_params(labelsize = ticksize)
ax.set_xscale('log')
ax.set_xlim(0.001,500)
#ax.set_yscale('log')
cullcount = cullcount+1
counter = counter+1
#qualframe.to_csv(qualout,index=False)
fig.tight_layout()
fig.savefig(figout + 'data_culling.pdf')
plt.close(fig)
| mit |
e-koch/Phys-595 | project_code/Machine Learning/unsupervised_specs_lsq.py | 1 | 3652 |
'''
Unsupervised learning on SDSS spectral data
'''
import numpy as np
import matplotlib.pyplot as p
from sklearn.cross_validation import train_test_split
from pandas import read_csv, DataFrame, concat
import joblib
from lsanomaly import LSAnomaly
save_models = False
test_params = False
multi = True
learn = True
view = False
data = read_csv("all_spec_data_cleaned.csv")
X = data[data.columns[1:]]
X = np.asarray(X)
# Standardize the data
X = (X - np.mean(X, axis=0))/np.std(X, axis=0)
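# Note (added): each feature column is standardized to zero mean and unit
# variance so that a single kernel width (sigma) in LSAnomaly acts on features
# with comparable scales.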
print "Ready for some anomaly finding!"
if test_params:
# Make a mock Grid Search, evaluating based on the number of
# anomalies found
sigmas = np.logspace(0, 3, 8)
rhos = np.logspace(-6, 3, 8)
# Use a subset of the data to speed things up a bit
indices = np.arange(X.shape[0])
np.random.shuffle(indices)
X_sub = X[indices[:len(indices)/5]]
X_train, X_test = \
        train_test_split(X_sub, test_size=0.5, random_state=200)
def testing_func(a):
sigma, rho = a
print "Training with rho: %s, sigma: %s" % (rho, sigma)
clf = LSAnomaly(rho=rho, sigma=sigma)
clf.fit(X_train)
y_pred = clf.predict(X_test)
y_pred = np.asarray(y_pred)
anomalies = np.where(y_pred == 'anomaly')[0]
print anomalies
return [sigma, rho, len(anomalies),
len(anomalies)/float(X_test.shape[0])]
if multi:
from multiprocessing import Pool
from itertools import product
pool = Pool(processes=8)
results = pool.map(testing_func, product(sigmas, rhos))
pool.close()
pool.join()
else:
results = []
for rho in rhos:
for sigma in sigmas:
print "Now fitting: rho - %s; sigma - %s" % (rho, sigma)
clf = LSAnomaly(rho=rho, sigma=sigma, verbose=False)
clf.fit(X_train)
y_pred = clf.predict(X_test)
anomalies = np.where(y_pred == 'anomaly')[0]
print np.unique(anomalies)
                results.append([sigma, rho, len(anomalies),
len(anomalies)/float(X_test.shape[0])])
test_df = DataFrame(results, columns=["Sigma", "Rho", "Anomalies",
"Percent"])
test_df.to_csv("lsq_anomaly_testing_fifth_betterparams.csv")
if learn:
all_anom = []
# Repeat the process many times
# Record all anomalies and look for those which are consistently
# labeled.
for i in range(100):
print "On %s/%s" % (i, 100)
# Need to keep track of the indices!
indices = np.arange(X.shape[0])
X_train, X_test, ind_train, ind_test = \
train_test_split(X, indices, test_size=0.5,
random_state=np.random.randint(1e8))
clf = LSAnomaly(rho=2.7, sigma=51.8)
clf.fit(X_train)
y_pred = clf.predict(X)
y_pred = np.asarray(y_pred)
anomalies = np.where(y_pred == 'anomaly')[0]
all_anom.append(DataFrame(data.ix[anomalies]))
if save_models:
joblib.dump(clf, "OneClassSVM_"+str(500+i)+".pkl")
del clf
anom_df = concat(all_anom)
anom_df.to_csv("anomalies_lsq.csv")
if view:
import triangle
from dim_red_vis import dim_red
# Use PCA to look at a projection of the set.
subspace = dim_red(X, verbose=True)
# Do it again with a higher dimension, then project that
subspace = dim_red(X, n_comp=6, verbose=False)
fig = \
triangle.corner(subspace,
labels=['c'+str(i) for i in range(1, 7)])
p.show()
| mit |
mehdidc/scikit-learn | sklearn/cluster/bicluster.py | 38 | 19313 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
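# Note (added): in matrix form the normalization above is
# A_n = D_r^{-1/2} * A * D_c^{-1/2}, where D_r and D_c are the diagonal
# matrices of row and column sums of the nonnegative matrix A.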
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
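# Note (added): _log_normalize returns the doubly centered log matrix
# K_ij = log(A_ij) - rowmean_i - colmean_j + overallmean, i.e. the log
# "interactions" used in Kluger et al.'s spectral biclustering.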
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
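        # Note (added): following Dhillon (2001), ceil(log2(n_clusters))
        # singular vectors are used for the embedding; the extra +1 accounts
        # for the leading trivial singular vector, dropped below via n_discard=1.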
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
| bsd-3-clause |
ebilionis/variational-reformulation-of-inverse-problems | unittests/test_forward.py | 1 | 5111 | """
Test the catalysis forward model.
Author:
Panagiotis Tsilifis
Date:
5/22/2014
"""
import numpy as np
from matplotlib.pyplot import *
import sys
sys.path.insert(0,'./demos/')
from catalysis import CatalysisModel
from catalysis import CatalysisModelDMNLESS
from catalysis import CatalysisFullModelDMNLESS
# The data
data = np.loadtxt('data.txt').reshape((7, 6))
t = data[:, 0]
y_obs = data[:, 1:]
idx = [0, 1, 5, 2, 3, 4]
y_obs = np.hstack([y_obs, np.zeros((y_obs.shape[0], 1))])
y_obs = y_obs[:, idx]
x = np.array([0.0216, 0.0292, 0.0219, 0.0021, 0.0048])
# Test first the original model
solution = CatalysisModel()
print str(solution) + '\n'
state = solution._eval(x)
y = state['f']
J = state['f_grad']
H = state['f_grad_2']
print 'Solution'
print '-' * 80
print 'Shape : ' + str(y.shape)
print y.reshape((7, y.shape[0]/7))
print '\n'
print 'Jacobian'
print '-' * 80
print J
print '\n'
print 'Second derivatives'
print '-' * 80
print H
#t = np.array([0.0, 30., 60., 90., 120., 150., 180.])
plot(t, y_obs, '*')
plot(t, y.reshape((t.shape[0], y.shape[0]/t.shape[0])))
show()
print 'Test the evaluation at many inputs simultaneously'
kappa = 0.1*np.random.rand(x.shape[0],5)
state2 = solution(kappa)
print 'Solution'
print '-' * 80
print str(state2['f']) + '\n'
print 'First derivatives'
print '-' * 80
print str(state2['f_grad']) + '\n'
print 'Second derivatives'
print '-' * 80
print str(state2['f_grad_2']) + '\n'
# Now test the dimensionless version of the model
solution_dmnl = CatalysisModelDMNLESS()
print str(solution_dmnl) + '\n'
state2 = solution_dmnl._eval(x * 180)
y2 = state2['f']
J2 = state2['f_grad']
H2 = state2['f_grad_2']
print 'Solution'
print '-' * 80
print y2.reshape((7, y2.shape[0]/7))
print '\n'
print 'Jacobian'
print '-' * 80
print J2
print '\n'
print 'Second derivatives'
print '-' * 80
print H2
#t = np.array([0.0, 1./6, 1./3, 1./2, 2./3, 5./6, 1.])
plot(t/180., y_obs/500., '*')
plot(t/180., y2.reshape((t.shape[0], y2.shape[0]/t.shape[0])))
show()
print H.shape
#plot(J.flatten() / 500 / 180, J2.flatten(), '.')
plot(H.flatten() / 500 / 180 ** 2, H2.flatten(), '.')
show()
quit()
x_full = np.array([[-3.888, 0. , 0. , 0. , 0. , 0. ],
[ 3.888, -6.498, 0. , 0. , 0. , 0. ],
[ 0. , 5.256, -3.942, 0. , 0. , 0. ],
[ 0. , 0. , 3.942, 0. , 0. , 0. ],
[ 0. , 0.864, 0. , 0. , 0. , 0. ],
[ 0. , 0.378, 0. , 0. , 0. , 0. ]])
x_full = x_full.reshape(36)
# Now test the dimensionless version of the full model
solution_full = CatalysisFullModelDMNLESS()
print str(solution_full) + '\n'
state3 = solution_full._eval(x_full)
y3 = state3['f']
J3 = state3['f_grad']
H3 = state3['f_grad_2']
print 'Solution'
print '-' * 80
print y3.reshape((7 ,y3.shape[0] / 7))
print '\n'
print 'Jacobian'
print '-' * 80
print J3
print '\n'
print 'Second derivative'
print '-' * 80
print H3
plot(t/180, y_obs/500., '*')
plot(t/180., y3.reshape((t.shape[0], y3.shape[0]/t.shape[0])))
show()
x_full_numer = np.array([[-4.33196874, -5. , 6.67939317, -1.53947333, 3.98937717,
6.15844821],
[ 2.78059767, -5. , 1.74400916, -0.21704501, -3.52037185,
-0.53811489],
[ 1.60431762, 2.6435434 , -4.99999968, -0.67762272, -2.85725542,
0.75467099],
[-0.51638391, 1.45563688, 3.67548823, 1.56088914, -0.25853257,
-5. ],
[ 0.37593609, -3.80254408, 4.2799548 , 1.38250366, -2.8156011 ,
-5. ],
[ 0.24968126, -4.61529558, 5.73391027, -0.50962955, 1.67635654,
2.73356322]])
x_full_numer = x_full_numer.reshape(36)
# Now test the dimensionless version of the full model
state4 = solution_full._eval(x_full_numer)
y4 = state4['f']
J4 = state4['f_grad']
H4 = state4['f_grad_2']
print 'Solution'
print '-' * 80
print y4.reshape((7 ,y4.shape[0] / 7))
print '\n'
print 'Jacobian'
print '-' * 80
print J4
print '\n'
print 'Second derivative'
print '-' * 80
print H4
plot(t/180, y_obs/500., '*')
plot(t/180., y4.reshape((t.shape[0], y4.shape[0]/t.shape[0])))
show()
"""
"""
A = np.array([[-3.76978464, -0.50424625, -0.21679456, 0.29876246, -0.03181339, 0.03879548],
[ 2.41461854, -1.22813897, -2.20618065, 0.00794199, 0.36123171, -0.00530061],
[ 1.58491466, -0.0048723, -0.36971964, -0.62944618, 0.0174473, -0.12462298],
[-0.39650543, 1.6740821, 2.42152955, 0.08367671, -0.28383747, 0.04730487],
[ 0.14612687, -0.217782, -0.02277859, 0.19942222, -0.02387345, 0.02980567],
[ 0.02063, 0.28095741, 0.39394389, 0.0396428, -0.03915471, 0.01401757]])
A = A.reshape(36)
state5 = solution_full._eval(A)
y5 = state5['f']
J5 = state5['f_grad']
H5 = state5['f_grad_2']
plot(t/180, y_obs/500., '*')
plot(t/180., y5.reshape((t.shape[0], y5.shape[0]/t.shape[0])))
show()
B = np.zeros(36)
for i in xrange(36):
if A[i] > 0.1:
B[i] = A[i]
print 'Matrix' + '\n'
print str(B)
state6 = solution_full._eval(A)
y6 = state6['f']
J6 = state6['f_grad']
H6 = state6['f_grad_2']
plot(t/180, y_obs/500., '*')
plot(t/180., y6.reshape((t.shape[0], y6.shape[0]/t.shape[0])))
show()
| gpl-2.0 |
toobaz/pandas | pandas/compat/numpy/function.py | 2 | 13719 | """
For compatibility with numpy libraries, pandas functions or
methods have to accept '*args' and '**kwargs' parameters to
accommodate numpy arguments that are not actually used or
respected in the pandas implementation.
To ensure that users do not abuse these parameters, validation
is performed in 'validators.py' to make sure that any extra
parameters passed correspond ONLY to those in the numpy signature.
Part of that validation includes whether or not the user attempted
to pass in non-default values for these extraneous parameters. As we
want to discourage users from relying on these parameters when calling
the pandas implementation, we want them only to pass in the default values
for these parameters.
This module provides a set of commonly used default arguments for functions
and methods that are spread throughout the codebase. This module will make it
easier to adjust to future upstream changes in the analogous numpy signatures.
"""
from collections import OrderedDict
from distutils.version import LooseVersion
from typing import Any, Dict, Optional, Union
from numpy import __version__ as _np_version, ndarray
from pandas._libs.lib import is_bool, is_integer
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
validate_args,
validate_args_and_kwargs,
validate_kwargs,
)
class CompatValidator:
def __init__(self, defaults, fname=None, method=None, max_fname_arg_count=None):
self.fname = fname
self.method = method
self.defaults = defaults
self.max_fname_arg_count = max_fname_arg_count
def __call__(self, args, kwargs, fname=None, max_fname_arg_count=None, method=None):
if args or kwargs:
fname = self.fname if fname is None else fname
max_fname_arg_count = (
self.max_fname_arg_count
if max_fname_arg_count is None
else max_fname_arg_count
)
method = self.method if method is None else method
if method == "args":
validate_args(fname, args, max_fname_arg_count, self.defaults)
elif method == "kwargs":
validate_kwargs(fname, kwargs, self.defaults)
elif method == "both":
validate_args_and_kwargs(
fname, args, kwargs, max_fname_arg_count, self.defaults
)
else:
raise ValueError(
"invalid validation method " "'{method}'".format(method=method)
)
ARGMINMAX_DEFAULTS = dict(out=None)
validate_argmin = CompatValidator(
ARGMINMAX_DEFAULTS, fname="argmin", method="both", max_fname_arg_count=1
)
validate_argmax = CompatValidator(
ARGMINMAX_DEFAULTS, fname="argmax", method="both", max_fname_arg_count=1
)
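# Illustrative note (added): when numpy dispatches a call such as
# np.argmin(series, out=arr) to the pandas method, the extra argument arrives
# via *args/**kwargs; routing those through validate_argmin confirms they still
# equal the numpy defaults above and raises if the user supplied a value that
# pandas would otherwise silently ignore.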
def process_skipna(skipna, args):
if isinstance(skipna, ndarray) or skipna is None:
args = (skipna,) + args
skipna = True
return skipna, args
def validate_argmin_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmin' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmin(args, kwargs)
return skipna
def validate_argmax_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmax' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmax(args, kwargs)
return skipna
ARGSORT_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[Union[int, str]]]
ARGSORT_DEFAULTS["axis"] = -1
ARGSORT_DEFAULTS["kind"] = "quicksort"
ARGSORT_DEFAULTS["order"] = None
if LooseVersion(_np_version) >= LooseVersion("1.17.0"):
# GH-26361. NumPy added radix sort and changed default to None.
ARGSORT_DEFAULTS["kind"] = None
validate_argsort = CompatValidator(
ARGSORT_DEFAULTS, fname="argsort", max_fname_arg_count=0, method="both"
)
# two different signatures of argsort, this second validation
# for when the `kind` param is supported
ARGSORT_DEFAULTS_KIND = OrderedDict() # type: OrderedDict[str, Optional[int]]
ARGSORT_DEFAULTS_KIND["axis"] = -1
ARGSORT_DEFAULTS_KIND["order"] = None
validate_argsort_kind = CompatValidator(
ARGSORT_DEFAULTS_KIND, fname="argsort", max_fname_arg_count=0, method="both"
)
def validate_argsort_with_ascending(ascending, args, kwargs):
"""
If 'Categorical.argsort' is called via the 'numpy' library, the
first parameter in its signature is 'axis', which takes either
an integer or 'None', so check if the 'ascending' parameter has
either integer type or is None, since 'ascending' itself should
be a boolean
"""
if is_integer(ascending) or ascending is None:
args = (ascending,) + args
ascending = True
validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
return ascending
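# Editorial sketch (hypothetical helper): ``Categorical.argsort`` called via
# numpy may receive the integer ``axis`` in the slot pandas names
# ``ascending``; the helper above re-routes it and restores the default.
def _demo_argsort_ascending_shuffle():
    ascending = validate_argsort_with_ascending(-1, (), {})
    assert ascending is True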
CLIP_DEFAULTS = dict(out=None)  # type: Dict[str, Any]
validate_clip = CompatValidator(
CLIP_DEFAULTS, fname="clip", method="both", max_fname_arg_count=3
)
def validate_clip_with_axis(axis, args, kwargs):
"""
If 'NDFrame.clip' is called via the numpy library, the third
    parameter in its signature is 'out', which can take an ndarray,
so check if the 'axis' parameter is an instance of ndarray, since
'axis' itself should either be an integer or None
"""
if isinstance(axis, ndarray):
args = (axis,) + args
axis = None
validate_clip(args, kwargs)
return axis
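# Editorial sketch (hypothetical helper): with numpy's default ``out=None``
# the call passes and ``axis`` is returned unchanged; an actual ndarray
# buffer in either slot would be rejected by ``validate_clip`` because
# pandas does not support numpy's ``out`` argument.
def _demo_clip_out_handling():
    assert validate_clip_with_axis(None, (), {"out": None}) is None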
COMPRESS_DEFAULTS = OrderedDict() # type: OrderedDict[str, Any]
COMPRESS_DEFAULTS["axis"] = None
COMPRESS_DEFAULTS["out"] = None
validate_compress = CompatValidator(
COMPRESS_DEFAULTS, fname="compress", method="both", max_fname_arg_count=1
)
CUM_FUNC_DEFAULTS = OrderedDict() # type: OrderedDict[str, Any]
CUM_FUNC_DEFAULTS["dtype"] = None
CUM_FUNC_DEFAULTS["out"] = None
validate_cum_func = CompatValidator(
CUM_FUNC_DEFAULTS, method="both", max_fname_arg_count=1
)
validate_cumsum = CompatValidator(
CUM_FUNC_DEFAULTS, fname="cumsum", method="both", max_fname_arg_count=1
)
def validate_cum_func_with_skipna(skipna, args, kwargs, name):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'dtype', which takes either a
'numpy' dtype or 'None', so check if the 'skipna' parameter is
a boolean or not
"""
if not is_bool(skipna):
args = (skipna,) + args
skipna = True
validate_cum_func(args, kwargs, fname=name)
return skipna
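# Editorial sketch (hypothetical helper): np.cumsum/np.cumprod forward a
# ``dtype`` that can land in the slot pandas names ``skipna``; a non-boolean
# value is moved into ``args`` and ``skipna`` falls back to its default.
def _demo_cumsum_skipna_shuffle():
    skipna = validate_cum_func_with_skipna(None, (), {}, "cumsum")
    assert skipna is True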
ALLANY_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[bool]]
ALLANY_DEFAULTS["dtype"] = None
ALLANY_DEFAULTS["out"] = None
ALLANY_DEFAULTS["keepdims"] = False
validate_all = CompatValidator(
ALLANY_DEFAULTS, fname="all", method="both", max_fname_arg_count=1
)
validate_any = CompatValidator(
ALLANY_DEFAULTS, fname="any", method="both", max_fname_arg_count=1
)
LOGICAL_FUNC_DEFAULTS = dict(out=None, keepdims=False)
validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method="kwargs")
MINMAX_DEFAULTS = dict(out=None, keepdims=False)
validate_min = CompatValidator(
MINMAX_DEFAULTS, fname="min", method="both", max_fname_arg_count=1
)
validate_max = CompatValidator(
MINMAX_DEFAULTS, fname="max", method="both", max_fname_arg_count=1
)
RESHAPE_DEFAULTS = dict(order="C") # type: Dict[str, str]
validate_reshape = CompatValidator(
RESHAPE_DEFAULTS, fname="reshape", method="both", max_fname_arg_count=1
)
REPEAT_DEFAULTS = dict(axis=None) # type: Dict[str, Any]
validate_repeat = CompatValidator(
REPEAT_DEFAULTS, fname="repeat", method="both", max_fname_arg_count=1
)
ROUND_DEFAULTS = dict(out=None) # type: Dict[str, Any]
validate_round = CompatValidator(
ROUND_DEFAULTS, fname="round", method="both", max_fname_arg_count=1
)
SORT_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[Union[int, str]]]
SORT_DEFAULTS["axis"] = -1
SORT_DEFAULTS["kind"] = "quicksort"
SORT_DEFAULTS["order"] = None
validate_sort = CompatValidator(SORT_DEFAULTS, fname="sort", method="kwargs")
STAT_FUNC_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[Any]]
STAT_FUNC_DEFAULTS["dtype"] = None
STAT_FUNC_DEFAULTS["out"] = None
PROD_DEFAULTS = SUM_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
SUM_DEFAULTS["keepdims"] = False
SUM_DEFAULTS["initial"] = None
MEDIAN_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
MEDIAN_DEFAULTS["overwrite_input"] = False
MEDIAN_DEFAULTS["keepdims"] = False
STAT_FUNC_DEFAULTS["keepdims"] = False
validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS, method="kwargs")
validate_sum = CompatValidator(
SUM_DEFAULTS, fname="sum", method="both", max_fname_arg_count=1
)
validate_prod = CompatValidator(
PROD_DEFAULTS, fname="prod", method="both", max_fname_arg_count=1
)
validate_mean = CompatValidator(
STAT_FUNC_DEFAULTS, fname="mean", method="both", max_fname_arg_count=1
)
validate_median = CompatValidator(
MEDIAN_DEFAULTS, fname="median", method="both", max_fname_arg_count=1
)
STAT_DDOF_FUNC_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[bool]]
STAT_DDOF_FUNC_DEFAULTS["dtype"] = None
STAT_DDOF_FUNC_DEFAULTS["out"] = None
STAT_DDOF_FUNC_DEFAULTS["keepdims"] = False
validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS, method="kwargs")
TAKE_DEFAULTS = OrderedDict() # type: OrderedDict[str, Optional[str]]
TAKE_DEFAULTS["out"] = None
TAKE_DEFAULTS["mode"] = "raise"
validate_take = CompatValidator(TAKE_DEFAULTS, fname="take", method="kwargs")
def validate_take_with_convert(convert, args, kwargs):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'axis', which takes either an
ndarray or 'None', so check if the 'convert' parameter is either
an instance of ndarray or is None
"""
if isinstance(convert, ndarray) or convert is None:
args = (convert,) + args
convert = True
validate_take(args, kwargs, max_fname_arg_count=3, method="both")
return convert
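# Editorial sketch (hypothetical helper): numpy's ``take`` can forward its
# ``axis`` positionally into the slot pandas names ``convert``; the helper
# above restores the boolean default before validating.
def _demo_take_convert_shuffle():
    convert = validate_take_with_convert(None, (), {})
    assert convert is True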
TRANSPOSE_DEFAULTS = dict(axes=None)
validate_transpose = CompatValidator(
TRANSPOSE_DEFAULTS, fname="transpose", method="both", max_fname_arg_count=0
)
def validate_window_func(name, args, kwargs):
numpy_args = ("axis", "dtype", "out")
msg = (
"numpy operations are not "
"valid with window objects. "
"Use .{func}() directly instead ".format(func=name)
)
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_rolling_func(name, args, kwargs):
numpy_args = ("axis", "dtype", "out")
msg = (
"numpy operations are not "
"valid with window objects. "
"Use .rolling(...).{func}() instead ".format(func=name)
)
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_expanding_func(name, args, kwargs):
numpy_args = ("axis", "dtype", "out")
msg = (
"numpy operations are not "
"valid with window objects. "
"Use .expanding(...).{func}() instead ".format(func=name)
)
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_groupby_func(name, args, kwargs, allowed=None):
"""
    'args' and 'kwargs' should be empty, except for allowed
    kwargs, because all of their necessary parameters are
    explicitly listed in the function signature
"""
if allowed is None:
allowed = []
kwargs = set(kwargs) - set(allowed)
if len(args) + len(kwargs) > 0:
raise UnsupportedFunctionCall(
(
"numpy operations are not valid "
"with groupby. Use .groupby(...)."
"{func}() instead".format(func=name)
)
)
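# Editorial sketch (hypothetical helper): extra numpy-style arguments are
# rejected for groupby reductions unless they appear in the caller-supplied
# allow-list; anything else raises UnsupportedFunctionCall.
def _demo_groupby_func_validation():
    validate_groupby_func("sum", (), {"dtype": "float64"}, allowed=["dtype"])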
RESAMPLER_NUMPY_OPS = ("min", "max", "sum", "prod", "mean", "std", "var")
def validate_resampler_func(method, args, kwargs):
"""
'args' and 'kwargs' should be empty because all of
their necessary parameters are explicitly listed in
the function signature
"""
if len(args) + len(kwargs) > 0:
if method in RESAMPLER_NUMPY_OPS:
raise UnsupportedFunctionCall(
(
"numpy operations are not valid "
"with resample. Use .resample(...)."
"{func}() instead".format(func=method)
)
)
else:
raise TypeError("too many arguments passed in")
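# Editorial sketch (hypothetical helper): with no extra numpy-style
# arguments the check is a no-op; passing any would raise and point the
# user at .resample(...).mean() instead.
def _demo_resampler_func_validation():
    validate_resampler_func("mean", (), {})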
def validate_minmax_axis(axis):
"""
Ensure that the axis argument passed to min, max, argmin, or argmax is
zero or None, as otherwise it will be incorrectly ignored.
Parameters
----------
axis : int or None
Raises
------
ValueError
"""
ndim = 1 # hard-coded for Index
if axis is None:
return
if axis >= ndim or (axis < 0 and ndim + axis < 0):
raise ValueError(
"`axis` must be fewer than the number of "
"dimensions ({ndim})".format(ndim=ndim)
)
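# Editorial sketch (hypothetical helper): for the 1-dimensional Index case
# hard-coded above, only axis values of None, 0 and -1 make sense; anything
# else would be silently ignored by numpy semantics, so it is rejected.
def _demo_minmax_axis_check():
    validate_minmax_axis(None)
    validate_minmax_axis(0)
    try:
        validate_minmax_axis(1)
    except ValueError:
        pass  # expected for a 1-dimensional index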
| bsd-3-clause |
Mecanon/morphing_wing | dynamic_model/results/Hartl_SMA/config_B/animation.py | 2 | 5192 | # -*- coding: utf-8 -*-
"""
Created on Tue May 24 10:50:05 2016
@author: Pedro Leal
"""
import math
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import xfoil_module as xf
import airfoil_module as af
from actuator import actuator
#plt.rcParams['animation.ffmpeg_path'] = 'C:\ffmpeg\bin'
Data = pickle.load(open( "data.p", "rb" ))
def plotter(i, database, s, l, x, y, J_x):
theta = database['theta'][i]
T = database['T'][i]
#plot SMA actuator
s.theta = theta
s.update()
image = plt.plot([s.x_n + s.x_J, s.x_n + s.x_J + s.r_1],
[s.y_n + s.y_J, s.y_n + s.y_J + s.r_2], 'r')
image.append(plt.scatter([s.x_n + s.x_J, s.x_n + s.x_J + s.r_1],
[s.y_n + s.y_J, s.y_n + s.y_J + s.r_2], c = 'r'))
image.append(plt.scatter([s.x_J],[s.y_J], c = 'r'))
#Plot linear actuator
l.theta = theta
l.update()
image2 = plt.plot([l.x_n + l.x_J, l.x_n + l.x_J + l.r_1],
[l.y_n + l.y_J, l.y_n + l.y_J + l.r_2], 'b')
image += image2
image.append(plt.scatter([l.x_n + l.x_J, l.x_n + l.x_J + l.r_1],
[l.y_n + l.y_J, l.y_n + l.y_J + l.r_2], c = 'b'))
image.append(plt.scatter([l.x_J],[l.y_J], c = 'g'))
image.append(plt.text(0, -0.16, 'T = %.0f K' % T))
image.append(plt.text(0, -0.19, r'$\theta$ = %.1f$^{\circ}$' % math.degrees(theta)))
#plot flap
image = plot_flap(x, y, J_x, theta = - theta, image = image)
return image
def plot_flap(x, y, x_J, theta, image):
"""
Plot flap with actuators. theta is clockwise positive.
@Author: Endryws (modified by Pedro Leal)
Created on Fri Mar 18 14:26:54 2016
"""
x_dict, y_dict = af.separate_upper_lower(x, y)
# Below I create the dictionarys used to pass to the function find_hinge
upper = {'x': x_dict['upper'], 'y': y_dict['upper']} # x and y upper points
lower = {'x': x_dict['lower'], 'y': y_dict['lower']} # x and y lower points
hinge = af.find_hinge(x_J, upper, lower)
#=======================================================================
# With the Joint (hinge) point, i can use the find flap function to
# found the points of the flap in the airfoil.
#=======================================================================
data = {'x': x, 'y': y}
static_data, flap_data = af.find_flap(data, hinge)
R = hinge['y_upper']
theta_list = np.linspace(3*math.pi/2, math.pi/2, 50)
x_circle_list = hinge['x'] + R*np.cos(theta_list)
y_circle_list = hinge['y'] + R*np.sin(theta_list)
n_upper = len(flap_data['x'])/2
# Ploting the flap in the original position
# plt.plot(flap_data['x'][:n_upper],flap_data['y'][:n_upper],'k--')
# plt.plot(flap_data['x'][n_upper:],flap_data['y'][n_upper:],'k--')
# Rotate and plot
upper = {'x': np.concatenate((flap_data['x'][:n_upper], x_circle_list)),
'y': np.concatenate((flap_data['y'][:n_upper], y_circle_list))}
lower = {'x':(flap_data['x'][n_upper:]),
'y':(flap_data['y'][n_upper:])}
rotated_upper, rotated_lower = af.rotate(upper, lower, hinge, theta,
unit_theta = 'rad')
image2 = plt.plot(static_data['x'], static_data['y'],'k')
image3 = plt.plot(rotated_upper['x'], rotated_upper['y'],'k')
image4 = plt.plot(rotated_lower['x'], rotated_lower['y'],'k')
image += image2 + image3 + image4
return image
def thickness(x, t, chord):
y = af.Naca00XX(chord, t, [x], return_dict = 'y')
thickness_at_x = y['u'] - y['l']
return thickness_at_x
#Positions
J = {'x':0.75, 'y':0.}
sma = {'x-': 7.407724e-001, 'y-': -3.680615e-001,
'x+': 9.933211e-001, 'y+': 6.004423e-001}
linear = {'x-': 7.290939e-001, 'y-': -7.584186e-001,
'x+': 7.550874e-001, 'y+': -4.011175e-001}
airfoil = "naca0012"
chord = 1.#0.6175
t = 0.12*chord
J = {'x':0.75, 'y':0.}
# need to transform normalized coordiantes in to global coordinates
sma['y+'] = sma['y+']*thickness(sma['x+'], t, chord)/2.
sma['y-'] = sma['y-']*thickness(sma['x-'], t, chord)/2.
linear['y+'] = linear['y+']*thickness(linear['x+'], t, chord)/2.
linear['y-'] = linear['y-']*thickness(linear['x-'], t, chord)/2.
#Generate x,y coordinates for flap plots
filename = xf.file_name(airfoil, output='Coordinates')
Data_xy = xf.output_reader(filename, output='Coordinates',
header = ['x','y'])
x = Data_xy['x']
y = Data_xy['y']
#Create actuators
eps_0 = Data['eps_s'][0]
s = actuator(sma, J, eps_0 = eps_0, material = 'SMA')
l = actuator(linear, J, zero_stress_length = 0.2023, material = 'linear')
fig = plt.figure()
fig.gca(xlim=[0,1], ylim=[-0.2,0.1])
fig.gca().set_aspect('equal', adjustable='box')
plt.grid()
plt.xlabel('${}_{I}x + x_J$')
plt.ylabel('${}_{I}y$')
ims = []
for i in range(len(Data['T'])):
ims.append(plotter(i, Data, s, l, x, y, J['x']))
im_ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=3000,
blit=True)
print "Compiling video"
im_ani.save('im.mp4',dpi=400)
plt.show()
| mit |
apur27/public | ASX-Python/numerai.py | 1 | 1703 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 13:55:04 2019
@author: UPuroAb
"""
import pandas as pd
import sklearn.linear_model
#import numerapi
# download the latest training dataset (takes around 30s)
training_data = pd.read_csv("https://numerai-public-datasets.s3-us-west-2.amazonaws.com/latest_numerai_training_data.csv.xz")
training_data.head()
# download the latest tournament dataset (takes around 30s)
tournament_data = pd.read_csv("https://numerai-public-datasets.s3-us-west-2.amazonaws.com/latest_numerai_tournament_data.csv.xz")
tournament_data.head()
# find only the feature columns
feature_cols = training_data.columns[training_data.columns.str.startswith('feature')]
# select those columns out of the training dataset
training_features = training_data[feature_cols]
# create a model and fit the training data (~30 sec to run)
model = sklearn.linear_model.LinearRegression()
model.fit(training_features, training_data.target_kazutsugi)
# select the feature columns from the tournament data
live_features = tournament_data[feature_cols]
# predict the target on the live features
predictions = model.predict(live_features)
# predictions must have an `id` column and a `prediction_kazutsugi` column
predictions_df = tournament_data["id"].to_frame()
predictions_df["prediction_kazutsugi"] = predictions
predictions_df.head()
## Get your API keys from https://numer.ai/submit
#public_id = "REPLACEME"
#secret_key = "REPLACEME"
#napi = numerapi.NumerAPI(public_id=public_id, secret_key=secret_key)
#
## Upload your predictions
#predictions_df.to_csv("predictions.csv", index=False)
#submission_id = napi.upload_predictions("predictions.csv") | artistic-2.0 |
selective-inference/selective-inference | doc/learning_examples/HIV/lambda_1se.py | 3 | 3975 | import functools
import numpy as np
import pandas as pd  # needed at module level: simulate() calls pd.merge
from scipy.stats import norm as ndist
import regreg.api as rr
# load in the X matrix
from selection.tests.instance import HIV_NRTI
X_full = HIV_NRTI(datafile="NRTI_DATA.txt", standardize=False)[0]
from selection.learning.utils import full_model_inference, liu_inference, pivot_plot
from selection.learning.core import split_sampler, keras_fit
from selection.learning.Rutils import lasso_glmnet, cv_glmnet_lam
boot_design = False
def simulate(s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=6000, seed=0):
# description of statistical problem
n, p = X_full.shape
if boot_design:
idx = np.random.choice(np.arange(n), n, replace=True)
X = X_full[idx] # bootstrap X to make it really an IID sample, i.e. don't condition on X throughout
X += 0.1 * np.std(X) * np.random.standard_normal(X.shape) # to make non-degenerate
else:
X = X_full.copy()
X = X - np.mean(X, 0)[None, :]
X = X / np.std(X, 0)[None, :]
n, p = X.shape
truth = np.zeros(p)
truth[:s] = np.linspace(signal[0], signal[1], s)
np.random.shuffle(truth)
truth /= np.sqrt(n)
truth *= sigma
y = X.dot(truth) + sigma * np.random.standard_normal(n)
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
print(dispersion, sigma**2)
splitting_sampler = split_sampler(X * y[:, None], covS)
def meta_algorithm(X, XTXi, resid, sampler):
S = sampler(scale=0.) # deterministic with scale=0
ynew = X.dot(XTXi).dot(S) + resid # will be ok for n>p and non-degen X
G = lasso_glmnet(X, ynew, *[None]*4)
select = G.select(seed=seed, CV=False)
return set(list(select[0]))
selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)
# run selection algorithm
df = full_model_inference(X,
y,
truth,
selection_algorithm,
splitting_sampler,
success_params=(1, 1),
B=B,
fit_probability=keras_fit,
fit_args={'epochs':10, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'})
if df is not None:
lam_min, lam_1se = cv_glmnet_lam(X.copy(), y.copy(), seed=seed)
lam_min, lam_1se = n * lam_min, n * lam_1se
liu_df = liu_inference(X,
y,
1.00001 * lam_1se,
dispersion,
truth,
alpha=alpha)
return pd.merge(df, liu_df, on='variable')
else:
return df
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
U = np.linspace(0, 1, 101)
plt.clf()
init_seed = np.fabs(np.random.standard_normal() * 500)
for i in range(500):
df = simulate(seed=init_seed+i)
csvfile = 'HIV_1se.csv'
outbase = csvfile[:-4]
if df is not None or i > 0:
try:
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
if df is not None:
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, lengths_ax = pivot_plot(df, outbase)
liu_pivot = df['liu_pivot']
liu_pivot = liu_pivot[~np.isnan(liu_pivot)]
pivot_ax.plot(U, sm.distributions.ECDF(liu_pivot)(U), 'gray', label='Liu CV',
linewidth=3)
pivot_ax.legend()
fig = pivot_ax.figure
fig.savefig(csvfile[:-4] + '.pdf')
| bsd-3-clause |
hrjn/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 67 | 7474 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
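As a quick worked instance of this bound (an editorial sanity check rather
than a new result): for ``n_samples = 500`` and ``eps = 0.1`` it evaluates to
.. math::
   4 log(500) / (0.1^2 / 2 - 0.1^3 / 3) \approx 5300
which is, up to integer truncation, what
``johnson_lindenstrauss_min_dim(n_samples=500, eps=0.1)`` returns.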
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increased logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows to reduce drastically the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousands dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
NicovincX2/Python-3.5 | Statistiques/Estimation (statistique)/Régression/svr_lin_poly_rbf.py | 1 | 1205 | # -*- coding: utf-8 -*-
import os
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
os.system("pause")
| gpl-3.0 |
droundy/deft | papers/fuzzy-fmt/figs/radial-argon-plots.py | 1 | 3093 | #!/usr/bin/python
from __future__ import division
# We need the following two lines in order for matplotlib to work
# without access to an X server.
import matplotlib, sys
if 'show' not in sys.argv:
matplotlib.use('Agg')
from pylab import *
sigma = 0.3405 #nm
sigma_over_R=2**(5/6)
rmaxplot = 4 # upper limit of our plots
figure(figsize=(18, 4.5))
subplot(1, 3, 1)
title('$n^*=0.8389$, $T^*=0.71$')
data = loadtxt('figs/YarnellArgon85K.dat')
n =0.02125 # Angstrom^-3
nsig_3 = n*(sigma*10)**3
plot(data[:, 0]/(sigma*10), data[:, 1], label='experiment')
data_mc1=loadtxt('figs/mc_testp_wca-0.8389-0.7100.dat.gradial')
plot((1/sigma_over_R)*data_mc1[:, 0], data_mc1[:, 1], '--', label='MC')
data_dft1 = loadtxt('figs/new-data/radial-lj-0.7100-0.84.dat')
plot(data_dft1[:, 0], data_dft1[:, 1]/nsig_3, label='DFT')
data_bh1 = loadtxt('figs/new-data/radial-bh-lj-0.7100-0.84.dat')
plot(data_bh1[:, 0], data_bh1[:, 1]/nsig_3, label='BH', ls = ':', color='k')
xlabel(r'$r/\sigma$')
xlim(0, rmaxplot)
ylim(0)
legend(loc='best')
subplot(1, 3, 2)
# title('$n^*=0.9570$, $T*=2.48$')
# data2 = loadtxt('figs/EggertArgon0.6GPaX.dat')
# n = 24.23 #atoms/nm^3
# nsig_3 = n*sigma**3
# plot(data2[:,0]/sigma,data2[:,1], label='experiment')
# data_mc2 = loadtxt('figs/mc_testp_wca-0.9570-2.4800.dat.gradial')
# plot((1/sigma_over_R)*data_mc2[:,0], data_mc2[:,1], '--', label='MC')
# data_dft2 = loadtxt('figs/new-data/radial-lj-2.4800-0.96.dat')
# plot(data_dft2[:,0],data_dft2[:,1]/nsig_3, label='DFT')
# xlabel(r'$r/\sigma$')
# xlim(0, rmaxplot)
# legend(loc='best')
title('$n^*=1.0950$, $T^*=2.48$')
data3 = loadtxt('figs/EggertArgon1.1GPaRAW.dat')
n = 27.74 #atoms/nm^3
nsig_3 = n*sigma**3
plot(data3[:, 0]/sigma, (data3[:, 1]), label='experiment')
data_mc3 = loadtxt('figs/mc_testp_wca-1.0950-2.4800.dat.gradial')
plot((1/sigma_over_R)*data_mc3[:, 0], data_mc3[:, 1], '--', label='MC')
data_dft3 = loadtxt('figs/new-data/radial-lj-2.4800-1.09.dat')
plot(data_dft3[:, 0], data_dft3[:, 1]/nsig_3, label='DFT')
data_bh3 = loadtxt('figs/new-data/radial-bh-lj-2.4800-1.09.dat')
plot(data_bh3[:, 0], data_bh3[:, 1]/nsig_3, label='BH', ls = ':', color='k')
xlabel(r'$r/\sigma$')
xlim(0, rmaxplot)
ylim(0)
legend(loc='best')
subplot(1, 3, 3)
title('$n^*=0.5488$, $T^*=1.235$')
data4 = loadtxt('figs/Mikolaj-X.dat')
#n = 27.74 #atoms/nm^3
#nsig_3 = n*sigma**3
plot(data4[:, 0]/(sigma*10), data4[:, 1], label='experiment')
data_mc5 = loadtxt('figs/mcfcc-0.5844-1.2350.dat.gradial')
plot((1/sigma_over_R)*data_mc5[:, 0], data_mc5[:, 1], '--', label='MC')
data_dft4 = loadtxt('figs/new-data/radial-lj-1.2350-0.58.dat')
plot(data_dft4[:, 0], data_dft4[:, 1]/0.5844, label='DFT')
data_bh4 = loadtxt('figs/new-data/radial-bh-lj-1.2350-0.58.dat')
plot(data_bh4[:, 0], data_bh4[:, 1]/0.5844, label='BH', ls = ':', color='k')
# data_dft6 = loadtxt('figs/radial-lj-1.2350-0.58.dat')
# plot((1/sigma_over_R)*data_dft6[:,0],data_dft6[:,1]/.58, '--')
xlabel(r'$r/\sigma$')
xlim(0, rmaxplot)
ylim(0, 3.5)
# axvline(1)
# axvline(2**(1.0/6.0))
legend(loc='best')
tight_layout()
savefig('figs/argon-plots.pdf')
show()
| gpl-2.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/indexes/base.py | 7 | 129163 | import datetime
import warnings
import operator
import numpy as np
import pandas.tslib as tslib
import pandas.lib as lib
import pandas._join as _join
import pandas.algos as _algos
import pandas.index as _index
from pandas.lib import Timestamp, Timedelta, is_datetime_array
from pandas.compat import range, u
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.types.generic import ABCSeries, ABCMultiIndex, ABCPeriodIndex
from pandas.types.missing import isnull, array_equivalent
from pandas.types.common import (_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_object_dtype,
is_categorical_dtype,
is_bool_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.types.cast import _coerce_indexer_dtype
from pandas.core.common import (is_bool_indexer,
_values_from_object,
_asarray_tuplesafe)
from pandas.core.base import (PandasObject, FrozenList, FrozenNDArray,
IndexOpsMixin)
import pandas.core.base as base
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate, deprecate_kwarg)
import pandas.core.common as com
import pandas.types.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
from pandas.formats.printing import pprint_thing
from pandas.core.ops import _comp_method_OBJECT_ARRAY
from pandas.core.strings import StringAccessorMixin
from pandas.core.config import get_option
# simplify
default_pprint = lambda x, max_seq_items=None: \
pprint_thing(x, escape_chars=('\t', '\r', '\n'), quote_strings=True,
max_seq_items=max_seq_items)
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, StringAccessorMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_arrmap = _algos.arrmap_object
_left_indexer_unique = _join.left_join_indexer_unique_object
_left_indexer = _join.left_join_indexer_object
_inner_indexer = _join.inner_join_indexer_object
_outer_indexer = _join.outer_join_indexer_object
_box_scalars = False
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_allow_index_ops = True
_allow_datetime_index_ops = False
_allow_period_index_ops = False
_is_numeric_dtype = False
_can_hold_na = True
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = _index.ObjectEngine
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, copy=copy, name=name, **kwargs)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.tseries.index import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.tseries.tdi import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
data = np.array(data, copy=copy, dtype=dtype)
elif inferred in ['floating', 'mixed-integer-float']:
# if we are actually all equal to integers
# then coerce to integer
from .numeric import Int64Index, Float64Index
try:
res = data.astype('i8')
if (res == data).all():
return Int64Index(res, copy=copy,
name=name)
except (TypeError, ValueError):
pass
# return an actual float index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError):
pass
# maybe coerce to a sub-class
from pandas.tseries.period import (PeriodIndex,
IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if issubclass(data.dtype.type, np.integer):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.floating):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = _asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
from .numeric import Int64Index
return Int64Index(subarr.astype('i8'), copy=copy,
name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'boolean':
                    # don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.tseries.index import DatetimeIndex
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except tslib.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.tseries.tdi import TimedeltaIndex
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if (tupleize_cols and isinstance(data, list) and data and
isinstance(data[0], tuple)):
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
try:
# must be orderable in py3
if compat.PY3:
sorted(data)
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
except (TypeError, KeyError):
# python2 - MultiIndex fails on mixed types
pass
# other iterable of some kind
subarr = _asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
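    A rough, indicative sketch of the difference (editorial addition; exact
    reprs and dtypes may vary slightly across versions):
    >>> idx = Index(['a', 'b'], name='x')
    >>> idx._shallow_copy().name            # same class, caller metadata kept
    'x'
    >>> idx._shallow_copy_with_infer([1, 2]).dtype.kind   # class re-inferred
    'i'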
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
we require the we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if values is None and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
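        Examples
        --------
        Indicative sketch (editorial addition): a deep copy is made only
        when ``copy=True`` and the data shares memory with ``orig``.
        >>> idx = Index([1, 2, 3])
        >>> idx._deepcopy_if_needed(idx._data, copy=False) is idx
        True
        >>> idx._deepcopy_if_needed(idx._data, copy=True) is idx
        False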
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
def get_values(self):
""" return the underlying data as an ndarray """
return self.values
# ops compat
def tolist(self):
"""
return a list of the Index values
"""
return list(self.values)
def repeat(self, n, *args, **kwargs):
"""
Repeat elements of an Index. Refer to `numpy.ndarray.repeat`
for more information about the `n` argument.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(n))
def where(self, cond, other=None):
"""
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean same length as self
other : scalar, or array-like
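        Examples
        --------
        A small indicative example (editorial addition; the exact repr may
        vary by version):
        >>> idx = Index(['car', 'bike', 'train'])
        >>> idx.where(idx != 'bike', 'other')
        Index(['car', 'other', 'train'], dtype='object')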
"""
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
return self._shallow_copy_with_infer(values, dtype=self.dtype)
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._values.ravel(order=order)
# construction helpers
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return dict([(k, getattr(self, k, None)) for k in self._attributes])
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
return Index([item], dtype=self.dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = kwargs.get('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
__copy__ = copy
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join([u("%s=%s") % (k, v) for k, v in attrs])
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatted data as a unicode string
"""
return default_pprint
def _format_data(self):
"""
Return the formatted data as a unicode string
"""
from pandas.formats.format import get_console_size, _get_adjustment
display_width, _ = get_console_size()
if display_width is None:
display_width = get_option('display.width') or 80
space1 = "\n%s" % (' ' * (len(self.__class__.__name__) + 1))
space2 = "\n%s" % (' ' * (len(self.__class__.__name__) + 2))
n = len(self)
sep = ','
max_seq_items = get_option('display.max_seq_items') or n
formatter = self._formatter_func
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
# are we a truncated display
is_truncated = n > max_seq_items
        # adj can optionally handle unicode East Asian width
adj = _get_adjustment()
def _extend_line(s, line, value, display_width, next_line_prefix):
if (adj.len(line.rstrip()) + adj.len(value.rstrip()) >=
display_width):
s += line.rstrip()
line = next_line_prefix
line += value
return s, line
def best_len(values):
if values:
return max([adj.len(x) for x in values])
else:
return 0
if n == 0:
summary = '[], '
elif n == 1:
first = formatter(self[0])
summary = '[%s], ' % first
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = '[%s, %s], ' % (first, last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
else:
head = []
tail = [formatter(x) for x in self]
# adjust all values to max length if needed
if is_justify:
# however, if we are not truncated and we are only a single
# line, then don't justify
if (is_truncated or
not (len(', '.join(head)) < display_width and
len(', '.join(tail)) < display_width)):
max_len = max(best_len(head), best_len(tail))
head = [x.rjust(max_len) for x in head]
tail = [x.rjust(max_len) for x in tail]
summary = ""
line = space2
for i in range(len(head)):
word = head[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
if is_truncated:
# remove trailing space of last line
summary += line.rstrip() + space2 + '...'
line = space2
for i in range(len(tail) - 1):
word = tail[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
# last value: no sep added + 1 space of width used for trailing ','
summary, line = _extend_line(summary, line, tail[-1],
display_width - 2, space2)
summary += line
summary += '],'
if len(summary) > (display_width):
summary += space1
else: # one row
summary += ' '
# remove initial space
summary = '[' + summary[len(space2):]
return summary
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
attrs = []
attrs.append(('dtype', "'%s'" % self.dtype))
if self.name is not None:
attrs.append(('name', default_pprint(self.name)))
max_seq_items = get_option('display.max_seq_items') or len(self)
if len(self) > max_seq_items:
attrs.append(('length', len(self)))
return attrs
def to_series(self, **kwargs):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
Returns
-------
Series : dtype will be based on the type of the Index values.
"""
from pandas import Series
return Series(self._to_embed(), index=self, name=self.name)
def _to_embed(self, keep_tz=False):
"""
*this is an internal non-public method*
return an array repr of this object, potentially casting to object
"""
return self.values.copy()
_index_shared_docs['astype'] = """
Create an Index with values cast to dtypes. The class of a new Index
is determined by dtype. When conversion is impossible, a ValueError
exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
.. versionadded:: 0.19.0
"""
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def to_datetime(self, dayfirst=False):
"""
DEPRECATED: use :meth:`pandas.to_datetime` instead.
For an Index containing strings or datetime.datetime objects, attempt
conversion to DatetimeIndex
"""
warnings.warn("to_datetime is deprecated. Use pd.to_datetime(...)",
FutureWarning, stacklevel=2)
from pandas.tseries.index import DatetimeIndex
if self.inferred_type == 'string':
from dateutil.parser import parse
parser = lambda x: parse(x, dayfirst=dayfirst)
parsed = lib.try_parse_dates(self.values, parser=parser)
return DatetimeIndex(parsed)
else:
return DatetimeIndex(self.values)
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not lib.isscalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.format(type(value).__name__))
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
len(values))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
if level is not None and self.nlevels == 1:
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
def reshape(self, *args, **kwargs):
"""
NOT IMPLEMENTED: do not call this method, as reshaping is not
supported for Index objects and will raise an error.
Reshape an Index.
"""
raise NotImplementedError("reshaping is not supported "
"for Index objects")
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def summary(self, name=None):
if len(self) > 0:
head = self[0]
if (hasattr(head, 'format') and
not isinstance(head, compat.string_types)):
head = head.format()
tail = self[-1]
if (hasattr(tail, 'format') and
not isinstance(tail, compat.string_types)):
tail = tail.format()
index_summary = ', %s to %s' % (pprint_thing(head),
pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self._engine.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
return self._engine.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly(allow_setting=True)
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
return self.inferred_type in ['categorical']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
# validate / convert indexers
def _convert_scalar_indexer(self, key, kind=None):
"""
convert a scalar indexer
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
        if len(self) and not isinstance(self, ABCMultiIndex):
# we can raise here if we are definitive that this
            # is positional indexing (e.g. .ix with a float)
# or label indexing if we are using a type able
# to be represented in the index
if kind in ['getitem', 'ix'] and is_float(key):
if not self.is_floating():
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_float(key):
# we want to raise KeyError on string/mixed here
# technically we *could* raise a TypeError
# on anything but mixed though
if self.inferred_type not in ['floating',
'mixed-integer-float',
'string',
'unicode',
'mixed']:
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_integer(key):
if not self.holds_integer():
return self._invalid_indexer('label', key)
return key
def _convert_slice_indexer(self, key, kind=None):
"""
convert a slice indexer. disallow floats in the start/stop/step
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if kind == 'iloc':
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if kind == 'getitem':
"""
called from the getitem slicers, validate that we are in fact
integers
"""
if self.is_integer() or is_index_slice:
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# TODO: i, j are not used anywhere
if start is not None:
i = self.get_loc(start) # noqa
if stop is not None:
j = self.get_loc(stop) # noqa
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step, kind=kind)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
def _convert_list_indexer(self, keyarr, kind=None):
"""
passed a key that is tuplesafe that is integer based
and we have a mixed index (e.g. number/labels). figure out
the indexer. return None if we can't help
"""
if (kind in [None, 'iloc', 'ix'] and
is_integer_dtype(keyarr) and not self.is_floating() and
not isinstance(keyarr, ABCPeriodIndex)):
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
return indexer
# missing values are flagged as -1 by get_indexer and negative
# indices are already converted to positive indices in the
# above if-statement, so the negative flags are changed to
# values outside the range of indices so as to trigger an
# IndexError in maybe_convert_indices
indexer[indexer < 0] = len(self)
from pandas.core.indexing import maybe_convert_indices
return maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)
return keyarr
return None
def _invalid_indexer(self, form, key):
""" consistent invalid indexer message """
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(
form=form, klass=type(self), key=key,
kind=type(key)))
def get_duplicates(self):
from collections import defaultdict
counter = defaultdict(lambda: 0)
for k in self.values:
counter[k] += 1
return sorted(k for k, v in compat.iteritems(counter) if v > 1)
_get_duplicates = get_duplicates
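    # Hedged example of the behaviour above (added for clarity): duplicated
    # values are reported once each, in sorted order; ``pd`` is assumed to be
    # the pandas namespace.
    #   >>> pd.Index([1, 2, 2, 3, 3, 3]).get_duplicates()
    #   [2, 3]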
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _constructor(self):
return type(self)
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self._values, len(self))
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level, ))
elif level > 0:
raise IndexError("Too many levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)' %
(level, self.name))
def _get_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def is_type_compatible(self, kind):
return kind == self.inferred_type
@cache_readonly
def is_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __iter__(self):
return iter(self.values)
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif isinstance(state, tuple):
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
def __contains__(self, key):
hash(key)
# work around some kind of odd cython bug
try:
return key in self._engine
except TypeError:
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if is_scalar(key):
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if is_bool_indexer(key):
key = np.asarray(key)
key = _values_from_object(key)
result = getitem(key)
if not is_scalar(result):
return promote(result)
else:
return result
def append(self, other):
"""
        Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if not isinstance(obj, Index):
raise TypeError('all inputs must be Index')
names = set([obj.name for obj in to_concat])
name = None if len(names) > 1 else self.name
if self.is_categorical():
# if calling index is category, don't check dtype of others
from pandas.indexes.category import CategoricalIndex
return CategoricalIndex._append_same_dtype(self, to_concat, name)
typs = _concat.get_dtype_kinds(to_concat)
if len(typs) == 1:
return self._append_same_dtype(to_concat, name=name)
return _concat._concat_index_asobject(to_concat, name=name)
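    # Rough usage sketch (illustrative only, approximate repr): appending
    # another Index simply concatenates the labels; duplicates are kept.
    #   >>> pd.Index([1, 2]).append(pd.Index([2, 3]))
    #   Int64Index([1, 2, 2, 3], dtype='int64')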
def _append_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
"""
        # must be overridden in specific classes
return _concat._concat_index_asobject(to_concat, name)
_index_shared_docs['take'] = """
return a new %(klass)s of the values selected by the indices
For internal compatibility with numpy arrays.
Parameters
----------
indices : list
Indices to be taken
axis : int, optional
The axis over which to select values, always 0.
allow_fill : bool, default True
fill_value : bool, default None
If allow_fill=True and fill_value is not None, indices specified by
            -1 are regarded as NA. If the Index doesn't hold NA, a ValueError is raised
See also
--------
numpy.ndarray.take
"""
@Appender(_index_shared_docs['take'])
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
if self._can_hold_na:
taken = self._assert_take_fillable(self.values, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=self._na_value)
else:
if allow_fill and fill_value is not None:
msg = 'Unable to fill values because {0} cannot contain NA'
raise ValueError(msg.format(self.__class__.__name__))
taken = self.values.take(indices)
return self._shallow_copy(taken)
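    # Minimal sketch of ``take`` (not part of the original source); the
    # positions are hypothetical example values and the repr is approximate.
    #   >>> pd.Index(['a', 'b', 'c']).take([2, 0])
    #   Index(['c', 'a'], dtype='object')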
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
""" Internal method to handle NA filling of take """
indices = _ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = values.take(indices)
mask = indices == -1
if mask.any():
taken[mask] = na_value
else:
taken = values.take(indices)
return taken
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
if self._can_hold_na:
return isnull(self)
else:
            # shouldn't reach this condition if hasnans is checked beforehand
values = np.empty(len(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def _nan_idxs(self):
if self._can_hold_na:
w, = self._isnan.nonzero()
return w
else:
return np.array([], dtype=np.int64)
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
if self._can_hold_na:
return self._isnan.any()
else:
return False
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
See also
--------
numpy.ndarray.putmask
"""
values = self.values.copy()
try:
np.putmask(values, mask, self._convert_for_op(value))
return self._shallow_copy(values)
except (ValueError, TypeError):
# coerces to object
return self.astype(object).putmask(mask, value)
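    # Illustrative example (added, approximate repr): positions where the
    # mask is True are replaced by ``value``; a new Index is returned.
    #   >>> pd.Index([1, 2, 3]).putmask([True, False, False], 9)
    #   Int64Index([9, 2, 3], dtype='int64')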
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
def _format_with_header(self, header, na_rep='NaN', **kwargs):
values = self.values
from pandas.formats.format import format_array
if is_categorical_dtype(values.dtype):
values = np.array(values)
elif is_object_dtype(values.dtype):
values = lib.maybe_convert_objects(values, safe=1)
if is_object_dtype(values.dtype):
result = [pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
# could have nans
mask = isnull(values)
if mask.any():
result = np.array(result)
result[mask] = na_rep
result = result.tolist()
else:
result = _trim_front(format_array(values, None, justify='left'))
return header + result
def to_native_types(self, slicer=None, **kwargs):
""" slice and dice then format """
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
mask = isnull(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if is_object_dtype(self) and not is_object_dtype(other):
# if other is not object, use other's logic for coercion
return other.equals(self)
try:
return array_equivalent(_values_from_object(self),
_values_from_object(other))
except:
return False
def identical(self, other):
"""Similar to equals, but check that other comparable attributes are
also equal
"""
return (self.equals(other) and
all((getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables)) and
type(self) == type(other))
def asof(self, label):
"""
For a sorted index, return the most recent label up to and including
the passed label. Return NaN if not found.
See also
--------
get_loc : asof is a thin wrapper around get_loc with method='pad'
"""
try:
loc = self.get_loc(label, method='pad')
except KeyError:
return _get_na_value(self.dtype)
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
return self[loc]
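    # Sketch of ``asof`` on a sorted index (illustrative values, not part of
    # the upstream file):
    #   >>> idx = pd.Index([10, 20, 30])
    #   >>> idx.asof(25)   # most recent label at or before 25
    #   20
    #   >>> idx.asof(5)    # nothing at or before 5 -> NA
    #   nan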
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where < self.values[first])] = -1
return result
def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
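    # Example sketch (added, output shown approximately): with
    # ``return_indexer=True`` the argsort positions come back alongside the
    # sorted index.
    #   >>> pd.Index([3, 1, 2]).sort_values(return_indexer=True)
    #   (Int64Index([1, 2, 3], dtype='int64'), array([1, 2, 0]))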
def order(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
DEPRECATED: use :meth:`Index.sort_values`
"""
warnings.warn("order is deprecated, use sort_values(...)",
FutureWarning, stacklevel=2)
return self.sort_values(return_indexer=return_indexer,
ascending=ascending)
def sort(self, *args, **kwargs):
raise TypeError("cannot sort an Index object in-place, use "
"sort_values instead")
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
        For internal compatibility with the Index API
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descending order
level, sort_remaining are compat parameters
Returns
-------
sorted_index : Index
"""
return self.sort_values(return_indexer=True, ascending=ascending)
def shift(self, periods=1, freq=None):
"""
Shift Index containing datetime objects by input number of periods and
DateOffset
Returns
-------
shifted : Index
"""
raise NotImplementedError("Not supported for type %s" %
type(self).__name__)
def argsort(self, *args, **kwargs):
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
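    # Quick illustration (added): argsort returns the positions that would
    # sort the labels, mirroring numpy.ndarray.argsort; repr is approximate.
    #   >>> pd.Index(['b', 'a', 'c']).argsort()
    #   array([1, 0, 2])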
def __add__(self, other):
return Index(np.array(self) + other)
def __radd__(self, other):
return Index(other + np.array(self))
__iadd__ = __add__
def __sub__(self, other):
raise TypeError("cannot perform __sub__ with this index type: "
"{typ}".format(typ=type(self)))
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def _get_consensus_name(self, other):
"""
Given 2 indexes, give a consensus name meaning
we take the not None one, or None if the names differ.
Return a new object if we are resetting the name
"""
if self.name != other.name:
if self.name is None or other.name is None:
name = self.name or other.name
else:
name = None
if self.name != name:
return self._shallow_copy(name=name)
return self
def union(self, other):
"""
Form the union of two Index objects and sorts if possible.
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if len(other) == 0 or self.equals(other):
return self._get_consensus_name(other)
if len(self) == 0:
return other._get_consensus_name(self)
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.union(other)
if self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(self._values, other._values)[0]
except TypeError:
# incomparable objects
result = list(self._values)
# worth making this faster? a very unusual case
value_set = set(self._values)
result.extend([x for x in other._values if x not in value_set])
else:
indexer = self.get_indexer(other)
indexer, = (indexer == -1).nonzero()
if len(indexer) > 0:
other_diff = algos.take_nd(other._values, indexer,
allow_fill=False)
result = _concat._concat_compat((self._values, other_diff))
try:
self._values[0] < other_diff[0]
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
else:
types = frozenset((self.inferred_type,
other.inferred_type))
if not types & _unsortable_types:
result.sort()
else:
result = self._values
try:
result = np.sort(result)
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
# for subclasses
return self._wrap_union_result(other, result)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self.__class__(result, name=name)
def intersection(self, other):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`.
Sortedness of the result is not guaranteed.
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Int64Index([3, 4], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if self.equals(other):
return self._get_consensus_name(other)
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.intersection(other)
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(self._values, other._values)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
indexer = Index(self._values).get_indexer(other._values)
indexer = indexer.take((indexer != -1).nonzero()[0])
except:
# duplicates
indexer = Index(self._values).get_indexer_non_unique(
other._values)[0].unique()
indexer = indexer[indexer != -1]
taken = self.take(indexer)
if self.name != other.name:
taken.name = None
return taken
def difference(self, other):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
Returns
-------
difference : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
"""
self._assert_can_do_setop(other)
if self.equals(other):
return Index([], name=self.name)
other, result_name = self._convert_can_do_setop(other)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer,
assume_unique=True)
the_diff = this.values.take(label_diff)
try:
the_diff = algos.safe_sort(the_diff)
except TypeError:
pass
return this._shallow_copy(the_diff, name=result_name, freq=None)
def symmetric_difference(self, other, result_name=None):
"""
Compute the symmetric difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
result_name : str
Returns
-------
symmetric_difference : Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> idx1 = Index([1, 2, 3, 4])
>>> idx2 = Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
self._assert_can_do_setop(other)
other, result_name_update = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_update
this = self._get_unique_index()
other = other._get_unique_index()
indexer = this.get_indexer(other)
# {this} minus {other}
common_indexer = indexer.take((indexer != -1).nonzero()[0])
left_indexer = np.setdiff1d(np.arange(this.size), common_indexer,
assume_unique=True)
left_diff = this.values.take(left_indexer)
# {other} minus {this}
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other.values.take(right_indexer)
the_diff = _concat._concat_compat([left_diff, right_diff])
try:
the_diff = algos.safe_sort(the_diff)
except TypeError:
pass
attribs = self._get_attributes_dict()
attribs['name'] = result_name
if 'freq' in attribs:
attribs['freq'] = None
return self._shallow_copy_with_infer(the_diff, **attribs)
sym_diff = deprecate('sym_diff', symmetric_difference)
def _get_unique_index(self, dropna=False):
"""
Returns an index containing unique values.
Parameters
----------
dropna : bool
If True, NaN values are dropped.
Returns
-------
uniques : index
"""
if self.is_unique and not dropna:
return self
values = self.values
if not self.is_unique:
values = self.unique()
if dropna:
try:
if self.hasnans:
values = values[~isnull(values)]
except NotImplementedError:
pass
return self._shallow_copy(values)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
tolerance : optional
Maximum distance from index value for inexact matches. The value of
            the index at the matching location must satisfy the equation
``abs(index[loc] - key) <= tolerance``.
.. versionadded:: 0.17.0
Returns
-------
loc : int if unique index, possibly slice or mask if not
"""
if method is None:
if tolerance is not None:
raise ValueError('tolerance argument only valid if using pad, '
'backfill or nearest lookups')
key = _values_from_object(key)
try:
return self._engine.get_loc(key)
except KeyError:
return self._engine.get_loc(self._maybe_cast_indexer(key))
indexer = self.get_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError('get_loc requires scalar valued input')
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
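    # Usage sketch (illustrative, approximate): an exact lookup vs. a 'pad'
    # (forward-fill) lookup on a monotonic index; values are hypothetical.
    #   >>> pd.Index(['a', 'b', 'c']).get_loc('b')
    #   1
    #   >>> pd.Index([10, 20, 30]).get_loc(25, method='pad')
    #   1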
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
# if we have something that is Index-like, then
# use this, e.g. DatetimeIndex
s = getattr(series, '_values', None)
if isinstance(s, Index) and is_scalar(key):
try:
return s[key]
except (IndexError, ValueError):
# invalid type as an indexer
pass
s = _values_from_object(series)
k = _values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
try:
return self._engine.get_value(s, k,
tz=getattr(series.dtype, 'tz', None))
except KeyError as e1:
if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
raise
try:
return tslib.get_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if is_scalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
self._engine.set_value(_values_from_object(arr),
_values_from_object(key), value)
def get_level_values(self, level):
"""
Return vector of label values for requested level, equal to the length
of the index
Parameters
----------
level : int
Returns
-------
values : ndarray
"""
# checks that level number is actually just 1
self._validate_index_level(level)
return self
def get_indexer(self, target, method=None, limit=None, tolerance=None):
"""
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : Index
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
.. versionadded:: 0.17.0
Examples
--------
>>> indexer = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
"""
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance)
pself, ptarget = self._possibly_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit,
tolerance=tolerance)
if not is_dtype_equal(self.dtype, target.dtype):
this = self.astype(object)
target = target.astype(object)
return this.get_indexer(target, method=method, limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise InvalidIndexError('Reindexing only valid with uniquely'
' valued Index objects')
if method == 'pad' or method == 'backfill':
indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == 'nearest':
indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
if tolerance is not None:
raise ValueError('tolerance argument only valid if doing pad, '
'backfill or nearest reindexing')
if limit is not None:
raise ValueError('limit argument only valid if doing pad, '
'backfill or nearest reindexing')
indexer = self._engine.get_indexer(target._values)
return _ensure_platform_int(indexer)
def _convert_tolerance(self, tolerance):
# override this method on subclasses
return tolerance
def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
if self.is_monotonic_increasing and target.is_monotonic_increasing:
method = (self._engine.get_pad_indexer if method == 'pad' else
self._engine.get_backfill_indexer)
indexer = method(target._values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method,
limit)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target._values, indexer,
tolerance)
return indexer
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets
"""
if limit is not None:
raise ValueError('limit argument for %r method only well-defined '
'if index and target are monotonic' % method)
side = 'left' if method == 'pad' else 'right'
target = np.asarray(target)
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = (indexer == -1)
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact],
side)
if side == 'left':
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also mapped not found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer
def _get_nearest_indexer(self, target, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.get_indexer(target, 'pad', limit=limit)
right_indexer = self.get_indexer(target, 'backfill', limit=limit)
target = np.asarray(target)
left_distances = abs(self.values[left_indexer] - target)
right_distances = abs(self.values[right_indexer] - target)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(op(left_distances, right_distances) |
(right_indexer == -1), left_indexer, right_indexer)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target, indexer,
tolerance)
return indexer
def _filter_indexer_tolerance(self, target, indexer, tolerance):
distance = abs(self.values[indexer] - target)
indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
def get_indexer_non_unique(self, target):
""" return an indexer suitable for taking from a non unique index
return the labels in the same order as the target, and
return a missing indexer into the target (missing are marked as -1
in the indexer); target must be an iterable """
target = _ensure_index(target)
pself, ptarget = self._possibly_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if self.is_all_dates:
self = Index(self.asi8)
tgt_values = target.asi8
else:
tgt_values = target._values
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return Index(indexer), missing
def get_indexer_for(self, target, **kwargs):
""" guaranteed return of an indexer even when non-unique """
if self.is_unique:
return self.get_indexer(target, **kwargs)
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer
def _possibly_promote(self, other):
# A hack, but it works
from pandas.tseries.index import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
return DatetimeIndex(self), other
elif self.inferred_type == 'boolean':
if not is_object_dtype(self.dtype):
return self.astype('object'), other.astype('object')
return self, other
def groupby(self, values):
"""
Group the index labels by a given array of values.
Parameters
----------
values : array
Values used to determine the groups.
Returns
-------
groups : dict
{group name -> group labels}
"""
# TODO: if we are a MultiIndex, we can do better
        # than converting to tuples
from .multi import MultiIndex
if isinstance(values, MultiIndex):
values = values.values
values = _ensure_categorical(values)
result = values._reverse_indexer()
# map to the label
result = {k: self.take(v) for k, v in compat.iteritems(result)}
return result
def map(self, mapper):
"""
Apply mapper function to its values.
Parameters
----------
mapper : callable
Function to be applied.
Returns
-------
applied : array
"""
return self._arrmap(self.values, mapper)
def isin(self, values, level=None):
"""
Compute boolean array of whether each index value is found in the
passed set of values.
Parameters
----------
values : set or list-like
Sought values.
.. versionadded:: 0.18.1
Support for values as a set
level : str or int, optional
Name or position of the index level to use (if the index is a
MultiIndex).
Notes
-----
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Returns
-------
is_contained : ndarray (boolean dtype)
"""
if level is not None:
self._validate_index_level(level)
return algos.isin(np.array(self), values)
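    # Small sketch (added for clarity, output approximate): the result is a
    # boolean ndarray aligned with the index, not an Index.
    #   >>> pd.Index([1, 2, 3]).isin([1, 4])
    #   array([ True, False, False], dtype=bool)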
def _can_reindex(self, indexer):
"""
*this is an internal non-public method*
Check if we are allowing reindexing with this particular indexer
Parameters
----------
indexer : an integer indexer
Raises
------
ValueError if its a duplicate axis
"""
# trying to reindex on an axis with duplicates
if not self.is_unique and len(indexer):
raise ValueError("cannot reindex from a duplicate axis")
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'name')
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = self._simple_new(None, dtype=self.dtype, **attrs)
else:
target = _ensure_index(target)
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
_, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
if method is not None or limit is not None:
raise ValueError("cannot reindex a non-unique index "
"with a method or limit")
indexer, missing = self.get_indexer_non_unique(target)
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy()
target.name = self.name
return target, indexer
def _reindex_non_unique(self, target):
"""
*this is an internal non-public method*
Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
target = _ensure_index(target)
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
l = np.arange(len(indexer))
missing = _ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = _ensure_int64(l[~check])
cur_labels = self.take(indexer[check]).values
cur_indexer = _ensure_int64(l[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# a unique indexer
if target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer = indexer.values
indexer[~check] = 0
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
new_index = self._shallow_copy_with_infer(new_labels, freq=None)
return new_index, indexer, new_indexer
def join(self, other, how='left', level=None, return_indexers=False):
"""
*this is an internal non-public method*
Compute join_index and indexers to conform data
structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : boolean, default False
Returns
-------
join_index, (left_indexer, right_indexer)
"""
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# try to figure out the join level
# GH3662
if level is None and (self_is_mi or other_is_mi):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how,
return_indexers=return_indexers)
# join on the level
if level is not None and (self_is_mi or other_is_mi):
return self._join_level(other, level, how=how,
return_indexers=return_indexers)
other = _ensure_index(other)
if len(other) == 0 and how in ('left', 'outer'):
join_index = self._shallow_copy()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
return join_index, None, rindexer
else:
return join_index
if len(self) == 0 and how in ('right', 'outer'):
join_index = other._shallow_copy()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
return join_index, lindexer, None
else:
return join_index
if self._join_precedence < other._join_precedence:
how = {'right': 'left', 'left': 'right'}.get(how, how)
result = other.join(self, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
x, y, z = result
result = x, z, y
return result
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.join(other, how=how, return_indexers=return_indexers)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic and other.is_monotonic:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
else:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif self.is_monotonic and other.is_monotonic:
try:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
except TypeError:
pass
if how == 'left':
join_index = self
elif how == 'right':
join_index = other
elif how == 'inner':
join_index = self.intersection(other)
elif how == 'outer':
join_index = self.union(other)
if return_indexers:
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
return join_index, lindexer, rindexer
else:
return join_index
def _join_multi(self, other, how, return_indexers=True):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# figure out join names
self_names = [n for n in self.names if n is not None]
other_names = [n for n in other.names if n is not None]
overlap = list(set(self_names) & set(other_names))
# need at least 1 in common, but not more than 1
if not len(overlap):
raise ValueError("cannot join with no level specified and no "
"overlapping names")
if len(overlap) > 1:
raise NotImplementedError("merging with more than one level "
"overlap on a multi-index is not "
"implemented")
jl = overlap[0]
# make the indices into mi's that match
if not (self_is_mi and other_is_mi):
flip_order = False
if self_is_mi:
self, other = other, self
flip_order = True
# flip if join method is right or left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = other.names.index(jl)
result = self._join_level(other, level, how=how,
return_indexers=return_indexers)
if flip_order:
if isinstance(result, tuple):
return result[0], result[2], result[1]
return result
# 2 multi-indexes
raise NotImplementedError("merging with both multi-indexes is not "
"implemented")
def _join_non_unique(self, other, how='left', return_indexers=False):
from pandas.tools.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers([self.values],
[other._values], how=how,
sort=True)
left_idx = _ensure_platform_int(left_idx)
right_idx = _ensure_platform_int(right_idx)
join_index = self.values.take(left_idx)
mask = left_idx == -1
np.putmask(join_index, mask, other._values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
def _join_level(self, other, level, how='left', return_indexers=False,
keep_order=True):
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex. If `keep_order` == True, the
order of the data indexed by the MultiIndex will not be changed;
otherwise, it will tie out with `other`.
"""
from pandas.algos import groupsort_indexer
from .multi import MultiIndex
def _get_leaf_sorter(labels):
"""
            returns sorter for the innermost level while preserving the
order of higher levels
"""
if labels[0].size == 0:
return np.empty(0, dtype='int64')
if len(labels) == 1:
lab = _ensure_int64(labels[0])
sorter, _ = groupsort_indexer(lab, 1 + lab.max())
return sorter
            # find indexers of beginning of each set of
# same-key labels w.r.t all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = _ensure_int64(labels[-1])
return lib.get_level_sorter(lab, _ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError('Join on level between two MultiIndex objects '
'is ambiguous')
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError('Index._join_level on non-unique index '
'is not implemented')
new_level, left_lev_indexer, right_lev_indexer = \
old_level.join(right, how=how, return_indexers=True)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.labels[:level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = _ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer,
len(old_level))
new_lev_labels = algos.take_nd(rev_indexer, left.labels[level],
allow_fill=False)
new_labels = list(left.labels)
new_labels[level] = new_lev_labels
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order: # just drop missing values. o.w. keep order
left_indexer = np.arange(len(left), dtype=np.intp)
mask = new_lev_labels != -1
if not mask.all():
new_labels = [lab[mask] for lab in new_labels]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0: # outer most level, take the fast route
ngroups = 1 + new_lev_labels.max()
left_indexer, counts = groupsort_indexer(new_lev_labels,
ngroups)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0]:]
new_labels = [lab[left_indexer] for lab in new_labels]
else: # sort the leaves
mask = new_lev_labels != -1
mask_all = mask.all()
if not mask_all:
new_labels = [lab[mask] for lab in new_labels]
left_indexer = _get_leaf_sorter(new_labels[:level + 1])
new_labels = [lab[left_indexer] for lab in new_labels]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(levels=new_levels, labels=new_labels,
names=left.names, verify_integrity=False)
if right_lev_indexer is not None:
right_indexer = algos.take_nd(right_lev_indexer,
join_index.labels[level],
allow_fill=False)
else:
right_indexer = join_index.labels[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
if return_indexers:
left_indexer = (None if left_indexer is None
else _ensure_platform_int(left_indexer))
right_indexer = (None if right_indexer is None
else _ensure_platform_int(right_indexer))
return join_index, left_indexer, right_indexer
else:
return join_index
def _join_monotonic(self, other, how='left', return_indexers=False):
if self.equals(other):
ret_index = other if how == 'right' else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self._values
ov = other._values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == 'left':
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == 'right':
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == 'left':
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == 'right':
join_index, ridx, lidx = self._left_indexer(ov, sv)
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
lidx = None if lidx is None else _ensure_platform_int(lidx)
ridx = None if ridx is None else _ensure_platform_int(ridx)
return join_index, lidx, ridx
else:
return join_index
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
# this is for partial string indexing,
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
For an ordered Index, compute the slice indexer for input labels and
step
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, default None
kind : string, default None
Returns
-------
indexer : ndarray or slice
Notes
-----
This function assumes that the data is sorted, so use at your own peril
"""
start_slice, end_slice = self.slice_locs(start, end, step=step,
kind=kind)
# return a slice
if not is_scalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not is_scalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index
then try to cast to an int if equivalent
"""
if is_float(key) and not self.is_floating():
try:
ckey = int(key)
if ckey == key:
key = ckey
except (ValueError, TypeError):
pass
return key
def _validate_indexer(self, form, key, kind):
"""
if we are positional indexer
validate that we have appropriate typed bounds
must be an integer
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc']
if key is None:
pass
elif is_integer(key):
pass
elif kind in ['iloc', 'getitem']:
self._invalid_indexer(form, key)
return key
def _maybe_cast_slice_bound(self, label, side, kind):
"""
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ['ix', 'loc', 'getitem', None]
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. Float64Index and
# datetimelike Indexes
# reject them
if is_float(label):
if not (kind in ['ix'] and (self.holds_integer() or
self.is_floating())):
self._invalid_indexer('slice', label)
# we are trying to find integer bounds on a non-integer based index
# this is rejected (generally .loc gets you here)
elif is_integer(label):
self._invalid_indexer('slice', label)
return label
def _searchsorted_monotonic(self, label, side='left'):
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
            pos = self[::-1].searchsorted(label, side='right' if side == 'left'
                                          else 'left')
return len(self) - pos
raise ValueError('index must be monotonic increasing or decreasing')
def get_slice_bound(self, label, side, kind):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
"""
assert kind in ['ix', 'loc', 'getitem', None]
if side not in ('left', 'right'):
raise ValueError("Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" %
(side, ))
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side, kind)
# we need to look up the label
try:
slc = self.get_loc(label)
except KeyError as err:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array or an array of indices, which
# is OK as long as they are representable by a slice.
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view('u1'))
else:
slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self))
if isinstance(slc, np.ndarray):
raise KeyError("Cannot get %s slice bound for non-unique "
"label: %r" % (side, original_label))
if isinstance(slc, slice):
if side == 'left':
return slc.start
else:
return slc.stop
else:
if side == 'right':
return slc + 1
else:
return slc
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
        step : int, default None
If None, defaults to 1
kind : {'ix', 'loc', 'getitem'} or None
Returns
-------
start, end : int
"""
inc = (step is None or step >= 0)
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, 'left', kind)
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, 'right', kind)
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one, subtracting len(self)
# compensates that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._shallow_copy(np.delete(self._data, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
_self = np.asarray(self)
item = self._coerce_scalar_to_index(item)._values
idx = np.concatenate((_self[:loc], item, _self[loc:]))
return self._shallow_copy_with_infer(idx)
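    # Illustrative sketch (added): ``insert`` is not in-place; it returns a
    # new Index with the item spliced in at ``loc``.
    #   >>> pd.Index([1, 2, 4]).insert(2, 3)
    #   Int64Index([1, 2, 3, 4], dtype='int64')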
def drop(self, labels, errors='raise'):
"""
Make new Index with passed list of labels deleted
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
"""
labels = com._index_labels_to_array(labels)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise ValueError('labels %s not contained in axis' %
labels[mask])
indexer = indexer[~mask]
return self.delete(indexer)
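    # Hedged example (added, approximate reprs): missing labels raise unless
    # errors='ignore' is passed.
    #   >>> idx = pd.Index(['a', 'b', 'c'])
    #   >>> idx.drop(['b'])
    #   Index(['a', 'c'], dtype='object')
    #   >>> idx.drop(['d'], errors='ignore')
    #   Index(['a', 'b', 'c'], dtype='object')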
@Appender(base._shared_docs['unique'] % _index_doc_kwargs)
def unique(self):
result = super(Index, self).unique()
return self._shallow_copy(result)
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
@Appender(base._shared_docs['drop_duplicates'] % _index_doc_kwargs)
def drop_duplicates(self, keep='first'):
return super(Index, self).drop_duplicates(keep=keep)
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
@Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
return super(Index, self).duplicated(keep=keep)
_index_shared_docs['fillna'] = """
Fill NA/NaN values with the specified value
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-likes.
downcast : dict, default is None
a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
Returns
-------
filled : %(klass)s
"""
@Appender(_index_shared_docs['fillna'])
def fillna(self, value=None, downcast=None):
self._assert_can_do_op(value)
if self.hasnans:
result = self.putmask(self._isnan, value)
if downcast is None:
                # no need to care about metadata other than name
                # because the index can't have freq if it has NaNs
return Index(result, name=self.name)
return self._shallow_copy()
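    # Rough sketch (illustrative values, assumes ``np`` is numpy): scalar
    # fill of missing entries returns a new Index.
    #   >>> pd.Index([1.0, np.nan, 3.0]).fillna(2.0)
    #   Float64Index([1.0, 2.0, 3.0], dtype='float64')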
_index_shared_docs['dropna'] = """
Return Index without NA/NaN values
Parameters
----------
how : {'any', 'all'}, default 'any'
If the Index is a MultiIndex, drop the value when any or all levels
are NaN.
Returns
-------
valid : Index
"""
@Appender(_index_shared_docs['dropna'])
def dropna(self, how='any'):
if how not in ('any', 'all'):
raise ValueError("invalid how option: {0}".format(how))
if self.hasnans:
return self._shallow_copy(self.values[~self._isnan])
return self._shallow_copy()
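    # Companion sketch to fillna (added): dropping instead of filling.
    #   >>> pd.Index([1.0, np.nan, 3.0]).dropna()
    #   Float64Index([1.0, 3.0], dtype='float64')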
def _evaluate_with_timedelta_like(self, other, op, opstr):
raise TypeError("can only perform ops with timedelta like values")
def _evaluate_with_datetime_like(self, other, op, opstr):
raise TypeError("can only perform ops with datetime like values")
def _evalute_compare(self, op):
raise base.AbstractMethodError(self)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
def _make_compare(op):
def _evaluate_compare(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = _comp_method_OBJECT_ARRAY(
op, self.values, other)
else:
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
return _evaluate_compare
cls.__eq__ = _make_compare(operator.eq)
cls.__ne__ = _make_compare(operator.ne)
cls.__lt__ = _make_compare(operator.lt)
cls.__gt__ = _make_compare(operator.gt)
cls.__le__ = _make_compare(operator.le)
cls.__ge__ = _make_compare(operator.ge)
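        # Illustrative note (added): the hooks above give element-wise
        # comparisons returning a boolean array, e.g.
        #   Index([1, 2, 3]) > 2 -> array([False, False, True])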
@classmethod
def _add_numeric_methods_add_sub_disabled(cls):
""" add in the numeric add/sub methods to disable """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self)))
invalid_op.__name__ = name
return invalid_op
cls.__add__ = cls.__radd__ = __iadd__ = _make_invalid_op('__add__') # noqa
cls.__sub__ = __isub__ = _make_invalid_op('__sub__') # noqa
@classmethod
def _add_numeric_methods_disabled(cls):
""" add in numeric methods to disable other than add/sub """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self)))
invalid_op.__name__ = name
return invalid_op
cls.__pow__ = cls.__rpow__ = _make_invalid_op('__pow__')
cls.__mul__ = cls.__rmul__ = _make_invalid_op('__mul__')
cls.__floordiv__ = cls.__rfloordiv__ = _make_invalid_op('__floordiv__')
cls.__truediv__ = cls.__rtruediv__ = _make_invalid_op('__truediv__')
if not compat.PY3:
cls.__div__ = cls.__rdiv__ = _make_invalid_op('__div__')
cls.__neg__ = _make_invalid_op('__neg__')
cls.__pos__ = _make_invalid_op('__pos__')
cls.__abs__ = _make_invalid_op('__abs__')
cls.__inv__ = _make_invalid_op('__inv__')
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
return attrs
def _validate_for_numeric_unaryop(self, op, opstr):
""" validate if we can perform a numeric unary operation """
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op "
"{opstr} for type: {typ}".format(
opstr=opstr,
typ=type(self))
)
def _validate_for_numeric_binop(self, other, op, opstr):
"""
return valid other, evaluate or raise TypeError
if we are not of the appropriate type
internal method called by ops
"""
from pandas.tseries.offsets import DateOffset
# if we are an inheritor of numeric,
        # but not actually numeric (e.g. DatetimeIndex/PeriodIndex)
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} "
"for type: {typ}".format(
opstr=opstr,
typ=type(self))
)
if isinstance(other, Index):
if not other._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op "
"{opstr} with type: {typ}".format(
                                    opstr=opstr,
typ=type(other))
)
elif isinstance(other, np.ndarray) and not other.ndim:
other = other.item()
if isinstance(other, (Index, ABCSeries, np.ndarray)):
if len(self) != len(other):
raise ValueError("cannot evaluate a numeric op with "
"unequal lengths")
other = _values_from_object(other)
if other.dtype.kind not in ['f', 'i']:
raise TypeError("cannot evaluate a numeric op "
"with a non-numeric dtype")
elif isinstance(other, (DateOffset, np.timedelta64,
Timedelta, datetime.timedelta)):
# higher up to handle
pass
elif isinstance(other, (Timestamp, np.datetime64)):
# higher up to handle
pass
else:
if not (is_float(other) or is_integer(other)):
raise TypeError("can only perform ops with scalar values")
return other
@classmethod
def _add_numeric_methods_binary(cls):
""" add in numeric methods """
def _make_evaluate_binop(op, opstr, reversed=False, constructor=Index):
def _evaluate_numeric_binop(self, other):
from pandas.tseries.offsets import DateOffset
other = self._validate_for_numeric_binop(other, op, opstr)
# handle time-based others
if isinstance(other, (DateOffset, np.timedelta64,
Timedelta, datetime.timedelta)):
return self._evaluate_with_timedelta_like(other, op, opstr)
elif isinstance(other, (Timestamp, np.datetime64)):
return self._evaluate_with_datetime_like(other, op, opstr)
                # if we are a reversed non-commutative op
values = self.values
if reversed:
values, other = other, values
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
with np.errstate(all='ignore'):
result = op(values, other)
return constructor(result, **attrs)
return _evaluate_numeric_binop
cls.__add__ = cls.__radd__ = _make_evaluate_binop(
operator.add, '__add__')
cls.__sub__ = _make_evaluate_binop(
operator.sub, '__sub__')
cls.__rsub__ = _make_evaluate_binop(
operator.sub, '__sub__', reversed=True)
cls.__mul__ = cls.__rmul__ = _make_evaluate_binop(
operator.mul, '__mul__')
cls.__pow__ = cls.__rpow__ = _make_evaluate_binop(
operator.pow, '__pow__')
cls.__mod__ = _make_evaluate_binop(
operator.mod, '__mod__')
cls.__floordiv__ = _make_evaluate_binop(
operator.floordiv, '__floordiv__')
cls.__rfloordiv__ = _make_evaluate_binop(
operator.floordiv, '__floordiv__', reversed=True)
cls.__truediv__ = _make_evaluate_binop(
operator.truediv, '__truediv__')
cls.__rtruediv__ = _make_evaluate_binop(
operator.truediv, '__truediv__', reversed=True)
if not compat.PY3:
cls.__div__ = _make_evaluate_binop(
operator.div, '__div__')
cls.__rdiv__ = _make_evaluate_binop(
operator.div, '__div__', reversed=True)
cls.__divmod__ = _make_evaluate_binop(
divmod,
'__divmod__',
constructor=lambda result, **attrs: (
Index(result[0], **attrs),
Index(result[1], **attrs),
),
)
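        # Illustrative note (added): for a numeric index these hooks provide
        # element-wise arithmetic, e.g. Index([1, 2, 3]) + 1 -> Int64Index([2, 3, 4]);
        # non-numeric indexes raise TypeError via _validate_for_numeric_binop.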
@classmethod
def _add_numeric_methods_unary(cls):
""" add in numeric unary methods """
def _make_evaluate_unary(op, opstr):
def _evaluate_numeric_unary(self):
self._validate_for_numeric_unaryop(op, opstr)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(op(self.values), **attrs)
return _evaluate_numeric_unary
cls.__neg__ = _make_evaluate_unary(lambda x: -x, '__neg__')
cls.__pos__ = _make_evaluate_unary(lambda x: x, '__pos__')
cls.__abs__ = _make_evaluate_unary(np.abs, '__abs__')
cls.__inv__ = _make_evaluate_unary(lambda x: -x, '__inv__')
@classmethod
def _add_numeric_methods(cls):
cls._add_numeric_methods_unary()
cls._add_numeric_methods_binary()
@classmethod
def _add_logical_methods(cls):
""" add in logical methods """
_doc = """
%(desc)s
Parameters
----------
All arguments to numpy.%(outname)s are accepted.
Returns
-------
%(outname)s : bool or array_like (if axis is specified)
A single element array_like may be converted to bool."""
def _make_logical_function(name, desc, f):
@Substitution(outname=name, desc=desc)
@Appender(_doc)
def logical_func(self, *args, **kwargs):
result = f(self.values)
if (isinstance(result, (np.ndarray, ABCSeries, Index)) and
result.ndim == 0):
# return NumPy type
return result.dtype.type(result.item())
else: # pragma: no cover
return result
logical_func.__name__ = name
return logical_func
cls.all = _make_logical_function('all', 'Return whether all elements '
'are True',
np.all)
cls.any = _make_logical_function('any',
'Return whether any element is True',
np.any)
@classmethod
def _add_logical_methods_disabled(cls):
""" add in logical methods to disable """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self)))
invalid_op.__name__ = name
return invalid_op
cls.all = _make_invalid_op('all')
cls.any = _make_invalid_op('any')
Index._add_numeric_methods_disabled()
Index._add_logical_methods()
Index._add_comparison_methods()
def _ensure_index(index_like, copy=False):
if isinstance(index_like, Index):
if copy:
index_like = index_like.copy()
return index_like
if hasattr(index_like, 'name'):
return Index(index_like, name=index_like.name, copy=copy)
# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
if type(index_like) != list:
index_like = list(index_like)
# 2200 ?
converted, all_arrays = lib.clean_index_list(index_like)
if len(converted) > 0 and all_arrays:
from .multi import MultiIndex
return MultiIndex.from_arrays(converted)
else:
index_like = converted
else:
# clean_index_list does the equivalent of copying
# so only need to do this if not list instance
if copy:
from copy import copy
index_like = copy(index_like)
return Index(index_like)
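# Illustrative note (added): _ensure_index passes Index objects through, wraps
# other list-likes in an Index, and promotes a list of equal-length arrays to a
# MultiIndex, e.g. _ensure_index([['a', 'b'], [1, 2]]) should yield a MultiIndex.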
def _get_na_value(dtype):
return {np.datetime64: tslib.NaT,
np.timedelta64: tslib.NaT}.get(dtype, np.nan)
def _ensure_frozen(array_like, categories, copy=False):
array_like = _coerce_indexer_dtype(array_like, categories)
array_like = array_like.view(FrozenNDArray)
if copy:
array_like = array_like.copy()
return array_like
def _ensure_has_len(seq):
"""If seq is an iterator, put its values into a list."""
try:
len(seq)
except TypeError:
return list(seq)
else:
return seq
def _trim_front(strings):
"""
    Trims leading spaces shared by all strings (left over from trimmed zeros
    and decimal points)
"""
trimmed = strings
while len(strings) > 0 and all([x[0] == ' ' for x in trimmed]):
trimmed = [x[1:] for x in trimmed]
return trimmed
def _validate_join_method(method):
if method not in ['left', 'right', 'inner', 'outer']:
raise ValueError('do not recognize join method %s' % method)
| apache-2.0 |
parenthetical-e/modelmodel | bin/conjunction.py | 1 | 1817 | """Run conjunction tests on csv tables of stats and p-values."""
import os
import argparse
import pandas as pd
import numpy as np
from modelmodel.stats import conjunction
parser = argparse.ArgumentParser(
description="Run conjunction tests on csv tables of stats.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"name", type=str,
help="Name of file"
)
parser.add_argument(
"stat_values", type=str,
help="Name of statistics file"
)
parser.add_argument(
"p_values", type=str, default=None,
help="Name of p-values file"
)
parser.add_argument(
"--missing_value", type=int, default=-999999,
help="Missing value code",
)
parser.add_argument(
"--drop", type=int, default=None,
help="Drop this column name from the analysis",
)
args = parser.parse_args()
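# Example invocation (added sketch; the file names are placeholders):
#   python conjunction.py conj_results.csv stats.csv pvalues.csv --drop 0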
stats = pd.read_csv(args.stat_values)
ps = pd.read_csv(args.p_values)
if args.drop is not None:
stats = stats.drop([stats.columns[args.drop]], 1)
ps = ps.drop([ps.columns[args.drop]], 1)
conjs = []
p_conjs = []
paths = []
for i in range(stats.shape[0]):
spath = stats.ix[i,-1]
sdata = stats.ix[i,0:-1].values
sdata = sdata[sdata != args.missing_value]
ppath = ps.ix[i,-1]
pdata = ps.ix[i,0:-1].values
pdata = pdata[pdata != args.missing_value]
if sdata.shape[0] != pdata.shape[0]:
raise ValueError("Row lengths don't match ({0})".format(i))
stat, p = conjunction(sdata, pdata)
conjs.append(stat)
p_conjs.append(p)
paths.append(spath)
df = pd.DataFrame(
data=np.vstack([conjs, p_conjs]).transpose(),
columns=['value', 'p']
)
df['path'] = paths
df.to_csv(args.name, index=False, float_format='%.8f')
| bsd-2-clause |
harisbal/pandas | pandas/tests/indexes/timedeltas/test_astype.py | 1 | 2923 | from datetime import timedelta
import numpy as np
import pytest
import pandas.util.testing as tm
from pandas import (
Float64Index, Index, Int64Index, NaT, Timedelta, TimedeltaIndex,
timedelta_range
)
class TestTimedeltaIndex(object):
def test_astype_object(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
result = idx.astype(object)
expected = Index(expected_list, dtype=object, name='idx')
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype_object_with_nat(self):
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), NaT,
Timedelta('4 days')]
result = idx.astype(object)
expected = Index(expected_list, dtype=object, name='idx')
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timedelta('1 days 03:46:40')] + [NaT] * 3,
dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([100000000000000] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
result = idx.astype(str)
expected = Index(str(x) for x in idx)
tm.assert_index_equal(result, expected)
rng = timedelta_range('1 days', periods=10)
result = rng.astype('i8')
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(rng.asi8, result.values)
def test_astype_timedelta64(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])
result = idx.astype('timedelta64')
expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64')
tm.assert_index_equal(result, expected)
result = idx.astype('timedelta64[ns]')
tm.assert_index_equal(result, idx)
assert result is not idx
result = idx.astype('timedelta64[ns]', copy=False)
tm.assert_index_equal(result, idx)
assert result is idx
@pytest.mark.parametrize('dtype', [
float, 'datetime64', 'datetime64[ns]'])
def test_astype_raises(self, dtype):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])
msg = 'Cannot cast TimedeltaIndex to dtype'
with tm.assert_raises_regex(TypeError, msg):
idx.astype(dtype)
| bsd-3-clause |
xiaoxiamii/scikit-learn | sklearn/ensemble/tests/test_forest.py | 48 | 39224 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(X, y, name, criterion):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = est.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0,
criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0,
criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, X, y, name, criterion
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, X, y, name, criterion
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
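    # Added sanity note: 1/3 + 4 * (1/6) = 1, so the five shapes listed above
    # exhaust the possible trees.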
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
# Test class_weight works for multi-output"""
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
| bsd-3-clause |
warith-harchaoui/TensorflowWrappers4NeuralNetworks | example.py | 1 | 6026 | """
Tensorflow Personal Toolbox for Neural Networks
Basic Neural Network for MNIST Demo
--
Warith HARCHAOUI, Astrid MERCKLING
2017
"""
print(" === Basic Neural Network for MNIST Demo === ")
import tensorflow as tf
import numpy as np
import math
from utils import *
from matplotlib import pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import os
# For Visualizations
tensorboard_summary = True
if tensorboard_summary:
# required folder for dumping visualizations
summaries_folder = "summaries"
if not os.path.exists(summaries_folder):
os.makedirs(summaries_folder)
# For accelerating training convergence
batch_normalization = True
print(" === Loading and maybe downloading MNIST === ")
# Loads data (and maybe download it if needed)
# MNIST has 10 classes of hand-written digits images
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
nb_labels = 10 # number of classes
image_width = 28 # pixel size
input_dimensionality = image_width*image_width
data = {}
data["train"] = np.reshape(mnist.train.images,
[-1, input_dimensionality])
data["val"] = np.reshape(mnist.validation.images,
[-1, input_dimensionality])
data["test"] = np.reshape(mnist.test.images,
[-1, input_dimensionality])
labels = {}
labels["train"] = mnist.train.labels
labels["val"] = mnist.validation.labels
labels["test"] = mnist.test.labels
learning_rate_v = 0.002
batch_size_v = 128
n = data["train"].shape[0]
image_dims = [input_dimensionality]
keep_prob_v = 0.9
print(" === Building the Computation Graph for the Neural Network === ")
# Input Placeholder to specify for each mini-batch in feed_dict ===
learning_rate = tf.placeholder(tf.float32, name = 'learning_rate')
input_images = tf.placeholder(tf.float32,
[None] + image_dims, name = 'input_images')
output_labels = tf.placeholder(tf.float32,
[None, nb_labels] , name = 'output_labels')
batch_size = tf.placeholder(tf.int32, name = "batch_size")
train = tf.placeholder(tf.bool, name = "train")
keep_prob = tf.placeholder(tf.float32, name = "keep_prob")
dropout = tf.placeholder(tf.bool, name = "dropout")
x = input_images
layers = [512, 128, 32, nb_labels]
mlp_activations = [tf.nn.relu]*(len(layers)-1)+[tf.identity]
# NB: the last layer has an identity activation instead of softmax
# due to numerical stability reasons handled later in the
# tf.nn.softmax_cross_entropy_with_logits function
# mlp is the sequence of matrix product and activations
x = mlp(x, layers, mlp_activations, "mlp",
train = train, dropout = dropout, keep_prob = keep_prob,
batch_normalization = batch_normalization,
tensorboard_summary = tensorboard_summary)
# Multi-class cross-entropy loss function to be minimized
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits = x,
labels = output_labels))
# For Tensorboard visualization
scalar_summary(cost, "cost")
# Gradient step to operate
optimizer = tf.train.AdamOptimizer(
learning_rate = learning_rate).minimize(cost)
# Vector of correct answers boolean
correct_pred = tf.equal(tf.argmax(x, 1), tf.argmax(output_labels, 1))
# Mean accuracy
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
scalar_summary(accuracy, "accuracy") # again for Tensorboard
print(" === Tensroflow Sesssion Beginning -- Tensorflow Magic! === ")
sess = tf.Session()
sess.run(tf.global_variables_initializer())
if tensorboard_summary:
merged_summaries = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(summaries_folder, sess.graph)
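# Added note: the dumped summaries can be inspected with TensorBoard (assuming
# it is installed), e.g. by running `tensorboard --logdir=summaries`.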
nb_iteration_per_epoch = int(n/batch_size_v)
epoch = 0
iteration = 0
print(" === Learning Loop === ")
while iteration < 5000:
# randomly pick batch_size_v indices
idx = np.random.choice(n, batch_size_v)
n_batch = idx.shape[0]
feed_dict = {}
feed_dict[learning_rate] = learning_rate_v
feed_dict[input_images] = data["train"][idx]
feed_dict[output_labels] = labels["train"][idx]
feed_dict[batch_size] = n_batch
feed_dict[train] = True
feed_dict[keep_prob] = keep_prob_v
feed_dict[dropout] = True
# Gradient step operation
sess.run(optimizer, feed_dict = feed_dict)
# Update Tensorboard every 1000 iterations
if tensorboard_summary & ((iteration + 1) % 1000 == 0):
merged_summaries_v = sess.run(merged_summaries, feed_dict = feed_dict)
train_writer.add_summary(merged_summaries_v, iteration)
if (iteration + 1) % 1000 == 0:
# Computes Validation Accuracy
n_val = data["val"].shape[0]
i = 0
accuracy_v = 0
while i< n_val:
idx = np.array(list(range(i, min(n_val, i+batch_size_v))))
n_batch = idx.shape[0]
feed_dict = {}
feed_dict[input_images] = data["val"][idx]
feed_dict[output_labels] = labels["val"][idx]
feed_dict[batch_size] = n_batch
feed_dict[train] = False
feed_dict[dropout] = False
feed_dict[keep_prob] = keep_prob_v
accuracy_v += n_batch * sess.run(accuracy, feed_dict = feed_dict)
i = i + batch_size_v
print("Validation Accuracy", accuracy_v * 100 / n_val, "%")
# Computes Test Accuracy
n_test = data["test"].shape[0]
i = 0
accuracy_v = 0
while i< n_test:
idx = np.array(list(range(i, min(n_test, i+batch_size_v))))
n_batch = idx.shape[0]
feed_dict = {}
feed_dict[input_images] = data["test"][idx]
feed_dict[output_labels] = labels["test"][idx]
feed_dict[batch_size] = n_batch
feed_dict[train] = False
feed_dict[dropout] = False
feed_dict[keep_prob] = keep_prob_v
accuracy_v += n_batch * sess.run(accuracy, feed_dict = feed_dict)
i = i + batch_size_v
print("Test Accuracy", accuracy_v*100/n_test, "%")
iteration = iteration+1
| bsd-3-clause |
vsmolyakov/DP_means | python/dpmeans.py | 1 | 6101 | from sklearn.datasets import load_iris
import numpy as np
import matplotlib.pyplot as plt
import time
np.random.seed(0)
class dpmeans:
def __init__(self,X):
# Initialize parameters for DP means
self.K = 1
self.K_init = 4
self.d = X.shape[1]
self.z = np.mod(np.random.permutation(X.shape[0]),self.K)+1
self.mu = np.random.standard_normal((self.K, self.d))
self.sigma = 1
self.nk = np.zeros(self.K)
self.pik = np.ones(self.K)/self.K
#init mu
self.mu = np.array([np.mean(X,0)])
#init lambda
self.Lambda = self.kpp_init(X,self.K_init)
self.max_iter = 100
self.obj = np.zeros(self.max_iter)
self.em_time = np.zeros(self.max_iter)
def kpp_init(self,X,k):
#k++ init
#lambda is max distance to k++ means
[n,d] = np.shape(X)
mu = np.zeros((k,d))
dist = np.inf*np.ones(n)
        mu[0,:] = X[int(np.ceil(np.random.rand()*n-1)),:]  # int cast: NumPy requires integer row indices
for i in range(1,k):
D = X-np.tile(mu[i-1,:],(n,1))
dist = np.minimum(dist, np.sum(D*D,1))
idx = np.where(np.random.rand() < np.cumsum(dist/float(sum(dist))))
mu[i,:] = X[idx[0][0],:]
Lambda = np.max(dist)
return Lambda
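    # Added note: Lambda is the largest squared distance to the k++ means and
    # acts as the cluster-creation penalty -- in fit(), a point with
    # dmin > self.Lambda spawns a new cluster.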
def fit(self,X):
obj_tol = 1e-3
max_iter = self.max_iter
[n,d] = np.shape(X)
obj = np.zeros(max_iter)
em_time = np.zeros(max_iter)
print 'running dpmeans...'
for iter in range(max_iter):
tic = time.time()
dist = np.zeros((n,self.K))
#assignment step
for kk in range(self.K):
Xm = X - np.tile(self.mu[kk,:],(n,1))
dist[:,kk] = np.sum(Xm*Xm,1)
#update labels
dmin = np.min(dist,1)
self.z = np.argmin(dist,1)
idx = np.where(dmin > self.Lambda)
if (np.size(idx) > 0):
self.K = self.K + 1
self.z[idx[0]] = self.K-1 #cluster labels in [0,...,K-1]
self.mu = np.vstack([self.mu,np.mean(X[idx[0],:],0)])
Xm = X - np.tile(self.mu[self.K-1,:],(n,1))
dist = np.hstack([dist, np.array([np.sum(Xm*Xm,1)]).T])
#update step
self.nk = np.zeros(self.K)
for kk in range(self.K):
self.nk[kk] = self.z.tolist().count(kk)
idx = np.where(self.z == kk)
self.mu[kk,:] = np.mean(X[idx[0],:],0)
self.pik = self.nk/float(np.sum(self.nk))
#compute objective
for kk in range(self.K):
idx = np.where(self.z == kk)
obj[iter] = obj[iter] + np.sum(dist[idx[0],kk],0)
obj[iter] = obj[iter] + self.Lambda * self.K
#check convergence
if (iter > 0 and np.abs(obj[iter]-obj[iter-1]) < obj_tol*obj[iter]):
print('converged in %d iterations\n'% iter)
break
em_time[iter] = time.time()-tic
#end for
self.obj = obj
self.em_time = em_time
return self.z, obj, em_time
def compute_nmi(self, z1, z2):
# compute normalized mutual information
n = np.size(z1)
k1 = np.size(np.unique(z1))
k2 = np.size(np.unique(z2))
nk1 = np.zeros((k1,1))
nk2 = np.zeros((k2,1))
for kk in range(k1):
nk1[kk] = np.sum(z1==kk)
for kk in range(k2):
nk2[kk] = np.sum(z2==kk)
pk1 = nk1/float(np.sum(nk1))
pk2 = nk2/float(np.sum(nk2))
nk12 = np.zeros((k1,k2))
for ii in range(k1):
for jj in range(k2):
nk12[ii,jj] = np.sum((z1==ii)*(z2==jj))
pk12 = nk12/float(n)
Hx = -np.sum(pk1 * np.log(pk1 + np.finfo(float).eps))
Hy = -np.sum(pk2 * np.log(pk2 + np.finfo(float).eps))
Hxy = -np.sum(pk12 * np.log(pk12 + np.finfo(float).eps))
MI = Hx + Hy - Hxy;
nmi = MI/float(0.5*(Hx+Hy))
return nmi
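    # Added note: nmi lies in [0, 1]; identical labelings give Hxy = Hx = Hy,
    # hence MI = Hx and nmi = 1, while independent labelings push nmi toward 0.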
def generate_plots(self,X):
plt.close('all')
plt.figure(0)
for kk in range(self.K):
#idx = np.where(self.z == kk)
plt.scatter(X[self.z == kk,0], X[self.z == kk,1], \
s = 100, marker = 'o', color = np.random.rand(3,1), label = str(kk))
#end for
plt.xlabel('X1')
plt.ylabel('X2')
plt.legend()
plt.title('DP-means clusters')
plt.grid(True)
plt.show()
plt.figure(1)
plt.plot(self.obj)
plt.title('DP-means objective function')
plt.xlabel('iterations')
plt.ylabel('penalized l2 squared distance')
plt.grid(True)
plt.show()
plt.figure(2)
plt.plot(self.em_time)
plt.title('DP-means time per iteration')
plt.xlabel('iterations')
plt.ylabel('time, sec')
plt.grid(True)
plt.show()
def display_params(self):
print 'K = %d'% self.K
print 'd = %d'% self.d
print 'Labels:'
print self.z
print 'Means:'
print self.mu
print 'Sigma:'
print self.sigma
print 'Counts:'
print self.nk
print 'Proportions:'
print self.pik
print 'Lambda: %.2f'% self.Lambda
if __name__ == "__main__":
iris = load_iris()
X = iris.data
y = iris.target
dp = dpmeans(X)
labels, obj, em_time = dp.fit(X)
dp.generate_plots(X)
nmi = dp.compute_nmi(labels,y)
print "NMI: %.4f" % nmi
| mit |
e-koch/ewky_scripts | flux_recovered.py | 1 | 15686 |
'''
Find the amount of emission recovered in a high-resolution map, based on a low-resolution map
'''
from spectral_cube import SpectralCube
import numpy as np
from radio_beam import Beam
from astropy.convolution import convolve_fft
from astropy import units as u
from astropy import wcs
from FITS_tools.header_tools import wcs_to_platescale
import matplotlib.pyplot as p
from scipy.interpolate import interp1d
import dask.array as da
import warnings
class MultiResObs(object):
"""
Object to hold observations of the same object at different resolutions.
This is intended for matching and analyzing high-res (interferometric)
and low-res (single dish) observations.
"""
def __init__(self, highres, lowres):
super(MultiResObs, self).__init__()
self.highres = SpectralCube.read(highres)
self.lowres = SpectralCube.read(lowres)
self.highres_convolved = None
self.lowres_convolved = None
self.lowbeam = self.lowres.beam
self.highbeam = self.highres.beam
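        # the combined beam is the convolution of the low- and high-resolution beams,
        # i.e. the common resolution both cubes are smoothed to in convolve_to_common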
self.combined_beam = self.lowbeam.convolve(self.highbeam)
def apply_mask(self, highres_mask=None, lowres_mask=None):
'''
Apply a pre-made mask to either of the data cubes.
'''
if highres_mask is not None:
self.highres = self.highres.with_mask(highres_mask)
if lowres_mask is not None:
self.lowres = self.lowres.with_mask(lowres_mask)
def match_coords(self):
'''
Match the spatial and spectral coordinates of the cubes.
'''
# TODO: use Skycoords to convert extrema into the same frame
# Are either of the cubes in the correct frame?
if self.highres.header['CTYPE1'] != self.lowres.header['CTYPE1']:
raise TypeError("ctypes do not match. Are observations in the "
"same projection? Highres: " +
self.highres.header['CTYPE1'] +
" Lowres: " + self.lowres.header['CTYPE1'])
elif self.highres.header['CTYPE2'] != self.lowres.header['CTYPE2']:
raise TypeError("ctypes do not match. Are observations in the "
"same projection? Highres: " +
self.highres.header['CTYPE2'] +
" Lowres: " + self.lowres.header['CTYPE2'])
# Determine which cube should be slice down so both have the same
# spatial coverage.
limits_high = []
limits_low = []
low_long = self.lowres.longitude_extrema
high_long = self.highres.longitude_extrema
low_lat = self.lowres.latitude_extrema
high_lat = self.highres.latitude_extrema
if low_long[0] < high_long[0]:
limits_low.append(high_long[0])
limits_high.append("min")
else:
limits_high.append(low_long[0])
limits_low.append("min")
if low_long[1] > high_long[1]:
limits_low.append(high_long[1])
limits_high.append("max")
else:
limits_high.append(low_long[0])
limits_low.append("min")
if low_lat[1] > high_lat[1]:
limits_low.append(high_lat[1])
limits_high.append("min")
else:
limits_high.append(low_lat[0])
limits_low.append("min")
if low_lat[0] < high_lat[0]:
limits_low.append(high_lat[0])
limits_high.append("max")
else:
limits_high.append(low_lat[0])
limits_low.append("max")
# Apply common slicing to both
self.highres = \
self.highres.subcube(xlo=limits_high[0], xhi=limits_high[1],
ylo=limits_high[2], yhi=limits_high[3])
self.lowres = \
self.lowres.subcube(xlo=limits_low[0], xhi=limits_low[1],
ylo=limits_low[2], yhi=limits_low[3])
# Now match the spectral extends
low_spec = \
self.highres.spectral_extrema[0] if \
self.highres.spectral_extrema[0] > self.lowres.spectral_extrema[0]\
else self.lowres.spectral_extrema[0]
high_spec = \
self.highres.spectral_extrema[1] if \
self.highres.spectral_extrema[1] < self.lowres.spectral_extrema[1]\
else self.lowres.spectral_extrema[1]
self.highres = self.highres.spectral_slab(low_spec, high_spec)
self.lowres = self.lowres.spectral_slab(low_spec, high_spec)
def convert_to(self, unit=u.K, freq=1420.40575177*u.MHz):
'''
Convert both sets to common brightness units.
'''
convert_high = unit != self.highres.unit
convert_low = unit != self.lowres.unit
high_unit = self.highres.unit
low_unit = self.lowres.unit
if unit == u.K:
if convert_high and 'Jy' in high_unit.name:
self.highres = \
self.highres.to(unit, self.highbeam.jtok_equiv(freq))
if convert_low and 'Jy' in low_unit.name:
self.lowres = \
self.lowres.to(unit, self.lowbeam.jtok_equiv(freq))
else:
raise NotImplementedError("Only supporting Jy/beam -> K right now.")
def convolve_to_common(self, verbose=False, use_dask=True,
block=(256, 256)):
'''
Convolve cubes to a common resolution using the combined beam.
'''
# Create convolution kernels from the combined beam
conv_kernel_high = \
self.combined_beam.as_kernel(wcs_to_platescale(self.highres.wcs))
# if use_dask:
# assert np.alltrue([bl > kern for bl, kern in
# zip(block, conv_kernel_high.shape)])
conv_kernel_low = \
self.combined_beam.as_kernel(wcs_to_platescale(self.lowres.wcs))
# if use_dask:
# assert np.alltrue([bl > kern for bl, kern in
# zip(block, conv_kernel_low.shape)])
high_pad = np.ceil(conv_kernel_high.shape[0] / 2).astype(int)
highres_convolved = np.empty(self.highres.shape)
high_chans = len(self.highres.spectral_axis)
if verbose:
print("Convolving high resolution cube.")
if use_dask:
highres_convolved = auto_dask_map(self.highres, blocks=block,
args=[conv_kernel_high])
# for chan in range(high_chans):
# if verbose:
# print("On Channel: "+str(chan)+" of "+str(high_chans))
# if use_dask:
# da_arr = \
# da.from_array(np.pad(self.highres.filled_data[chan, :, :],
# high_pad, padwithnans),
# chunks=block)
# highres_convolved[chan, high_pad:-high_pad,
# high_pad:-high_pad] = \
# da_arr.map_overlap(
# lambda a:
# convolve_fft(a,
# conv_kernel_high,
# boundary='fill',
# interpolate_nan=True,
# normalize_kernel=True),
# depth=2*high_pad,
# boundary=np.nan).compute()
# else:
# highres_convolved[chan, :, :] = \
# convolve_fft(self.highres.filled_data[chan, :, :],
# conv_kernel_high, boundary='fill',
# interpolate_nan=True, normalize_kernel=True)
update_high_hdr = \
_update_beam_in_hdr(self.highres.header, self.combined_beam)
self.highres_convolved = \
SpectralCube(highres_convolved*self.highres.unit, self.highres.wcs,
header=update_high_hdr)
# Cleanup a bit
del highres_convolved
# Now the low resolution data
lowres_convolved = np.empty(self.lowres.shape)
low_chans = len(self.lowres.spectral_axis)
if verbose:
print("Convolving low resolution cube.")
if use_dask:
lowres_convolved = auto_dask_map(self.highres, blocks=block,
args=[conv_kernel_low])
# for chan in range(low_chans):
# if verbose:
# print("On Channel: "+str(chan)+" of "+str(low_chans))
# lowres_convolved[chan, :, :] = \
# convolve_fft(self.lowres.filled_data[chan, :, :],
# conv_kernel_low, boundary='fill',
# interpolate_nan=True, normalize_kernel=True)
update_low_hdr = \
_update_beam_in_hdr(self.lowres.header, self.combined_beam)
self.lowres_convolved = \
SpectralCube(lowres_convolved*self.lowres.unit, self.lowres.wcs,
header=update_low_hdr)
def flux_recovered(self, plot=True, filename=None, enable_interp=True,
interp_to='lower', diff_tol=1e-4*u.m/u.s):
'''
Check the amount of flux recovered in the high resolution image versus
the low resolution data. If the spectral axes don't match, one is
interpolated onto the other.
Parameters
----------
plot : bool, optional
Enable plotting.
filename : str, optional
Give filename for the plot save file. When specified, the plot
is automatically saved.
        interp_to : 'lower' or 'higher', optional
            If the spectral axes don't match, the data are interpolated onto
            a common spectral axis. The default 'lower' interpolates onto the
            axis with the lower spectral resolution.
'''
# Add up the total intensity in the cubes and compare
# Assumes that there is some masking such that noise
# doesn't dominate
if self.highres_convolved is not None:
high_channel_intensity = \
self.highres_convolved.sum(axis=(1, 2))
else:
high_channel_intensity = self.highres.sum(axis=(1, 2))
            warnings.warn("Should run convolve_to_common before. Using "
                          "unconvolved cube.")
if self.lowres_convolved is not None:
low_channel_intensity = self.lowres_convolved.sum(axis=(1, 2))
else:
low_channel_intensity = self.lowres.sum(axis=(1, 2))
            warnings.warn("Should run convolve_to_common before. Using "
                          "unconvolved cube.")
# If the spectral axes are not the same, try interpolating to match
if enable_interp:
better_vres_high = \
np.abs(self.lowres.header['CDELT3']) > \
np.abs(self.highres.header['CDELT3'])
spec_diff = np.abs(np.abs(self.lowres.header['CDELT3']) -
np.abs(self.highres.header['CDELT3']))
spec_unit = self.highres.spectral_axis.unit
if spec_diff * spec_unit < diff_tol:
warnings.warn("Channels are essentially the same. "
"Skipping interpolation. Lower diff_tol"
" to re-enable interpolation.")
else:
# Invert which to interpolate to
if interp_to == "higher":
better_vres_high = not better_vres_high
if better_vres_high:
f = interp1d(np.round(self.highres.spectral_axis.value, 3),
high_channel_intensity.value)
high_channel_intensity = \
f(np.round(self.lowres.spectral_axis.value, 3)) *\
self.highres.unit
else:
f = interp1d(np.round(self.lowres.spectral_axis.value, 3),
low_channel_intensity.value)
low_channel_intensity = \
f(np.round(self.highres.spectral_axis.value, 3)) *\
self.lowres.unit
self.fraction_flux_recovered = \
high_channel_intensity.sum()/low_channel_intensity.sum()
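        # a ratio near 1 means the high-resolution data recover essentially all of the flux seen at low resolution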
if plot:
p.plot(self.highres.spectral_axis.value,
high_channel_intensity.value)
p.plot(self.lowres.spectral_axis.value,
low_channel_intensity.value)
p.xlabel("Spectral Axis ("+self.highres.spectral_axis.unit.to_string()+")")
p.ylabel("Intensity ("+self.highres.unit.to_string()+")")
if filename is None:
p.show()
else:
p.savefig(filename)
def run_all(self, highres_mask=None, lowres_mask=None, plot=True,
unit=u.K, freq=1420.40575177*u.MHz, filename=True,
use_dask=False, verbose=True):
'''
Run the complete comparisons.
'''
self.apply_mask(highres_mask=highres_mask, lowres_mask=lowres_mask)
self.match_coords()
self.convert_to(unit=unit, freq=freq)
self.convolve_to_common(verbose=verbose, use_dask=use_dask)
self.flux_recovered(plot=plot, filename=filename)
def _update_beam_in_hdr(hdr, beam):
hdr["BMAJ"] = beam.major.value
hdr["BMIN"] = beam.minor.value
hdr["BPA"] = beam.pa.value
return hdr
def padwithnans(vector, pad_width, iaxis, kwargs):
vector[:pad_width[0]] = np.nan
vector[-pad_width[1]:] = np.nan
return vector
def auto_dask_map(cube, operation=convolve_fft, blocks=None, args=[],
kwargs={}, output_array=None, verbose=True):
'''
Based on the dimensions of the given cube, and the dimensions
of the blocks, return an appropriate dask output.
Currently, this only supports 2D and 3D inputs. For 2D blocks,
an iterator is returned with 2D slices of the array. For 3D,
a single dask array is returned.
'''
if len(blocks) == 2:
if output_array is None:
output_array = np.empty(cube.shape)
def_slice = [slice(None)] * len(cube.shape)
for chan, cube_slice in dask_slice_iterator(cube, blocks):
if verbose:
print("On "+str(chan))
def_slice[0] = slice(chan, chan+1)
output_array[def_slice] = \
cube_slice.map_blocks(lambda a:
operation(a, *args, **kwargs)).compute()
else:
dask_arr = da.from_array(cube.filled_data[:], blocks)
if verbose:
print("No channel iterations, so no print out.")
# In the >2D case, map blocks to function in 2D planes
output_array = \
dask_arr.map_blocks(
lambda a: _map_flatten(operation,
a, args=args,
kwargs=kwargs)).compute()
return output_array
def dask_slice_iterator(cube, blocks):
for chan in range(len(cube.spectral_axis)):
yield chan, da.from_array(cube.filled_data[chan, :, :],
blocks)
def _map_flatten(operation, arr, args=[], kwargs={}):
restore_slice = [slice(None)] * len(arr.shape)
for i in np.where(np.asarray(arr.shape)==1)[0]:
restore_slice[i] = np.newaxis
return operation(arr.squeeze(), *args, **kwargs)[restore_slice]
| mit |
phobson/statsmodels | statsmodels/sandbox/regression/example_kernridge.py | 39 | 1232 |
import numpy as np
import matplotlib.pyplot as plt
from .kernridgeregress_class import GaussProcess, kernel_euclid
m,k = 50,4
upper = 6
scale = 10
xs = np.linspace(1,upper,m)[:,np.newaxis]
#xs1 = xs1a*np.ones((1,4)) + 1/(1.0+np.exp(np.random.randn(m,k)))
#xs1 /= np.std(xs1[::k,:],0) # normalize scale, could use cov to normalize
##y1true = np.sum(np.sin(xs1)+np.sqrt(xs1),1)[:,np.newaxis]
xs1 = np.sin(xs)#[:,np.newaxis]
y1true = np.sum(xs1 + 0.01*np.sqrt(np.abs(xs1)),1)[:,np.newaxis]
y1 = y1true + 0.10 * np.random.randn(m,1)
stride = 3 #use only some points as trainig points e.g 2 means every 2nd
xstrain = xs1[::stride,:]
ystrain = y1[::stride,:]
xstrain = np.r_[xs1[:m/2,:], xs1[m/2+10:,:]]
ystrain = np.r_[y1[:m/2,:], y1[m/2+10:,:]]
index = np.hstack((np.arange(m/2), np.arange(m/2+10,m)))
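# the 10 samples after the midpoint are left out of training, so the fit must interpolate across that gap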
gp1 = GaussProcess(xstrain, ystrain, kernel=kernel_euclid,
ridgecoeff=5*1e-4)
yhatr1 = gp1.predict(xs1)
plt.figure()
plt.plot(y1true, y1,'bo',y1true, yhatr1,'r.')
plt.title('euclid kernel: true y versus noisy y and estimated y')
plt.figure()
plt.plot(index,ystrain.ravel(),'bo-',y1true,'go-',yhatr1,'r.-')
plt.title('euclid kernel: true (green), noisy (blue) and estimated (red) '+
'observations')
| bsd-3-clause |
iismd17/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
truongdq/word2vec-1 | word2vec/wordvectors.py | 3 | 7409 | from __future__ import division, print_function, unicode_literals
import numpy as np
try:
from sklearn.externals import joblib
except:
joblib = None
from word2vec.utils import unitvec
class WordVectors(object):
def __init__(self, vocab, vectors, clusters=None):
"""
Initialize a WordVectors class based on vocabulary and vectors
        This initializer assumes the vectors have already been unit-normalized
Parameters
----------
vocab : np.array
1d array with the vocabulary
vectors : np.array
2d array with the vectors calculated by word2vec
clusters : word2vec.WordClusters (optional)
1d array with the clusters calculated by word2vec
"""
self.vocab = vocab
self.vectors = vectors
self.clusters = clusters
def ix(self, word):
"""
Returns the index on self.vocab and `self.vectors` for `word`
"""
temp = np.where(self.vocab == word)[0]
if temp.size == 0:
raise KeyError('Word not in vocabulary')
else:
return temp[0]
def __getitem__(self, word):
return self.get_vector(word)
def __contains__(self, word):
return word in self.vocab
def get_vector(self, word):
"""
Returns the (vectors) vector for `word` in the vocabulary
"""
idx = self.ix(word)
return self.vectors[idx]
def cosine(self, word, n=10):
"""
Cosine similarity.
metric = dot(vectors_of_vectors, vectors_of_target_vector)
        Uses the precomputed unit-normalized word vectors
Parameters
----------
word : string
n : int, optional (default 10)
number of neighbors to return
Returns
-------
2 numpy.array:
1. position in self.vocab
2. cosine similarity
"""
metrics = np.dot(self.vectors, self[word].T)
best = np.argsort(metrics)[::-1][1:n+1]
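        # vectors are unit-normalized, so the dot product equals cosine similarity;
        # index 0 of the descending sort is the query word itself and is skipped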
best_metrics = metrics[best]
return best, best_metrics
def analogy(self, pos, neg, n=10):
"""
Analogy similarity.
Parameters
----------
pos : list
neg : list
Returns
-------
2 numpy.array:
1. position in self.vocab
2. cosine similarity
Example
-------
`king - man + woman = queen` will be:
`pos=['king', 'woman'], neg=['man']`
"""
exclude = pos + neg
pos = [(word, 1.0) for word in pos]
neg = [(word, -1.0) for word in neg]
mean = []
for word, direction in pos + neg:
mean.append(direction * self[word])
mean = np.array(mean).mean(axis=0)
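        # metrics holds the cosine similarity of every vocabulary vector with this mean query vector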
metrics = np.dot(self.vectors, mean)
best = metrics.argsort()[::-1][:n + len(exclude)]
exclude_idx = [np.where(best == self.ix(word)) for word in exclude if
self.ix(word) in best]
new_best = np.delete(best, exclude_idx)
best_metrics = metrics[new_best]
return new_best[:n], best_metrics[:n]
def generate_response(self, indexes, metrics, clusters=True):
'''
Generates a pure python (no numpy) response based on numpy arrays
returned by `self.cosine` and `self.analogy`
'''
if self.clusters and clusters:
return np.rec.fromarrays((self.vocab[indexes], metrics,
self.clusters.clusters[indexes]),
names=('word', 'metric', 'cluster'))
else:
return np.rec.fromarrays((self.vocab[indexes], metrics),
names=('word', 'metric'))
def to_mmap(self, fname):
if not joblib:
raise Exception("sklearn is needed to save as mmap")
joblib.dump(self, fname)
@classmethod
def from_binary(cls, fname, vocabUnicodeSize=78, desired_vocab=None):
"""
Create a WordVectors class based on a word2vec binary file
Parameters
----------
fname : path to file
vocabUnicodeSize: the maximum string length (78, by default)
desired_vocab: if set, this will ignore any word and vector that
doesn't fall inside desired_vocab.
Returns
-------
WordVectors instance
"""
with open(fname, 'rb') as fin:
header = fin.readline()
vocab_size, vector_size = list(map(int, header.split()))
vocab = np.empty(vocab_size, dtype='<U%s' % vocabUnicodeSize)
vectors = np.empty((vocab_size, vector_size), dtype=np.float)
binary_len = np.dtype(np.float32).itemsize * vector_size
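            # each record in the binary format is the word as text, a space, then vector_size float32 values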
for i in range(vocab_size):
# read word
word = ''
while True:
ch = fin.read(1).decode('ISO-8859-1')
if ch == ' ':
break
word += ch
include = desired_vocab is None or word in desired_vocab
if include:
vocab[i] = word
# read vector
vector = np.fromstring(fin.read(binary_len), dtype=np.float32)
if include:
vectors[i] = unitvec(vector)
fin.read(1) # newline
if desired_vocab is not None:
vectors = vectors[vocab != '', :]
vocab = vocab[vocab != '']
return cls(vocab=vocab, vectors=vectors)
@classmethod
def from_text(cls, fname, vocabUnicodeSize=78, desired_vocab=None):
"""
Create a WordVectors class based on a word2vec text file
Parameters
----------
fname : path to file
vocabUnicodeSize: the maximum string length (78, by default)
desired_vocab: if set, this will ignore any word and vector that
doesn't fall inside desired_vocab.
Returns
-------
WordVectors instance
"""
with open(fname, 'rb') as fin:
header = fin.readline()
vocab_size, vector_size = list(map(int, header.split()))
vocab = np.empty(vocab_size, dtype='<U%s' % vocabUnicodeSize)
vectors = np.empty((vocab_size, vector_size), dtype=np.float)
for i, line in enumerate(fin):
line = line.decode('ISO-8859-1').strip()
parts = line.split(' ')
word = parts[0]
include = desired_vocab is None or word in desired_vocab
if include:
vector = np.array(parts[1:], dtype=np.float)
vocab[i] = word
vectors[i] = unitvec(vector)
if desired_vocab is not None:
vectors = vectors[vocab != '', :]
vocab = vocab[vocab != '']
return cls(vocab=vocab, vectors=vectors)
@classmethod
def from_mmap(cls, fname):
"""
Create a WordVectors class from a memory map
Parameters
----------
fname : path to file
Returns
-------
WordVectors instance
"""
memmaped = joblib.load(fname, mmap_mode='r+')
return cls(vocab=memmaped.vocab, vectors=memmaped.vectors)
| apache-2.0 |
piem/aubio | python/demos/demo_pitch_sinusoid.py | 4 | 1732 | #! /usr/bin/env python
import numpy as np
import aubio
def build_sinusoid(length, freqs, samplerate):
return np.sin( 2. * np.pi * np.arange(length) * freqs / samplerate).astype(aubio.float_type)
def run_pitch(p, input_vec):
cands = []
for vec_slice in input_vec.reshape((-1, p.hop_size)):
a = p(vec_slice)[0]
cands.append(a)
return cands
methods = ['default', 'schmitt', 'fcomb', 'mcomb', 'yin', 'yinfft']
cands = {}
buf_size = 2048
hop_size = 512
samplerate = 44100
sin_length = (samplerate * 10) % 512 * 512
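# sin_length is a multiple of 512 samples so the signal can be reshaped into whole hop-sized frames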
freqs = np.zeros(sin_length)
partition = sin_length // 8
pointer = 0
pointer += partition
freqs[pointer: pointer + partition] = 440
pointer += partition
pointer += partition
freqs[ pointer : pointer + partition ] = 740
pointer += partition
freqs[ pointer : pointer + partition ] = 1480
pointer += partition
pointer += partition
freqs[ pointer : pointer + partition ] = 400 + 5 * np.random.random(sin_length//8)
a = build_sinusoid(sin_length, freqs, samplerate)
for method in methods:
p = aubio.pitch(method, buf_size, hop_size, samplerate)
cands[method] = run_pitch(p, a)
print(method)
print(cands[method])
print("done computing")
if 1:
import matplotlib.pyplot as plt
# times
ramp = np.arange(0, sin_length / hop_size).astype('float') * hop_size / samplerate
# plot each result
for method in methods:
plt.plot(ramp, cands[method], '.-', label=method)
# plot ground truth
ramp = np.arange(0, sin_length).astype('float') / samplerate
plt.plot(ramp, freqs, ':', label = 'ground truth')
plt.legend(loc='upper left')
plt.xlabel('time (s)')
plt.ylabel('frequency (Hz)')
plt.ylim([0,2000])
plt.show()
| gpl-3.0 |
kmkolasinski/Bubel | plots/plot_SpinPolarizations.py | 2 | 1469 | #!/usr/bin/python
"""
Created on Thu Mar 5 14:16:21 2015
@author: Krzysztof Kolasinski
"""
import numpy as np
import matplotlib.pyplot as plt
import csv
import matplotlib.colors as colors
import matplotlib.cm as cmx
file = "polarizations.dat"
f = open(file)
lines = f.readlines()
plt.cla()
cmap = plt.cm.jet
scalarMap = cmx.ScalarMappable(cmap=cmap)
for i in range(np.size(lines)):
data = [float(x) for x in lines[i].split()]
no_modes = int(data[0])
Ef = data[1] # Fermi energy
kvecs = data[2:no_modes+2]# take wave vectors
pXYZ = data[no_modes+2:] # take polarizations
p = np.reshape(pXYZ,[3,no_modes])
print "m=",no_modes,"Ef=",Ef
#print p
for m in range(no_modes):
        p[:,m] /= np.sqrt(np.sum(p[:,m]**2))
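    # map the z-component of the polarization from [-1, 1] onto [0, 1] for the jet colormap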
colors = cmx.jet((p[2,:]+1)/2)
plt.scatter(kvecs,[Ef]*no_modes,c=colors,zorder=2)
for m in range(no_modes):
#print "m=",m," p=", sqrt(sum(p[:,m]**2))
c = cmx.jet((p[2,m]+1)/2)
p[0,m] /= 25.0
p[:,m] /= 2.0
plt.arrow(kvecs[m], Ef, p[0,m], p[1,m], head_width=0.001, head_length=0.01,color=c)
file = "bands.dat"
data = np.loadtxt(file)
no_lines = np.size(data[0,:])
E0= 27211.384523
x = data[:,0]
for i in range(no_lines-1):
plt.plot(x,data[:,i+1]*E0,c='k',ls='-',zorder=0)
plt.xlim([-0.4,0.4])
plt.ylim([-0.1,6])
plt.xlabel("k [1/unit size]")
plt.ylabel("Energy [meV]") | mit |
google/meterstick | setup.py | 1 | 1623 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup file for the meterstick package."""
import pathlib
from setuptools import setup
HERE = pathlib.Path(__file__).parent
README = (HERE/"README.md").read_text()
setup(
name="meterstick",
version="1.1.0",
description="A grammar of data analysis",
long_description=README,
long_description_content_type="text/markdown",
    url="https://github.com/google/meterstick",
author="Xunmo Yang, Dennis Sun, Taylor Pospisil",
    author_email="[email protected]",
license="Apache License 2.0",
packages=["meterstick"],
    install_requires=["six", "numpy", "scipy", "pandas"],
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Development Status :: 5 - Production/Stable",
"Framework :: IPython",
"Framework :: Jupyter",
"Intended Audience :: Science/Research",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: SQL",
])
| apache-2.0 |
pschella/scipy | scipy/misc/common.py | 17 | 6104 | """
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import arange, newaxis, hstack, product, array, fromstring
__all__ = ['central_diff_weights', 'derivative', 'lena', 'ascent', 'face']
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Notes
-----
Can be inaccurate for large number of points.
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
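    # X is the Vandermonde matrix of the stencil offsets; row ndiv of its inverse,
    # scaled by ndiv!, gives the central difference weights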
w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the n-th derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the `n`-th derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which `n`-th derivative is found.
dx : float, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
    Decreasing the step size too much can result in round-off error.
Examples
--------
>>> from scipy.misc import derivative
>>> def f(x):
... return x**3 + x**2
>>> derivative(f, 1.0, dx=1e-6)
4.9999999999217337
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n == 1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n == 2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / product((dx,)*n,axis=0)
def lena():
"""
Function that previously returned an example image
.. note:: Removed in 0.17
Parameters
----------
None
Returns
-------
None
Raises
------
RuntimeError
This functionality has been removed due to licensing reasons.
Notes
-----
The image previously returned by this function has an incompatible license
and has been removed from SciPy. Please use `face` or `ascent` instead.
See Also
--------
face, ascent
"""
raise RuntimeError('lena() is no longer included in SciPy, please use '
'ascent() or face() instead')
def ascent():
"""
Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
with open(fname, 'rb') as f:
ascent = array(pickle.load(f))
return ascent
def face(gray=False):
"""
Get a 1024 x 768, color image of a raccoon face.
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
Parameters
----------
gray : bool, optional
If True return 8-bit grey-scale image, otherwise return a color image
Returns
-------
face : ndarray
        image of a raccoon face
Examples
--------
>>> import scipy.misc
>>> face = scipy.misc.face()
>>> face.shape
(768, 1024, 3)
>>> face.max()
255
>>> face.dtype
dtype('uint8')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(face)
>>> plt.show()
"""
import bz2
import os
with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
rawdata = f.read()
data = bz2.decompress(rawdata)
face = fromstring(data, dtype='uint8')
face.shape = (768, 1024, 3)
if gray is True:
face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')
return face
| bsd-3-clause |
deepchem/deepchem | contrib/tensorflow_models/test_progressive.py | 6 | 4932 | '''
"""
Sanity tests on progressive models.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import tempfile
import numpy as np
import unittest
import sklearn
import shutil
import tensorflow as tf
import deepchem as dc
from tensorflow.python.framework import test_util
class TestProgressive(test_util.TensorFlowTestCase):
"""
Test that progressive models satisfy basic sanity checks.
"""
def setUp(self):
super(TestProgressive, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
def test_construction(self):
"""Test that progressive models can be constructed without crash."""
prog_model = dc.models.ProgressiveMultitaskRegressor(
n_tasks=1,
n_features=100,
alpha_init_stddevs=[.08],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=200)
def test_fit(self):
"""Test that progressive models can fit without crash."""
n_tasks = 2
n_samples = 10
n_features = 100
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
prog_model = dc.models.ProgressiveMultitaskRegressor(
n_tasks=n_tasks,
n_features=n_features,
alpha_init_stddevs=[.08],
dropouts=[0.],
learning_rate=0.003,
weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
batch_size=2)
prog_model.fit(dataset)
def test_fit_lateral(self):
"""Test that multilayer model fits correctly.
Lateral connections and adapters are only added for multilayer models. Test
that fit functions with multilayer models.
"""
n_tasks = 2
n_samples = 10
n_features = 100
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
n_layers = 3
prog_model = dc.models.ProgressiveMultitaskRegressor(
n_tasks=n_tasks,
n_features=n_features,
alpha_init_stddevs=[.08] * n_layers,
layer_sizes=[100] * n_layers,
weight_init_stddevs=[.02] * n_layers,
bias_init_consts=[1.] * n_layers,
dropouts=[0.] * n_layers,
learning_rate=0.003,
batch_size=2)
prog_model.fit(dataset)
def test_fit_lateral_multi(self):
"""Test that multilayer model fits correctly.
Test multilayer model with multiple tasks (> 2) to verify that lateral
connections of growing size work correctly.
"""
n_tasks = 3
n_samples = 10
n_features = 100
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
n_layers = 3
prog_model = dc.models.ProgressiveMultitaskRegressor(
n_tasks=n_tasks,
n_features=n_features,
alpha_init_stddevs=[.08] * n_layers,
layer_sizes=[100] * n_layers,
weight_init_stddevs=[.02] * n_layers,
bias_init_consts=[1.] * n_layers,
dropouts=[0.] * n_layers,
learning_rate=0.003,
batch_size=2)
prog_model.fit(dataset)
def test_frozen_weights(self):
"""Test that fitting one task doesn't change predictions of another.
Tests that weights are frozen when training different tasks.
"""
n_tasks = 2
n_samples = 10
n_features = 100
np.random.seed(123)
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
n_layers = 3
prog_model = dc.models.ProgressiveMultitaskRegressor(
n_tasks=n_tasks,
n_features=n_features,
alpha_init_stddevs=[.08] * n_layers,
layer_sizes=[100] * n_layers,
weight_init_stddevs=[.02] * n_layers,
bias_init_consts=[1.] * n_layers,
dropouts=[0.] * n_layers,
learning_rate=0.003,
batch_size=2)
# Fit just on task zero
# Notice that we keep the session open
prog_model.fit(dataset, tasks=[0], close_session=False)
y_pred_task_zero = prog_model.predict(dataset)[:, 0]
# Fit on task one
prog_model.fit(dataset, tasks=[1])
y_pred_task_zero_after = prog_model.predict(dataset)[:, 0]
# The predictions for task zero should not change after training
# on task one.
np.testing.assert_allclose(y_pred_task_zero, y_pred_task_zero_after)
'''
| mit |
h2educ/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 157 | 13799 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csr.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csr.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=np.float64)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
roderickmackenzie/gpvdm | gpvdm_gui/gui/fxmesh.py | 1 | 10443 | #
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2012-2017 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
## @package fxmesh
# fx domain mesh editor
#
import os
from gui_util import dlg_get_text
from inp import inp_save
import webbrowser
from util import fx_with_units
from icon_lib import icon_get
from scan_human_labels import get_scan_human_labels
import i18n
_ = i18n.language.gettext
#inp
from inp import inp_search_token_value
from inp import inp_read_next_item
from inp import inp
#matplotlib
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
#qt
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget,QVBoxLayout,QHBoxLayout,QToolBar,QSizePolicy,QAction,QTabWidget,QTableWidget,QAbstractItemView,QApplication
from PyQt5.QtGui import QPainter,QIcon
#windows
from open_save_dlg import save_as_jpg
from colors import get_color
from cal_path import get_sim_path
from gpvdm_tab import gpvdm_tab
import matplotlib.cm as cm
import numpy as np
mesh_articles = []
class tab_fxmesh(QWidget):
lines=[]
edit_list=[]
line_number=[]
save_file_name=""
file_name=""
name=""
visible=1
def save_data(self):
file_name="fxmesh"+str(self.index)+".inp"
self.get_scan_human_labels.remove_file(file_name)
out_text=[]
for i in range(0,self.tab.rowCount()):
out_text.append("#fx_segment"+str(i)+"_start")
self.get_scan_human_labels.add_item(file_name,out_text[len(out_text)-1],_("Part ")+str(i)+" "+_("start"))
out_text.append(str(self.tab.get_value(i, 0)))
out_text.append("#fx_segment"+str(i)+"_stop")
self.get_scan_human_labels.add_item(file_name,out_text[len(out_text)-1],_("Part ")+str(i)+" "+_("stop"))
out_text.append(str(self.tab.get_value(i, 1)))
out_text.append("#fx_segment"+str(i)+"_points")
self.get_scan_human_labels.add_item(file_name,out_text[len(out_text)-1],_("Part ")+str(i)+" "+_("points"))
out_text.append(str(self.tab.get_value(i, 2)))
out_text.append("#fx_segment"+str(i)+"_mul")
self.get_scan_human_labels.add_item(file_name,out_text[len(out_text)-1],_("Part ")+str(i)+" "+_("mul"))
out_text.append(str(self.tab.get_value(i, 3)))
out_text.append("#ver")
out_text.append("1.1")
out_text.append("#end")
inp_save(file_name,out_text)
self.update_scan_tokens()
def update_scan_tokens(self):
file_name="fxmesh"+str(self.index)+".inp"
self.get_scan_human_labels.remove_file(file_name)
for i in range(0,len(self.list)):
self.get_scan_human_labels.add_item(file_name,"#fx_segment"+str(i)+"_start",_("Part ")+str(i)+" "+_("start"))
self.get_scan_human_labels.add_item(file_name,"#fx_segment"+str(i)+"_stop",_("Part ")+str(i)+" "+_("stop"))
self.get_scan_human_labels.add_item(file_name,"#fx_segment"+str(i)+"_points",_("Part ")+str(i)+" "+_("points"))
self.get_scan_human_labels.add_item(file_name,"#fx_segment"+str(i)+"_mul",_("Part ")+str(i)+" "+_("mul"))
def callback_add_section(self):
self.tab.add(["0.0","0.0","0.0","0.0"])
self.build_mesh()
self.draw_graph()
self.fig.canvas.draw()
self.save_data()
def callback_remove_item(self):
self.tab.remove()
self.build_mesh()
self.draw_graph()
self.fig.canvas.draw()
self.save_data()
def callback_move_down(self):
self.tab.move_down()
self.build_mesh()
self.draw_graph()
self.fig.canvas.draw()
self.save_data()
def callback_move_up(self):
self.tab.move_up()
self.build_mesh()
self.draw_graph()
self.fig.canvas.draw()
self.save_data()
def update(self):
self.build_mesh()
self.draw_graph()
self.fig.canvas.draw()
def draw_graph(self):
if len(self.fx)==0:
return
my_max=self.fx[0][0]
my_min=self.fx[0][0]
for i in range(0,len(self.fx)):
for ii in range(0,len(self.fx[i])):
if self.fx[i][ii]>my_max:
my_max=self.fx[i][ii]
if self.fx[i][ii]<my_min:
my_min=self.fx[i][ii]
#if (len(self.fx)>0):
# mul,unit=fx_with_units(float(my_max-my_min))
#else:
mul=1.0
unit="Hz"
fx=[]
mag=[]
for i in range(0,len(self.fx)):
local_fx=[]
for ii in range(0,len(self.fx[i])):
local_fx.append(self.fx[i][ii]*mul)
mag.append(1)
fx.extend(local_fx)
self.fig.clf()
self.fig.subplots_adjust(bottom=0.2)
self.fig.subplots_adjust(left=0.1)
self.ax1 = self.fig.add_subplot(111)
cmap = cm.jet
#self.ax1.clear()
#self.ax1.scatter(self.x,self.mag ,c=c, cmap=cmap)
#self.fig.canvas.draw()
c = np.linspace(0, 10, len(fx))
self.ax1 = self.fig.add_subplot(111)
self.ax1.ticklabel_format(useOffset=False)
self.ax1.set_ylabel(_("Magnitude")+" ("+_("Volts")+" )")
self.ax1.get_yaxis().set_visible(False)
self.ax1.spines['top'].set_visible(False)
self.ax1.spines['right'].set_visible(False)
#self.ax1.spines['bottom'].set_visible(False)
self.ax1.spines['left'].set_visible(False)
self.ax1.set_xscale("log")
#print(fx,self.mag)
self.ax1.scatter(fx,mag, c=c, cmap=cmap)
self.ax1.set_xlabel(_("Frequency")+" ("+unit+")")
def load_data(self):
self.tab.clear()
self.tab.setColumnCount(4)
#self.tab.setSelectionBehavior(QAbstractItemView.SelectRows)
self.tab.setHorizontalHeaderLabels([_("Frequency start"),_("Frequency stop"), _("Max points"), _("Multiply")])
self.tab.setColumnWidth(0, 200)
self.tab.setColumnWidth(1, 200)
self.start_fx=0.0
file_name="fxmesh"+str(self.index)+".inp"
f=inp().load(os.path.join(get_sim_path(),file_name))
lines=f.lines
if f!=False:
if inp_search_token_value(lines, "#ver")=="1.1":
pos=0
while(1):
token,start,pos=inp_read_next_item(lines,pos)
if token=="#ver" or token=="#end":
break
token,stop,pos=inp_read_next_item(lines,pos)
token,points,pos=inp_read_next_item(lines,pos)
token,mul,pos=inp_read_next_item(lines,pos)
self.tab.add([str(start),str(stop),str(points),str(mul)])
return True
else:
print("file "+file_name+"wrong version")
exit("")
return False
else:
print("file "+file_name+" not found")
return False
return False
def build_mesh(self):
self.mag=[]
self.fx=[]
for i in range(0,self.tab.rowCount()):
local_mag=[]
local_fx=[]
start=float(self.tab.get_value(i, 0))
fx=start
stop=float(self.tab.get_value(i, 1))
max_points=float(self.tab.get_value(i, 2))
mul=float(self.tab.get_value(i, 3))
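            # mul == 1.0 gives linear spacing across the segment; any other value advances the frequency geometrically by the factor mul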
pos=0
if stop!=0.0 and max_points!=0.0 and mul!=0.0:
if max_points==1:
local_fx.append(fx)
local_mag.append(1.0)
else:
fx_start=fx
while(fx<stop):
local_fx.append(fx)
local_mag.append(1.0)
if mul==1.0:
fx=fx+(stop-fx_start)/max_points
else:
fx=fx*mul
pos=pos+1
if pos>max_points:
break
#local_fx.append(stop)
#local_mag.append(1.0)
self.mag.append(local_mag)
self.fx.append(local_fx)
#print( self.fx)
local_mag=[]
local_fx=[]
#self.statusbar.push(0, str(len(self.fx))+_(" mesh points"))
def redraw_and_save(self):
self.build_mesh()
self.draw_graph()
self.fig.canvas.draw()
self.save_data()
def on_cell_edited(self, x,y):
self.redraw_and_save()
def save_image(self):
file_name = save_as_jpg(self)
if file_name !=None:
self.fig.savefig(file_name)
def paste_action(self):
return
#cb = QApplication.clipboard()
#text=cb.text()
#lines=text.rstrip().split()
#for l in lines:
# self.tab.add([l,l,"1","1.0"])
#self.build_mesh()
#self.draw_graph()
#self.fig.canvas.draw()
#self.save_data()
#print("paste>>",lines)
def __init__(self,index):
QWidget.__init__(self)
self.index=index
self.ax1=None
self.show_key=True
self.edit_list=[]
self.line_number=[]
self.list=[]
self.fig = Figure(figsize=(5,4), dpi=100)
self.canvas = FigureCanvas(self.fig)
self.canvas.figure.patch.set_facecolor('white')
self.get_scan_human_labels=get_scan_human_labels()
self.main_vbox = QHBoxLayout()
self.main_vbox.addWidget(self.canvas)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
#toolbar 2
toolbar2=QToolBar()
toolbar2.setIconSize(QSize(32, 32))
self.tb_add = QAction(icon_get("list-add"), _("Add section"), self)
self.tb_add.triggered.connect(self.callback_add_section)
toolbar2.addAction(self.tb_add)
self.tb_remove = QAction(icon_get("list-remove"), _("Delete section"), self)
self.tb_remove.triggered.connect(self.callback_remove_item)
toolbar2.addAction(self.tb_remove)
self.tb_move = QAction(icon_get("go-down"), _("Move down"), self)
self.tb_move.triggered.connect(self.callback_move_down)
toolbar2.addAction(self.tb_move)
self.tb_move_up = QAction(icon_get("go-up"), _("Move up"), self)
self.tb_move_up.triggered.connect(self.callback_move_up)
toolbar2.addAction(self.tb_move_up)
tab_holder=QWidget()
tab_vbox_layout= QVBoxLayout()
tab_holder.setLayout(tab_vbox_layout)
tab_vbox_layout.addWidget(toolbar2)
self.tab = gpvdm_tab()
self.tab.resizeColumnsToContents()
self.tab.verticalHeader().setVisible(False)
self.load_data()
self.build_mesh()
self.draw_graph()
self.update_scan_tokens()
self.tab.cellChanged.connect(self.on_cell_edited)
self.tab.changed.connect(self.redraw_and_save)
#self.tab.paste_callback=self.paste_action
tab_vbox_layout.addWidget(self.tab)
self.main_vbox.addWidget(tab_holder)
self.setLayout(self.main_vbox)
self.tab.menu_paste.triggered.connect(self.paste_action)
| gpl-2.0 |
hakujyo/chessplaying_robot | timetest.py | 1 | 1856 | # import time
# time1 = time.time()
# time.sleep(15)
# time2 = time.time()
# print (int(time2 - time1))
#
import cv2
import numpy as np
import time
from matplotlib import pyplot as plt
cap = cv2.VideoCapture(1) # 读入视频文件
time = 0 #计时器
def sleeptime(hour,min,sec):
second = sleeptime(0, 0, 20)
if cap.isOpened(): # 判断是否正常打开
rval, frame = cap.read()
else:
rval = False
while rval: # 循环读取视频帧
rval, frame = cap.read()
cv2.imshow("capture", frame) #视频窗口
if (time % 100 == 0): # 每隔timeF帧进行存储操作
count='%d'%time
url='%d'%time + ".png"
#src='%d'%c
cv2.imwrite(url, frame) # 存储为图像
#读图矫正
a = cv2.imread(url)
rows, cols, channels = a.shape
list1 = np.float32([[69, 14], [500, 9], [23, 461], [549, 456]])
list2 = np.float32([[0, 0], [720, 0], [0, 720], [720, 720]])
M = cv2.getPerspectiveTransform(list1, list2)
img_perspective = cv2.warpPerspective(a, M, (720, 720))
print('perspective:\n', M)
cv2.imwrite(url, img_perspective) #矫正后图片
#cv2.imshow(url, img_perspective)
#保存灰度差值图
if time!=0:
a = cv2.imread(url)
src= '%d'%(time-100)
lasturl = src + '.png'
print(lasturl)
b = cv2.imread(lasturl)
Graya = cv2.cvtColor(a,cv2.COLOR_BGR2GRAY)
Grayb = cv2.cvtColor(b,cv2.COLOR_BGR2GRAY)
c = Grayb - Graya;
Grayurl='sub'+count+'.png'
cv2.imwrite(Grayurl, c) #灰度图
#cv2.imshow(Grayurl, c)
print("ccc=")
cv2.waitKey(1)
time = time + 1
cv2.waitKey(1)
cap.release() | gpl-3.0 |