prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90) |
---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 8 13:54:55 2020
@author: akurnizk
"""
import csv
import math
import time
import sys,os
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import pylab
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
map_dir = r'E:\Maps' # retrieved files from https://viewer.nationalmap.gov/basic/
data_dir = os.path.join(r'E:\Data')
#%% Interpolate nans in arrays
def nan_helper(y):
"""Helper to handle indices and logical indices of NaNs.
Input:
- y, 1d numpy array with possible NaNs
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
Example:
>>> # linear interpolation of NaNs
>>> nans, x= nan_helper(y)
>>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
"""
return np.isnan(y), lambda z: z.nonzero()[0]
#%% Loading Information from HR Dike Sensors (Make sure times are in EDT)
with open(os.path.join(data_dir,"General Dike Data","USGS 011058798 Herring R at Chequessett Neck Rd.txt")) as f:
reader = csv.reader(f, delimiter="\t")
HR_dike_all_info = list(reader)
HR_dike_lev_disch_cond = HR_dike_all_info[32:]
HR_dike_all_df = pd.DataFrame(HR_dike_lev_disch_cond[2:], columns=HR_dike_lev_disch_cond[0])
HR_dike_all_df.drop(HR_dike_all_df.columns[[0,1,3,5,7,9,11,13]],axis=1,inplace=True)
HR_dike_all_df.columns = ["datetime","Gage height, ft, Ocean side","Discharge, cfs","Gage height, ft, HR side",
"Spec Con, microsiemens/cm, HR side","Spec Con, microsiemens/cm, Ocean side"]
# Make strings numeric
HR_dike_all_df = HR_dike_all_df.replace("Eqp", '', regex=True)
HR_dike_all_df["datetime"] = pd.to_datetime(HR_dike_all_df["datetime"])
HR_dike_all_df["Gage height, ft, Ocean side"] =
|
pd.to_numeric(HR_dike_all_df["Gage height, ft, Ocean side"])
|
pandas.to_numeric
|
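# A minimal sketch of pandas.to_numeric on sensor-style strings (illustrative values,
# not the USGS data above): errors='coerce' turns non-numeric entries such as the
# "Eqp" equipment flags into NaN instead of raising.
import pandas as pd
gage = pd.Series(["3.12", "", "Eqp", "2.98"])
gage_num = pd.to_numeric(gage, errors="coerce")  # -> 3.12, NaN, NaN, 2.98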
import networkx as nx
import numpy as np
import pandas as pd
from quetzal.analysis import analysis
from quetzal.engine import engine, nested_logit, optimal_strategy
from quetzal.engine.pathfinder import PublicPathFinder
from quetzal.engine.road_pathfinder import RoadPathFinder
from quetzal.model import preparationmodel
from syspy.assignment import raw as raw_assignment
from syspy.skims import skims
from tqdm import tqdm
class OptimalModel(preparationmodel.PreparationModel):
def get_optimal_strategy_edges(
self,
boarding_time=0,
alighting_time=0,
alpha=0.5,
target=None,
inf=1e9,
walk_on_road=False,
):
links = self.links.copy()
links['index'] = links.index
if walk_on_road:
road_links = self.road_links.copy()
road_links['time'] = road_links['walk_time']
footpaths = pd.concat([road_links, self.road_to_transit])
access = self.zone_to_road.copy()
else:
access = self.zone_to_transit.copy()
footpaths = self.footpaths.copy()
# transit edges
links['j'] = [tuple(l) for l in links[['b', 'trip_id']].values]
links['i'] = [tuple(l) for l in links[['a', 'trip_id']].values]
links['f'] = inf
links['c'] = links['time']
transit_edges = links[['i', 'j', 'f', 'c']].reset_index().values.tolist()
# boarding edges
links.index = 'boarding_' + links['index'].astype(str)
links['f'] = 1 / links['headway'] / alpha
if 'boarding_stochastic_utility' in links.columns:
links['f'] *= np.exp(links['boarding_stochastic_utility'])
links['c'] = boarding_time
boarding_edges = links[['a', 'i', 'f', 'c']].reset_index().values.tolist()
# alighting edges
links.index = 'alighting_' + links['index'].astype(str)
links['f'] = inf
links['c'] = alighting_time
alighting_edges = links[['j', 'b', 'f', 'c']].reset_index().values.tolist()
# access edges
if target is not None:
# we do not want to egress to a destination that is not the target
access = access.loc[(access['direction'] == 'access') | (access['b'] == target)]
access['f'] = inf
access['c'] = access['time']
access_edges = access[['a', 'b', 'f', 'c']].reset_index().values.tolist()
# footpaths
footpaths['f'] = inf
footpaths['c'] = footpaths['time']
footpaths_edges = footpaths[['a', 'b', 'f', 'c']].reset_index().values.tolist()
edges = access_edges + boarding_edges + transit_edges + alighting_edges + footpaths_edges
edges = [tuple(e) for e in edges]
return edges
def step_strategy_finder(self, *args, **kwargs):
s_dict = {}
node_df_list = []
all_edges = self.get_optimal_strategy_edges(*args, **kwargs)
for destination in tqdm(self.zones.index):
forbidden = set(self.zones.index) - {destination}
edges = [e for e in all_edges if e[2] not in forbidden]
strategy, u, f = optimal_strategy.find_optimal_strategy(edges, destination)
s_dict[destination] = strategy
node_df = pd.DataFrame({'f': pd.Series(f), 'u': pd.Series(u)})
node_df['destination'] = destination
node_df_list.append(node_df)
optimal_strategy_nodes =
|
pd.concat(node_df_list)
|
pandas.concat
|
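# A minimal sketch of pandas.concat on a list of per-destination node frames
# (toy frames, not quetzal output): keys= keeps the destination label in the index,
# an alternative to the explicit 'destination' column assigned above.
import pandas as pd
frames = [pd.DataFrame({"f": [0.1, 0.2], "u": [5.0, 7.0]}),
          pd.DataFrame({"f": [0.3, 0.4], "u": [6.0, 8.0]})]
nodes = pd.concat(frames, keys=["zone_a", "zone_b"], names=["destination", "node"])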
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 8 20:05:36 2016
@author: JSong
"""
import os
import time
import pandas as pd
import numpy as np
|
pd.set_option('display.float_format', lambda x: '%.2f' % x)
|
pandas.set_option
|
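# A minimal sketch of scoping the same float format to a block instead of setting it
# globally as above (illustrative frame): pd.option_context restores the option on exit.
import pandas as pd
df = pd.DataFrame({"x": [3.14159, 2.71828]})
with pd.option_context("display.float_format", lambda v: "%.2f" % v):
    print(df)  # rendered with two decimals only inside this block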
import pandas as pd,requests, plotly.graph_objects as go, plotly.express as px, os
from dotenv import load_dotenv
from plotly.subplots import make_subplots
# Using requests library to create urls
def req(series: str, start: str, end: str, json: str):
'''
{param} series: The series we are looking at (PAYEMS, GDPC1, and CPIAUCSL)
{param} start: Observation start date (default: 1776-07-04)
{param} end: Observation end date (default: 9999-12-31)
{param} json: File type to send
{type} str, str, str, str
{return} Json file of what we get from using requests.get
'''
payload = {'series_id': series, 'observation_start': start, 'observation_end': end, 'file_type': json}
load_dotenv() # Searches for .env file in root directory
api_key = os.environ.get("Api_Key", None) # Extracting the API Key from the .env file, returns None if there is nothing with this name there
payload["api_key"]=api_key
r = requests.get('https://api.stlouisfed.org/fred/series/observations', params=payload) # Going to retrieve data from this command using the parameters specified above
return r.json()
# Gathering series from FRED
PAYEMS = req("PAYEMS","2000-01-01","2020-12-31", "json") # Calling function from above to create a url then json file with these parameters
GDPC1 = req("GDPC1", "2000-01-01", "2020-12-31","json") # Because we want to look at data from 2000 to 2020,
# I chose the range to be the first day of 2000 to the last day of 2020
CPIAUCSL=req("CPIAUCSL", "2000-01-01", "2020-12-31", "json")
# Joining the series together into one dataframe
df1 = pd.json_normalize(PAYEMS, record_path=['observations'])
df1.rename(columns={'value':'Total Nonfarm Employment Value'}, inplace=True) # Source: https://www.geeksforgeeks.org/how-to-rename-columns-in-pandas-dataframe/
df2 = pd.json_normalize(GDPC1, record_path=['observations'])
df2.rename(columns={'value':'Real Gross Domestic Product Value'}, inplace=True)
df3 = pd.json_normalize(CPIAUCSL, record_path=['observations'])
"""
Source: https://towardsdatascience.com/how-to-convert-json-into-a-pandas-dataframe-100b2ae1e0d8
Because the response nests a list of observations, I wanted to first extract the data from "observations".
record_path=['observations'] tells pd.json_normalize to look inside the observations list and flatten the dictionaries it contains.
pd.json_normalize then takes the realtime_start, realtime_end, date, and value within each observation and creates a column for each.
I decided not to set meta because the observation start, end, and file type parameters were the same for all three series, and I didn't want to confuse those request dates with the actual observation dates.
"""
df3.rename(columns={'value':'Consumer Price Index Value'}, inplace=True) # Renaming "value" column to "Consumer Price Index Value" in my third dataframe so I know what I am working with
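# A minimal illustration of record_path on a toy payload (not a real FRED response):
# json_normalize flattens the dicts under "observations" into one row per observation.
toy_payload = {"count": 2, "observations": [
    {"date": "2000-01-01", "value": "130.8"},
    {"date": "2000-02-01", "value": "131.1"},
]}
toy_df = pd.json_normalize(toy_payload, record_path=["observations"])  # columns: date, value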
merged_df=pd.merge(df1, df3, how="outer") # Creating a new variable to store my joined first dataframe and second dataframe
merged_df1=pd.merge(merged_df, df2, how="outer") # Using the new variable I just created w/ my first and second dataframes to join it with my third dataframe
merged_df1.drop("realtime_start", axis=1, inplace=True)
merged_df1.drop("realtime_end", axis=1, inplace=True) # Deleting the columns named realtime_start and realtime_end since we don't need them, source: https://www.nbshare.io/notebook/199139718/How-To-Drop-One-Or-More-Columns-In-Pandas-Dataframe/
# Saving dataframe as a .csv file
merged_df1.to_csv(r"/Users/sophia/Desktop/Lowe/FRED_DF1.csv") # Source: https://stackoverflow.com/questions/16923281/writing-a-pandas-dataframe-to-csv-file
# Specifying which folder I want to place my csv file with the name "Data_FRED.csv" in
# Plotting two of the time series on the same plot, with time as the horizontal axis
df =
|
pd.read_csv("/Users/sophia/Desktop/Lowe/FRED_DF1.csv")
|
pandas.read_csv
|
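# A minimal, self-contained sketch of why index_col matters here: the CSV above was
# written with the default index=True, so reading it back with index_col=0 avoids an
# extra "Unnamed: 0" column (illustrative in-memory round trip, not the FRED file).
import io
import pandas as pd
buf = io.StringIO()
pd.DataFrame({"date": ["2000-01-01"], "value": [130.8]}).to_csv(buf)  # index is written too
buf.seek(0)
roundtrip = pd.read_csv(buf, index_col=0)  # original columns back, no "Unnamed: 0"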
import pandas as pd
# We are going to import the dataframes lego_base and lego_opacity.
lego_base = pd.read_csv('data/lego_theme_minimal.csv')
lego_opacity =
|
pd.read_csv('data/lego_opacity.csv')
|
pandas.read_csv
|
# Author: <NAME>
# Date: 04/25/2019
# Git-Hub: Data-is-Life
from bs4 import BeautifulSoup
import gc
from numpy import where
from numpy import mean as NpM
from pandas import to_timedelta as ToTd
from pandas import to_datetime as ToDt
from pandas import to_numeric as ToNm
from pandas import DataFrame as DF
from pandas import concat
gc.enable()
class CleanerHelper(object):
def __init__(self):
pass
def reset_drop(self, df):
df.reset_index(inplace=True)
df.drop(columns=['index'], inplace=True)
return df
def sort_drop(self, df):
df.sort_values(by=['start_date', 'end_date'], inplace=True)
df.reset_index(inplace=True)
df.drop(columns=['index'], inplace=True)
return df
class TrimData(object):
def __init__(self, soup_obj, speed_unit, measure_unit):
super(TrimData, self).__init__()
self.soup_obj = soup_obj
self.speed_unit = speed_unit
self.measure_unit = measure_unit
self.one_day = ToTd(1, 'D')
self.two_days = ToTd(2, 'D')
self.twh = 24 - 1e-6
self.ch = CleanerHelper()
def parse_soup(self):
self.df = DF()
'''Insert the values in the original dataframe. Convert time to
numerical format to make calculations easier.'''
self.df.loc[:, 'start_date'] = [d['startDate'] for d in self.soup_obj]
self.df.loc[:, 'end_date'] = [d['endDate'] for d in self.soup_obj]
self.df.loc[:, self.measure_unit] = [d['value'] for d in self.soup_obj]
self.df.loc[:, 'source'] = [d['sourceName'] for d in self.soup_obj]
self.df.loc[
:, self.measure_unit] = self.df[self.measure_unit].astype(float)
return self.df
def clean_set(self):
'''Split start and end date, time, and timezone to a new df. Drop the
Timezone column.'''
start_date_df = self.df.start_date.str.split(
' ', expand=True).drop(columns=[2])
end_date_df = self.df.end_date.str.split(
' ', expand=True).drop(columns=[2])
# Merge the start and end date & time to a single value.
start_date_df.loc[:, 'std'] = start_date_df[0] + ' ' + start_date_df[1]
end_date_df.loc[:, 'etd'] = end_date_df[0] + ' ' + end_date_df[1]
# Convert the date to `datetime` and time to `timedelta`
start_date_df.loc[:, 'sd'] = ToDt(start_date_df[0], format='%Y-%m-%d')
end_date_df.loc[:, 'ed'] = ToDt(end_date_df[0], format='%Y-%m-%d')
start_date_df.loc[:, 'st'] = ToTd(start_date_df[1])
end_date_df.loc[:, 'et'] = ToTd(end_date_df[1])
'''Insert the values in the original dataframe. Convert time to
numerical format to make calculations easier.'''
self.df.loc[:, 'start_date'] = start_date_df.sd.copy()
self.df.loc[:, 'start_time'] =
|
ToNm(start_date_df.st)
|
pandas.to_numeric
|
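# A minimal sketch (illustrative timedeltas, not parsed Apple Health data):
# pandas.to_numeric on a timedelta Series yields int64 nanoseconds, which is why the
# code above can treat start_time as a number; divide by 3.6e12 to recover hours.
import pandas as pd
td = pd.Series(pd.to_timedelta(["01:30:00", "02:00:00"]))
ns = pd.to_numeric(td)        # 5.4e12 and 7.2e12 nanoseconds
hours = ns / 3.6e12           # 1.5 and 2.0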
# %% [Algorithm 1c Loop]
# # MUSHROOMS
# %% [markdown]
# ## Binary Classification
# %% [markdown]
# ### Imports
# %%
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
# %% [markdown]
# ### Load Data
dataset = pd.read_csv(r"C:\Users\yxie367\Documents\GitHub\Mushrooms\DATA\mushrooms.csv")
#dataset = pd.read_csv(r"C:\Users\xieya\Documents\GitHub\Mushrooms\DATA\mushrooms.csv")
# %% [markdown]
# ### View Data and Informations
# %%
dataset.head()
# %%
dataset.info()
# %%
edible, poisonous = dataset['class'].value_counts()
# print("Edible:\t ", edible,"\nPoisonous:", poisonous)
# %%
# Categorical to numerical
labels = {'e': 0, 'p': 1}
dataset['class'].replace(labels, inplace=True)
edible, poisonous = dataset['class'].value_counts()
#print("0 - Edible: ", edible,"\n1 - Poisonous:", poisonous)
# %% [markdown]
# # NN1 Stalk Root - Rooted (r)
# %% [markdown]
# ### Split Dataset
# %% [markdown]
# #### Get the Labels
# %%
X, y = dataset.drop('class', axis=1), dataset['class'].copy()
#print("X:",X.shape,"\ny:",y.shape)
# %% [markdown]
# #### Train Set and Test Set
total_error_1 = 0
total_error_2 = 0
total_error_comb = 0
randnum = np.arange(2,44,4)
num_trials = len(randnum)
record = ""
wrong_record = ""
run = 1
# %% Data cleaning
from sklearn.model_selection import train_test_split
X_white = pd.DataFrame()
X_not_white = pd.DataFrame()
y_white = pd.Series(dtype='float64')
y_not_white = pd.Series(dtype='float64')
for i in range(0,len(X)):
if X.loc[i,"stalk-root"] == "r":
X_white = X_white.append(X.iloc[i,:])
y_white = y_white.append(pd.Series(y.iloc[i]))
else:
X_not_white = X_not_white.append(X.iloc[i,:])
y_not_white = y_not_white.append(pd.Series(y.iloc[i]))
# %% Data cleaning pt2
X_green = pd.DataFrame()
X_not_green = pd.DataFrame()
y_green = pd.Series(dtype='float64')
y_not_green = pd.Series(dtype='float64')
for i in range(0,len(X)):
if X.loc[i,"odor"] == "a":
X_green = X_green.append(X.iloc[i,:])
y_green = y_green.append(pd.Series(y.iloc[i]))
else:
X_not_green = X_not_green.append(X.iloc[i,:])
y_not_green = y_not_green.append(pd.Series(y.iloc[i]))
# %%
for j in randnum:
X_train_not_white, X_test_not_white, y_train_not_white, y_test_not_white = train_test_split(X_not_white, y_not_white, test_size=1-(6905/(8124-len(X_white))), random_state=j)
X_train_not_green, X_test_not_green, y_train_not_green, y_test_not_green = train_test_split(X_not_green, y_not_green, test_size=1-(6905/(8124-len(X_green))), random_state=j)
X_train_green = (X_train_not_green)
y_train_green = (y_train_not_green)
X_train_white = (X_train_not_white)
y_train_white = (y_train_not_white)
# %%
from sklearn.utils import shuffle
X_train_full1 = shuffle(X_train_white, random_state=j)
X_test = shuffle(X, random_state=j).iloc[4000:8000]
y_train_full1 = shuffle(y_train_white, random_state=j)
y_test = shuffle(y, random_state=j).iloc[4000:8000]
# %% [markdown]
# #### Validation Set
# %%
X_valid1, X_train1 = X_train_full1[:500], X_train_full1[500:]
y_valid1, y_train1 = y_train_full1[:500], y_train_full1[500:]
# print("X_train:", X_train1.shape[0], "y_train", y_train1.shape[0])
# print("X_valid: ", X_valid1.shape[0], "y_valid ", y_valid1.shape[0])
# print("X_test: ", X_test.shape[0], "y_test ", X_test.shape[0])
# %% [markdown]
# ### Prepare the Data
# %% [markdown]
# #### Data Transformation
# %%
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.compose import ColumnTransformer
cat_attr_pipeline = Pipeline([
('encoder', OrdinalEncoder())
])
cols = list(X)
pipeline = ColumnTransformer([
('cat_attr_pipeline', cat_attr_pipeline, cols)
])
X_train1 = pipeline.fit_transform(X_train1)
X_valid1 = pipeline.fit_transform(X_valid1)
X_test1 = pipeline.fit_transform(X_test)
# %% [markdown]
# ### Neural Network
# %% [markdown]
# #### Model
# %%
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, Dense
# %%
# tf.random.set_seed(j)
tf.random.set_random_seed(j)
# %%
model1 = Sequential([
InputLayer(input_shape=(22,)), # input layer
Dense(45, activation='relu'), # hidden layer
Dense(1, activation='sigmoid') # output layer
])
# %%
#model1.summary()
# %% [markdown]
# #### Compile the Model
# %%
model1.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# %% [markdown]
# #### Prepare Callbacks
# %%
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
checkpoint_cb = ModelCheckpoint('../SavedModels/best_model.h5',
save_best_only=True)
early_stopping_cb = EarlyStopping(patience=3,
restore_best_weights=True)
# %% [markdown]
# ### Training
# %%
train_model1 = model1.fit(X_train1, y_train1,
epochs=100,
validation_data=(X_valid1, y_valid1),
callbacks=[checkpoint_cb, early_stopping_cb])
# %% [markdown]
# ### Evaluate the Best Model on Test Set
# %%
results1 = model1.evaluate(X_test1, y_test)
# print("test loss, test acc:", results1)
# %% [markdown]
# ### Make Some Predictions
# %%
X_new1 = X_test1[:5]
y_prob1 = model1.predict(X_new1)
# print(y_prob.round(3))
# %%
y_pred1 = (model1.predict(X_new1) > 0.5).astype("int32")
# print(y_pred)
y_test_pred = (model1.predict(X_test1) > 0.5).astype("int32")
# %% [markdown]
# ## KL Divergence
# %%
# X_new = X_test[:5]
X_df1 = pd.DataFrame(model1.predict(X_test1))
y_test_pred1 = pd.DataFrame(y_test_pred).reset_index(drop=True)
X_df1 =
|
pd.concat([X_df1, y_test_pred1], axis=1)
|
pandas.concat
|
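# A minimal sketch of the axis=1 concat used above (toy frames): both inputs need a
# plain RangeIndex (hence reset_index(drop=True) above), otherwise rows are aligned
# on index labels and any mismatch shows up as NaN.
import pandas as pd
probs = pd.DataFrame({"prob": [0.91, 0.12]})
preds = pd.DataFrame({"pred": [1, 0]})
side_by_side = pd.concat([probs, preds], axis=1)  # 2 rows, 2 columns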
from __future__ import print_function, division
from warnings import warn
import pandas as pd
import numpy as np
import json
from nilmtk.disaggregate import Disaggregator
import os
class Mean(Disaggregator):
def __init__(self, params):
self.model = {}
self.MODEL_NAME = 'Mean' # Add the name for the algorithm
self.save_model_path = params.get('save-model-path', None)
self.load_model_path = params.get('pretrained-model-path',None)
self.chunk_wise_training = params.get('chunk_wise_training', True)
if self.load_model_path:
self.load_model(self.load_model_path)
def partial_fit(self, train_main, train_appliances, **load_kwargs):
for app_name, power in train_appliances:
power_ = pd.concat(power, axis=0)
app_dict = self.model.get(app_name, {'sum': 0,'n_elem': 0})
app_dict['sum'] += int(np.nansum(power_.values))
app_dict['n_elem'] += len(power_[~np.isnan(power_)])
self.model[app_name] = app_dict
if self.save_model_path:
self.save_model(self.save_model_path)
def disaggregate_chunk(self, test_mains):
test_predictions_list = []
for test_df in test_mains:
appliance_powers = pd.DataFrame()
for i, app_name in enumerate(self.model):
app_model = self.model[app_name]
predicted_power = [app_model['sum'] / app_model['n_elem']] * test_df.shape[0]
appliance_powers[app_name] =
|
pd.Series(predicted_power, index=test_df.index, name=i)
|
pandas.Series
|
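# A minimal sketch of the constant-prediction column built above (toy index and value):
# giving the Series the test frame's index keeps rows aligned when it is assigned
# into appliance_powers.
import pandas as pd
idx = pd.date_range("2015-01-01", periods=3, freq="min")
mean_watts = 150.0
col = pd.Series([mean_watts] * len(idx), index=idx, name="fridge")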
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from multiprocessing import Pool
import pysam
import pandas as pd
import os
from scipy.optimize import curve_fit
import argparse
from singlecellmultiomics.bamProcessing.bamFunctions import get_contigs_with_reads, get_r1_counts_per_cell
from singlecellmultiomics.bamProcessing.bamBinCounts import merge_overlapping_ranges
from collections import Counter, defaultdict
import numpy as np
import seaborn as sns
import math
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 300
class DivCounter(Counter):
"""Divisable counter"""
def __truediv__(self,other):
result = Counter()
for k,v in self.items():
result[k] = v/other
return result
def find_nearest(array, values):
idxes = np.searchsorted(array, values, side="left")
r = []
for value, idx in zip(values, idxes):
if idx > 0 and (idx == len(array) or math.fabs(value - array[idx - 1]) < math.fabs(value - array[idx])):
r.append(array[idx - 1])
else:
r.append(array[idx])
return r
def calculate_distance(vector_target: np.array, vector_viewpoint: np.array, max_range: float):
# Calculate distance between viewpoint and target, skip locations with a nan (will not be returned in the result)
existing = ~(np.isnan(vector_viewpoint) & ~np.isnan(vector_target))
if existing.sum() == 0:
return []
dist = vector_viewpoint[existing] - vector_target[existing]
return dist[(dist > -max_range) * (dist < max_range)]
def dictionary_to_diff_vector(d,sample: str, vmin: float, vmax: float):
"""Convert a dict {contig:sample:position:obs} into sorted vector [ distance, distance, ..]"""
return np.array([
v for v in np.clip(
np.concatenate(
[np.diff(sorted(d[contig][sample])) for contig in d])
,vmin,vmax) if v>vmin and v<vmax])
def generate_prefix(prefix, prefix_with_region, contig, start, end ):
if prefix_with_region:
if prefix is None:
return (contig, start, end )
else:
return (prefix,contig, start, end )
else:
return prefix
def get_sc_cut_dictionary(bam_path: str, filter_function=None, strand_specific=False, prefix_with_bam=False, regions=None, prefix_with_region=False):
"""
Generates cut distribution dictionary (contig)->sample->position->obs
"""
if filter_function is None:
filter_function = read_counts_function
cut_sites = {}
if type(bam_path) is str:
bam_paths = [bam_path]
else:
bam_paths=bam_path
with Pool() as workers:
for bam_path in bam_paths:
if prefix_with_bam:
prefix = bam_path.split('/')[-1].replace('.bam','')
else:
prefix=None
if regions is None:
regions = [(contig, None, None) for contig in get_contigs_with_reads(bam_path)]
with pysam.AlignmentFile(bam_path) as alignments:
start = None
end= None
for contig,r in workers.imap_unordered(
_get_sc_cut_dictionary, (
(bam_path,
contig,
strand_specific,
filter_function,
generate_prefix(prefix,prefix_with_region,contig,start,end)
, start, end)
for contig, start, end in regions )):
# Perform merge:
if not contig in cut_sites:
cut_sites[contig]=r
else:
for sample, positions in r.items():
cut_sites[contig][sample].update(positions)
return cut_sites
def extract_indices(haystack, indices, fill):
return np.array([haystack[index] if index > 0 and index < len(haystack) else np.nan for index in indices])
def find_nearest_above(needles, haystack):
indices = np.searchsorted(haystack, needles, side="right")
return extract_indices(haystack, indices, np.nan)
def find_nearest_below(needles, haystack):
haystack_rev = -haystack
haystack_rev.sort()
indices = np.searchsorted(haystack_rev, -needles, side="right")
return np.abs(extract_indices(haystack_rev, indices, np.nan))
def get_stranded_pairwise_counts(sc_cut_dict_stranded, max_range=3000):
"""
Obtain how many observations exist of different types of pairs of molecules
Args:
sc_cut_dict_stranded(dict) : { contig: { sample: { Counter( position: obs ) .. }}}
max_range(int) : maximum distance to record
Returns:
distance_counter_fwd_above
distance_counter_fwd_below
distance_counter_rev_above
distance_counter_rev_below
"""
distance_counter_fwd_above = defaultdict(Counter)
distance_counter_fwd_below = defaultdict(Counter)
distance_counter_rev_above = defaultdict(Counter)
distance_counter_rev_below = defaultdict(Counter)
for contig in sc_cut_dict_stranded:
for sample in sc_cut_dict_stranded[contig].keys():
forward = np.array([pos for strand, pos in sc_cut_dict_stranded[contig][sample] if not strand])
reverse = np.array([pos for strand, pos in sc_cut_dict_stranded[contig][sample] if strand])
if len(forward) <= 1 or len(reverse) <= 1:
continue
forward.sort()
reverse.sort()
# for each position on the fwd strand find the closest fragment on the forward strand.
# [>>>>>>>> .....|
# <<<<<<<
nearest_fwd_above = find_nearest_above(forward, reverse)
distance_counter_fwd_above[sample] += Counter(calculate_distance(forward, nearest_fwd_above, max_range))
# >>>>>>>>
# <<<<<<<
nearest_fwd_below = find_nearest_below(forward, reverse)
distance_counter_fwd_below[sample] += Counter(calculate_distance(forward, nearest_fwd_below, max_range))
# >>>>>>> ..........|
# <<<<<<]
nearest_rev_above = find_nearest_above(reverse, forward)
distance_counter_rev_above[sample] += Counter(calculate_distance(reverse, nearest_rev_above, max_range))
# >>>>>>>>
# <<<<<<<
nearest_rev_below = find_nearest_below(reverse, forward)
distance_counter_rev_below[sample] += Counter(calculate_distance(reverse, nearest_rev_below, max_range))
return distance_counter_fwd_above, distance_counter_fwd_below, distance_counter_rev_above, distance_counter_rev_below
def read_counts_function(read):
if not read.is_read1 or read.is_duplicate or read.is_qcfail or read.mapping_quality==0:
return False
return True
def strict_read_counts_function(read):
if not read.is_read1 or \
read.is_duplicate or \
read.is_qcfail or \
read.mapping_quality<50 or \
'S' in read.cigarstring or \
'I' in read.cigarstring or \
not read.is_proper_pair or \
read.get_tag('NM')>1:
return False
return True
def _get_sc_cut_dictionary(args):
bam, contig, strand_specific, filter_function, prefix, start, end = args
cut_positions = defaultdict(Counter)
with pysam.AlignmentFile(bam) as alignments:
for read in alignments.fetch(contig, start, end):
if not filter_function(read):
continue
k = read.get_tag('SM') if prefix is None else (prefix, read.get_tag('SM'))
cut_positions[k][
(read.is_reverse, read.get_tag('DS'))
if strand_specific else
read.get_tag('DS')
]+=1
return contig,cut_positions
def cuts_to_observation_vector(cell, cell_cuts, window_radius, n_bins, bin_size=1, take_n_samples=None,
log_distance=False):
obs = np.zeros(n_bins, dtype=np.int64)
forward = np.array(list(cell_cuts.keys()))
if take_n_samples is not None:
forward = np.random.choice(forward, take_n_samples, replace=True)
forward.sort()
total_tests = 0
for position in forward:
distance_to_all_points = forward - position
in_bounds = np.abs(distance_to_all_points[(distance_to_all_points >= -window_radius) & (
distance_to_all_points <= window_radius)])
# Exclude the point itself, which will be of course always associated to a distance 0
in_bounds = in_bounds[in_bounds > 0] - 1 # Offsets 1bp lower
total_tests += 1
# Add 1 to every distance we saw
if log_distance:
in_bounds = np.ceil(np.log2(in_bounds) * 100).astype(int)
else:
in_bounds = (np.floor(in_bounds / bin_size)).astype(int)
np.add.at(obs, in_bounds, 1)
return cell, obs, total_tests
def _cuts_to_observation_vector(kwargs):
return cuts_to_observation_vector(**kwargs)
def analyse(bam_path,output_dir, create_plot=False, min_distance=20, max_distance=800, verbose=False, strand_specific=False):
if verbose:
print('Obtaining molecules per cell .. ', end='\r')
cpr = get_r1_counts_per_cell(bam_path)
if verbose:
print('Molecules per cell: ')
for cell, obs in cpr.most_common():
print(f'\t{cell}\t{obs}')
if verbose:
print('Obtaining cuts per cell .. ', end='\r')
cut_sites = get_sc_cut_dictionary(bam_path, strand_specific=strand_specific)
all_counts = {}
for cell, total_molecules in cpr.most_common():
# Write from 0 to max_distance table
all_counts[cell] = DivCounter(dictionary_to_diff_vector(cut_sites,cell,0,max_distance))
cut_count_df = pd.DataFrame(all_counts).sort_index().sort_index(1).fillna(0)
cut_count_df.to_csv(f'{output_dir}/counts.csv')
if verbose:
print('Obtaining cuts per cell [ OK ]')
print('Fitting and plotting ..', end='\r')
if create_plot:
try:
cut_count_df.index.name='distance between cuts'
filtered_count_df = cut_count_df.loc[:, cut_count_df.sum()>100]
sns.clustermap((filtered_count_df / filtered_count_df.loc[20:].mean()).T,
cmap='viridis', vmax=3,
metric='correlation', col_cluster=False,
method='ward',figsize=(8,20))
plt.tight_layout()
plt.savefig(f'{output_dir}/heatmap.png')
#ax.figure.subplots_adjust(left=0.3) # change 0.3 to suit your needs.
except Exception as e:
print(e)
def function_to_fit(xdata, period, offset, amplitude, decay, mean ):
frequency = 1/period
return (amplitude*np.cos((2*np.pi*(frequency)*(xdata+offset) ))) * np.exp(-xdata*(1/decay)) + mean
# Bounds for fitting:
bounds=(
(150,300), # Frequency (b)
(-30,30), # offset (c)
(1,400), # amplitude
(100,1900), # decay
(1,99999), # mean
)
if create_plot:
sc_plot_dir = f'{output_dir}/sc_plots'
if not os.path.exists(sc_plot_dir):
os.makedirs(sc_plot_dir)
smooth_small_signals = {}
smooth_big_signals = {}
fit_params_per_cell = defaultdict(dict)
for cell, total_molecules in cpr.most_common():
try:
sc_counts = pd.DataFrame({
cell:DivCounter(
dictionary_to_diff_vector(cut_sites,cell,min_distance,max_distance))})
if create_plot:
fig, ax = plt.subplots(figsize=(10,3))
big_window = 35
smooth = sc_counts.rolling(window=big_window,center=True).mean()
smooth_big_signals[cell] = smooth[cell]
if create_plot:
ax.plot(smooth.index, smooth[cell],label=f'{big_window}bp sliding window')
limits = ax.get_ylim()
xdata = sc_counts[cell].index
ydata = sc_counts[cell].values
if len(ydata)==0:
continue
xdata = xdata[~np.isnan(ydata)]
ydata = ydata[~np.isnan(ydata)]
fit_params = curve_fit(function_to_fit, xdata, ydata,bounds=(np.array(bounds).T[0], np.array(bounds).T[1]))[0]
if create_plot:
plt.scatter(xdata,ydata, c='grey', s=1, label='Raw data')
period, offset, amplitude, decay,mean = fit_params
fit_params_per_cell['period'][cell] = period
fit_params_per_cell['offset'][cell] = offset
fit_params_per_cell['amplitude'][cell]= amplitude
fit_params_per_cell['decay'][cell] = decay
fit_params_per_cell['mean'][cell] = mean
if not create_plot:
continue
plt.plot(xdata,function_to_fit(xdata,*fit_params), c='r',
label=f'Fit : per:{period:.0f} ph:{offset:.0f} mean:{mean:.0f} dec:{decay:.2f}')
ax.axhline(mean,c='k')
ax.axvline(period-offset,c='b',lw=1)
ax.axvline(2*period-offset,c='b',lw=1)
ax.set_title(f'{cell},\n{total_molecules} molecules' )
ax.set_xlabel(f'distance to nearest cut (bp)' )
ax.set_ylabel(f'# cuts' )
ax.set_ylim( (limits[0]*0.9,limits[1]*1.1))
sns.despine()
ax.grid()
plt.legend()
plt.tight_layout()
plt.savefig(f'{sc_plot_dir}/{cell}.png')
plt.close()
# Plot residual with smoothed function
except RuntimeError as e:
print(f'Could not fit data for {cell}, ( {total_molecules} molecules )')
pass
if verbose:
print('Fitting and plotting [ OK ]')
print('Writing files ..', end='\r')
# Write tables to disk
tmp = {'molecules_total':cpr}
tmp.update(fit_params_per_cell)
df = pd.DataFrame(tmp)
df.to_csv(f'{output_dir}/fit.csv')
if verbose:
print('All done ')
if __name__ == '__main__':
import matplotlib
matplotlib.rcParams['figure.dpi'] = 160
matplotlib.use('Agg')
argparser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Extract cut distribution from bam file')
argparser.add_argument('alignmentfiles', type=str, nargs='+')
argparser.add_argument('-o', type=str, required=True, help='Output folder')
argparser.add_argument('-regions', type=str, help='Restrict analysis to these regions (bed file)')
argparser.add_argument('-region_radius', type=int, default=0, help='Add extra radius to the regions')
argparser.add_argument('-min_region_len', type=int, default=1000)
argparser.add_argument('--legacy', action='store_true', help='Create legacy unstranded analysis plots and files')
argparser.add_argument('-max_distance', type=int,default=2000, help='Maximum distance in both plots and output tables')
args = argparser.parse_args()
if args.regions is not None:
regions_per_contig = defaultdict(list)
with open(args.regions) as f:
rc = 0
for line in f:
if line.startswith('#'):
continue
parts = line.split()
if len(parts)<3:
continue
contig = parts[0]
start = int(parts[1]) - args.region_radius
end = int(parts[2]) + args.region_radius
regions_per_contig[contig].append( (start,end) )
rc+=1
print(f'{rc} regions read from bed file')
regions = []
for contig, contig_regions in regions_per_contig.items():
for start, end in merge_overlapping_ranges(contig_regions):
if end-start < args.min_region_len:
print('skipping region', contig, start, end)
continue
regions.append( (contig, start, end) )
print(f'{len(regions)} regions left after merging overlapping regions and filtering for small regions')
else:
regions=None
if not os.path.exists(args.o):
os.makedirs(args.o)
# 'Original' analysis
if args.legacy:
print('Performing legacy analysis')
if len(args.alignmentfiles)!=1:
raise ValueError('The legacy analysis only works on a single bam file')
analyse(args.alignmentfiles[0], args.o, create_plot=True, verbose=True,strand_specific=False,max_distance=args.max_distance)
# Stranded analysis:
sc_cut_dict_stranded = get_sc_cut_dictionary( args.alignmentfiles,strand_specific=True,filter_function=strict_read_counts_function, regions=regions)
distance_counter_fwd_above, distance_counter_fwd_below, distance_counter_rev_above, distance_counter_rev_below = get_stranded_pairwise_counts(sc_cut_dict_stranded)
# Write tables:
pd.DataFrame(distance_counter_fwd_above).sort_index().sort_index(1).to_csv(f'{args.o}/STRANDED_fwd_above.csv')
pd.DataFrame(distance_counter_fwd_below).sort_index().sort_index(1).to_csv(f'{args.o}/STRANDED_fwd_below.csv')
pd.DataFrame(distance_counter_rev_above).sort_index().sort_index(1).to_csv(f'{args.o}/STRANDED_rev_above.csv')
pd.DataFrame(distance_counter_rev_below).sort_index().sort_index(1).to_csv(f'{args.o}/STRANDED_rev_below.csv')
del sc_cut_dict_stranded
#################
# Unstranded density analysis:
prefix_with_bam=False if len(args.alignmentfiles)==1 else True
sc_cut_dict = get_sc_cut_dictionary( args.alignmentfiles,strand_specific=False,filter_function=strict_read_counts_function, prefix_with_bam=prefix_with_bam, regions=regions)
cpr = get_r1_counts_per_cell(args.alignmentfiles, prefix_with_bam=prefix_with_bam)
counts = pd.Series(cpr).sort_values()
print(counts)
def get_commands(one_contig=None):
for contig in sc_cut_dict: # sc_cut_dict:
if '_' in contig or contig in ('chrY', 'chrM', 'chrEBV'):
continue
if one_contig is not None and contig != one_contig:
continue
for cell, cell_cuts in sc_cut_dict[contig].items():
yield cell, cell_cuts, contig
# Calculate distance from one position within a window
window_radius = args.max_distance
bin_size = 1
n_bins = int(np.ceil(window_radius / bin_size))
x_obs = np.linspace(1, window_radius , n_bins) # the associated distance per bin
# Single cell and one-sided
# This is a histogram of the amount of observed fragments at distances x:
obs = defaultdict(lambda: np.zeros(n_bins, dtype=np.int64))
total_tests = Counter() # cell -> tests
with Pool() as workers:
for cell, cell_obs, n_tests in workers.imap_unordered(
_cuts_to_observation_vector,
(
{'cell_cuts': cell_cuts,
'window_radius': window_radius,
'cell': cell,
'log_distance': False,
'n_bins': n_bins,
'bin_size': bin_size,
'take_n_samples': None # sample_target[contig]
}
for cell, cell_cuts, contig in get_commands()
)):
obs[cell] += cell_obs
total_tests[cell] += n_tests
p_obs =
|
pd.DataFrame(obs)
|
pandas.DataFrame
|
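# A minimal sketch of what the completion above builds (toy counts, not real cut data):
# a dict of equal-length per-cell vectors becomes one column per cell, with the
# distance-bin index as rows.
import numpy as np
import pandas as pd
toy_obs = {"cell_A": np.array([4, 2, 0]), "cell_B": np.array([1, 3, 5])}
hist_df = pd.DataFrame(toy_obs)  # shape: (3 bins, 2 cells)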
#!/usr/bin/env python
import argparse
import os
import subprocess
import sys
import pandas as pd
import logging
import logging.handlers
import csv, json
import shutil
import time
import errno
import pathlib
import warnings
import glob
import base64
import gzip
import datetime
#**********************************************************
#Tool Updates
algoname = "Cryptosporidium_Genotyping"
version = 1.8
updated = datetime.date(2021,12,10)
#Set colors for the warning messages
CRED = '\033[91m' + '\nError:'
CYEL = '\033[93m' + '\nWarning:'
CGRE = '\033[92m' + '\nInfo:'
CEND = '\033[0m'
# To parse the commandline arguments
# override to split space based cmdline args
class myargumentparser(argparse.ArgumentParser):
def convert_arg_line_to_args(self, arg_line):
return arg_line.split()
# Parse the commandline arguments
def parse_cmdline():
# create the parser
parser = myargumentparser(fromfile_prefix_chars='@')
#the environment variables
parser.add_argument('--clientversion',type=int)
parser.add_argument('--nthreads',help='number of threads',type=int,default=2)
parser.add_argument('--localdir',default='./local',help='local working directory',type=str) #this is scratch
parser.add_argument('--tempdir',help='temporary shared directory',type=str) # temp folder to save data between runs, but tricky to use; avoid it
parser.add_argument('--resultsdir',help='results directory',type=str) #this is where we need to save our results and create a directory if needed
parser.add_argument('--shareddir',help='base shared directory of the project',type=str) # base shared folder for the project
parser.add_argument('--toolsdir',default='',help='tools directory',type=str) # custom tools, not required for this
parser.add_argument('--scratchdir',default='',help='intermediate files directory',type=str) # not required for now
parser.add_argument('--reference_folder',default='',help="Blast reference database",type=str) #where reference database is located
parser.add_argument('--query',default='', help='filename for the assembled query genome', type=str) #where input is located
# # get the known arguments of the command line
try:
args = parser.parse_args(['@Crypto/scripts/settings.txt'])
except:
args = parser.parse_args()
return args
def directories(args):
folders = (args.localdir,args.resultsdir)
for f in folders:
os.path.join(os.getcwd(),f)
os.makedirs(f,exist_ok=True)
return folders
#creating subdirectories
def sub_dirs(args):
mode=0o777
resultsub = args.resultsdir+"/results/raw"
tempsub = args.localdir+"/sorted_blast_pair/result_tables"
finalsub = args.localdir + "/final"
logsub = args.resultsdir+"/logs"
os.makedirs(resultsub,mode,exist_ok=True)
os.makedirs(tempsub,mode,exist_ok=True)
os.makedirs(logsub,mode,exist_ok=True)
os.makedirs(finalsub,mode,exist_ok=True)
#logging function
def logger_setup(logname,logfile,level=logging.INFO,filemode='w'):
'''Create loggers throughout the program; just call this function where required.'''
format = logging.Formatter('%(asctime)s\t%(levelname)s\t%(message)s')
filehandler = logging.FileHandler(logfile)
filehandler.setFormatter(format)
logger = logging.getLogger(logname)
logger.setLevel(level)
logger.addHandler(filehandler)
return logger
def progress(status,args):
filename = args.resultsdir+"/logs/__progress__.txt"
with open(filename,'w') as f:
f.write(str(status)+"\n") #
# f.truncate()
def RunBlast(warnlog,args):
child_processes=[]
#loops through the query folder for .gz fasta files
for file in os.listdir(args.query):
query=os.path.join(args.localdir,file)
query=query.split('.gz')[0]
with gzip.open(args.query + file, 'rb') as input_file, open(query,'wb') as output_file:
output_file.write(input_file.read())
baseq=os.path.basename(query)
filename =os.path.splitext(baseq)[0]
for database in os.listdir(args.reference_folder): #blastdb
basedb=os.path.basename(database)
dbname=basedb.split(".")[0]
databasename =os.path.join(args.reference_folder,basedb.split(".")[0])
try:
p=subprocess.Popen(["blastn","-query",query,"-db",databasename,"-evalue","1e-6","-outfmt","6 qseqid sseqid pident qlen slen qstart qend sstart send","-max_target_seqs","3","-out",args.localdir+"/"+filename+"_"+dbname+".blast"],
stdout = subprocess.PIPE,stderr=subprocess.STDOUT)
# instead of printing the warnings to the console, read the text from the PIPE and redirect it to the logger function; the logger object writes to the log file
for line in p.stdout:
warnlog.warning(line)
child_processes.append(p)
for cp in child_processes:
cp.wait()
except RuntimeError as err:
errlog.error("{} occurred at RunBlast level".format(err))
def filter(args):
fpath=args.localdir+"/sorted_blast_pair"
for blast_result in os.listdir(args.localdir):
if blast_result.endswith(".blast"):
genename=os.path.basename(blast_result)
genomename=genename.split(".")[0]
blastresult=open(args.localdir+"/"+blast_result)
for line in blastresult:
try:
gene={}
line = line.split( )
qseqid=line[0]
sseqid=line[1]
pident=float(line[2])
qlength=float(line[3])
slength=float(line[4])
qstart=float(line[5])
qend=float(line[6])
sstart=float(line[7])
send=float(line[8])
if (pident> 97) & (abs(qend-qstart)/slength > 0.75) :
gene[qseqid]=sseqid
for key in gene:
with open(fpath+"/"+genomename+".blast","a") as ofile:
ofile.write(genomename+"\t"+key+"\t"+gene.get(key)+"\t"\
+str(pident)+"\t"+str(slength)+"\t"+str(abs(qend-qstart)/slength)+"\t"+str(qstart)+"\t"+str(qend)+"\n")
ofile.close()
except IOError:
msglog.info("no input")
blastresult.close()
#### Generate tables for each genome
def generate_table(args):
temppath = args.localdir+"/sorted_blast_pair/"
if not "result_tables" in os.listdir(temppath):
os.mkdir(temppath+"result_tables")
for filename in os.listdir(temppath):
if filename.endswith(".blast"):
base=filename.split(".")
name=base[0]
df=pd.read_csv(temppath+filename,sep="\t",header=None)
df.columns=['Genome','testgenome_gene_name','db_gene_name','pident','slength','coverage','querystart','queryend']
df['db']=df.db_gene_name.apply(lambda x:x.split("_")[-1])
df['score']=df['pident']*df['coverage']
#df['result']=df.sort_values(by="score",ascending=False).head(1).db_gene_name.values[0]+"_true"
#df=df.sort_values(by="score",ascending=False).head(6)
f_name=temppath+"result_tables/"+str(filename)+"_table.csv"
df.to_csv(f_name)
def result_table1(args):
temppath = args.localdir+"/sorted_blast_pair/result_tables/"
filelist=glob.glob(temppath+"*_table.csv")
df_list=[pd.read_csv(file) for file in filelist]
bigdf1=pd.concat(df_list,axis=0)
bigdf1=bigdf1.drop("Unnamed: 0",axis=1)
bigdf1.to_csv(temppath + 'result_table1.csv')
##filter best hit and handle new types that are not in the database
def filter2(args):
temppath = args.localdir+"/sorted_blast_pair/result_tables/"
for file in os.listdir(temppath):
if file.endswith("_table.csv"):
name=file.split("_table.csv")[0]
dic1={}
dic={}
dic1[file]=dic
sample=pd.read_csv(temppath+"/"+file)
sample=sample.drop("Unnamed: 0",axis=1)
sample['score']=sample['pident']*sample['coverage']
table=pd.DataFrame(columns=sample.columns)
for i in ['18s','actin','hsp70']:
if i in str(sample['db']):
if (sample['db'].str.count(i).sum()) == 1:
dic[i]=sample[sample.db==i].db_gene_name.values[0] #.replace('C_','C. ')
table=table.append(pd.DataFrame(sample[sample.db_gene_name==str([dic[i]][0])]))
elif (sample['db'].str.count(i).sum()) > 1:
dic[i]=sample[sample.db==i].sort_values(by="score",ascending=False).head(1).db_gene_name.values[0] #.replace('C_','C. ')
table=table.append(pd.DataFrame(sample[sample.db_gene_name==str([dic[i]][0])].iloc[0]).T) #tracking the change iloc[0] -> iloc[:,0]
elif i not in str(sample['db']):
dic[i]="type not found"
table=table.append(pd.DataFrame(sample[sample.db_gene_name==str([dic[i]][0])])).fillna("N/A")
f_name=temppath+str(name)+"_newtable.csv"
f_name2=temppath+str(name)+"_newtable2.csv"
pd.DataFrame.from_dict(dic1).to_csv(f_name, quoting=csv.QUOTE_NONE,quotechar='', escapechar=",")
table.to_csv(f_name2)
def combine_table(args):
temppath = args.localdir+"/sorted_blast_pair/result_tables/"
filelist=glob.glob(temppath+"*_newtable.csv")
df_list=[pd.read_csv(file) for file in filelist]
bigdf=pd.concat(df_list,axis=1)
bigdf=bigdf.drop("Unnamed: 0",axis=1)
bigdf.index=['18s','actin','hsp70']
bigdf.loc['result']=''
bigdf.columns = bigdf.columns.str.split("_a").str[0]
for i in range(0,len(bigdf.columns)):
bigdf.loc['result'][i]=bigdf[bigdf.columns[i]].apply(lambda x:"_".join(x.split("_")[0:2])).value_counts().index[0]
bigdf.loc['result'][i]=bigdf.loc['result'][i].replace('C_','C. ')
# bigdf.loc['result'].to_csv(temppath +"Species_Call.csv")
bigdf_T=bigdf.T.reset_index()
bigdf_T.columns=['Genome','18s','actin','hsp70','Result']
bigdf_T[['Genome','Result']].to_csv(temppath + "Species_Call.csv",header=True,index=False)
bigdf_T.Genome=bigdf_T.Genome.apply(lambda x:x.split(".")[0])
# bigdf_T['Result'].to_csv(temppath +"Species_Call.csv")
filelist1=glob.glob(temppath+"*_table.csv")
filelist2=glob.glob(temppath+"*_newtable2.csv")
df_list=[
|
pd.read_csv(f)
|
pandas.read_csv
|
import sys
import os
import os.path
# os.environ['R_LIBS_USER'] = '/project/projectdirs/metatlas/r_pkgs/'
#curr_ld_lib_path = ''
from metatlas import metatlas_objects as metob
from metatlas import h5_query as h5q
from metatlas.helpers import metatlas_get_data_helper_fun as ma_data
from metatlas.helpers import spectralprocessing as sp
# from metatlas import gui
from textwrap import fill, TextWrapper
# import qgrid
import pandas as pd
import os
import tables
import pickle
import dill
import numpy as np
import re
import json
import matplotlib.pyplot as plt
from rdkit import Chem
from rdkit.Chem import Descriptors, rdMolDescriptors, AllChem, Draw, rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D, IPythonConsole
from itertools import cycle
from collections import defaultdict
from IPython.display import SVG,display
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
from IPython.display import display
import getpass
from ast import literal_eval
# from datetime import datetime
from matplotlib.widgets import Slider, Button, RadioButtons
from matplotlib.widgets import AxesWidget
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
ADDUCT_INFO = {'[2M+H]': {'charge': '1',
'color': '#fabebe',
'common': True,
'comp_num': '2',
'mass': '1.0073'},
'[2M-H]': {'charge': '-1',
'color': '#008080',
'common': True,
'comp_num': '2',
'mass': '-1.0073'},
'[M+2H]': {'charge': '2',
'color': '#ffe119',
'common': True,
'comp_num': '1',
'mass': '2.0146'},
'[M+2Na]': {'charge': '2',
'color': '#fffac8',
'common': False,
'comp_num': '1',
'mass': '45.9784'},
'[M+Cl]': {'charge': '-1',
'color': '#d2f53c',
'common': True,
'comp_num': '1',
'mass': '34.9694'},
'[M+H-H2O]': {'charge': '1',
'color': '#911eb4',
'common': True,
'comp_num': '1',
'mass': '-17.0033'},
'[M+H]': {'charge': '1',
'color': '#3cb44b',
'common': True,
'comp_num': '1',
'mass': '1.0073'},
'[M+K]': {'charge': '1',
'color': '#aa6e28',
'common': False,
'comp_num': '1',
'mass': '38.963158'},
'[M+NH4]': {'charge': '1',
'color': '#0082c8',
'common': True,
'comp_num': '1',
'mass': '18.0338'},
'[M+Na]': {'charge': '1',
'color': '#f58231',
'common': True,
'comp_num': '1',
'mass': '22.9892'},
'[M+acetate]': {'charge': '-1',
'color': '#808000',
'common': False,
'comp_num': '1',
'mass': '59.0139'},
'[M-2H]': {'charge': '-2',
'color': '#f032e6',
'common': True,
'comp_num': '1',
'mass': '-2.014552904'},
'[M-H+2Na]': {'charge': '1',
'color': '#000080',
'common': False,
'comp_num': '1',
'mass': '44.9711'},
'[M-H+Cl]': {'charge': '-2',
'color': '#ffd8b1',
'common': False,
'comp_num': '1',
'mass': '33.9621'},
'[M-H+Na]': {'charge': '0',
'color': '#e6beff',
'common': False,
'comp_num': '1',
'mass': '21.98194425'},
'[M-H]': {'charge': '-1',
'color': '#46f0f0',
'common': True,
'comp_num': '1',
'mass': '-1.0073'},
'[M-e]': {'charge': '1',
'color': '#aaffc3',
'common': False,
'comp_num': '1',
'mass': '-0.0005'},
'[M]': {'charge': '0',
'color': '#e6194b',
'common': True,
'comp_num': '1',
'mass': '0'}}
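# A hypothetical helper (not part of metatlas) showing one way this table could be used,
# assuming 'mass' is the m/z shift, 'comp_num' the number of neutral molecules and
# 'charge' the ion charge; charge-0 entries have no defined m/z and return None.
def adduct_mz(neutral_mass, adduct):
    info = ADDUCT_INFO[adduct]
    charge = int(info['charge'])
    if charge == 0:
        return None
    return (int(info['comp_num']) * neutral_mass + float(info['mass'])) / abs(charge)
# e.g. adduct_mz(180.0634, '[M+H]') -> ~181.0707 (glucose, protonated)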
def get_google_sheet(notebook_name = "Sheet name",
token='/project/projectdirs/metatlas/projects/google_sheets_auth/ipython to sheets demo-9140f8697062.json',
sheet_name = 'Sheet1'):
"""
Returns a pandas data frame from the google sheet.
Assumes header row is first row.
To use the token hard coded in the token field,
the sheet must be shared with:
<EMAIL>
Unique sheet names are a requirement of this approach.
"""
json_key = json.load(open(token))
scope = ['https://spreadsheets.google.com/feeds']
#this is deprecated as of january, but we have pinned the version of oauth2.
#see https://github.com/google/oauth2client/issues/401
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'].encode(), scope)
#here is the new way in case the version pin is removed
#credentials = ServiceAccountCredentials.from_json_keyfile_name(token, scope)
gc = gspread.authorize(credentials)
wks = gc.open(notebook_name)
istd_qc_data = wks.worksheet(sheet_name).get_all_values()
headers = istd_qc_data.pop(0)
df = pd.DataFrame(istd_qc_data,columns=headers)
return df
class VertSlider(AxesWidget):
"""
A slider representing a floating point range
The following attributes are defined
*ax* : the slider :class:`matplotlib.axes.Axes` instance
*val* : the current slider value
*vline* : a :class:`matplotlib.lines.Line2D` instance
representing the initial value of the slider
*poly* : A :class:`matplotlib.patches.Polygon` instance
which is the slider knob
*valfmt* : the format string for formatting the slider text
*label* : a :class:`matplotlib.text.Text` instance
for the slider label
*closedmin* : whether the slider is closed on the minimum
*closedmax* : whether the slider is closed on the maximum
*slidermin* : another slider - if not *None*, this slider must be
greater than *slidermin*
*slidermax* : another slider - if not *None*, this slider must be
less than *slidermax*
*dragging* : allow for mouse dragging on slider
Call :meth:`on_changed` to connect to the slider event
"""
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%.1e',
closedmin=True, closedmax=True, slidermin=None,
slidermax=None, dragging=True, **kwargs):
"""
Create a slider from *valmin* to *valmax* in axes *ax*
*valinit*
The slider initial position
*label*
The slider label
*valfmt*
Used to format the slider value
*closedmin* and *closedmax*
Indicate whether the slider interval is closed
*slidermin* and *slidermax*
Used to constrain the value of this slider to the values
of other sliders.
additional kwargs are passed on to ``self.poly`` which is the
:class:`matplotlib.patches.Rectangle` which draws the slider
knob. See the :class:`matplotlib.patches.Rectangle` documentation
for valid property names (e.g., *facecolor*, *edgecolor*, *alpha*, ...)
"""
AxesWidget.__init__(self, ax)
self.valmin = valmin
self.valmax = valmax
self.val = valinit
self.valinit = valinit
self.poly = ax.axhspan(valmin, valinit, 0, 1, **kwargs)
self.vline = ax.axhline(valinit, 0, 1, color='r', lw=1)
self.valfmt = valfmt
ax.set_xticks([])
ax.set_ylim((valmin, valmax))
ax.set_yticks([])
ax.set_navigate(False)
self.connect_event('button_press_event', self._update)
self.connect_event('button_release_event', self._update)
if dragging:
self.connect_event('motion_notify_event', self._update)
self.label = ax.text(0.5, 1.03, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='center')
self.valtext = ax.text(0.5, -0.03, valfmt % valinit,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='center')
self.cnt = 0
self.observers = {}
self.closedmin = closedmin
self.closedmax = closedmax
self.slidermin = slidermin
self.slidermax = slidermax
self.drag_active = False
def _update(self, event):
"""update the slider position"""
if self.ignore(event):
return
if event.button != 1:
return
if event.name == 'button_press_event' and event.inaxes == self.ax:
self.drag_active = True
event.canvas.grab_mouse(self.ax)
if not self.drag_active:
return
elif ((event.name == 'button_release_event') or
(event.name == 'button_press_event' and
event.inaxes != self.ax)):
self.drag_active = False
event.canvas.release_mouse(self.ax)
return
val = event.ydata
if val <= self.valmin:
if not self.closedmin:
return
val = self.valmin
elif val >= self.valmax:
if not self.closedmax:
return
val = self.valmax
if self.slidermin is not None and val <= self.slidermin.val:
if not self.closedmin:
return
val = self.slidermin.val
if self.slidermax is not None and val >= self.slidermax.val:
if not self.closedmax:
return
val = self.slidermax.val
self.set_val(val)
def set_val(self, val):
xy = self.poly.xy
xy[1] = 0, val
xy[2] = 1, val
self.poly.xy = xy
self.valtext.set_text(self.valfmt % val)
if self.drawon:
self.ax.figure.canvas.draw()
self.val = val
if not self.eventson:
return
for cid, func in self.observers.items():
func(val)
def on_changed(self, func):
"""
When the slider value is changed, call *func* with the new
slider position
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
"""remove the observer with connection id *cid*"""
try:
del self.observers[cid]
except KeyError:
pass
def reset(self):
"""reset the slider to the initial value if needed"""
if (self.val != self.valinit):
self.set_val(self.valinit)
class adjust_rt_for_selected_compound(object):
def __init__(self,
data,
include_lcmsruns = None,
exclude_lcmsruns = None,
include_groups = None,
exclude_groups = None,
compound_idx = 0,
width = 12,
height = 6,
y_scale='linear',
alpha = 0.5,
min_max_color = 'green',
peak_color = 'darkviolet',
slider_color = 'ghostwhite',
y_max = 'auto',
y_min = 0):
"""
data: a metatlas_dataset where files and compounds are stored.
for example,
self.metatlas_dataset[file_idx][compound_idx]['identification'].rt_references[-1].unique_id
is the unique id to the retention time reference for a compound in a file.
width: specify a width value in inches for the plots and sliders
height: specify a height value in inches for the plots and sliders
min_max_color & peak_color: specify a valid matplotlib color string for the slider and vertical bars
slider_color: background color for the sliders. Must be a valid matplotlib color
Press Left and Right arrow keys to move to the next or previous compound
"""
self.compound_idx = compound_idx
self.width = width
self.height = height
self.y_scale = y_scale
self.alpha = alpha
self.min_max_color = min_max_color
self.peak_color = peak_color
self.slider_color = slider_color
self.y_max = y_max
self.y_min = y_min
# filter runs from the metatlas dataset
if include_lcmsruns:
data = filter_lcmsruns_in_dataset_by_include_list(data,'lcmsrun',include_lcmsruns)
if include_groups:
data = filter_lcmsruns_in_dataset_by_include_list(data,'group',include_groups)
if exclude_lcmsruns:
data = filter_lcmsruns_in_dataset_by_exclude_list(data,'lcmsrun',exclude_lcmsruns)
if exclude_groups:
data = filter_lcmsruns_in_dataset_by_exclude_list(data,'group',exclude_groups)
self.data = data
# create figure and first axes
self.fig,self.ax = plt.subplots(figsize=(width, height))
plt.subplots_adjust(left=0.09, bottom=0.275)
# plt.ticklabel_format(style='plain', axis='x')
# plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
# warn the user if they do not own the atlas; and can not edit its values
self.enable_edit = True
self.atlas = metob.retrieve('Atlas',unique_id = self.data[0][0]['atlas_unique_id'],username='*')[-1]
print("loaded file for username = ", self.atlas.username)
if getpass.getuser() != self.atlas.username:
self.ax.set_title("YOU ARE %s. YOU ARE NOT ALLOWED TO EDIT VALUES IN THE RT CORRECTOR. USERNAMES ARE NOT THE SAME"%getpass.getuser())
self.enable_edit = False
#create all event handlers
self.fig.canvas.callbacks.connect('pick_event', self.on_pick)
self.fig.canvas.mpl_connect('key_press_event', self.press)
#create the plot
self.set_plot_data()
def set_plot_data(self):
#set y-scale and bounds if provided
self.ax.set_yscale(self.y_scale)
if self.y_max != 'auto':
self.ax.set_ylim(self.y_min,self.y_max)
self.ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
default_data = self.data[0][self.compound_idx]
if default_data['identification'].name:
compound_str = default_data['identification'].name.split('///')[0]
elif default_data['identification'].compound[-1].name:
compound_str = default_data['identification'].compound[-1].name
else:
compound_str = 'nameless compound'
compound_str = '%d, %s'%(self.compound_idx, compound_str)
self.ax.set_title('')
self.ax.set_ylabel('%s'%compound_str)
self.ax.set_xlabel('Retention Time')
self.my_rt = metob.retrieve('RTReference',
unique_id = default_data['identification'].rt_references[-1].unique_id, username='*')[-1]
for d in self.data: #this loops through the files
if d[self.compound_idx]['data']['eic']:
if len(d[self.compound_idx]['data']['eic']['rt']) > 0:
x = d[self.compound_idx]['data']['eic']['rt']
y = d[self.compound_idx]['data']['eic']['intensity']
x = np.asarray(x)
y = np.asarray(y)
#minval = np.min(y[np.nonzero(y)])
#y = y - minval
x = x[y>0]
y = y[y>0]#y[y<0.0] = 0.0
self.ax.plot(x,y,'k-',linewidth=2.0,alpha=self.alpha, picker=5, label = d[self.compound_idx]['lcmsrun'].name.replace('.mzML',''))
self.min_line = self.ax.axvline(self.my_rt.rt_min, color=self.min_max_color,linewidth=4.0)
self.max_line = self.ax.axvline(self.my_rt.rt_max, color=self.min_max_color,linewidth=4.0)
self.peak_line = self.ax.axvline(self.my_rt.rt_peak, color=self.peak_color,linewidth=4.0)
self.rt_peak_ax = plt.axes([0.09, 0.05, 0.81, 0.03], facecolor=self.slider_color)
self.rt_max_ax = plt.axes([0.09, 0.1, 0.81, 0.03], facecolor=self.slider_color)
self.rt_min_ax = plt.axes([0.09, 0.15, 0.81, 0.03], facecolor=self.slider_color)
self.y_scale_ax = plt.axes([0.925, 0.275, 0.02, 0.63], facecolor=self.slider_color)
min_x = self.ax.get_xlim()[0]
max_x = self.ax.get_xlim()[1]
self.rt_min_slider = Slider(self.rt_min_ax, 'RT min', min_x, max_x, valinit=self.my_rt.rt_min,color=self.min_max_color)
self.rt_min_slider.vline.set_color('black')
self.rt_min_slider.vline.set_linewidth(4)
self.rt_max_slider = Slider(self.rt_max_ax, 'RT max', min_x, max_x, valinit=self.my_rt.rt_max,color=self.min_max_color)
self.rt_max_slider.vline.set_color('black')
self.rt_max_slider.vline.set_linewidth(4)
self.rt_peak_slider = Slider(self.rt_peak_ax,'RT peak', min_x, max_x, valinit=self.my_rt.rt_peak,color=self.peak_color)
self.rt_peak_slider.vline.set_color('black')
self.rt_peak_slider.vline.set_linewidth(4)
if self.enable_edit:
self.rt_min_slider.on_changed(self.update_rt)
self.rt_max_slider.on_changed(self.update_rt)
self.rt_peak_slider.on_changed(self.update_rt)
(self.slider_y_min,self.slider_y_max) = self.ax.get_ylim()
self.slider_val = self.slider_y_max
self.y_scale_slider = VertSlider(self.y_scale_ax,'',self.slider_y_min,self.slider_y_max, valfmt = '', valinit=self.slider_y_max,color=self.peak_color)
self.y_scale_slider.vline.set_color('black')
self.y_scale_slider.vline.set_linewidth(8)
self.y_scale_slider.on_changed(self.update_yscale)
self.lin_log_ax = plt.axes([0.1, 0.75, 0.1, 0.15])#, axisbg=axcolor)
self.lin_log_ax.axis('off')
self.lin_log_radio = RadioButtons(self.lin_log_ax, ('linear', 'log'))
self.lin_log_radio.on_clicked(self.set_lin_log)
self.peak_flag_ax = plt.axes([0.8, 0.75, 0.1, 0.15])#, axisbg=axcolor)
self.peak_flag_ax.axis('off')
peak_flags = ('keep', 'remove', 'unresolvable isomers','poor peak shape')
my_id = metob.retrieve('CompoundIdentification',
unique_id = self.data[0][self.compound_idx]['identification'].unique_id, username='*')[-1]
if my_id.description in peak_flags:
peak_flag_index = peak_flags.index(my_id.description)
else:
peak_flag_index = 0
self.peak_flag_radio = RadioButtons(self.peak_flag_ax, peak_flags)
self.peak_flag_radio.on_clicked(self.set_peak_flag)
self.peak_flag_radio.set_active(peak_flag_index)
def set_lin_log(self,label):
self.ax.set_yscale(label)
self.ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
self.fig.canvas.draw_idle()
def set_peak_flag(self,label):
my_id = metob.retrieve('CompoundIdentification',
unique_id = self.data[0][self.compound_idx]['identification'].unique_id, username='*')[-1]
my_id.description = label
metob.store(my_id)
def on_pick(self,event):
thisline = event.artist
thisline.set_color('red')
self.ax.set_title(thisline.get_label())
def press(self,event):
if event.key == 'right':
if self.compound_idx + 1 < len(self.data[0]):
self.compound_idx += 1
self.ax.cla()
self.rt_peak_ax.cla()
self.rt_min_ax.cla()
self.rt_max_ax.cla()
self.y_scale_ax.cla()
self.set_plot_data()
if event.key == 'left':
if self.compound_idx > 0:
self.compound_idx -= 1
self.ax.cla()
self.rt_peak_ax.cla()
self.rt_min_ax.cla()
self.rt_max_ax.cla()
self.y_scale_ax.cla()
self.set_plot_data()
if event.key == 'x':
self.peak_flag_radio.set_active(1)
#This is really hacky, but using set_peak_flag function above didn't work.
my_id = metob.retrieve('CompoundIdentification',
unique_id = self.data[0][self.compound_idx]['identification'].unique_id, username='*')[-1]
my_id.description = 'remove'
metob.store(my_id)
def update_yscale(self,val):
self.y_scale_slider.valinit = self.slider_val
self.slider_val = self.y_scale_slider.val
self.ax.set_ylim(self.slider_y_min,self.slider_val)
self.fig.canvas.draw_idle()
def update_rt(self,val):
self.my_rt.rt_min = self.rt_min_slider.val
self.my_rt.rt_max = self.rt_max_slider.val
#self.my_rt.rt_peak = self.rt_peak_slider.val
self.rt_min_slider.valinit = self.my_rt.rt_min
self.rt_max_slider.valinit = self.my_rt.rt_max
self.rt_peak_slider.valinit = self.my_rt.rt_peak
metob.store(self.my_rt)
self.min_line.set_xdata((self.my_rt.rt_min,self.my_rt.rt_min))
self.max_line.set_xdata((self.my_rt.rt_max,self.my_rt.rt_max))
self.peak_line.set_xdata((self.my_rt.rt_peak,self.my_rt.rt_peak))
self.fig.canvas.draw_idle()
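# Usage note for the RT corrector above: dragging the RT min/max sliders immediately
# persists the new values to the database via metob.store() inside update_rt(); there is
# no separate "save" step.  The left/right arrow keys step between compounds, and the
# 'x' key flags the current compound identification as 'remove'.  rt_peak is deliberately
# not written back from its slider (see the commented-out line in update_rt).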
class adjust_mz_for_selected_compound(object):
def __init__(self,
data,
include_lcmsruns = None,
exclude_lcmsruns = None,
include_groups = None,
exclude_groups = None,
compound_idx = 0,
width = 12,
height = 6,
y_scale='linear',
alpha = 0.5,
min_max_color = 'sage',
peak_color = 'darkviolet',
slider_color = 'ghostwhite',
y_max = 'auto',
y_min = 0):
"""
data: a metatlas_dataset where files and compounds are stored.
for example,
self.metatlas_dataset[file_idx][compound_idx]['identification'].rt_references[-1].unique_id
is the unique id to the retention time reference for a compound in a file.
        width: specify a width value in inches for the plots and sliders
        height: specify a height value in inches for the plots and sliders
min_max_color & peak_color: specify a valid matplotlib color string for the slider and vertical bars
slider_color: background color for the sliders. Must be a valid matplotlib color
Press Left and Right arrow keys to move to the next or previous compound
"""
self.compound_idx = compound_idx
self.width = width
self.height = height
self.y_scale = y_scale
self.alpha = alpha
self.min_max_color = min_max_color
self.peak_color = peak_color
self.slider_color = slider_color
self.y_max = y_max
self.y_min = y_min
# filter runs from the metatlas dataset
if include_lcmsruns:
data = filter_lcmsruns_in_dataset_by_include_list(data,'lcmsrun',include_lcmsruns)
if include_groups:
data = filter_lcmsruns_in_dataset_by_include_list(data,'group',include_groups)
if exclude_lcmsruns:
data = filter_lcmsruns_in_dataset_by_exclude_list(data,'lcmsrun',exclude_lcmsruns)
if exclude_groups:
data = filter_lcmsruns_in_dataset_by_exclude_list(data,'group',exclude_groups)
self.data = data
# create figure and first axes
self.fig,self.ax = plt.subplots(figsize=(width, height))
plt.subplots_adjust(left=0.09, bottom=0.275)
# plt.ticklabel_format(style='plain', axis='x')
# plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
# warn the user if they do not own the atlas; and can not edit its values
self.enable_edit = True
self.atlas = metob.retrieve('Atlas',unique_id = self.data[0][0]['atlas_unique_id'],username='*')[-1]
print("loaded file for username = ", self.atlas.username)
if getpass.getuser() != self.atlas.username:
self.ax.set_title("YOUR ARE %s YOU ARE NOT ALLOWED TO EDIT VALUES THE RT CORRECTOR. USERNAMES ARE NOT THE SAME"%getpass.getuser())
self.enable_edit = False
#create all event handlers
self.fig.canvas.callbacks.connect('pick_event', self.on_pick)
self.fig.canvas.mpl_connect('key_press_event', self.press)
#create the plot
self.set_plot_data()
def set_plot_data(self):
self.ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
self.ax.ticklabel_format(useOffset=False, style='plain', axis='x')
default_data = self.data[0][self.compound_idx]
if default_data['identification'].name:
compound_str = default_data['identification'].name.split('///')[0]
elif default_data['identification'].compound[-1].name:
compound_str = default_data['identification'].compound[-1].name
else:
compound_str = 'nameless compound'
compound_str = '%d, %s'%(self.compound_idx, compound_str)
self.ax.set_title('')
self.ax.set_ylabel('%s'%compound_str)
self.ax.set_xlabel('Retention Time')
self.my_mz = metob.retrieve('MZReference',
unique_id = default_data['identification'].mz_references[-1].unique_id, username='*')[-1]
for i,d in enumerate(self.data): #this loops through the files
if d[self.compound_idx]['data']['ms1_summary']:
# if len(d[self.compound_idx]['data']['ms1_summary']['rt']) > 0:
x = d[self.compound_idx]['data']['ms1_summary']['mz_centroid']
y = d[self.compound_idx]['data']['ms1_summary']['peak_height']
x = np.asarray(x)
y = np.asarray(y)
self.ax.plot(x,y,'k.',linewidth=2.0,alpha=self.alpha, picker=5, label = d[self.compound_idx]['lcmsrun'].name.replace('.mzML',''))
mz_delta = self.my_mz.mz_tolerance*self.my_mz.mz/1e6
self.min_line = self.ax.axvline(self.my_mz.mz-mz_delta, color=self.min_max_color,linewidth=4.0)
self.max_line = self.ax.axvline(self.my_mz.mz+mz_delta, color=self.min_max_color,linewidth=4.0)
self.peak_line = self.ax.axvline(self.my_mz.mz, color=self.peak_color,linewidth=4.0)
min_x = self.ax.get_xlim()[0]
max_x = self.ax.get_xlim()[1]
print(min_x,max_x)
        self.mz_peak_ax = plt.axes([0.09, 0.05, 0.81, 0.03], facecolor=self.slider_color)
        self.mz_max_ax = plt.axes([0.09, 0.1, 0.81, 0.03], facecolor=self.slider_color)
        self.mz_min_ax = plt.axes([0.09, 0.15, 0.81, 0.03], facecolor=self.slider_color)
self.mz_min_slider = Slider(self.mz_min_ax, 'mz min', min_x, max_x, valinit=self.my_mz.mz-mz_delta,color=self.min_max_color,valfmt='%1.4f')
self.mz_min_slider.vline.set_color('black')
self.mz_min_slider.vline.set_linewidth(4)
self.mz_max_slider = Slider(self.mz_max_ax, 'mz max', min_x, max_x, valinit=self.my_mz.mz+mz_delta,color=self.min_max_color,valfmt='%1.4f')
self.mz_max_slider.vline.set_color('black')
self.mz_max_slider.vline.set_linewidth(4)
self.mz_peak_slider = Slider(self.mz_peak_ax,'mz peak', min_x, max_x, valinit=self.my_mz.mz,color=self.peak_color,valfmt='%1.4f')
self.mz_peak_slider.vline.set_color('black')
self.mz_peak_slider.vline.set_linewidth(4)
# if self.enable_edit:
# self.rt_min_slider.on_changed(self.update_rt)
# self.rt_max_slider.on_changed(self.update_rt)
# self.rt_peak_slider.on_changed(self.update_rt)
self.lin_log_ax = plt.axes([0.1, 0.75, 0.1, 0.15])#, axisbg=axcolor)
self.lin_log_ax.axis('off')
self.lin_log_radio = RadioButtons(self.lin_log_ax, ('linear', 'log'))
self.lin_log_radio.on_clicked(self.set_lin_log)
def set_lin_log(self,label):
self.ax.set_yscale(label)
self.ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
self.fig.canvas.draw_idle()
def on_pick(self,event):
thisline = event.artist
thisline.set_color('red')
self.ax.set_title(thisline.get_label())
def press(self,event):
if event.key == 'right':
if self.compound_idx + 1 < len(self.data[0]):
self.compound_idx += 1
self.ax.cla()
self.mz_peak_ax.cla()
self.mz_min_ax.cla()
self.mz_max_ax.cla()
self.set_plot_data()
if event.key == 'left':
if self.compound_idx > 0:
self.compound_idx -= 1
self.ax.cla()
self.mz_peak_ax.cla()
self.mz_min_ax.cla()
self.mz_max_ax.cla()
self.set_plot_data()
# def update_rt(self,val):
# self.my_rt.rt_min = self.rt_min_slider.val
# self.my_rt.rt_max = self.rt_max_slider.val
# self.my_rt.rt_peak = self.rt_peak_slider.val
# self.rt_min_slider.valinit = self.my_rt.rt_min
# self.rt_max_slider.valinit = self.my_rt.rt_max
# self.rt_peak_slider.valinit = self.my_rt.rt_peak
# metob.store(self.my_rt)
# self.min_line.set_xdata((self.my_rt.rt_min,self.my_rt.rt_min))
# self.max_line.set_xdata((self.my_rt.rt_max,self.my_rt.rt_max))
# self.peak_line.set_xdata((self.my_rt.rt_peak,self.my_rt.rt_peak))
# self.fig.canvas.draw_idle()
def replace_compound_id_with_name(x):
id_list = literal_eval(x)
if id_list:
found_compound = metob.retrieve('Compounds',unique_id=id_list[0],username='*')
return found_compound[-1].name
else:
return ''
def make_compound_id_df(data):
ids = []
for d in data[0]:
ids.append(d['identification'])
df = metob.to_dataframe(ids)
df['compound'] = df['compound'].apply(replace_compound_id_with_name).astype('str')
df['rt_unique_id'] = df['rt_references'].apply(lambda x: literal_eval(x))
# df['mz_unique_id'] = df['mz_references'].apply(lambda x: literal_eval(x))
# df['frag_unique_id'] = df['frag_references'].apply(lambda x: literal_eval(x))
df = df[['compound','name','username','rt_unique_id']]#,'mz_unique_id','frag_unique_id']]
return df
def show_compound_grid(input_fname = '',input_dataset=[]):
"""
Provide a valid path to data in or a dataset
"""
if not input_dataset:
print("loading...")
data = ma_data.get_dill_data(input_fname)
else:
data = input_dataset
atlas_in_data = metob.retrieve('Atlas',unique_id = data[0][0]['atlas_unique_id'],username='*')
print("loaded file for username = ", atlas_in_data[0].username)
username = getpass.getuser()
if username != atlas_in_data[0].username:
print("YOUR ARE", username, "YOU ARE NOT ALLOWED TO EDIT VALUES THE RT CORRECTOR. USERNAMES ARE NOT THE SAME")
#return
compound_df = make_compound_id_df(data)
#compound_grid = gui.create_qgrid([])
#compound_grid.df = compound_df
compound_grid = qgrid.QGridWidget(df=compound_df)#,set_grid_option={'show_toolbar',True})
#qgrid.show_grid(compound_df,show_toolbar=True)
compound_grid.export()
#display(compound_grid)
return data,compound_grid
def getcommonletters(strlist):
"""
Parameters
----------
strlist
Returns
-------
"""
return ''.join([x[0] for x in zip(*strlist) if reduce(lambda a,b:(a == b) and a or None,x)])
def findcommonstart(strlist):
"""
Parameters
----------
strlist
Returns
-------
"""
strlist = strlist[:]
prev = None
while True:
common = getcommonletters(strlist)
if common == prev:
break
strlist.append(common)
prev = common
return getcommonletters(strlist)
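# Illustrative example: findcommonstart(['abc_1.mzML', 'abc_2.mzML']) returns 'abc_',
# the longest shared filename prefix; getcommonletters() does the per-position comparison
# that findcommonstart() iterates until the result stabilizes.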
def plot_all_compounds_for_each_file(input_dataset = [], input_fname = '', include_lcmsruns = [],exclude_lcmsruns = [], nCols = 8, scale_y=True , output_loc=''):
"""
Parameters
----------
kwargs
Returns
-------
"""
if not input_dataset:
data = ma_data.get_dill_data(os.path.expandvars(input_fname))
else:
data = input_dataset
# filter runs from the metatlas dataset
if include_lcmsruns:
data = filter_lcmsruns_in_dataset_by_include_list(data,'lcmsrun',include_lcmsruns)
data = filter_lcmsruns_in_dataset_by_include_list(data,'group',include_lcmsruns)
if exclude_lcmsruns:
data = filter_lcmsruns_in_dataset_by_exclude_list(data,'lcmsrun',exclude_lcmsruns)
data = filter_lcmsruns_in_dataset_by_exclude_list(data,'group',exclude_lcmsruns)
compound_names = ma_data.get_compound_names(data)[0]
file_names = ma_data.get_file_names(data)
    output_loc = os.path.expandvars(output_loc)
nRows = int(np.ceil(len(compound_names)/float(nCols)))
xmin = 0
xmax = 210
subrange = float(xmax-xmin)/float(nCols) # scale factor for the x-axis
y_max = list()
if scale_y:
for file_idx,my_file in enumerate(file_names):
temp = -1
counter = 0
for compound_idx,compound in enumerate(compound_names):
d = data[file_idx][compound_idx]
if len(d['data']['eic']['rt']) > 0:
counter += 1
y = max(d['data']['eic']['intensity'])
if y > temp:
temp = y
#y_max.append(temp)
y_max += [temp] * counter
else:
for file_idx,my_file in enumerate(file_names):
for compound_idx,compound in enumerate(compound_names):
d = data[file_idx][compound_idx]
if len(d['data']['eic']['rt']) > 0:
y_max.append(max(d['data']['eic']['intensity']))
y_max = cycle(y_max)
    # create output dir
if not os.path.exists(output_loc):
os.makedirs(output_loc)
for file_idx,my_file in enumerate(file_names):
ax = plt.subplot(111)#, aspect='equal')
plt.setp(ax, 'frame_on', False)
ax.set_ylim([0, nRows+7])
col = 0
row = nRows+6
counter = 1
for compound_idx,compound in enumerate(compound_names):
if col == nCols:
row -= 1.3
col = 0
d = data[file_idx][compound_idx]
rt_min = d['identification'].rt_references[0].rt_min
rt_max = d['identification'].rt_references[0].rt_max
rt_peak = d['identification'].rt_references[0].rt_peak
if len(d['data']['eic']['rt']) > 0:
x = d['data']['eic']['rt']
y = d['data']['eic']['intensity']
                y = y / next(y_max)
new_x = (x-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2) ## remapping the x-range
xlbl = np.array_str(np.linspace(min(x), max(x), 8), precision=2)
rt_min_ = (rt_min-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2)
rt_max_ = (rt_max-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2)
rt_peak_ = (rt_peak-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2)
ax.plot(new_x, y+row,'k-')#,ms=1, mew=0, mfc='b', alpha=1.0)]
#ax.annotate('plot={}'.format(col+1),(max(new_x)/2+col*subrange,row-0.1), size=5,ha='center')
ax.annotate(xlbl,(min(new_x),row-0.1), size=2)
ax.annotate('{0},{1},{2},{3}'.format(compound,rt_min, rt_peak, rt_max),(min(new_x),row-0.2), size=2)#,ha='center')
myWhere = np.logical_and(new_x>=rt_min_, new_x<=rt_max_ )
ax.fill_between(new_x,min(y)+row,y+row,myWhere, facecolor='c', alpha=0.3)
col += 1
else:
new_x = np.asarray([0,1])#(x-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2) ## remapping the x-range
ax.plot(new_x, new_x-new_x+row,'r-')#,ms=1, mew=0, mfc='b', alpha=1.0)]
ax.annotate(compound,(min(new_x),row-0.1), size=2)
col += 1
counter += 1
plt.title(my_file)
fig = plt.gcf()
fig.set_size_inches(nRows*1.0, nCols*4.0)
fig.savefig(os.path.join(output_loc, my_file + '-' + str(counter) + '.pdf'))
plt.clf()
def plot_all_files_for_each_compound(input_dataset = [], input_fname = '', include_lcmsruns = [],exclude_lcmsruns = [], nCols = 8, scale_y=True , output_loc=''):
"""
Parameters
----------
kwargs
Returns
-------
"""
if not input_dataset:
data = ma_data.get_dill_data(os.path.expandvars(input_fname))
else:
data = input_dataset
# filter runs from the metatlas dataset
if include_lcmsruns:
data = filter_lcmsruns_in_dataset_by_include_list(data,'lcmsrun',include_lcmsruns)
data = filter_lcmsruns_in_dataset_by_include_list(data,'group',include_lcmsruns)
if exclude_lcmsruns:
data = filter_lcmsruns_in_dataset_by_exclude_list(data,'lcmsrun',exclude_lcmsruns)
data = filter_lcmsruns_in_dataset_by_exclude_list(data,'group',exclude_lcmsruns)
compound_names = ma_data.get_compound_names(data)[0]
file_names = ma_data.get_file_names(data)
output_loc = os.path.expandvars(output_loc)
nRows = int(np.ceil(len(file_names)/float(nCols)))
print('nrows = ', nRows)
xmin = 0
xmax = 210
subrange = float(xmax-xmin)/float(nCols) # scale factor for the x-axis
y_max = list()
if scale_y:
for compound_idx,compound in enumerate(compound_names):
temp = -1
counter = 0
for file_idx,my_file in enumerate(file_names):
d = data[file_idx][compound_idx]
if len(d['data']['eic']['rt']) > 0:
counter += 1
y = max(d['data']['eic']['intensity'])
if y > temp:
temp = y
y_max += [temp] * counter
else:
for compound_idx,compound in enumerate(compound_names):
for file_idx,my_file in enumerate(file_names):
d = data[file_idx][compound_idx]
if len(d['data']['eic']['rt']) > 0:
y_max.append(max(d['data']['eic']['intensity']))
print("length of ymax is ", len(y_max))
y_max = cycle(y_max)
    # create output dir
if not os.path.exists(output_loc):
os.makedirs(output_loc)
plt.ioff()
for compound_idx,compound in enumerate(compound_names):
ax = plt.subplot(111)#, aspect='equal')
plt.setp(ax, 'frame_on', False)
ax.set_ylim([0, nRows+7])
col = 0
row = nRows+6
counter = 1
for file_idx,my_file in enumerate(file_names):
if col == nCols:
row -= 1.3
col = 0
d = data[file_idx][compound_idx]
#file_name = compound_names[compound_idx]
rt_min = d['identification'].rt_references[0].rt_min
rt_max = d['identification'].rt_references[0].rt_max
rt_peak = d['identification'].rt_references[0].rt_peak
if len(d['data']['eic']['rt']) > 0:
x = d['data']['eic']['rt']
y = d['data']['eic']['intensity']
                y = y / next(y_max)
new_x = (x-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2) ## remapping the x-range
xlbl = np.array_str(np.linspace(min(x), max(x), 8), precision=2)
rt_min_ = (rt_min-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2)
rt_max_ = (rt_max-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2)
rt_peak_ = (rt_peak-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2)
ax.plot(new_x, y+row,'k-')#,ms=1, mew=0, mfc='b', alpha=1.0)]
#ax.annotate('plot={}'.format(col+1),(max(new_x)/2+col*subrange,row-0.1), size=5,ha='center')
ax.annotate(xlbl,(min(new_x),row-0.1), size=2)
ax.annotate('{0},{1},{2},{3}'.format(my_file,rt_min, rt_peak, rt_max),(min(new_x),row-0.2), size=2)#,ha='center')
myWhere = np.logical_and(new_x>=rt_min_, new_x<=rt_max_ )
#ax.fill_between(new_x,min(y)+row,y+row,myWhere, facecolor='c', alpha=0.3)
col += 1
else:
new_x = np.asarray([0,1])
ax.plot(new_x, new_x-new_x+row,'r-')#,ms=1, mew=0, mfc='b', alpha=1.0)]
# y = [0,1]#(x-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2) ## remapping the x-range
# ax.plot(new_x, y-y+row,'r-')#,ms=1, mew=0, mfc='b', alpha=1.0)]
ax.annotate(my_file,(min(new_x),row-0.1), size=1)
col += 1
counter += 1
plt.title(compound)
fig = plt.gcf()
fig.set_size_inches(nRows*1.0,nCols*4.0)
fig.savefig(os.path.join(output_loc, compound + '-' + str(counter) + '.pdf'))
plt.close(fig)
""" contribution from <NAME> """
def _InitialiseNeutralisationReactions():
patts= (
# Imidazoles
('[n+;H]','n'),
# Amines
('[N+;!H0]','N'),
# Carboxylic acids and alcohols
('[$([O-]);!$([O-][#7])]','O'),
# Thiols
('[S-;X1]','S'),
# Sulfonamides
('[$([N-;X2]S(=O)=O)]','N'),
# Enamines
('[$([N-;X2][C,N]=C)]','N'),
# Tetrazoles
('[n-]','[nH]'),
# Sulfoxides
('[$([S-]=O)]','S'),
# Amides
('[$([N-]C=O)]','N'),
)
return [(Chem.MolFromSmarts(x),Chem.MolFromSmiles(y,False)) for x,y in patts]
def desalt(mol):
#input is an rdkit mol
#returns an rdkit mol keeping the biggest component
#returns original mol if only one component
    #returns a boolean indicating whether cleaning was necessary
d = Chem.rdmolops.GetMolFrags(mol) #these are atom indices
if len(d) == 1: #If there are fragments or multiple molecules this will be greater than 1
return mol,False
my_smiles=Chem.MolToSmiles(mol)
    parent_atom_count = 0
disconnected=my_smiles.split('.')
#With GetMolFrags, we've already established that there is more than one disconnected structure
for s in disconnected:
little_mol=Chem.MolFromSmiles(s)
count = little_mol.GetNumAtoms()
if count > parent_atom_count:
parent_atom_count = count
parent_mol = little_mol
return parent_mol,True
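# Minimal sketch of desalt() usage (assumes RDKit is importable here, as elsewhere in
# this module; the SMILES is illustrative only):
# mol = Chem.MolFromSmiles('CC(=O)[O-].[Na+]')   # sodium acetate
# parent, was_desalted = desalt(mol)             # keeps the largest fragment; was_desalted is True
# Chem.MolToSmiles(parent)                       # -> 'CC(=O)[O-]'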
""" contribution from <NAME> """
def _InitialiseNeutralisationReactions():
patts= (
# Imidazoles
('[n+;H]','n'),
# Amines
('[N+;!H0]','N'),
# Carboxylic acids and alcohols
('[$([O-]);!$([O-][#7])]','O'),
# Thiols
('[S-;X1]','S'),
# Sulfonamides
('[$([N-;X2]S(=O)=O)]','N'),
# Enamines
('[$([N-;X2][C,N]=C)]','N'),
# Tetrazoles
('[n-]','[nH]'),
# Sulfoxides
('[$([S-]=O)]','S'),
# Amides
('[$([N-]C=O)]','N'),
)
return [(Chem.MolFromSmarts(x),Chem.MolFromSmiles(y,False)) for x,y in patts]
def NeutraliseCharges(mol, reactions=None):
reactions=_InitialiseNeutralisationReactions()
replaced = False
for i,(reactant, product) in enumerate(reactions):
while mol.HasSubstructMatch(reactant):
replaced = True
rms = Chem.AllChem.ReplaceSubstructs(mol, reactant, product)
rms_smiles = Chem.MolToSmiles(rms[0])
mol = Chem.MolFromSmiles(rms_smiles)
if replaced:
return (mol, True) #Chem.MolToSmiles(mol,True)
else:
return (mol, False)
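# Minimal sketch of NeutraliseCharges() usage (illustrative SMILES), e.g. to neutralise
# the acetate anion left over after desalting:
# mol = Chem.MolFromSmiles('CC(=O)[O-]')
# neutral, changed = NeutraliseCharges(mol)      # changed is True
# Chem.MolToSmiles(neutral)                      # -> 'CC(=O)O'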
def drawStructure_Fragment(pactolus_tree,fragment_idx,myMol,myMol_w_Hs):
from copy import deepcopy
fragment_atoms = np.where(pactolus_tree[fragment_idx]['atom_bool_arr'])[0]
depth_of_hit = np.sum(pactolus_tree[fragment_idx]['bond_bool_arr'])
mol2 = deepcopy(myMol_w_Hs)
# Now set the atoms you'd like to remove to dummy atoms with atomic number 0
fragment_atoms = np.where(pactolus_tree[fragment_idx]['atom_bool_arr']==False)[0]
for f in fragment_atoms:
mol2.GetAtomWithIdx(f).SetAtomicNum(0)
# Now remove dummy atoms using a query
mol3 = Chem.DeleteSubstructs(mol2, Chem.MolFromSmarts('[#0]'))
mol3 = Chem.RemoveHs(mol3)
# You get what you are looking for
return moltosvg(mol3),depth_of_hit
def moltosvg(mol,molSize=(450,150),kekulize=True):
mc = Chem.Mol(mol.ToBinary())
if kekulize:
try:
Chem.Kekulize(mc)
except:
mc = Chem.Mol(mol.ToBinary())
if not mc.GetNumConformers():
rdDepictor.Compute2DCoords(mc)
drawer = rdMolDraw2D.MolDraw2DSVG(molSize[0],molSize[1])
drawer.DrawMolecule(mc)
drawer.FinishDrawing()
svg = drawer.GetDrawingText()
# It seems that the svg renderer used doesn't quite hit the spec.
# Here are some fixes to make it work in the notebook, although I think
# the underlying issue needs to be resolved at the generation step
return svg.replace('svg:','')
def get_ion_from_fragment(frag_info,spectrum):
hit_indices = np.where(np.sum(frag_info,axis=1))
hit = spectrum[hit_indices,:][0]
return hit,hit_indices
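# get_ion_from_fragment(): frag_info appears to be a 2-D indicator array with one row per
# spectrum peak; rows containing any nonzero entry select the corresponding rows of
# `spectrum` (an N x 2 array of m/z, intensity), which are returned along with their indices.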
def calculate_median_of_internal_standards(dataset_for_median,atlas,include_lcmsruns = [],exclude_lcmsruns = [], include_groups = [],exclude_groups = []):
"""
"""
# filter runs from the metatlas dataset
# dataset_for_median = copy.deepcopy(dataset_for_median)
if include_lcmsruns:
dataset_for_median = filter_lcmsruns_in_dataset_by_include_list(dataset_for_median,'lcmsrun',include_lcmsruns)
if include_groups:
dataset_for_median = filter_lcmsruns_in_dataset_by_include_list(dataset_for_median,'group',include_groups)
if exclude_lcmsruns:
dataset_for_median = filter_lcmsruns_in_dataset_by_exclude_list(dataset_for_median,'lcmsrun',exclude_lcmsruns)
if exclude_groups:
dataset_for_median = filter_lcmsruns_in_dataset_by_exclude_list(dataset_for_median,'group',exclude_groups)
internal_standard_vals = []
for i,dd in enumerate(dataset_for_median): #loop through files
for j,d in enumerate(dd): #loop through compounds
if atlas.compound_identifications[j].internal_standard_id != 'nan':
save_dict = {'file_name':d['lcmsrun'].name,'internal_standard_id':atlas.compound_identifications[j].internal_standard_id}
for fieldname in ['peak_height','peak_area']:
if (not d['data']['ms1_summary']) or (not d['data']['ms1_summary'][fieldname]):
v = 0
else:
v = d['data']['ms1_summary'][fieldname]
save_dict[fieldname] = v
internal_standard_vals.append(save_dict)
return internal_standard_vals
def normalize_peaks_by_internal_standard(metatlas_dataset,atlas,include_lcmsruns = [],exclude_lcmsruns = [], include_groups = [],exclude_groups = []):
"""
Takes in a metatlas dataset and an atlas. Returns a metatlas dataset with
ms1_summary peak_height and peak_area normalized by internal standard where
user selected in their atlas.
    The compound_identification in the atlas has the following fields:
internal_standard_id = MetUnicode(help='Freetext identifier for an internal standard')
do_normalization = MetBool(False)
internal_standard_to_use = MetUnicode(help='identifier of which internal standard to normalize by')
Peaks are normalized by:
I_normalized = I_molecule_in_file / I_standard_in_file * MEDIAN(I_standard_in_good_files)
"good files" for calculating the median intensity of the standard are identified
by exclude_lcmsruns=[]
The patterns in exclude_lcmsruns will remove files that you don't want to use for calculating the median intensity
"""
internal_standard_vals = calculate_median_of_internal_standards(metatlas_dataset,atlas,include_lcmsruns = include_lcmsruns,
exclude_lcmsruns =exclude_lcmsruns, include_groups =include_groups,exclude_groups =exclude_groups)
median_vals = pd.DataFrame(internal_standard_vals).drop('file_name',axis=1).groupby('internal_standard_id').median()
df = pd.DataFrame(internal_standard_vals)#.drop('peak_height',axis=1)
norm_dfs = {}
norm_dfs['peak_area'] = df.pivot(index='internal_standard_id', columns='file_name', values='peak_area')
norm_dfs['peak_height'] = df.pivot(index='internal_standard_id', columns='file_name', values='peak_height')
for i,dd in enumerate(metatlas_dataset): #loop through files
if dd[0]['lcmsrun'].name in norm_dfs['peak_area'].columns: #make sure the file name is in the normalization dataframe
for j,d in enumerate(dd): #loop through compounds
if atlas.compound_identifications[j].do_normalization == True:
for fieldname in ['peak_height','peak_area']:
if (not d['data']['ms1_summary']) or (not d['data']['ms1_summary'][fieldname]):
v = 0
else:
norm_val = norm_dfs[fieldname].loc[atlas.compound_identifications[j].internal_standard_to_use,d['lcmsrun'].name]
median_val = median_vals.loc[atlas.compound_identifications[j].internal_standard_to_use,fieldname]
metatlas_dataset[i][j]['data']['ms1_summary'][fieldname] = d['data']['ms1_summary'][fieldname] / norm_val * median_val
return metatlas_dataset
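# Worked example of the normalization above (illustrative numbers only): if a compound's
# peak_height is 1e6 in a file, its chosen internal standard is 2e5 in that same file, and
# the median standard intensity across the retained files is 4e5, the stored value becomes
# 1e6 / 2e5 * 4e5 = 2e6.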
#plot msms and annotate
#compound name
#formula
#adduct
#theoretical m/z
#histogram of retention times
#scatter plot of retention time with peak area
#retention time
#print all chromatograms
#structure
def make_output_dataframe(input_fname = '',input_dataset = [],include_lcmsruns = [],exclude_lcmsruns = [], include_groups = [],exclude_groups = [], output_loc = [], fieldname = 'peak_height', use_labels=False):
"""
fieldname can be: peak_height, peak_area, mz_centroid, rt_centroid, mz_peak, rt_peak
"""
if not input_dataset:
data = ma_data.get_dill_data(os.path.expandvars(input_fname))
else:
data = input_dataset
# filter runs from the metatlas dataset
if include_lcmsruns:
data = filter_lcmsruns_in_dataset_by_include_list(data,'lcmsrun',include_lcmsruns)
if include_groups:
data = filter_lcmsruns_in_dataset_by_include_list(data,'group',include_groups)
if exclude_lcmsruns:
data = filter_lcmsruns_in_dataset_by_exclude_list(data,'lcmsrun',exclude_lcmsruns)
if exclude_groups:
data = filter_lcmsruns_in_dataset_by_exclude_list(data,'group',exclude_groups)
compound_names = ma_data.get_compound_names(data,use_labels=use_labels)[0]
file_names = ma_data.get_file_names(data)
group_names = ma_data.get_group_names(data)
output_loc = os.path.expandvars(output_loc)
df = pd.DataFrame( index=compound_names, columns=file_names, dtype=float)
# peak_height['compound'] = compound_list
# peak_height.set_index('compound',drop=True)
for i,dd in enumerate(data):
for j,d in enumerate(dd):
if (not d['data']['ms1_summary']) or (not d['data']['ms1_summary'][fieldname]):
                df.loc[compound_names[j],file_names[i]] = 0
            else:
                df.loc[compound_names[j],file_names[i]] = d['data']['ms1_summary'][fieldname]
columns = []
for i,f in enumerate(file_names):
columns.append((group_names[i],f))
df.columns = pd.MultiIndex.from_tuples(columns,names=['group', 'file'])
if output_loc:
if not os.path.exists(output_loc):
os.makedirs(output_loc)
df.to_csv(os.path.join(output_loc, fieldname + '.tab'),sep='\t')
return df
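# Example (hedged sketch): building a peak-height table and writing it as a tab-separated
# file; `metatlas_dataset` and the output path below are placeholders, not defined here.
# peak_height_df = make_output_dataframe(input_dataset=metatlas_dataset,
#                                        fieldname='peak_height',
#                                        output_loc='$HOME/project/output')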
def file_with_max_precursor_intensity(data,compound_idx):
idx = None
my_max = 0
for i,d in enumerate(data):
if 'data' in d[compound_idx]['data']['msms'].keys():
if type(d[compound_idx]['data']['msms']['data']) != list:#.has_key('precursor_intensity'):
temp = d[compound_idx]['data']['msms']['data']['precursor_intensity']
if len(temp)>0:
m = max(temp)
if m > my_max:
my_max = m
idx = i
return idx,my_max
def file_with_max_score(data, frag_refs, compound_idx, filter_by):
idx = []
max_score = np.nan
best_ref_spec = []
for file_idx in range(len(data)):
#empty can look like this:
# {'eic': {'rt': [], 'intensity': [], 'mz': []}, 'ms1_summary': {'num_ms1_datapoints': 0.0, 'rt_centroid': nan, 'mz_peak': nan, 'peak_height': nan, 'rt_peak': nan, 'peak_area': nan, 'mz_centroid': nan}, 'msms': {'data': {'rt': array([], dtype=float64), 'collision_energy': array([], dtype=float64), 'i': array([], dtype=float64), 'precursor_intensity': array([], dtype=float64), 'precursor_MZ': array([], dtype=float64), 'mz': array([], dtype=float64)}}}
#or empty can look like this:
# {'eic': None, 'ms1_summary': None, 'msms': {'data': []}}
if ('data' in data[file_idx][compound_idx]['data']['msms'].keys()) and \
(isinstance(data[file_idx][compound_idx]['data']['msms']['data'],dict)) and \
('rt' in data[file_idx][compound_idx]['data']['msms']['data'].keys()) and \
(len(data[file_idx][compound_idx]['data']['msms']['data']['rt'])>0):
msv_sample_scans = np.array([data[file_idx][compound_idx]['data']['msms']['data']['mz'], data[file_idx][compound_idx]['data']['msms']['data']['i']])
rt_of_msv_sample = np.array(data[file_idx][compound_idx]['data']['msms']['data']['rt'])
scan_idxs = [i+1
for i in range(rt_of_msv_sample.size-1)
if rt_of_msv_sample[i] != rt_of_msv_sample[i+1]]
for i, msv_sample in enumerate(np.split(msv_sample_scans, scan_idxs, axis=1)):
for f, frag in sp.filter_frag_refs(data, frag_refs, compound_idx, file_idx, filter_by).iterrows():
msv_ref = sp.sort_ms_vector_by_mz(np.array(frag['mz_intensities']).T)
score = sp.score_ms_vectors_composite(*sp.pairwise_align_ms_vectors(msv_sample, msv_ref, .005, 'shape'))
if score > max_score or np.isnan(max_score):
max_score = score
idx = file_idx
best_ref_spec = [frag['mz_intensities']]
return idx, max_score, best_ref_spec
def plot_errorbar_plots(df,output_loc=''):
output_loc = os.path.expandvars(output_loc)
if not os.path.exists(output_loc):
os.makedirs(output_loc)
plt.ioff()
for compound in df.index:
        m = df.loc[compound].groupby(level='group').mean()
        e = df.loc[compound].groupby(level='group').std()
        c = df.loc[compound].groupby(level='group').count()
        for i in range(len(e)):
            if c.iloc[i] > 0:
                e.iloc[i] = e.iloc[i] / c.iloc[i]**0.5
f, ax = plt.subplots(1, 1,figsize=(12,12))
m.plot(yerr=e, kind='bar',ax=ax)
ax.set_title(compound,fontsize=12,weight='bold')
plt.tight_layout()
f.savefig(os.path.join(output_loc, compound + '_errorbar.pdf'))
#f.clear()
plt.close(f)#f.clear()
def frag_refs_to_json(json_dir = '/project/projectdirs/metatlas/projects/sharepoint/', name = 'frag_refs', save = True):
ids = metob.retrieve('CompoundIdentification',username='*')
frag_refs = [cid for cid in ids if cid.frag_references]
data = {'head_id': [],
'inchi_key': [],
'neutralized_inchi_key': [],
'neutralized_2d_inchi_key': [],
'polarity': [],
'collision_energy': [],
'technique': [],
'precursor_mz': [],
'mz_intensities': []}
for fr in frag_refs:
data['head_id'].append(fr.frag_references[0].head_id),
data['inchi_key'].append(fr.compound[0].inchi_key)
data['neutralized_inchi_key'].append(fr.compound[0].neutralized_inchi_key)
data['neutralized_2d_inchi_key'].append(fr.compound[0].neutralized_2d_inchi_key)
data['polarity'].append(fr.frag_references[0].polarity)
data['precursor_mz'].append(fr.frag_references[0].precursor_mz)
data['mz_intensities'].append([(m.mz, m.intensity) for m in fr.frag_references[0].mz_intensities])
data['collision_energy'].append(fr.frag_references[0].collision_energy)
data['technique'].append(fr.frag_references[0].technique)
if save:
with open(os.path.join(json_dir, name + '.json'), 'w') as text_file:
text_file.write(json.dumps(data))
else:
return json.dumps(data)
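# frag_refs_to_json() usage note: with save=True (the default) it writes
# <json_dir>/<name>.json; with save=False it returns the JSON string instead, e.g.
# frag_json = frag_refs_to_json(save=False)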
# def get_idenficications_with_fragrefs():
# """
# Select all CompoundIdentifications that have a fragmentation reference
# """
def make_identification_figure(frag_json_dir = '/project/projectdirs/metatlas/projects/sharepoint/', frag_json_name = 'frag_refs',
input_fname = '', input_dataset = [], include_lcmsruns = [],
exclude_lcmsruns = [], include_groups = [], exclude_groups = [], output_loc = [], use_labels=False):
output_loc = os.path.expandvars(output_loc)
if not os.path.exists(output_loc):
os.makedirs(output_loc)
if not input_dataset:
data = ma_data.get_dill_data(os.path.expandvars(input_fname))
else:
data = input_dataset
# filter runs from the metatlas dataset
if include_lcmsruns:
data = filter_lcmsruns_in_dataset_by_include_list(data,'lcmsrun',include_lcmsruns)
if include_groups:
data = filter_lcmsruns_in_dataset_by_include_list(data,'group',include_groups)
if exclude_lcmsruns:
data = filter_lcmsruns_in_dataset_by_exclude_list(data,'lcmsrun',exclude_lcmsruns)
if exclude_groups:
        data = filter_lcmsruns_in_dataset_by_exclude_list(data,'group',exclude_groups)
#data = filter_lcmsruns_in_dataset_by_exclude_list(data,'group',exclude_lcmsruns)
compound_names = ma_data.get_compound_names(data,use_labels=use_labels)[0]
file_names = ma_data.get_file_names(data)
# print(len(data),len(data[0]),len(compound_names))
frag_refs = pd.read_json(os.path.join(frag_json_dir, frag_json_name + ".json"))
for compound_idx in range(len(compound_names)):
file_idx = None
file_precursor_intensity = 0
score = None
ref_spec = []
if any([(len(data[i][compound_idx]['identification'].compound)!=0) and (data[i][compound_idx]['identification'].compound is not None) for i in range(len(file_names))]):
# print('checking for compound ids')
file_idx, score, ref_spec = file_with_max_score(data, frag_refs, compound_idx, 'inchi_key and rt and polarity')
            if not isinstance(file_idx,int): #There is no fragmentation reference for this compound
file_idx = file_with_max_precursor_intensity(data,compound_idx)[0]
# print('found one',file_idx)
else:
file_idx = file_with_max_precursor_intensity(data,compound_idx)[0]
# print(file_idx,compound_idx, compound_names[compound_idx])
if isinstance(file_idx,int):
# print('printing')
# print(file_idx,compound_idx)
fig = plt.figure(figsize=(20,20))
# fig = plt.figure()
ax = fig.add_subplot(211)
ax.set_title(compound_names[compound_idx],fontsize=12,weight='bold')
ax.set_xlabel('m/z',fontsize=12,weight='bold')
ax.set_ylabel('intensity',fontsize=12,weight='bold')
#TODO: iterate across all collision energies
precursor_intensity = data[file_idx][compound_idx]['data']['msms']['data']['precursor_intensity']
idx_max = np.argwhere(precursor_intensity == np.max(precursor_intensity)).flatten()
mz = data[file_idx][compound_idx]['data']['msms']['data']['mz'][idx_max]
zeros = np.zeros(data[file_idx][compound_idx]['data']['msms']['data']['mz'][idx_max].shape)
intensity = data[file_idx][compound_idx]['data']['msms']['data']['i'][idx_max]
ax.vlines(mz,zeros,intensity,colors='r',linewidth = 2)
sx = np.argsort(intensity)[::-1]
labels = [1.001e9]
for i in sx:
if np.min(np.abs(mz[i] - labels)) > 0.1 and intensity[i] > 0.02 * np.max(intensity):
ax.annotate('%5.4f'%mz[i], xy=(mz[i], 1.01*intensity[i]),rotation = 90, horizontalalignment = 'center', verticalalignment = 'left')
labels.append(mz[i])
# precursor_mz = data[file_idx][compound_idx]['data']['msms']['precursor_mz'])
# print data[file_idx][compound_idx]['data']['msms']['polarity']
if ref_spec:
ref_mz = []
ref_intensity = []
ref_zeros = []
for s in ref_spec[0]:
ref_mz.append(s[0])
ref_intensity.append(s[1]*-1)
ref_zeros.append(0)
s = -1* intensity[sx[0]] / min(ref_intensity)
# L = plt.ylim()
# print data[file_idx][compound_idx]['identification'].compound[0].name, float(intensity[sx[0]]), float(min(ref_intensity))
ax.vlines(ref_mz,ref_zeros,[r*s for r in ref_intensity],colors='r',linewidth = 2)
# print "we have reference spectra", len(ref_spec[0])
plt.ioff()
plt.axhline()
plt.tight_layout()
L = plt.ylim()
plt.ylim(L[0],L[1]*1.12)
if data[file_idx][compound_idx]['identification'].compound:
inchi = data[file_idx][compound_idx]['identification'].compound[0].inchi
myMol = Chem.MolFromInchi(inchi.encode('utf-8'))
# myMol,neutralised = NeutraliseCharges(myMol)
if myMol:
image = Draw.MolToImage(myMol, size = (300,300) )
ax2 = fig.add_subplot(223)
ax2.imshow(image)
ax2.axis('off')
# SVG(moltosvg(myMol))
ax3 = fig.add_subplot(224)
ax3.set_xlim(0,1)
mz_theoretical = data[file_idx][compound_idx]['identification'].mz_references[0].mz
mz_measured = data[file_idx][compound_idx]['data']['ms1_summary']['mz_centroid']
if not mz_measured:
mz_measured = 0
delta_mz = abs(mz_theoretical - mz_measured)
delta_ppm = delta_mz / mz_theoretical * 1e6
rt_theoretical = data[file_idx][compound_idx]['identification'].rt_references[0].rt_peak
rt_measured = data[file_idx][compound_idx]['data']['ms1_summary']['rt_peak']
if not rt_measured:
rt_measured = 0
ax3.text(0,1,'%s'%os.path.basename(data[file_idx][compound_idx]['lcmsrun'].hdf5_file),fontsize=12)
ax3.text(0,0.95,'%s %s'%(compound_names[compound_idx], data[file_idx][compound_idx]['identification'].mz_references[0].adduct),fontsize=12)
ax3.text(0,0.9,'m/z theoretical = %5.4f, measured = %5.4f, %5.4f ppm difference'%(mz_theoretical, mz_measured, delta_ppm),fontsize=12)
ax3.text(0,0.85,'Expected Elution of %5.2f minutes, %5.2f min actual'%(rt_theoretical,rt_measured),fontsize=12)
if score != None:
ax3.text(0,0.80,'Score: %f'%(score),fontsize=12)
ax3.set_ylim(0.2,1.01)
ax3.axis('off')
# plt.show()
fig.savefig(os.path.join(output_loc, compound_names[compound_idx] + '.pdf'))
plt.close()
def top_five_scoring_files(data, frag_refs, compound_idx, filter_by):
file_idxs = []
ref_idxs = []
scores = []
msv_sample_list = []
msv_ref_list = []
rt_list = []
for file_idx in range(len(data)):
try:
assert(isinstance(data[file_idx][compound_idx]['data']['msms']['data'], dict))
except AssertionError:
continue
except IndexError:
continue
except KeyError:
continue
msv_sample_scans = np.array([data[file_idx][compound_idx]['data']['msms']['data']['mz'], data[file_idx][compound_idx]['data']['msms']['data']['i']])
rt_of_msv_sample = np.array(data[file_idx][compound_idx]['data']['msms']['data']['rt'])
scan_idxs = [i+1
for i in range(rt_of_msv_sample.size-1)
if rt_of_msv_sample[i] != rt_of_msv_sample[i+1]]
for i, msv_sample in enumerate(np.split(msv_sample_scans, scan_idxs, axis=1)):
current_best_score = None
current_best_ref_idx = None
current_best_msv_sample = None
current_best_msv_ref = None
current_best_rt = None
for ref_idx, frag in sp.filter_frag_refs(data, frag_refs, compound_idx, file_idx, filter_by).iterrows():
msv_ref = np.array(frag['mz_intensities']).T
msv_sample_aligned, msv_ref_aligned = sp.pairwise_align_ms_vectors(msv_sample, msv_ref, .005, 'shape')
score = sp.score_ms_vectors_composite(msv_sample_aligned, msv_ref_aligned)
if current_best_score == None or score > current_best_score:
current_best_score = score
current_best_ref_idx = ref_idx
current_best_msv_sample = msv_sample_aligned
current_best_msv_ref = msv_ref_aligned
current_best_rt = np.split(rt_of_msv_sample, scan_idxs)[i][0]
if current_best_score:
scores.append(current_best_score)
file_idxs.append(file_idx)
ref_idxs.append(current_best_ref_idx)
msv_sample_list.append(current_best_msv_sample)
msv_ref_list.append(current_best_msv_ref)
rt_list.append(current_best_rt)
return zip(*sorted(zip(file_idxs, ref_idxs, scores, msv_sample_list, msv_ref_list, rt_list), key=lambda l: l[2], reverse=True)[:5])
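# top_five_scoring_files() returns six parallel tuples, each holding up to five entries
# sorted by descending composite score, e.g. (illustrative call, assuming at least one hit):
# file_idxs, ref_idxs, scores, msv_samples, msv_refs, rts = top_five_scoring_files(
#     data, frag_refs, compound_idx, 'inchi_key and rt and polarity')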
def plot_msms_comparison(i, score, ax, msv_sample, msv_ref):
msv_sample_matches, msv_ref_matches, msv_sample_nonmatches, msv_ref_nonmatches = sp.partition_aligned_ms_vectors(msv_sample, msv_ref)
msv_sample_unaligned = np.concatenate((msv_sample_matches, msv_sample_nonmatches), axis=1)
msv_ref_unaligned = np.concatenate((msv_ref_matches, msv_ref_nonmatches), axis=1)
sample_mz = msv_sample_nonmatches[0]
sample_zeros = np.zeros(msv_sample_nonmatches[0].shape)
sample_intensity = msv_sample_nonmatches[1]
ax.vlines(sample_mz, sample_zeros, sample_intensity, colors='r', linewidth=1)
shared_mz = msv_sample_matches[0]
shared_zeros = np.zeros(msv_sample_matches[0].shape)
shared_sample_intensity = msv_sample_matches[1]
ax.vlines(shared_mz, shared_zeros, shared_sample_intensity, colors='g', linewidth=1)
most_intense_idxs = np.argsort(msv_sample_unaligned[1])[::-1]
if i == 0:
ax.set_title('%.4f' % score, fontsize=8, weight='bold')
ax.set_xlabel('m/z', fontsize=8, weight='bold')
ax.set_ylabel('intensity', fontsize=8, weight='bold')
ax.tick_params(axis='both', which='major', labelsize=6)
labels = [1.001e9]
intensity_requirement = [m for m in most_intense_idxs
if
np.min(np.abs(msv_sample_unaligned[0][m] - labels)) > 0.1
and msv_sample_unaligned[1][m] > 0.2 * np.max(msv_sample_unaligned[1])]
for m in max([most_intense_idxs[:6], intensity_requirement], key=len):
if np.min(np.abs(msv_sample_unaligned[0][m] - labels)) > 0.1 and msv_sample_unaligned[1][m] > 0.02 * np.max(msv_sample_unaligned[1]):
ax.annotate('%5.4f' % msv_sample_unaligned[0][m],
xy=(msv_sample_unaligned[0][m], 1.01 * msv_sample_unaligned[1][m]),
rotation=90,
horizontalalignment='center', verticalalignment='left',
size=4)
labels.append(msv_sample_unaligned[0][m])
if msv_ref_unaligned[0].size > 0:
ref_scale = -1 * np.max(msv_sample_unaligned[1]) / np.max(msv_ref_unaligned[1])
ref_mz = msv_ref_nonmatches[0]
ref_zeros = np.zeros(msv_ref_nonmatches[0].shape)
ref_intensity = ref_scale * msv_ref_nonmatches[1]
shared_ref_intensity = ref_scale * msv_ref_matches[1]
ax.vlines(ref_mz, ref_zeros, ref_intensity, colors='r', linewidth=1)
ax.vlines(shared_mz, shared_zeros, shared_ref_intensity, colors='g', linewidth=1)
ax.axhline()
ylim = ax.get_ylim()
ax.set_ylim(ylim[0], ylim[1] * 1.33)
def plot_structure(ax, compound, dimensions):
if compound:
inchi = compound[0].inchi
myMol = Chem.MolFromInchi(inchi.encode('utf-8'))
if myMol:
image = Draw.MolToImage(myMol, size=(dimensions, dimensions))
ax.imshow(image)
ax.axis('off')
def plot_ema_compound_info(ax, compound_info, label=''):
wrapper = TextWrapper(width=28, break_on_hyphens=True)
if compound_info.compound:
name = ['Name:', wrapper.fill(compound_info.compound[0].name)]
label = ['Label:', wrapper.fill(label)]
formula = ['Formula:', compound_info.compound[0].formula]
polarity = ['Polarity:', compound_info.mz_references[0].detected_polarity]
neutral_mass = ['Monoisotopic Mass:', compound_info.compound[0].mono_isotopic_molecular_weight]
theoretical_mz = ['Theoretical M/Z:', compound_info.mz_references[0].mz]
adduct = ['Adduct:', compound_info.mz_references[0].adduct]
cell_text = [name, label, formula, polarity, neutral_mass, theoretical_mz, adduct]
ema_compound_info_table = ax.table(cellText=cell_text,
colLabels=['', 'EMA Compound Info'],
bbox=[0.0, 0.0, 1, 1], loc='top left')
ema_compound_info_table.scale(1, .7)
ema_compound_info_table.auto_set_font_size(False)
ema_compound_info_table.set_fontsize(4)
cellDict = ema_compound_info_table.get_celld()
for i in range(len(cell_text)+1):
cellDict[(i,0)].set_width(0.3)
cellDict[(i,1)]._loc = 'center'
ax.axis('off')
def plot_eic(ax, data, compound_idx):
for file_idx in range(len(data)):
rt_min = data[file_idx][compound_idx]['identification'].rt_references[0].rt_min
rt_max = data[file_idx][compound_idx]['identification'].rt_references[0].rt_max
rt_peak = data[file_idx][compound_idx]['identification'].rt_references[0].rt_peak
if len(data[file_idx][compound_idx]['data']['eic']['rt']) > 1:
x = np.asarray(data[file_idx][compound_idx]['data']['eic']['rt'])
y = np.asarray(data[file_idx][compound_idx]['data']['eic']['intensity'])
ax.plot(x, y, 'k-', linewidth=.1, alpha=min(1, 10*(1./len(data))))
myWhere = np.logical_and(x>=rt_min, x<=rt_max )
ax.fill_between(x,0,y,myWhere, facecolor='c', alpha=min(1, 2*(1./len(data))))
# ax.tick_params(labelbottom='off')
ax.xaxis.set_tick_params(labelsize=5)
ax.get_yaxis().get_major_formatter().set_useOffset(False)
ax.get_yaxis().set_visible(False)
ax.axvline(rt_min, color='k', linewidth=1.0)
ax.axvline(rt_max, color='k', linewidth=1.0)
ax.axvline(rt_peak, color='r', linewidth=1.0)
def plot_score_and_ref_file(ax, score, rt, ref):
ax.text(0.5, 1, '%.4f'%score,
weight='bold',
horizontalalignment='center',
verticalalignment='top',
fontsize=4,
transform=ax.transAxes)
ax.text(0, .45, fill(ref + ' RT=%5.3f'%rt, width=26),
horizontalalignment='left',
verticalalignment='center',
rotation='vertical',
fontsize=2,
transform=ax.transAxes)
def get_msms_hits(metatlas_dataset, use_labels=False,
pre_query='database == "metatlas"',
# pre_query = 'index == index or index == @pd.NaT',
query='(@inchi_key == inchi_key) and (@polarity == polarity) and ((@precursor_mz - .5*(((.5*(@pre_mz_ppm**-decimal)/(decimal+1)) + .005 + ((.5*(@pre_mz_ppm**-decimal)/(decimal+1)) - .005)**2)**.5)) <= precursor_mz <= (@precursor_mz + .5*(((.5*(@pre_mz_ppm**-decimal)/(decimal+1)) + .005 + ((.5*(@pre_mz_ppm**-decimal)/(decimal+1)) - .005)**2)**.5)))',
# query='(@inchi_key == inchi_key) and (@polarity == polarity) and ((@precursor_mz - (.5*(@pre_mz_ppm**-decimal)/(decimal+1)) - @pre_mz_ppm*(@precursor_mz*1e-6)) <= precursor_mz <= (@precursor_mz + (.5*(@pre_mz_ppm**-decimal)/(decimal+1)) + @pre_mz_ppm*(@precursor_mz*1e-6)))',
# query='(@inchi_key == inchi_key) and (@polarity == polarity) and (@rt-.1 < rt < @rt+.1) and ((@precursor_mz - (.5*(@pre_mz_ppm**-decimal)/(decimal+1)) - @pre_mz_ppm*(@precursor_mz*1e-6)) <= precursor_mz <= (@precursor_mz + (.5*(@pre_mz_ppm**-decimal)/(decimal+1)) + @pre_mz_ppm*(@precursor_mz*1e-6)))',
**kwargs):
kwargs = dict(locals(), **kwargs)
resolve_by = kwargs.pop('resolve_by', 'shape')
frag_mz_tolerance = kwargs.pop('frag_mz_tolerance', .005)
# Reference parameters
ref_loc = kwargs.pop('ref_loc', '/global/project/projectdirs/metatlas/projects/spectral_libraries/msms_refs_v2.tab')
ref_dtypes = kwargs.pop('ref_dtypes', {'database':str, 'id':str, 'name':str,
'spectrum':object,'decimal':int, 'precursor_mz':float,
'polarity':str, 'adduct':str, 'fragmentation_method':str,
'collision_energy':str, 'instrument':str, 'instrument_type':str,
'formula':str, 'exact_mass':float,
'inchi_key':str, 'inchi':str, 'smiles':str})
ref_index = kwargs.pop('ref_index', ['database', 'id'])
if 'ref_df' in kwargs:
ref_df = kwargs.pop('ref_df')
else:
ref_df = pd.read_csv(ref_loc,
sep='\t',
dtype=ref_dtypes
).set_index(ref_index)
ref_df = ref_df.query(pre_query, local_dict=dict(locals(), **kwargs))
if ref_df['spectrum'].apply(type).eq(str).all():
ref_df['spectrum'] = ref_df['spectrum'].apply(lambda s: eval(s)).apply(np.array)
file_names = ma_data.get_file_names(metatlas_dataset)
compound_names = ma_data.get_compound_names(metatlas_dataset)[0]
msms_hits = []
for compound_idx,compound_name in enumerate(compound_names):
if len(metatlas_dataset[0][compound_idx]['identification'].compound) == 0:
# exit here if there isn't a compound in the identification
continue
inchi_key = metatlas_dataset[0][compound_idx]['identification'].compound[0].inchi_key
pre_mz_ppm = metatlas_dataset[0][compound_idx]['identification'].mz_references[0].mz_tolerance
precursor_mz = metatlas_dataset[0][compound_idx]['identification'].mz_references[0].mz
compound_hits = []
for file_idx,file_name in enumerate(file_names):
polarity = metatlas_dataset[file_idx][compound_idx]['identification'].mz_references[0].detected_polarity
try:
assert set(['rt', 'i', 'precursor_MZ', 'mz']).issubset(set(metatlas_dataset[file_idx][compound_idx]['data']['msms']['data'].keys()))
except (KeyError, AssertionError, AttributeError):
continue
rt_mz_i_df = pd.DataFrame({k:metatlas_dataset[file_idx][compound_idx]['data']['msms']['data'][k]
for k in ['rt', 'mz', 'i', 'precursor_MZ']}
).sort_values(['rt', 'mz'])
for rt in rt_mz_i_df.rt.unique():
msv_sample = rt_mz_i_df[rt_mz_i_df['rt'] == rt][['mz', 'i']].values.T
msv_sample = msv_sample[:,msv_sample[0] < rt_mz_i_df[rt_mz_i_df['rt'] == rt]['precursor_MZ'].values[0] + 2.5]
scan_df = sp.search_ms_refs(msv_sample, **dict(locals(), **kwargs))
if len(scan_df) > 0:
scan_df['file_name'] = file_name
scan_df['msms_scan'] = rt
scan_df.set_index('file_name', append=True, inplace=True)
scan_df.set_index('msms_scan', append=True, inplace=True)
msms_hits.append(scan_df)
if len(msms_hits)>0:
return pd.concat(msms_hits)
else:
return pd.DataFrame(columns=ref_df.index.names+['file_name', 'msms_scan', 'score', 'num_matches']
).set_index(ref_df.index.names+['file_name', 'msms_scan'])
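# Example (hedged sketch): collecting MSMS hits and ranking them by score, mirroring the
# call made in make_identification_figure_v2() below; `metatlas_dataset` is a placeholder.
# hits = get_msms_hits(metatlas_dataset, use_labels=False,
#                      ref_index=['database', 'id', 'inchi_key', 'precursor_mz'])
# hits = hits.reset_index().sort_values('score', ascending=False)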
def make_identification_figure_v2(
input_fname = '', input_dataset = [], include_lcmsruns = [], exclude_lcmsruns = [], include_groups = [],
exclude_groups = [], output_loc = [],use_labels=False,intensity_sorted_matches=False):
#empty can look like this:
# {'eic': {'rt': [], 'intensity': [], 'mz': []}, 'ms1_summary': {'num_ms1_datapoints': 0.0, 'rt_centroid': nan, 'mz_peak': nan, 'peak_height': nan, 'rt_peak': nan, 'peak_area': nan, 'mz_centroid': nan},
#'msms': {'data': {'rt': array([], dtype=float64), 'collision_energy': array([], dtype=float64), 'i': array([], dtype=float64), 'precursor_intensity': array([], dtype=float64), 'precursor_MZ': array([], dtype=float64), 'mz': array([], dtype=float64)}}}
#or empty can look like this:
# {'eic': None, 'ms1_summary': None, 'msms': {'data': []}}
if not os.path.exists(output_loc):
os.makedirs(output_loc)
if not input_dataset:
data = ma_data.get_dill_data(os.path.expandvars(input_fname))
else:
data = input_dataset
#Filter runs from the metatlas dataset
if include_lcmsruns:
data = filter_lcmsruns_in_dataset_by_include_list(data, 'lcmsrun', include_lcmsruns)
if include_groups:
data = filter_lcmsruns_in_dataset_by_include_list(data, 'group', include_groups)
if exclude_lcmsruns:
data = filter_lcmsruns_in_dataset_by_exclude_list(data, 'lcmsrun', exclude_lcmsruns)
if exclude_groups:
data = filter_lcmsruns_in_dataset_by_exclude_list(data, 'group', exclude_groups)
msms_hits_df = get_msms_hits(data, use_labels, ref_index=['database', 'id', 'inchi_key', 'precursor_mz'])
if msms_hits_df is not None:
msms_hits_df.reset_index(['inchi_key', 'precursor_mz'], inplace=True)
msms_hits_df.reset_index(inplace = True)
msms_hits_df.sort_values('score', ascending=False, inplace=True)
# msms_hits_df.drop_duplicates(['inchi_key', 'file_name'], keep='first', inplace=True)
# msms_hits_df = msms_hits_df.groupby(['inchi_key']).head(5).sort_values(['inchi_key'], kind='mergesort')
#Obtain compound and file names
compound_names = ma_data.get_compound_names(data,use_labels)[0]
file_names = ma_data.get_file_names(data)
#Turn off interactive plotting
plt.ioff()
#Iterate over compounds
for compound_idx in range(len(compound_names)):
file_idxs, scores, msv_sample_list, msv_ref_list, rt_list = [], [], [], [], []
#Find 5 best file and reference pairs by score
try:
comp_msms_hits = msms_hits_df[(msms_hits_df['inchi_key'] == data[0][compound_idx]['identification'].compound[0].inchi_key) \
& ((abs(msms_hits_df['precursor_mz'].values.astype(float) - data[0][compound_idx]['identification'].mz_references[0].mz)/data[0][compound_idx]['identification'].mz_references[0].mz) \
<= data[0][compound_idx]['identification'].mz_references[0].mz_tolerance*1e-6)].drop_duplicates('file_name').head(5)
assert len(comp_msms_hits) > 0
inchi_key = data[0][compound_idx]['identification'].compound[0].inchi_key
file_idxs = [file_names.index(f) for f in comp_msms_hits['file_name']]
scores = comp_msms_hits['score'].values.tolist()
msv_sample_list = comp_msms_hits['msv_query_aligned'].values.tolist()
msv_ref_list = comp_msms_hits['msv_ref_aligned'].values.tolist()
rt_list = comp_msms_hits['msms_scan'].values.tolist()
except (IndexError, AssertionError, TypeError) as e:
file_idx = file_with_max_precursor_intensity(data,compound_idx)[0]
if file_idx is not None:
precursor_intensity = data[file_idx][compound_idx]['data']['msms']['data']['precursor_intensity']
idx_max = np.argwhere(precursor_intensity == np.max(precursor_intensity)).flatten()
file_idxs = [file_idx]
msv_sample_list = [np.array([data[file_idx][compound_idx]['data']['msms']['data']['mz'][idx_max],
data[file_idx][compound_idx]['data']['msms']['data']['i'][idx_max]])]
msv_ref_list = [np.full_like(msv_sample_list[-1], np.nan)]
scores = [0]
else:
file_idx = None
max_intensity = 0
for fi in range(len(data)):
try:
temp = max(data[fi][compound_idx]['data']['eic']['intensity'])
if temp > max_intensity:
file_idx = fi
max_intensity = temp
except ValueError:
continue
file_idxs = [file_idx]
msv_sample_list = [np.array([0, np.nan]).T]
msv_ref_list = [np.array([0, np.nan]).T]
scores = [np.nan]
#Plot if compound yields any scores
if file_idxs and file_idxs[0] is not None:
#Top 5 MSMS Spectra
ax1 = plt.subplot2grid((24, 24), (0, 0), rowspan=12, colspan=12)
ax2a = plt.subplot2grid((24, 24), (0, 12), rowspan=3, colspan=3)
ax2a.tick_params(axis='both', length=2)
ax2a.set_xticklabels([])
ax2a.set_yticklabels([])
ax2b = plt.subplot2grid((24, 24), (3, 12), rowspan=3, colspan=3)
ax2b.tick_params(axis='both', length=2)
ax2b.set_xticklabels([])
ax2b.set_yticklabels([])
ax2c = plt.subplot2grid((24, 24), (6, 12), rowspan=3, colspan=3)
ax2c.tick_params(axis='both', length=2)
ax2c.set_xticklabels([])
ax2c.set_yticklabels([])
ax2d = plt.subplot2grid((24, 24), (9, 12), rowspan=3, colspan=3)
ax2d.tick_params(axis='both', length=2)
ax2d.set_xticklabels([])
ax2d.set_yticklabels([])
for i,(score,ax) in enumerate(zip(scores,[ax1, ax2a, ax2b, ax2c, ax2d])):
plot_msms_comparison(i, score, ax, msv_sample_list[i], msv_ref_list[i])
#EMA Compound Info
ax3 = plt.subplot2grid((24, 24), (0, 16), rowspan=6, colspan=8)
plot_ema_compound_info(ax3, data[file_idxs[0]][compound_idx]['identification'])#,
# ma_data.get_compound_names(data,use_labels=True)[0][compound_idx])
#Next Best Scores and Filenames
ax4a = plt.subplot2grid((24, 24), (0, 15), rowspan=3, colspan=1)
ax4a.axis('off')
ax4b = plt.subplot2grid((24, 24), (3, 15), rowspan=3, colspan=1)
ax4b.axis('off')
ax4c = plt.subplot2grid((24, 24), (6, 15), rowspan=3, colspan=1)
ax4c.axis('off')
ax4d = plt.subplot2grid((24, 24), (9, 15), rowspan=3, colspan=1)
ax4d.axis('off')
for i,(score,ax) in enumerate(zip(scores[1:],[ax4a, ax4b, ax4c, ax4d])):
plot_score_and_ref_file(ax, score, rt_list[i+1], os.path.basename(data[file_idxs[i+1]][compound_idx]['lcmsrun'].hdf5_file))
#Structure
ax5 = plt.subplot2grid((24, 24), (13, 0), rowspan=6, colspan=6)
plot_structure(ax5, data[file_idxs[0]][compound_idx]['identification'].compound, 100)
#EIC
ax6 = plt.subplot2grid((24, 24), (6, 16), rowspan=6, colspan=6)
plot_eic(ax6, data, compound_idx)
# #Reference and Sample Info
# ax10 = plt.subplot2grid((24, 24), (14, 6), rowspan=10, colspan=20)
# plot_ref_sample_info(ax10, 1, 1)
#Old code
ax7 = plt.subplot2grid((24, 24), (15, 6), rowspan=9, colspan=20)
mz_theoretical = data[file_idxs[0]][compound_idx]['identification'].mz_references[0].mz
mz_measured = data[file_idxs[0]][compound_idx]['data']['ms1_summary']['mz_centroid']
if not mz_measured:
mz_measured = 0
delta_mz = abs(mz_theoretical - mz_measured)
delta_ppm = delta_mz / mz_theoretical * 1e6
rt_theoretical = data[file_idxs[0]][compound_idx]['identification'].rt_references[0].rt_peak
rt_measured = data[file_idxs[0]][compound_idx]['data']['ms1_summary']['rt_peak']
if not rt_measured:
rt_measured = 0
ax7.text(0,1,'%s'%fill(os.path.basename(data[file_idxs[0]][compound_idx]['lcmsrun'].hdf5_file), width=54),fontsize=8)
ax7.text(0,0.9,'%s %s'%(compound_names[compound_idx], data[file_idxs[0]][compound_idx]['identification'].mz_references[0].adduct),fontsize=8)
ax7.text(0,0.85,'Measured M/Z = %5.4f, %5.4f ppm difference'%(mz_measured, delta_ppm),fontsize=8)
ax7.text(0,0.8,'Expected Elution of %5.2f minutes, %5.2f min actual'%(rt_theoretical,rt_measured),fontsize=8)
if len(rt_list) > 0:
ax7.text(0,0.7,'MSMS Scan at %5.3f minutes'%rt_list[0],fontsize=8)
msv_sample_matches = sp.partition_aligned_ms_vectors(msv_sample_list[0], msv_ref_list[0])[0]
if intensity_sorted_matches:
msv_sample_matches = msv_sample_matches[:, msv_sample_matches[1].argsort()[::-1]]
if len(msv_sample_matches[0]) > 0:
mz_sample_matches = msv_sample_matches[0].tolist()
threshold_mz_sample_matches = sp.remove_ms_vector_noise(msv_sample_matches)[0].tolist()
else:
mz_sample_matches = [np.nan]
threshold_mz_sample_matches = [np.nan]
ax7.text(0,0.6,
fill('Matching M/Zs above 1E-3*max: ' + ', '.join(['%5.3f'%m for m in threshold_mz_sample_matches]), width=90) + '\n\n' +
fill('All Matching M/Zs: ' + ', '.join(['%5.3f'%m for m in mz_sample_matches]), width=90),
fontsize=6, verticalalignment='top')
ax7.set_ylim(.5,1.1)
ax7.axis('off')
plt.savefig(os.path.join(output_loc, compound_names[compound_idx] + '.pdf'))
plt.close()
def plot_ms1_spectra(polarity = None, mz_min = 5, mz_max = 5, input_fname = '', input_dataset = [], compound_names = [], include_lcmsruns = [], exclude_lcmsruns = [], include_groups = [], exclude_groups = [], output_loc = []):
"""
Plot three views of ms1 spectra for compounds in input_dataset using file with highest RT peak of a polarity:
Unscaled: plots ms1 spectra within window of mz_min and mz_max
Scaled: plots ms1 spectra within window of mz_min and mz_max scaling mz of compound to 70%
Full Range: plots ms1 spectra without window (unscaled)
"""
print('here I am')
if not input_dataset:
data = ma_data.get_dill_data(os.path.expandvars(input_fname))
else:
data = input_dataset
if include_lcmsruns:
data = filter_lcmsruns_in_dataset_by_include_list(data, 'lcmsrun', include_lcmsruns)
if include_groups:
data = filter_lcmsruns_in_dataset_by_include_list(data, 'group', include_groups)
if exclude_lcmsruns:
data = filter_lcmsruns_in_dataset_by_exclude_list(data, 'lcmsrun', exclude_lcmsruns)
if exclude_groups:
data = filter_lcmsruns_in_dataset_by_exclude_list(data, 'group', exclude_groups)
#Make sure there is data
assert(len(data) != 0)
all_compound_names = ma_data.get_compound_names(data)[0]
#Set default compound list to all compounds in input_dataset
if not compound_names:
compound_names = all_compound_names
#Find implicit polarity and make sure there is not more than one
if 'POS' in include_lcmsruns or 'NEG' in exclude_lcmsruns:
assert(polarity == None or polarity == 'positive')
polarity = 'positive'
if 'NEG' in include_lcmsruns or 'POS' in exclude_lcmsruns:
assert(polarity == None or polarity == 'negative')
polarity = 'negative'
if 'POS' in include_groups or 'NEG' in exclude_groups:
assert(polarity == None or polarity == 'positive')
polarity = 'positive'
if 'NEG' in include_groups or 'POS' in exclude_groups:
assert(polarity == None or polarity == 'negative')
polarity = 'negative'
assert(polarity == 'positive' or polarity == 'negative')
    #Additional variables used across all compounds
lcms_polarity = 'ms1_' + polarity[:3]
titles = ['Unscaled', 'Scaled', 'Full Range']
for compound_idx in [i for i,c in enumerate(all_compound_names) if c in compound_names]:
print('compound is',compound_idx)
        #Find file_idx with highest RT peak
highest = 0
file_idx = None
for i,d in enumerate(data):
if d[compound_idx]['identification'].mz_references[0].detected_polarity == polarity:
if d[compound_idx]['data']['ms1_summary']['peak_height'] > highest:
highest = d[compound_idx]['data']['ms1_summary']['peak_height']
file_idx = i
lcms_data = ma_data.df_container_from_metatlas_file(data[file_idx][compound_idx]['lcmsrun'].hdf5_file)
#Find RT and mz peak for compound in file
rt_peak = data[file_idx][compound_idx]['data']['ms1_summary']['rt_peak']
rt_peak_actual = lcms_data[lcms_polarity].iloc[(lcms_data[lcms_polarity].rt - rt_peak).abs().argsort()[0]].rt
mz_peak_actual = data[file_idx][compound_idx]['data']['ms1_summary']['mz_peak']
#Create and sort dataframe containing RT peak, mz and intensity
df_all = lcms_data[lcms_polarity][(lcms_data[lcms_polarity].rt == rt_peak_actual)]
df_all.sort_values('i',ascending=False,inplace=True)
#Limit prior dataframe to +/- mz_min, mz_max
df_window = df_all[(df_all['mz'] > mz_peak_actual - mz_min) &
(df_all['mz'] < mz_peak_actual + mz_max) ]
#Plot compound name, mz, and RT peak
plt.ioff()
fig = plt.gcf()
fig.suptitle('%s, m/z: %5.4f, rt: %f'%(all_compound_names[compound_idx], mz_peak_actual, rt_peak_actual),
fontsize=8,weight='bold')
#Create axes for different views of ms1 spectra (unscaled, scaled, and full range)
ax1 = plt.subplot2grid((11, 12), (0, 0), rowspan=5, colspan=5)
ax2 = plt.subplot2grid((11, 12), (0, 7), rowspan=5, colspan=5)
ax3 = plt.subplot2grid((11, 12), (6, 0), rowspan=5, colspan=12)
#Plot ms1 spectra
for ax_idx,(ax,df) in enumerate(zip([ax1, ax2, ax3], [df_window, df_window, df_all])):
ax.set_xlabel('m/z',fontsize=8,weight='bold')
ax.set_ylabel('intensity',fontsize=8,weight='bold')
ax.tick_params(axis='both', which='major', labelsize=6)
ax.set_title(titles[ax_idx],fontsize=8,weight='bold')
mzs = df['mz']
zeros = np.zeros(len(df['mz']))
intensities = df['i']
ax.vlines(mzs, zeros, intensities, colors='r',linewidth = 2)
labels = [1.001e9]
for i,row in df.iloc[:6].iterrows():
                ax.annotate('%.4f'%row.mz, xy=(row.mz, 1.03*row.i),rotation = 90, horizontalalignment = 'center', verticalalignment = 'bottom', fontsize=6)
labels.append(row.mz)
ax.axhline(0)
if ax_idx != 2:
ax.set_xlim(mz_peak_actual - mz_min, mz_peak_actual + mz_max)
ylim = ax.get_ylim()
if ax_idx == 1:
ax.set_ylim(ylim[0], df[((mz_peak_actual - .05 < df['mz']) & (df['mz'] < mz_peak_actual + .05))].iloc[0]['i']*1.43)
else:
ax.set_ylim(ylim[0], ylim[1]*1.43)
if not os.path.exists(output_loc):
os.makedirs(output_loc)
plt.savefig(os.path.join(output_loc, all_compound_names[compound_idx] + '.pdf'))
def export_atlas_to_spreadsheet(myAtlas, output_filename='', input_type = 'atlas'):
"""
Return a pandas dataframe containing Atlas info. Optionally save it.
This function can also work on a MetAtlas dataset (list of lists returned by get_data_for_atlas_and_groups).
"""
cols = [c for c in metob.Compound.class_trait_names() if not c.startswith('_')]
cols = sorted(cols)
atlas_export = pd.DataFrame( )
if input_type != 'atlas':
num_compounds = len(myAtlas[0])
else:
num_compounds = len(myAtlas.compound_identifications)
for i in range(num_compounds):
if input_type != 'atlas':
my_id = myAtlas[0][i]['identification']
n = my_id.name
else:
my_id = myAtlas.compound_identifications[i]
if my_id.compound:
for c in cols:
g = getattr(my_id.compound[0],c)
if g:
atlas_export.loc[i,c] = g
else:
atlas_export.loc[i,c] = ''
atlas_export.loc[i, 'label'] = my_id.name
atlas_export.loc[i, 'id_notes'] = my_id.description
atlas_export.loc[i,'rt_min'] = my_id.rt_references[0].rt_min
atlas_export.loc[i,'rt_max'] = my_id.rt_references[0].rt_max
atlas_export.loc[i,'rt_peak'] = my_id.rt_references[0].rt_peak
atlas_export.loc[i,'mz'] = my_id.mz_references[0].mz
atlas_export.loc[i,'mz_tolerance'] = my_id.mz_references[0].mz_tolerance
atlas_export.loc[i,'adduct'] = my_id.mz_references[0].adduct
atlas_export.loc[i,'polarity'] = my_id.mz_references[0].detected_polarity
# if my_id.frag_references:
# atlas_export.loc[i,'has_fragmentation_reference'] = True
# # TODO: Gather the frag reference information and export it
# else:
# atlas_export.loc[i,'has_fragmentation_reference'] = False
if output_filename:
if not os.path.exists(os.path.dirname(output_filename)):
os.makedirs(os.path.dirname(output_filename))
atlas_export.to_csv(output_filename)
return atlas_export
def get_data_for_groups_and_atlas(group,myAtlas,output_filename,use_set1 = False):
"""
    Get and pickle everything: MSMS, raw MS1 datapoints, compound, group info, and file info.
"""
data = []
import copy as copy
for i,treatment_groups in enumerate(group):
for j in range(len(treatment_groups.items)):
myFile = treatment_groups.items[j].hdf5_file
# try:
# rt_reference_index = int(treatment_groups.name[-1]) - 1
# except:
# rt_reference_index = 3
print(i, len(group), myFile)
row = []
for compound in myAtlas.compound_identifications:
result = {}
result['atlas_name'] = myAtlas.name
result['atlas_unique_id'] = myAtlas.unique_id
result['lcmsrun'] = treatment_groups.items[j]
result['group'] = treatment_groups
temp_compound = copy.deepcopy(compound)
if use_set1:
if '_Set1' in treatment_groups.name:
temp_compound.rt_references[0].rt_min -= 0.2
temp_compound.rt_references[0].rt_max -= 0.2
temp_compound.rt_references[0].rt_peak -= 0.2
temp_compound.mz_references[0].mz_tolerance = 20
result['identification'] = temp_compound
result['data'] = ma_data.get_data_for_a_compound(temp_compound.mz_references[0],
temp_compound.rt_references[0],
[ 'ms1_summary', 'eic', 'msms' ],
myFile,0.2)
# print result['data']['ms1_summary']
row.append(result)
data.append(row)
    with open(output_filename,'wb') as f:
dill.dump(data,f)
def filter_metatlas_objects_to_most_recent(object_list,field):
#from datetime import datetime, date
#remove from list if another copy exists that is newer
unique_values = []
for i,a in enumerate(object_list):
unique_values.append( getattr(a,field) )
unique_values = list(set(unique_values))
keep_object_list = []
for u in unique_values:
old_last_modified = 0
for i,a in enumerate(object_list):
if getattr(a,field) == u:
last_modified = getattr(a,'last_modified')
if last_modified > old_last_modified:
keep_object = a
old_last_modified = last_modified
keep_object_list.append(keep_object)
return keep_object_list
# print i, a.name, datetime.utcfromtimestamp(a.last_modified)
def get_metatlas_atlas(name = '%%',username = '*', most_recent = True,do_print = True):
from datetime import datetime, date
atlas = metob.retrieve('Atlas',name = name,username=username)
if most_recent:
atlas = filter_metatlas_objects_to_most_recent(atlas,'name')
if do_print:
for i,a in enumerate(atlas):
print(i, len(a.compound_identifications),a.name, datetime.utcfromtimestamp(a.last_modified))
return atlas
class interact_get_metatlas_files():
def __init__(self, experiment = '%violacein%', name = '%_%', most_recent = True):
self.experiment = experiment
self.name = name
self.most_recent = most_recent
# http://ipywidgets.readthedocs.io/en/latest/examples/Using%20Interact.html
self.w = interact(self.Task, experiment=self.experiment, name=self.name, most_recent = self.most_recent,__manual=True)#continuous_update=False)#
def Task(self,experiment,name,most_recent):
self.experiment = experiment
self.name = name
self.most_recent = most_recent
self.files = get_metatlas_files(experiment = experiment,name = name,most_recent = most_recent)#self.most_recent)
txt = widgets.Text()
txt.value = '%d Files were found matching that pattern'%len(self.files)
display(txt)
def get_metatlas_files(experiment = '%%',name = '%%',most_recent = True):
"""
experiment is the folder name
name is the filename
"""
files = metob.retrieve('LcmsRun',experiment=experiment,name=name, username='*')
if most_recent:
files = filter_metatlas_objects_to_most_recent(files,'mzml_file')
return files
def make_empty_fileinfo_sheet(filename,flist):
    #dump all the files to a spreadsheet, download it, and make a "filled in" one.
with open(filename,'w') as fid:
fid.write('mzml_file\tgroup\tdescription\n')
for f in flist:
fid.write('%s\t\t\n'%f.mzml_file)
def make_groups_from_fileinfo_sheet(filename,filetype='tab',store=False):
'''
'''
if filetype == 'tab':
df = pd.read_csv(filename,sep='\t')
elif filetype == 'csv':
df = pd.read_csv(filename,sep=',')
elif filetype == 'df':
df = filename
else:
df = pd.read_excel(filename)
grouped = df.groupby(by='group')
return_groups = []
for g in grouped.groups.keys():
indices = grouped.groups[g]
myGroup = metob.Group()
myGroup.name = '%s'%g
myGroup.description = df.loc[indices[0],'description']
file_set = []
for i in indices:
file_set.append(metob.retrieve('LcmsRun',mzml_file='%%%s'%df.loc[i,'mzml_file'],username='*')[0])
myGroup.items = file_set
return_groups.append(myGroup)
if store:
metob.store(myGroup)
return return_groups
def check_compound_names(df):
# compounds that have the wrong compound name will be listed
# Keep running this until no more compounds are listed
bad_names = []
for i,row in df.iterrows():
#if type(df.name[x]) != float or type(df.label[x]) != float:
#if type(df.name[x]) != float:
if not pd.isnull(row.inchi_key):# or type(df.inchi_key[x]) != float:
if not metob.retrieve('Compounds',inchi_key=row.inchi_key, username = '*'):
print(row.inchi_key, "compound is not in database. Exiting Without Completing Task!")
bad_names.append(row.inchi_key)
return bad_names
def check_file_names(df,field):
bad_files = []
for i,row in df.iterrows():
if row[field] != '':
if not metob.retrieve('Lcmsruns',name = '%%%s%%'%row[field],username = '*'):
print(row[field], "file is not in the database. Exiting Without Completing Task!")
bad_files.append(row[field])
return bad_files
def get_formatted_atlas_from_google_sheet(polarity='POS',
method='QE_HILIC',
mz_tolerance=10):
import metatlas.ms_monitor_util as mmu
df2 = mmu.get_ms_monitor_reference_data()
#print df.head()
#df2 = pd.DataFrame(df[1:],columns=df[0])
fields_to_keep = [ 'name',
'label',
'inchi_key',
'mz_%s'%polarity,
'rt_min_%s'%method,
'rt_max_%s'%method,
'rt_peak_%s'%method,
'file_mz_%s_%s'%(method,polarity),
'file_rt_%s_%s'%(method,polarity),
'file_msms_%s_%s'%(method,polarity)]
fields_there = []
for f in fields_to_keep:
if f in df2.keys():
fields_there.append(f)
df3 = df2.loc[:,fields_there]
df3['mz_tolerance'] = mz_tolerance
if polarity == 'POS':
df3['polarity'] = 'positive'
else:
df3['polarity'] = 'negative'
renamed_columns = [c.replace('_%s'%method,'').replace('_%s'%polarity,'') for c in df3.columns]
for i,c in enumerate(df3.columns):
df3 = df3.rename(columns = {c:renamed_columns[i]})
df3 = df3[df3['mz'] != '']
return df3
def make_atlas_from_spreadsheet(filename='valid atlas file.csv',
atlas_name='20161007_MP3umZHILIC_BPB_NEG_ExampleAtlasName',
filetype=('excel','csv','tab','dataframe'),
sheetname='only for excel type input',
polarity = ('positive','negative'),
store=False,
mz_tolerance=10):
'''
specify polarity as 'positive' or 'negative'
'''
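    # Hypothetical usage sketch (added comment; the file name and atlas name are placeholders):
    #   atlas = make_atlas_from_spreadsheet('my_atlas.csv', 'my_atlas_name',
    #                                       filetype='csv', polarity='negative',
    #                                       store=False, mz_tolerance=10)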
if isinstance(filename,pd.DataFrame):
df = filename
else:
if ( filetype=='excel' ) and sheetname:
df = pd.read_excel(filename,sheetname=sheetname)
elif ( filetype=='excel' ):
df =
|
pd.read_excel(filename)
|
pandas.read_excel
|
import pandas as pd
import typing
def unpack_column_tokens(column_tokens: pd.Series, token_symbols: typing.List[str]) -> pd.DataFrame:
di = {}
for symbol in token_symbols:
di[f'token_{symbol}_balance'] = []
di[f'token_{symbol}_denorm_weight'] = []
di[f'token_{symbol}_weight'] = []
for r in column_tokens:
di[f'token_{symbol}_weight'].append(r[symbol.upper()].weight)
di[f'token_{symbol}_denorm_weight'].append(r[symbol.upper()].denorm_weight)
di[f'token_{symbol}_balance'].append(r[symbol.upper()].balance)
return pd.DataFrame.from_dict(di).astype('float64')
def unpack_column_generated_fees(column_fees: pd.Series, token_symbols: typing.List[str]) -> pd.DataFrame:
di = {}
for symbol in token_symbols:
di[f'generated_fees_{symbol}'] = []
for r in column_fees:
di[f'generated_fees_{symbol}'].append(r[symbol.upper()])
return
|
pd.DataFrame.from_dict(di)
|
pandas.DataFrame.from_dict
|
"""
Routines for filling missing data.
"""
from __future__ import annotations
from functools import (
partial,
wraps,
)
from typing import (
TYPE_CHECKING,
Any,
cast,
)
import numpy as np
from pandas._libs import (
algos,
lib,
)
from pandas._typing import (
ArrayLike,
Axis,
F,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import (
is_array_like,
is_numeric_v_string_like,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
na_value_for_dtype,
)
if TYPE_CHECKING:
from pandas import Index
def check_value_size(value, mask: np.ndarray, length: int):
"""
Validate the size of the values passed to ExtensionArray.fillna.
"""
if is_array_like(value):
if len(value) != length:
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f" expected {length}"
)
value = value[mask]
return value
def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]:
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
Parameters
----------
arr : ArrayLike
values_to_mask: list, tuple, or scalar
Returns
-------
np.ndarray[bool]
"""
# When called from Block.replace/replace_list, values_to_mask is a scalar
# known to be holdable by arr.
# When called from Series._single_replace, values_to_mask is tuple or list
dtype, values_to_mask = infer_dtype_from(values_to_mask)
# error: Argument "dtype" to "array" has incompatible type "Union[dtype[Any],
# ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
# Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any],
# _DTypeDict, Tuple[Any, Any]]]"
values_to_mask = np.array(values_to_mask, dtype=dtype) # type: ignore[arg-type]
na_mask = isna(values_to_mask)
nonna = values_to_mask[~na_mask]
# GH 21977
mask = np.zeros(arr.shape, dtype=bool)
for x in nonna:
if is_numeric_v_string_like(arr, x):
# GH#29553 prevent numpy deprecation warnings
pass
else:
mask |= arr == x
if na_mask.any():
mask |= isna(arr)
if not isinstance(mask, np.ndarray):
# e.g. if arr is IntegerArray, then mask is BooleanArray
mask = mask.to_numpy(dtype=bool, na_value=False)
return mask
def clean_fill_method(method, allow_nearest: bool = False):
# asfreq is compat for resampling
if method in [None, "asfreq"]:
return None
if isinstance(method, str):
method = method.lower()
if method == "ffill":
method = "pad"
elif method == "bfill":
method = "backfill"
valid_methods = ["pad", "backfill"]
expecting = "pad (ffill) or backfill (bfill)"
if allow_nearest:
valid_methods.append("nearest")
expecting = "pad (ffill), backfill (bfill) or nearest"
if method not in valid_methods:
raise ValueError(f"Invalid fill method. Expecting {expecting}. Got {method}")
return method
# interpolation methods that dispatch to np.interp
NP_METHODS = ["linear", "time", "index", "values"]
# interpolation methods that dispatch to _interpolate_scipy_wrapper
SP_METHODS = [
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"barycentric",
"krogh",
"spline",
"polynomial",
"from_derivatives",
"piecewise_polynomial",
"pchip",
"akima",
"cubicspline",
]
def clean_interp_method(method: str, index: Index, **kwargs) -> str:
order = kwargs.get("order")
if method in ("spline", "polynomial") and order is None:
raise ValueError("You must specify the order of the spline or polynomial.")
valid = NP_METHODS + SP_METHODS
if method not in valid:
raise ValueError(f"method must be one of {valid}. Got '{method}' instead.")
if method in ("krogh", "piecewise_polynomial", "pchip"):
if not index.is_monotonic:
raise ValueError(
f"{method} interpolation requires that the index be monotonic."
)
return method
def find_valid_index(values, *, how: str) -> int | None:
"""
Retrieves the index of the first valid value.
Parameters
----------
values : ndarray or ExtensionArray
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
int or None
"""
assert how in ["first", "last"]
if len(values) == 0: # early stop
return None
is_valid = ~isna(values)
if values.ndim == 2:
is_valid = is_valid.any(1) # reduce axis 1
if how == "first":
idxpos = is_valid[::].argmax()
elif how == "last":
idxpos = len(values) - 1 - is_valid[::-1].argmax()
chk_notna = is_valid[idxpos]
if not chk_notna:
return None
return idxpos
def interpolate_array_2d(
data: np.ndarray,
method: str = "pad",
axis: int = 0,
index: Index | None = None,
limit: int | None = None,
limit_direction: str = "forward",
limit_area: str | None = None,
fill_value: Any | None = None,
coerce: bool = False,
downcast: str | None = None,
**kwargs,
) -> None:
"""
Wrapper to dispatch to either interpolate_2d or _interpolate_2d_with_fill.
Notes
-----
Alters 'data' in-place.
"""
try:
m = clean_fill_method(method)
except ValueError:
m = None
if m is not None:
if fill_value is not None:
# similar to validate_fillna_kwargs
raise ValueError("Cannot pass both fill_value and method")
interpolate_2d(
data,
method=m,
axis=axis,
limit=limit,
limit_area=limit_area,
)
else:
assert index is not None # for mypy
_interpolate_2d_with_fill(
data=data,
index=index,
axis=axis,
method=method,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
fill_value=fill_value,
**kwargs,
)
return
def _interpolate_2d_with_fill(
data: np.ndarray, # floating dtype
index: Index,
axis: int,
method: str = "linear",
limit: int | None = None,
limit_direction: str = "forward",
limit_area: str | None = None,
fill_value: Any | None = None,
**kwargs,
) -> None:
"""
Column-wise application of _interpolate_1d.
Notes
-----
Alters 'data' in-place.
The signature does differ from _interpolate_1d because it only
includes what is needed for Block.interpolate.
"""
# validate the interp method
clean_interp_method(method, index, **kwargs)
if
|
is_valid_na_for_dtype(fill_value, data.dtype)
|
pandas.core.dtypes.missing.is_valid_na_for_dtype
|
# Author: <NAME>
# Tail number downloader
"""This script downloads tail number data from for Ohare Airport from the US Federal Aviation Administration
Usage: python download_tailnums.py
"""
import requests
from selenium import webdriver
import pandas as pd
import time
def extract_tail_info(tails):
driver = webdriver.Chrome()
result = {'tail_num': [], 'manufacturer': [], 'model': []}
failures = {'tail_num': []}
count =0
driver = webdriver.Chrome()
for tail in tails:
time.sleep(5)
try:
tail = str(tail)
url_ = 'https://registry.faa.gov/aircraftinquiry/NNum_Results.aspx?NNumbertxt='
driver.get(url_+tail)
result['manufacturer'].append(driver.find_element_by_id("ctl00_content_lbMfrName").text)
result['model'].append(driver.find_element_by_id("ctl00_content_Label7").text)
result['tail_num'].append(tail)
count += 1
print(count)
except:
failures['tail_num'].append(tail)
pd.DataFrame(result).to_csv('data/tailnums.csv')
pd.DataFrame(failures).to_csv('data/missing_tailnums.csv')
def main():
load_ord =
|
pd.read_csv('data/ORD_OTP.csv')
|
pandas.read_csv
|
import os
import re
import pandas as pd
import numpy as np
from collections import defaultdict
from graphdb_builder import builder_utils, mapping
def parser(projectId, type='proteomics', directory=None):
#directory = None
data = {}
cwd = os.path.abspath(os.path.dirname(__file__))
config = builder_utils.get_config(config_name="proteomics.yml", data_type='experiments')
if directory is None:
directory = os.path.join(cwd, '../../../../data/experiments/PROJECTID/' + type)
if 'directory' in config:
directory = os.path.join(cwd, config['directory'] + type)
directory = directory.replace('PROJECTID', projectId)
data = parse_from_directory(projectId, directory, config)
return data
def parse_from_directory(projectId, directory, configuration={}):
data = {}
processing_results = [x[0] for x in os.walk(directory)]
for results_path in processing_results:
processing_tool = os.path.basename(os.path.normpath(results_path))
if processing_tool in configuration:
sample_mapping = mapping.get_mapping_analytical_samples(projectId)
if len(sample_mapping) > 0:
mapping.map_experiment_files(projectId, os.path.join(directory, processing_tool), sample_mapping)
tool_configuration = configuration[processing_tool]
for dtype in tool_configuration:
dataset_configuration = tool_configuration[dtype]
missing_conf = check_minimum_configuration(dataset_configuration)
if len(missing_conf) == 0:
dfile_regex = re.compile(dataset_configuration['file'])
filepath = ''
for dir_content in os.walk(results_path):
for f in dir_content[2]:
if dfile_regex.match(f):
filepath = os.path.join(results_path, f)
break
data.update(parser_from_file(file_path=filepath, configuration=dataset_configuration, data_type=dtype))
else:
raise Exception("Error when importing proteomics experiment.\n Missing configuration: {}".format(",".join(missing_conf)))
return data
def parser_from_file(file_path, configuration, data_type, is_standard=True):
data = {}
if is_standard:
df = parse_standard_dataset(file_path, configuration)
else:
df = parse_dataset(file_path, configuration)
if df is not None and not df.empty:
if data_type == "proteins":
data[(data_type, 'w')] = extract_protein_subject_rels(df, configuration)
elif data_type == "peptides":
data[('subject_peptide', 'w')] = extract_peptide_subject_rels(df, configuration)
data[('peptide_protein', 'w')] = extract_peptide_protein_rels(df, configuration)
data[(data_type, 'w')] = extract_peptides(df, configuration)
else:
data[('modifiedprotein_subject', 'a')] = extract_protein_modification_subject_rels(df, configuration)
data[('modifiedprotein_protein', 'a')] = extract_protein_protein_modification_rels(df, configuration)
data[('modifiedprotein_peptide', 'a')] = extract_peptide_protein_modification_rels(df, configuration)
data[('modifiedprotein', 'a')] = extract_protein_modifications_rels(df, configuration)
data[('modifiedprotein_modification', 'a')] = extract_protein_modifications_modification_rels(df, configuration)
return data
def get_configuration(processing_tool, data_type):
configuration = None
if processing_tool is not None:
config = builder_utils.get_config(config_name="proteomics.yml", data_type='experiments')
if processing_tool in config:
tool_configuration = config[processing_tool]
if data_type in tool_configuration:
configuration = tool_configuration[data_type]
return configuration
def update_configuration(data_type, processing_tool, value_col='LFQ intensity', columns=[], drop_cols=[], filters=None, new_config={}):
configuration = get_configuration(processing_tool, data_type)
if configuration is not None:
configuration['columns'].extend(columns)
configuration['valueCol'] = value_col
if len(drop_cols) > 0:
configuration['columns'] = [c for c in configuration['columns'] if c not in drop_cols]
if 'attributes' in configuration:
if 'cols' in configuration['attributes']:
configuration['attributes']['cols'] = [c for c in configuration['attributes']['cols'] if c not in drop_cols]
if 'regex' in configuration['attributes']:
configuration['attributes']['regex'] = [c for c in configuration['attributes']['regex'] if c not in drop_cols]
if filters is not None:
configuration['filters'] = filters
for key in new_config:
configuration[key] = new_config[key]
return configuration
def parse_dataset(filepath, configuration):
data = None
if os.path.isfile(filepath):
data, regex = load_dataset(filepath, configuration)
if data is not None:
if 'log' in configuration:
log = configuration['log']
cols = get_value_cols(data, configuration)
if log == 'log2':
data[cols] = np.log2(data[cols]).replace([np.inf, -np.inf], np.nan)
elif log == 'log10':
data[cols] = np.log10(data[cols]).replace([np.inf, -np.inf], np.nan)
return data
def parse_standard_dataset(file_path, configuration):
dataset = None
if os.path.isfile(file_path):
data, regex = load_dataset(file_path, configuration)
if data is not None:
log = configuration['log']
combine = 'regex'
if 'combine' in configuration:
combine = configuration['combine']
if combine == 'valueCol':
value_cols = get_value_cols(data, configuration)
subjectDict = extract_subject_replicates(data, value_cols)
else:
subjectDict = extract_subject_replicates_from_regex(data, regex)
delCols = []
for subject in subjectDict:
delCols.extend(subjectDict[subject])
aux = data[subjectDict[subject]]
data[subject] = calculate_median_replicates(aux, log)
dataset = data.drop(delCols, 1)
dataset = dataset.dropna(how='all')
return dataset
def check_columns(data, req_columns, generated_columns):
return set(req_columns).difference(set(data.columns)).difference(generated_columns)
def check_minimum_configuration(configuration):
minimum_req = ['columns', 'indexCol',
'proteinCol', 'log',
'file', 'valueCol', 'attributes']
return set(minimum_req).difference(set(configuration.keys()))
def load_dataset(uri, configuration):
''' This function gets the molecular data from a proteomics experiment.
Input: uri of the processed file resulting from MQ
Output: pandas DataFrame with the columns and filters defined in config.py '''
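    # Hypothetical usage sketch (added comment; the file path and the 'maxquant'/'proteins'
    # configuration keys are assumptions, not taken from the actual config file):
    #   config = builder_utils.get_config(config_name="proteomics.yml", data_type='experiments')
    #   df, regex_cols = load_dataset('proteinGroups.txt', config['maxquant']['proteins'])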
data = None
regexCols = None
filters = None
columns = configuration["columns"]
regexCols = [c.replace("\\\\", "\\") for c in columns if '+' in c]
columns = set(columns).difference(regexCols)
generated_columns = []
if 'generated_columns' in configuration:
generated_columns = configuration['generated_columns']
if 'filters' in configuration:
filters = configuration["filters"]
indexCol = configuration["indexCol"]
data = builder_utils.readDataset(uri)
missing_cols = check_columns(data, columns, generated_columns)
if len(missing_cols) == 0:
if filters is not None:
data = data[data[filters].isnull().all(1)]
data = data.drop(filters, axis=1)
columns = set(columns).difference(filters)
if 'numeric filter' in configuration:
for f in configuration['numeric filter']:
key = list(f.keys())[0]
if key in columns:
value = f[key]
data = data[data[key] >= value]
else:
raise Exception("Error when applying numeric filter on {}. The column is not in the dataset".format(f))
data = data.dropna(subset=[configuration["proteinCol"]], axis=0)
data = expand_groups(data, configuration)
columns.remove(indexCol)
for regex in regexCols:
r = re.compile(regex)
columns.update(set(filter(r.match, data.columns)))
data = data[list(columns)].replace('Filtered', np.nan)
value_cols = get_value_cols(data, configuration)
data[value_cols] = data[value_cols].apply(lambda x: pd.to_numeric(x, errors='coerce'))
data = data.dropna(how='all', subset=value_cols, axis=0)
else:
raise Exception("Error when importing proteomics experiment.\n Missing columns: {}".format(",".join(missing_cols)))
return data, regexCols
def remove_contaminant_tag(column, tag='CON__'):
new_column = [c.replace(tag, '') for c in column]
return new_column
def expand_groups(data, configuration):
default_group_col = 'id'
if "groupCol" not in configuration or configuration["groupCol"] is None:
data.index.name = default_group_col
data = data.reset_index()
configuration['groupCol'] = default_group_col
elif configuration['groupCol'] not in data.columns:
data.index.name = configuration['groupCol']
data = data.reset_index()
s = data[configuration["proteinCol"]].str.split(';').apply(pd.Series, 1).stack().reset_index(level=1, drop=True)
del data[configuration["proteinCol"]]
pdf = s.to_frame(configuration["proteinCol"])
if "multipositions" in configuration:
s2 = data[configuration["multipositions"]].str.split(';').apply(pd.Series, 1).stack().reset_index(level=1, drop=True)
del data[configuration["multipositions"]]
pdf =
|
pd.concat([s, s2], axis=1, keys=[configuration["proteinCol"], configuration["multipositions"]])
|
pandas.concat
|
# coding: utf-8
import pymysql
import numpy as np
import pandas as pd
import csv
import xgboost as xgb
from numpy import loadtxt
from xgboost import XGBClassifier
from xgboost import plot_importance
from xgboost import plot_tree
# other python files that are needed
import feature
###################### DB connect
db = pymysql.connect(host="", port=3306, user="", passwd="",db="")
### train_set - skeleton
def make_train_set():
SQL = "SELECT order_id, user_id, order_dow, order_hour_of_day FROM orders"
orders_df = pd.read_sql(SQL, db)
SQL = "SELECT order_id FROM order_products__train"
train_df = pd.read_sql(SQL, db)
print("make train set - basic start")
    # ------------------ find the user for each train order_id, then check the products that user bought most recently
    # deduplicate order_id >> looks like a count, but it is really removing duplicates
train_df= train_df.groupby("order_id").aggregate("count").reset_index()
    # find the user_id for each order_id and merge
train_df = pd.merge(train_df, orders_df, how="inner", on="order_id")
    # merge with prior
    # product list for each user and order_id
train_df = pd.merge(train_df, feature.latest_order(), how="inner", on="user_id")
    # take only product_id, aisle (sub-category), and department (major category) from the product table and merge
# products_df = pd.read_csv( "products.csv", usecols=["product_id", "aisle_id", "department_id"])
SQL = "SELECT product_id, aisle_id, department_id FROM products"
products_df = pd.read_sql(SQL, db)
train_df = pd.merge(train_df, products_df, how="inner", on="product_id")
del products_df, orders_df, SQL
print("make train set - basic finish")
return train_df
'''
Section that attaches the newly created features.
Many features were built, but only the most accurate one is used here.
'''
def train_result():
train_x = make_train_set()
    train_x = pd.merge(train_x, feature.order_ratio_bychance(), how="left", on = ["user_id", "product_id"])
return train_x
### train answer : train_y
def make_answer(train_x):
SQL = "SELECT order_id, user_id FROM orders"
orders_df = pd.read_sql(SQL, db)
SQL = "SELECT order_id, product_id, reordered FROM order_products__train"
train_df = pd.read_sql(SQL, db)
print ("train_y start")
answer =
|
pd.merge(train_df, orders_df, how="inner", on="order_id")
|
pandas.merge
|
"""Predicts restoration scenarios"""
import copy
import os
import sys
import time
import multiprocessing
from operator import itemgetter
import numpy as np
import pandas as pd
import pymc3 as pm
import indp
import indputils
import indpalt
class NodeModel():
"""Stores information for a node model """
def __init__(self, name, net_id):
self.name = name
self.type = 'n'
self.net_id = net_id
self.initial_state = 1.0
self.state_hist = 0
self.model_status = 0
self.model_params = []
self.w_n_t_1 = 0
self.w_a_t_1 = 0
self.w_d_t_1 = 0
self.degree = 0
self.neighbors = []
self.arcs = []
self.num_dependee = 0
self.dependees = []
def initialize_state_matrices(self, time_step, num_pred):
"""Initializes state and predictor matrices """
self.state_hist = np.ones((time_step+1, num_pred))
self.state_hist[0, :] = self.initial_state
self.w_n_t_1 = np.zeros((time_step, num_pred))
self.w_a_t_1 = np.zeros((time_step, num_pred))
self.w_d_t_1 = np.zeros((time_step, num_pred))
def add_neighbor(self, neighbor):
"""Add a neighhbor node and updates connected arcs and degree"""
if neighbor not in self.neighbors:
self.neighbors.append(neighbor)
self.arcs.append('y_'+self.name[2:]+','+neighbor[2:])
self.arcs.append('y_'+neighbor[2:]+','+self.name[2:])
self.degree += 1
def add_dependee(self, dependee):
"""Add a dependee node and updates the number of dependee nodes"""
if dependee not in self.dependees:
self.dependees.append(dependee)
self.num_dependee += 1
def check_model_exist(self, param_folder):
"""Find and checks the model for the element"""
param_file = param_folder+'/model_parameters_'+self.name+'.txt'
if os.path.exists(param_file):
self.model_status = 1
self.model_params =
|
pd.read_csv(param_file, delimiter=' ')
|
pandas.read_csv
|
import os
import numpy as np
import pandas as pd
from os.path import join
# Check if directories exist or not, and create any that are missing
def check_dirs(path_list):
for dir_path in path_list:
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
class DataPreprocess(object):
def __init__(self, data_path, save_path, drop_list=[]):
self.data_path = data_path
self.drop_list = drop_list
self.save_path = save_path
df_cn_en = pd.read_csv(join(self.data_path, 'cn_to_en.csv'))
self.cn_to_en = {c: e for c, e in zip(df_cn_en['cn_name'], df_cn_en['en_name'])}
def get_delist(self, file_path):
df_info = pd.read_csv(file_path)
        # Copy the info dataframe
df_delist = df_info.copy()
# Get delist stocks
# df_delist = df_delist[df_delist['listStatusCD'] == 'DE']
df_delist = df_delist[(df_delist['listStatusCD'] == 'DE') & (df_delist['ticker'].apply(lambda x: len(x) == 6))]
# Drop columns
df_delist = df_delist[['ticker', 'secShortName', 'delistDate']]
# Normalize ticker
df_delist['ticker'] = df_delist['ticker'].apply(lambda x: str(x).zfill(6))
        # Delete duplicates (with different delist dates)
df_delist.drop_duplicates(inplace=True)
return set(df_delist['ticker'])
def normalize_l1(self, file_path, start_date=None, end_date=None):
df = pd.read_csv(file_path)
# Select mergedFlag == 1
if 'mergedFlag' in df.columns:
df = df[df['mergedFlag'] == 1]
# Set start date and end date
if start_date:
df = df[df['endDate'] >= start_date]
if end_date:
df = df[df['endDate'] <= end_date]
        # Normalize ticker
df['ticker'] = df['ticker'].apply(lambda x: str(x).zfill(6))
# Delete Companies
df.drop(df[df['ticker'].apply(
lambda x: (x[0] in ['9', 'A', '2']) | (x in self.drop_list)
)].index, inplace=True)
# Check fiscalPeriod = 3
# df = df.drop(df[(df['reportType']=='Q3') & (df['fiscalPeriod']==3)].index, axis=0, errors='ignore')
        # If this is not a balance sheet, drop Q3 and then rename the report type
if 'CQ3' in set(df['reportType']):
df = df.drop(df[df['reportType']=='Q3'].index, axis=0, errors='ignore')
# Rename CQ3 to Q3
type_dict = {
'Q1': 'Q1',
'S1': 'S1',
'Q3': 'Q3',
'CQ3': 'Q3',
'A': 'A'
}
df['reportType'] = df['reportType'].apply(lambda x: type_dict[x])
# Delete Columns
df = df.drop(columns=[
'Unnamed: 0',
'Unnamed: 0.1',
'secID',
'partyID',
'publishDate',
'fiscalPeriod',
'mergedFlag',
'accoutingStandards',
'currencyCD',
'industryCategory'
], errors='ignore')
df = df.sort_values(
by=['ticker', 'endDate', 'endDateRep', 'actPubtime'],
ascending=[True, True, True, True]
)
return df
def normalize_l2(self, file_path, start_date=None, end_date=None):
df = pd.read_csv(file_path)
df = df.sort_values(
by=['ticker', 'endDate', 'endDateRep', 'actPubtime'],
ascending=[True, True, True, True]
)
# Set start date and end date
if start_date:
df = df[df['endDate'] >= start_date]
if end_date:
df = df[df['endDate'] <= end_date]
# Normalize ticker
df['ticker'] = df['ticker'].apply(lambda x: str(x).zfill(6))
df_orig = df.copy()
# Drop duplicated rows by checking endDate -> endDateRep
selected_1 = df.groupby(['ticker', 'endDate']).apply(
lambda x: tuple(x[x['endDateRep'] == x['endDateRep'].values[-1]].index))
df = df.loc[np.concatenate(selected_1.values)]
# Print out duplicated endDateRep
idx_dup_1 = []
for i in selected_1.values:
if len(i) > 1:
idx_dup_1.extend(i)
# if idx_dup_1:
# print('Duplicated endDateRep: ')
# print(df.loc[idx_dup_1][['ticker', 'secShortName', 'endDate', 'endDateRep', 'actPubtime']])
# Drop duplicated rows by checking endDateRep -> actPubtime
selected_2 = df.groupby(['ticker', 'endDate', 'endDateRep']).apply(lambda x: tuple(x[x['actPubtime'] == x['actPubtime'].values[-1]].index))
df = df.loc[np.concatenate(selected_2.values)]
# removed data
removed = np.setdiff1d(df_orig.index.values, np.concatenate(selected_2.values))
df_removed = df_orig.loc[removed]
# Print out duplicated actPubtime
idx_dup_2 = []
for i in selected_2.values:
if len(i) > 1:
idx_dup_2.extend(i)
if idx_dup_2:
df_dup_act = df.loc[idx_dup_2]
# print('Duplicated actPubtime: ')
# print(df_dup_act[['ticker', 'secShortName', 'endDate', 'endDateRep', 'actPubtime']])
else:
df_dup_act = None
# # Change column names
# df = df.rename(columns={
# 'aop': 'AOP',
# 'aor': 'AOR',
# 'cogs': 'COGS',
# 'bizTaSurchg': 'bizTaxSurchg',
# 'atoc': 'ATOC'
# }, errors='ignore')
return df, df_removed, df_dup_act
def process_l1(self, start_date=None, end_date=None):
orig_path = join(self.data_path, 'original/csv')
check_dirs([
join(self.save_path, 'normalized_l1'),
join(self.save_path, 'normalized_l1/csv'),
join(self.save_path, 'normalized_l1/excel'),
join(self.save_path, 'normalized_l1/statistics'),
join(self.save_path, 'normalized_l1/statistics/feature_info')
])
df_info = pd.DataFrame(columns=(
'SHEET_NAME', 'NUMBER_OF_COMPANIES'
))
for en_name in self.cn_to_en.values():
print(en_name)
df_l1 = self.normalize_l1(join(orig_path, en_name) + '.csv', start_date, end_date)
df_l1.to_csv(join(join(self.save_path, 'normalized_l1/csv'), en_name) + '.csv', index=False)
df_l1.to_excel(join(join(self.save_path, 'normalized_l1/excel'), en_name) + '.xlsx', index=False)
df_l1.count().to_excel(join(join(self.save_path, 'normalized_l1/statistics/feature_info'), en_name) + '_feature_info.xlsx', header=False)
df_info = df_info.append(pd.DataFrame({
'SHEET_NAME': [en_name],
'NUMBER_OF_COMPANIES': [len(set(df_l1['ticker']))]
}), ignore_index=True)
df_info.to_excel(join(join(self.save_path, 'normalized_l1/statistics'), 'number_of_companies.xlsx'), index=False)
def process_l2(self, start_date=None, end_date=None):
orig_path = join(self.data_path, 'normalized_l1/csv')
check_dirs([
join(self.save_path, 'normalized_l2'),
join(self.save_path, 'normalized_l2/csv'),
join(self.save_path, 'normalized_l2/excel'),
join(self.save_path, 'normalized_l2/statistics'),
join(self.save_path, 'normalized_l2/statistics/feature_info')
])
df_info = pd.DataFrame(columns=(
'SHEET_NAME', 'NUMBER_OF_COMPANIES'
))
for en_name in self.cn_to_en.values():
print('-'*70)
print(en_name)
df_l2, df_removed_l2, df_dup_act_l2 = self.normalize_l2(join(orig_path, en_name) + '.csv', start_date, end_date)
df_l2.to_csv(join(join(self.save_path, 'normalized_l2/csv'), en_name) + '.csv', index=False)
df_l2.to_excel(join(join(self.save_path, 'normalized_l2/excel'), en_name) + '.xlsx', index=False)
if df_removed_l2.shape[0] > 0:
check_dirs([
join(self.save_path, 'normalized_l2/removed')
])
df_removed_l2.to_excel(join(join(self.save_path, 'normalized_l2/removed'), en_name) + '_removed.xlsx', index=False)
if df_dup_act_l2 is not None:
check_dirs([
join(self.save_path, 'normalized_l2/duplicates')
])
df_dup_act_l2.to_excel(join(join(self.save_path, 'normalized_l2/duplicates'), en_name) + '_duplicates.xlsx', index=False)
df_l2.count().to_excel(join(join(self.save_path, 'normalized_l2/statistics/feature_info'), en_name) + '_feature_info.xlsx', header=False)
df_info = df_info.append(pd.DataFrame({
'SHEET_NAME': [en_name],
'NUMBER_OF_COMPANIES': [len(set(df_l2['ticker']))]
}), ignore_index=True)
df_info.to_excel(join(join(self.save_path, 'normalized_l2/statistics'), 'number_of_companies.xlsx'), index=False)
def find_zeros(self, file_path):
save_path = join(file_path, 'zeros')
check_dirs([save_path])
for en_name in self.cn_to_en.values():
print('File:', en_name)
df = pd.read_csv(join(join(file_path, 'csv'), en_name) + '.csv')
df_zero = []
for _, row in df.iterrows():
for k, v in row.items():
if v == 0:
df_zero.append(row)
if df_zero:
df_zero = pd.concat(df_zero, axis=1).T
df_zero.to_excel(join(save_path, en_name) + '.xlsx', index=False)
def check_Q3_fiscal_period(self, file_path, end_date_rep=False):
save_path = join(file_path, 'Q3-3-not-9_endDate_endDateRep')
check_dirs([save_path])
for en_name in self.cn_to_en.values():
print('File:', en_name)
df = pd.read_csv(join(join(self.data_path, 'original/csv'), en_name) + '.csv')
if end_date_rep:
df_Q3_3 = df[(df['reportType']=='Q3') | (df['reportType']=='CQ3')].groupby(
['ticker', 'endDate', 'endDateRep']).apply(
lambda x: 9 not in set(x['fiscalPeriod']))
df_Q3_3_not_9 = []
for t, ed, edr in df_Q3_3[df_Q3_3.values].index.to_list():
df_Q3_3_not_9.append(
df[(df['ticker']==t) \
& (df['endDate']==ed) \
& (df['endDateRep']==edr) \
& (df['mergedFlag']==1)])
else:
df_Q3_3 = df[(df['reportType']=='Q3') | (df['reportType']=='CQ3')].groupby(
['ticker', 'endDate']).apply(
lambda x: 9 not in set(x['fiscalPeriod']))
df_Q3_3_not_9 = []
for t, ed in df_Q3_3[df_Q3_3.values].index.to_list():
df_Q3_3_not_9.append(
df[(df['ticker']==t) \
& (df['endDate']==ed) \
& (df['mergedFlag']==1)])
if df_Q3_3_not_9:
df_Q3_3_not_9 = pd.concat(df_Q3_3_not_9)
df_Q3_3_not_9.to_excel(join(save_path, en_name) + '.xlsx', index=False)
def get_year_revenue_sheets(self, df_income, file_path, file_name):
df_income['ticker'] = df_income['ticker'].apply(lambda x: str(x).zfill(6))
df_income = df_income[df_income['endDate'].apply(lambda x: str(x)[-6:]=='-12-31')]
df_income.drop_duplicates(subset=['ticker', 'endDate'], keep='last', inplace=True)
df_income['endDate'] = df_income['endDate'].apply(lambda x: int(x[:4]))
df_income.set_index(['endDate', 'ticker'], inplace=True)
df_income.index.names = [None, 'TICKER']
df_income_r = df_income[['revenue']]
df_income_r = df_income_r.unstack(1).T
df_income_r.reset_index(inplace=True)
df_income_r = df_income_r.drop(columns=['level_0'])
df_income_r.set_index('TICKER', inplace=True)
df_income_r_count_list = pd.DataFrame(columns=('TICKER', 'COUNTS', 'YEARS'))
df_income_r_not_ct_list = pd.DataFrame(columns=('TICKER', 'YEARS'))
last_year = int(sorted(df_income_r.columns)[-1])
for ticker, row in df_income_r.iterrows():
row = row.notnull()
counts = sum(row)
years_list = []
for k in row.keys():
if row[k]:
years_list.append(k)
df_income_r_count_list = df_income_r_count_list.append(
pd.DataFrame({'TICKER': [ticker], 'COUNTS': [counts], 'YEARS': [years_list]}), ignore_index=True)
if not years_list:
print('No data:', ticker)
df_income_r_not_ct_list = df_income_r_not_ct_list.append(
pd.DataFrame({'TICKER': [ticker], 'YEARS': [[]]}), ignore_index=True)
# elif years_list[-1] - years_list[0] != counts - 1:
elif last_year - years_list[0] != counts - 1:
df_income_r_not_ct_list = df_income_r_not_ct_list.append(
pd.DataFrame({'TICKER': [ticker], 'YEARS': [years_list]}), ignore_index=True)
save_path = join(file_path, 'revenue')
check_dirs([save_path])
df_income_r.to_excel(join(save_path, file_name + '.xlsx'), index=True)
df_income_r_count_list.to_excel(join(save_path, file_name + '_counts.xlsx'), index=False)
df_income_r_not_ct_list.to_excel(join(save_path, file_name + '_discontinuous.xlsx'), index=False)
def get_quarter_revenue_sheets(self, df_income, file_path, file_name):
df_income['ticker'] = df_income['ticker'].apply(lambda x: str(x).zfill(6))
df_income.drop_duplicates(subset=['ticker', 'endDate'], keep='last', inplace=True)
df_income['endDate'] = df_income['endDate'].apply(lambda x: x[:7])
df_income.set_index(['endDate', 'ticker'], inplace=True)
df_income.index.names = [None, 'TICKER']
df_income_r = df_income[['revenue']]
df_income_r = df_income_r.unstack(1).T
df_income_r.reset_index(inplace=True)
df_income_r = df_income_r.drop(columns=['level_0'])
df_income_r.set_index('TICKER', inplace=True)
df_income_r_count_list = pd.DataFrame(columns=('TICKER', 'COUNTS', 'QUARTERS'))
df_income_r_not_ct_list =
|
pd.DataFrame(columns=('TICKER', 'QUARTERS'))
|
pandas.DataFrame
|
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
)
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
This may not be necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):
if any(isinstance(v, ABCSeries) for v in obj._values):
return True
return False
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslibs.Timedelta(value)
return value
def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):
"""
try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
do_round = False
if is_scalar(result):
return result
elif isinstance(result, ABCDataFrame):
# occurs in pivot_table doctest
return result
if isinstance(dtype, str):
if dtype == "infer":
inferred_type = lib.infer_dtype(ensure_object(result), skipna=False)
if inferred_type == "boolean":
dtype = "bool"
elif inferred_type == "integer":
dtype = "int64"
elif inferred_type == "datetime64":
dtype = "datetime64[ns]"
elif inferred_type == "timedelta64":
dtype = "timedelta64[ns]"
# try to upcast here
elif inferred_type == "floating":
dtype = "int64"
if issubclass(result.dtype.type, np.number):
do_round = True
else:
dtype = "object"
dtype = np.dtype(dtype)
elif dtype.type is Period:
from pandas.core.arrays import PeriodArray
with suppress(TypeError):
# e.g. TypeError: int() argument must be a string, a
# bytes-like object or a number, not 'Period
return PeriodArray(result, freq=dtype.freq)
converted = maybe_downcast_numeric(result, dtype, do_round)
if converted is not result:
return converted
# a datetimelike
# GH12821, iNaT is cast to float
if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
if hasattr(dtype, "tz"):
# not a numpy dtype
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize("utc")
result = result.tz_convert(dtype.tz)
else:
result = result.astype(dtype)
return result
def maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False):
"""
Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
Parameters
----------
result : ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
do_round : bool
Returns
-------
ndarray or ExtensionArray
"""
if not isinstance(dtype, np.dtype):
# e.g. SparseDtype has no itemsize attr
return result
if isinstance(result, list):
# reached via groupby.agg._ohlc; really this should be handled earlier
result = np.array(result)
def trans(x):
if do_round:
return x.round()
return x
if dtype.kind == result.dtype.kind:
# don't allow upcasts here (except if empty)
if result.dtype.itemsize <= dtype.itemsize and result.size:
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
if not result.size:
# if we don't have any elements, just astype it
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if isna(arr).any():
# if we have any nulls, then we are done
return result
elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)):
# a comparable, e.g. a Decimal may slip in here
return result
if (
issubclass(result.dtype.type, (np.object_, np.number))
and notna(result).all()
):
new_result = trans(result).astype(dtype)
if new_result.dtype.kind == "O" or result.dtype.kind == "O":
# np.allclose may raise TypeError on object-dtype
if (new_result == result).all():
return new_result
else:
if np.allclose(new_result, result, rtol=0):
return new_result
elif (
issubclass(dtype.type, np.floating)
and not is_bool_dtype(result.dtype)
and not is_string_dtype(result.dtype)
):
return result.astype(dtype)
return result
def maybe_cast_result(
result: ArrayLike, obj: "Series", numeric_only: bool = False, how: str = ""
) -> ArrayLike:
"""
Try casting result to a different type if appropriate
Parameters
----------
result : array-like
Result to cast.
obj : Series
Input Series from which result was calculated.
numeric_only : bool, default False
Whether to cast only numerics or datetimes as well.
how : str, default ""
How the result was computed.
Returns
-------
result : array-like
result maybe casted to the dtype.
"""
dtype = obj.dtype
dtype = maybe_cast_result_dtype(dtype, how)
assert not is_scalar(result)
if (
is_extension_array_dtype(dtype)
and not is_categorical_dtype(dtype)
and dtype.kind != "M"
):
# We have to special case categorical so as not to upcast
# things like counts back to categorical
cls = dtype.construct_array_type()
result = maybe_cast_to_extension_array(cls, result, dtype=dtype)
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : DtypeObj
Input dtype.
how : str
How the result was computed.
Returns
-------
DtypeObj
The desired dtype of the result.
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import Int64Dtype
if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif how in ["add", "cumsum", "sum"] and isinstance(dtype, BooleanDtype):
return Int64Dtype()
return dtype
def maybe_cast_to_extension_array(
cls: Type["ExtensionArray"], obj: ArrayLike, dtype: Optional[ExtensionDtype] = None
) -> ArrayLike:
"""
Call to `_from_sequence` that returns the object unchanged on Exception.
Parameters
----------
cls : class, subclass of ExtensionArray
obj : arraylike
Values to pass to cls._from_sequence
dtype : ExtensionDtype, optional
Returns
-------
ExtensionArray or obj
"""
from pandas.core.arrays.string_ import StringArray
from pandas.core.arrays.string_arrow import ArrowStringArray
assert isinstance(cls, type), f"must pass a type: {cls}"
assertion_msg = f"must pass a subclass of ExtensionArray: {cls}"
assert issubclass(cls, ABCExtensionArray), assertion_msg
# Everything can be converted to StringArrays, but we may not want to convert
if (
issubclass(cls, (StringArray, ArrowStringArray))
and lib.infer_dtype(obj) != "string"
):
return obj
try:
result = cls._from_sequence(obj, dtype=dtype)
except Exception:
# We can't predict what downstream EA constructors may raise
result = obj
return result
def maybe_upcast_putmask(
result: np.ndarray, mask: np.ndarray, other: Scalar
) -> Tuple[np.ndarray, bool]:
"""
A safe version of putmask that potentially upcasts the result.
The result is replaced with the first N elements of other,
where N is the number of True values in mask.
If the length of other is shorter than N, other will be repeated.
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : scalar
The source value.
Returns
-------
result : ndarray
changed : bool
Set to true if the result array was upcasted.
Examples
--------
>>> arr = np.arange(1, 6)
>>> mask = np.array([False, True, False, True, True])
>>> result, _ = maybe_upcast_putmask(arr, mask, False)
>>> result
array([1, 0, 3, 0, 0])
"""
if not isinstance(result, np.ndarray):
raise ValueError("The result input must be a ndarray.")
if not is_scalar(other):
# We _could_ support non-scalar other, but until we have a compelling
# use case, we assume away the possibility.
raise ValueError("other must be a scalar")
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
# NaN -> NaT
# integer or integer array -> date-like array
if result.dtype.kind in ["m", "M"]:
if isna(other):
other = result.dtype.type("nat")
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
def changeit():
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
# upcast (possibly), otherwise we DON't want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if isna(other):
return changeit()
try:
np.place(result, mask, other)
except TypeError:
# e.g. int-dtype result and float-dtype other
return changeit()
return result, False
def maybe_casted_values(
index: "Index", codes: Optional[np.ndarray] = None
) -> ArrayLike:
"""
Convert an index, given directly or as a pair (level, code), to a 1D array.
Parameters
----------
index : Index
codes : np.ndarray[intp] or None, default None
Returns
-------
ExtensionArray or ndarray
If codes is `None`, the values of `index`.
If codes is passed, an array obtained by taking from `index` the indices
contained in `codes`.
"""
values = index._values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the codes, extract the values with a mask
if codes is not None:
mask: np.ndarray = codes == -1
if mask.size > 0 and mask.all():
# we can have situations where the whole mask is -1,
# meaning there is nothing found in codes, so make all nan's
dtype = index.dtype
fill_value = na_value_for_dtype(dtype)
values = construct_1d_arraylike_from_scalar(fill_value, len(mask), dtype)
else:
values = values.take(codes)
if mask.any():
if isinstance(values, np.ndarray):
values, _ = maybe_upcast_putmask(values, mask, np.nan)
else:
values[mask] = np.nan
return values
def maybe_promote(dtype, fill_value=np.nan):
"""
Find the minimal dtype that can hold both the given dtype and fill_value.
Parameters
----------
dtype : np.dtype or ExtensionDtype
fill_value : scalar, default np.nan
Returns
-------
dtype
Upcasted from dtype argument if necessary.
fill_value
Upcasted from fill_value argument if necessary.
"""
if not is_scalar(fill_value) and not is_object_dtype(dtype):
# with object dtype there is nothing to promote, and the user can
# pass pretty much any weird fill_value they like
raise ValueError("fill_value must be a scalar")
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = fill_value.dtype.type("NaT", "ns")
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
if dtype == np.object_ or dtype.kind in ["U", "S"]:
# We treat string-like dtypes as object, and _always_ fill
# with np.nan
fill_value = np.nan
dtype = np.dtype(np.object_)
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, np.datetime64):
if isinstance(fill_value, datetime) and fill_value.tzinfo is not None:
# Trying to insert tzaware into tznaive, have to cast to object
dtype = np.dtype(np.object_)
elif is_integer(fill_value) or (is_float(fill_value) and not isna(fill_value)):
dtype = np.dtype(np.object_)
else:
try:
fill_value = Timestamp(fill_value).to_datetime64()
except (TypeError, ValueError):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.timedelta64):
if (
is_integer(fill_value)
or (is_float(fill_value) and not np.isnan(fill_value))
or isinstance(fill_value, str)
):
# TODO: What about str that can be a timedelta?
dtype = np.dtype(np.object_)
else:
try:
fv = Timedelta(fill_value)
except ValueError:
dtype = np.dtype(np.object_)
else:
if fv is NaT:
# NaT has no `to_timedelta64` method
fill_value = np.timedelta64("NaT", "ns")
else:
fill_value = fv.to_timedelta64()
elif is_datetime64tz_dtype(dtype):
if isna(fill_value):
fill_value = NaT
elif not isinstance(fill_value, datetime):
dtype = np.dtype(np.object_)
elif fill_value.tzinfo is None:
dtype = np.dtype(np.object_)
elif not tz_compare(fill_value.tzinfo, dtype.tz):
# TODO: sure we want to cast here?
dtype = np.dtype(np.object_)
elif is_extension_array_dtype(dtype) and isna(fill_value):
fill_value = dtype.na_value
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
dtype = np.dtype(np.float64)
elif dtype.kind == "f":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.float64 and dtype is np.float32
dtype = mst
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
if not np.can_cast(fill_value, dtype):
# upcast to prevent overflow
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
if dtype.kind == "f":
# Case where we disagree with numpy
dtype = np.dtype(np.object_)
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, (np.integer, np.floating)):
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.complex128 and dtype is np.complex64
dtype = mst
elif fill_value is None:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
elif is_integer_dtype(dtype):
dtype = np.float64
fill_value = np.nan
elif is_datetime_or_timedelta_dtype(dtype):
fill_value = dtype.type("NaT", "ns")
else:
dtype = np.dtype(np.object_)
fill_value = np.nan
else:
dtype = np.dtype(np.object_)
# in case we have a string that looked like a number
if is_extension_array_dtype(dtype):
pass
elif issubclass(np.dtype(dtype).type, (bytes, str)):
dtype = np.dtype(np.object_)
fill_value = _ensure_dtype_type(fill_value, dtype)
return dtype, fill_value
def _ensure_dtype_type(value, dtype: DtypeObj):
"""
Ensure that the given value is an instance of the given dtype.
e.g. if out dtype is np.complex64_, we should have an instance of that
as opposed to a python complex object.
Parameters
----------
value : object
dtype : np.dtype or ExtensionDtype
Returns
-------
object
"""
# Start with exceptions in which we do _not_ cast to numpy types
if is_extension_array_dtype(dtype):
return value
elif dtype == np.object_:
return value
elif isna(value):
# e.g. keep np.nan rather than try to cast to np.float32(np.nan)
return value
return dtype.type(value)
def infer_dtype_from(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar or array.
Parameters
----------
val : object
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar/array belonging to a pandas extension type is inferred as
object
"""
if is_scalar(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar.
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar belonging to a pandas extension type is inferred as
object
"""
dtype: DtypeObj = np.dtype(object)
# a 1-element ndarray
if isinstance(val, np.ndarray):
msg = "invalid ndarray passed to infer_dtype_from_scalar"
if val.ndim != 0:
raise ValueError(msg)
dtype = val.dtype
val = val.item()
elif isinstance(val, str):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!
dtype = np.dtype(object)
elif isinstance(val, (np.datetime64, datetime)):
val = Timestamp(val)
if val is NaT or val.tz is None:
dtype = np.dtype("M8[ns]")
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit="ns", tz=val.tz)
else:
# return datetimetz as object
return np.dtype(object), val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = Timedelta(val).value
dtype = np.dtype("m8[ns]")
elif is_bool(val):
dtype = np.dtype(np.bool_)
elif is_integer(val):
if isinstance(val, np.integer):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.int64)
try:
np.array(val, dtype=dtype)
except OverflowError:
dtype = np.array(val).dtype
elif is_float(val):
if isinstance(val, np.floating):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.float64)
elif is_complex(val):
dtype = np.dtype(np.complex_)
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
elif lib.is_interval(val):
subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0]
dtype = IntervalDtype(subtype=subtype)
return dtype, val
def dict_compat(d: Dict[Scalar, Scalar]) -> Dict[Scalar, Scalar]:
"""
Convert datetimelike-keyed dicts to a Timestamp-keyed dict.
Parameters
----------
d: dict-like object
Returns
-------
dict
"""
return {maybe_box_datetimelike(key): value for key, value in d.items()}
def infer_dtype_from_array(
arr, pandas_dtype: bool = False
) -> Tuple[DtypeObj, ArrayLike]:
"""
Infer the dtype from an array.
Parameters
----------
arr : array
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, an array belonging to a pandas extension type
is inferred as object
Returns
-------
tuple (numpy-compat/pandas-compat dtype, array)
Notes
-----
If pandas_dtype=False, these infer to numpy dtypes
exactly, with the exception that mixed / object dtypes
are not coerced by stringifying or conversion.
If pandas_dtype=True, datetime64tz-aware/categorical
types will retain their character.
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(dtype('O'), [1, '1'])
"""
if isinstance(arr, np.ndarray):
return arr.dtype, arr
if not is_list_like(arr):
arr = [arr]
if pandas_dtype and is_extension_array_dtype(arr):
return arr.dtype, arr
elif isinstance(arr, ABCSeries):
return arr.dtype, np.asarray(arr)
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr, skipna=False)
if inferred in ["string", "bytes", "mixed", "mixed-integer"]:
return (np.dtype(np.object_), arr)
arr = np.asarray(arr)
return arr.dtype, arr
def maybe_infer_dtype_type(element):
"""
Try to infer an object's dtype, for use in arithmetic ops.
Uses `element.dtype` if that's available.
Objects implementing the iterator protocol are cast to a NumPy array,
and from there the array's type is used.
Parameters
----------
element : object
Possibly has a `.dtype` attribute, and possibly the iterator
protocol.
Returns
-------
tipo : type
Examples
--------
>>> from collections import namedtuple
>>> Foo = namedtuple("Foo", "dtype")
>>> maybe_infer_dtype_type(Foo(np.dtype("i8")))
dtype('int64')
"""
tipo = None
if hasattr(element, "dtype"):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
def maybe_upcast(
values: ArrayLike,
fill_value: Scalar = np.nan,
dtype: Dtype = None,
copy: bool = False,
) -> Tuple[ArrayLike, Scalar]:
"""
Provide explicit type promotion and coercion.
Parameters
----------
values : ndarray or ExtensionArray
The array that we want to maybe upcast.
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : bool, default True
If True always make a copy even if no upcast is required.
Returns
-------
values: ndarray or ExtensionArray
the original array, possibly upcast
fill_value:
the fill value, possibly upcast
"""
if not is_scalar(fill_value) and not is_object_dtype(values.dtype):
# We allow arbitrary fill values for object dtype
raise ValueError("fill_value must be a scalar")
if is_extension_array_dtype(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
def invalidate_string_dtypes(dtype_set: Set[DtypeObj]):
"""
Change string like dtypes to object for
``DataFrame.select_dtypes()``.
"""
non_string_dtypes = dtype_set - {np.dtype("S").type, np.dtype("<U").type}
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
length = len(categories)
if length < _int8_max:
return ensure_int8(indexer)
elif length < _int16_max:
return ensure_int16(indexer)
elif length < _int32_max:
return ensure_int32(indexer)
return ensure_int64(indexer)
def astype_nansafe(
arr, dtype: DtypeObj, copy: bool = True, skipna: bool = False
) -> ArrayLike:
"""
Cast the elements of an array to a given dtype in a nan-safe manner.
Parameters
----------
arr : ndarray
dtype : np.dtype
copy : bool, default True
If False, a view will be attempted but may fail, if
e.g. the item sizes don't align.
skipna: bool, default False
Whether or not we should skip NaN when casting as a string-type.
Raises
------
ValueError
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
"""
# dispatch on extension dtype if needed
if is_extension_array_dtype(dtype):
return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, str):
return lib.ensure_string_array(
arr.ravel(), skipna=skipna, convert_na_value=False
).reshape(arr.shape)
elif is_datetime64_dtype(arr):
if is_object_dtype(dtype):
return ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == "M":
return arr.astype(dtype)
raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]")
elif is_timedelta64_dtype(arr):
if is_object_dtype(dtype):
return ints_to_pytimedelta(arr.view(np.int64))
elif dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
if dtype not in [INT64_DTYPE, TD64NS_DTYPE]:
# allow frequency conversions
# we return a float here!
if dtype.kind == "m":
mask = isna(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
elif dtype == TD64NS_DTYPE:
return arr.astype(TD64NS_DTYPE, copy=copy)
raise TypeError(f"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]")
elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):
if not np.isfinite(arr).all():
raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
elif is_object_dtype(arr):
# work around NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
# if we have a datetime/timedelta array of objects
# then coerce to a proper dtype and recall astype_nansafe
elif is_datetime64_dtype(dtype):
from pandas import to_datetime
return astype_nansafe(to_datetime(arr).values, dtype, copy=copy)
elif is_timedelta64_dtype(dtype):
from pandas import to_timedelta
return astype_nansafe(to_timedelta(arr)._values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
msg = (
f"The '{dtype.name}' dtype has no unit. Please pass in "
f"'{dtype.name}[ns]' instead."
)
raise ValueError(msg)
if copy or is_object_dtype(arr) or is_object_dtype(dtype):
# Explicit copy, or required since NumPy can't view from / to object.
return arr.astype(dtype, copy=True)
return arr.view(dtype)
def soft_convert_objects(
values: np.ndarray,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
copy: bool = True,
):
"""
Try to coerce datetime, timedelta, and numeric object-dtype columns
to inferred dtype.
Parameters
----------
values : np.ndarray[object]
datetime : bool, default True
numeric: bool, default True
timedelta : bool, default True
copy : bool, default True
Returns
-------
np.ndarray
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(copy, "copy")
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError("At least one of datetime, numeric or timedelta must be True.")
# Soft conversions
if datetime:
# GH 20380, when datetime is beyond year 2262, hence outside
# bound of nanosecond-resolution 64-bit integers.
try:
values = lib.maybe_convert_objects(values, convert_datetime=True)
except OutOfBoundsDatetime:
pass
if timedelta and is_object_dtype(values.dtype):
# Object check to ensure only run if previous did not convert
values = lib.maybe_convert_objects(values, convert_timedelta=True)
if numeric and is_object_dtype(values.dtype):
try:
converted = lib.maybe_convert_numeric(values, set(), coerce_numeric=True)
except (ValueError, TypeError):
pass
else:
# If all NaNs, then do not-alter
values = converted if not isna(converted).all() else values
values = values.copy() if copy else values
return values
def convert_dtypes(
input_array: AnyArrayLike,
convert_string: bool = True,
convert_integer: bool = True,
convert_boolean: bool = True,
convert_floating: bool = True,
) -> Dtype:
"""
Convert objects to best possible type, and optionally,
to types supporting ``pd.NA``.
Parameters
----------
input_array : ExtensionArray, Index, Series or np.ndarray
convert_string : bool, default True
Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, default True
Whether object dtypes should be converted to ``BooleanDtypes()``.
convert_floating : bool, default True
Whether, if possible, conversion can be done to floating extension types.
If `convert_integer` is also True, preference will be given to integer
dtypes if the floats can be faithfully cast to integers.
Returns
-------
dtype
new dtype
"""
is_extension = is_extension_array_dtype(input_array.dtype)
if (
convert_string or convert_integer or convert_boolean or convert_floating
) and not is_extension:
try:
inferred_dtype = lib.infer_dtype(input_array)
except ValueError:
# Required to catch due to Period. Can remove once GH 23553 is fixed
inferred_dtype = input_array.dtype
if not convert_string and is_string_dtype(inferred_dtype):
inferred_dtype = input_array.dtype
if convert_integer:
target_int_dtype = "Int64"
if is_integer_dtype(input_array.dtype):
from pandas.core.arrays.integer import INT_STR_TO_DTYPE
inferred_dtype = INT_STR_TO_DTYPE.get(
input_array.dtype.name, target_int_dtype
)
if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(
input_array.dtype
):
inferred_dtype = target_int_dtype
else:
if
|
is_integer_dtype(inferred_dtype)
|
pandas.core.dtypes.common.is_integer_dtype
|
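A minimal, hedged sketch of the pandas.core.dtypes.common.is_integer_dtype check labelled just above, using its public alias in pandas.api.types; the sample Series are illustrative and not taken from the row.
import pandas as pd
from pandas.api.types import is_integer_dtype

print(is_integer_dtype(pd.Series([1, 2, 3])))              # True (numpy int64)
print(is_integer_dtype(pd.Series([1, 2], dtype="Int64")))  # True (nullable extension integer)
print(is_integer_dtype(pd.Series([1.0, 2.0])))             # False (float64)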
import timeit
import pandas as pd
import numpy as np
from typing import Dict,List
loops=1000
inputfile:List[List[int]] = [[1,2,3,4,5,6] for x in range(0,1000)]
# input arrives as a list of row lists
# need to make columns
#######################
# zip
# 60us
def i1() -> List[int]:
return list(map(list, zip(*inputfile))) # type:ignore
t = timeit.timeit(i1,number=loops)
print(f'i1 transpose zip {1e6*t/loops} us')
#######################
# list
# 64us
def i2() -> List[List[int]]:
return [list(i) for i in zip(*inputfile)]
t = timeit.timeit(i2,number=loops)
print(f'i2 transpose list {1e6*t/loops} us')
#######################
# append
# 64us
def i3() -> List[List[int]]:
x = []
for i in zip(*inputfile):
x.append((list(i)))
return x
t = timeit.timeit(i3,number=loops)
print(f'i3 transpose append {1e6*t/loops} us')
#######################
# list to col dict
# 50us (winner!), 318 with np.array
def i4() -> Dict[int, int]:
return {x[0]:np.array(x[1]) for x in enumerate(zip(*inputfile))} #type:ignore
t = timeit.timeit(i4,number=loops)
print(f'i4 transpose list to dict {1e6*t/loops} us')
#######################
# list to dict to df
# should be 50+375 but is 1370. 743 if i do the np.array above
# this involves type conversion from series to ndarray
def g1() -> pd.DataFrame:
return pd.DataFrame(i4()) #type:ignore
t = timeit.timeit(g1,number=loops)
print(f'g1 list to col dict to df {1e6*t/loops} us')
#######################
# dictionary of column lists
x1 = list(range(0,1000)) # skipping the np array step is cheating
y1 = {'a':x1,'b':x1,'c':x1,'d':x1,'e':x1,'f':x1}
# 375 us, 650 if i include np array
def f1() -> pd.DataFrame:
y2 = {k:np.array(v) for (k,v) in y1.items()}
return pd.DataFrame(y2)
t = timeit.timeit(f1,number=loops)
print(f'f1 col dict of list {1e6*t/loops} us')
#######################
# list of row lists (slow)
# this is the file format
x2 = [[1,2,3,4,5,6] for x in range(0,1000)]
# 1250 us (!)
def f2() -> pd.DataFrame:
return pd.DataFrame(x2, columns=['a','b','c','d','e','f'])
t = timeit.timeit(f2,number=loops)
print(f'f2 list of row lists {1e6*t/loops} us')
#######################
# list of row dictionaries (slowest)
x3 = [{'a':x,'b':x,'c':x,'d':x,'e':x,'f':x} for x in range(0,1000)]
# 1590 us (!!)
def f3() -> pd.DataFrame:
return pd.DataFrame(x3)
t = timeit.timeit(f3,number=loops)
print(f'f3 row dicts {1e6*t/loops} us')
#######################
# dictionary of column series
# this involves type conversion from series to ndarray
x4 = pd.Series(list(range(0,1000)))
y4 = {'a':x4,'b':x4,'c':x4,'d':x4,'e':x4,'f':x4}
# 335 us
def f4() -> pd.DataFrame:
return
|
pd.DataFrame(y4)
|
pandas.DataFrame
|
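A hedged sketch of the two pandas.DataFrame construction paths the benchmark above times, columnar input (dict of arrays) versus row-wise input (list of dicts); sizes and column names are illustrative only.
import numpy as np
import pandas as pd

cols = {"a": np.arange(5), "b": np.arange(5, dtype=float)}
df_columnar = pd.DataFrame(cols)        # columnar input: each array becomes a column directly

rows = [{"a": i, "b": float(i)} for i in range(5)]
df_rowwise = pd.DataFrame(rows)         # row-wise input: pandas must transpose the records first

assert df_columnar.shape == df_rowwise.shape == (5, 2)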
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.cluster import KMeans
from collections import Counter
import sys
print('HISTORY_LENGTH = int(sys.argv[1])')
print('n_samples = int(sys.argv[2])')
print('action_diff_mult = int(sys.argv[3])')
np.set_printoptions(threshold=sys.maxsize, linewidth=1000000)
df =
|
pd.read_csv('the_adherence_file.csv')
|
pandas.read_csv
|
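A hedged, self-contained illustration of the pandas.read_csv call labelled above, reading from an in-memory buffer instead of 'the_adherence_file.csv'; the column names are invented for the example.
import io
import pandas as pd

buf = io.StringIO("subject,day,adherence\n1,0,0.9\n1,1,0.8\n2,0,1.0\n")  # illustrative data
df = pd.read_csv(buf)
print(df.dtypes)   # subject/day inferred as int64, adherence as float64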
# -*- coding: utf-8 -*-
# Dirichlet Mixing Module v1.2
# Implemented by <NAME>, based on original MatLab code by <NAME>.
# Mathematics described in Rudge et al.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Mean composition of melts from all lithologies
def mean_comp_total(f,w,c):
return np.sum(f*c*w)
# Import a Melts output file
def Import_Melts_output(filename,dX=0.0001,RudgeMethod=False):
"""Import a Melts csv file and recasts the melting column in terms of equal
dX increments. Converts pressure from bars to GPa.
Returns a pandas dataframe.
Parameters
----------
filename: string
Name of the file (and path relative to script) to import
dX: float
Discretization interval. Default 0.01%.
RudgeMethod: bool
Use the Rudge method for calculating melt fraction from the MELTS input.
I think this method is erroneous, but have kept it so his results may be
reproduced.
"""
meltsFile = pd.read_csv(filename,skiprows=1)
if RudgeMethod == True:
# Calculate Melt Fraction
meltsFile['F'] = (100-meltsFile.Mass)/100
# Calculate residual porosity
ResidualPorosity = meltsFile.F.iloc[0]
X = (meltsFile.F - ResidualPorosity)/(1-ResidualPorosity)
else:
X = (meltsFile.Mass[0]-meltsFile.Mass)/meltsFile.Mass[0]
# Find last X=0 term during upwelling (and first DeltaX>0 term)
# Find last melting step
MeltingBounds = [0,0]
MeltingBounds[0] = np.argmin(X[X>0]) - 1
MeltingBounds[1] = np.argmax(X)
# Make list of columns for new array
columns = ['X','P','T']
columns = columns + (meltsFile.columns[3:].tolist())
# Set up list of X values to map all other variables to
X_to_map = np.arange(X[MeltingBounds[0]],X[MeltingBounds[1]],dX)
# Create an array of zeroes with the length of the number of rows needed in the dataframe
EmptyColumns = np.array([np.zeros(np.shape(X_to_map))]*np.shape(columns)[0]).T
# Create Empty Dataframe
d = pd.DataFrame(EmptyColumns, columns=columns)
# Start filling Dataframe
d.X = X_to_map
d['T'] = np.interp(d.X,X,meltsFile.Temperature)
d['P'] = np.interp(d.X,X,meltsFile.Pressure)/1e4
# Map all the chemistry to the new X variable
for col in columns[3:]:
d[col] = np.interp(d.X,X,meltsFile[col])
return d
# Import a Melts output file
def Import_Katz_output(filename,dX=0.0001,MajorElements=pd.Series([7.48,8.51],index=['MgO','FeO']),WalterComps=False,file=True):
"""Import a numpy file generated by the single melting region function of
the DualLithologyMelting script and recasts the melting column in terms of equal
dX increments.
Returns a pandas dataframe.
Parameters
----------
filename: string
Name of the file (and path relative to script) to import
dX: float
Discretization interval. Default 0.01%.
MajorElements: series
Major Element concentrations to add to each fractional melt. Same composition
will apply to all melts. MgO and FeO must always be set, otherwise some
functionality of the dirichlet module won't work (but can be ignored).
WalterComps: bool
If true the major element composition of the melts will be calculated using
the parameterisation of the Walter KR4003 melting experiments by Duncan et al.
(2017).
file: bool
If true filename is interpreted as a file name, if false, filename is interpreted
as the array object itself.
"""
if file == True:
mr_raw = np.load(filename)
else:
mr_raw = filename
mr = np.zeros([3,np.shape(mr_raw)[1]])
mr[0] = mr_raw[0]
mr[1] = mr_raw[1]
mr[2] = mr_raw[3]
mr = pd.DataFrame(mr.T,columns=['P','T','X'])
# Find last X=0 term during upwelling (and first DeltaX>0 term)
# Find last melting step
MeltingBounds = [0,0]
MeltingBounds[0] = np.argmin(mr.X[mr.X>0]) - 1
MeltingBounds[1] = np.argmax(mr.X)
# Make list of columns for new array
columns = ['X','P','T']
if WalterComps == False:
columns = columns + MajorElements.index.tolist()
# Set up list of X values to map all other variables to
X_to_map = np.arange(mr.X[MeltingBounds[0]],mr.X[MeltingBounds[1]],dX)
# Create an array of zeroes with the length of the number of rows needed in the dataframe
EmptyColumns = np.array([np.zeros(np.shape(X_to_map))]*np.shape(columns)[0]).T
# Create Empty Dataframe
d = pd.DataFrame(EmptyColumns, columns=columns)
# Start filling Dataframe
d.X = X_to_map
d['T'] = np.interp(d.X,mr.X,mr['T'])
d['P'] = np.interp(d.X,mr.X,mr.P)
if WalterComps == False:
for el in MajorElements.index:
d[el] = MajorElements[el]
if WalterComps == True:
MajorElements = WalterComposition(np.array(d.X),np.array(d.P))[0]
d = pd.merge(d,MajorElements,on=['X','P'])
return d
# Import a MultiLith output file
def Import_MultiLith_output(mr_raw,dX=0.0001,MajorElements=pd.Series([7.48,8.51],index=['MgO','FeO']),WalterComps=False):
"""Import a 1D melting region object generated by the MultiLith code. Must have been
integrated already.
Returns a pandas dataframe.
Parameters
----------
mr_raw: object
The 1D melting region object generated by the MultiLith code (already integrated)
dX: float
Discretization interval. Default 0.01%.
MajorElements: series
Major Element concentrations to add to each fractional melt. Same composition
will apply to all melts. MgO and FeO must always be set, otherwise some
functionality of the dirichlet module won't work (but can be ignored).
WalterComps: bool
If true the major element composition of the melts will be calculated using
the parameterisation of the Walter KR4003 melting experiments by Duncan et al.
(2017).
"""
steps_under_crust = np.shape(mr_raw.P[mr_raw.P>mr_raw.P_base_of_crust])[0]
lithologies = len(mr_raw.mantle.names)
mcols = list()
for lith in mr_raw.mantle.names:
mr = np.zeros([3,steps_under_crust])
mr[0] = mr_raw.P[:steps_under_crust]
mr[1] = mr_raw.Temperature[:steps_under_crust]
mr[2] = mr_raw.F[lith][:steps_under_crust]
mr = pd.DataFrame(mr.T,columns=['P','T','X'])
# Find last X=0 term during upwelling (and first DeltaX>0 term)
# Find last melting step
if np.shape(mr.X[mr.X>0])[0] != 0:
MeltingBounds = [0,0]
MeltingBounds[0] = mr.X[mr.X>0].idxmin() - 1
MeltingBounds[1] = mr.X[mr.X>0].idxmax()
# Make list of columns for new array
columns = ['X','P','T']
if WalterComps == False:
columns = columns + MajorElements.index.tolist()
# Set up list of X values to map all other variables to
X_to_map = np.arange(mr.X[MeltingBounds[0]],mr.X[MeltingBounds[1]],dX)
# Create an array of zeroes with the length of the number of rows needed in the dataframe
EmptyColumns = np.array([np.zeros(np.shape(X_to_map))]*np.shape(columns)[0]).T
# Create Empty Dataframe
d =
|
pd.DataFrame(EmptyColumns, columns=columns)
|
pandas.DataFrame
|
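A hedged sketch of the pattern used in the module above: pre-allocating a zero-filled pandas.DataFrame with a known column list and then filling columns by interpolation; the sizes here are illustrative only.
import numpy as np
import pandas as pd

columns = ["X", "P", "T"]
n_rows = 4
d = pd.DataFrame(np.zeros((n_rows, len(columns))), columns=columns)
d["X"] = np.linspace(0.0, 0.3, n_rows)   # stand-in for the X_to_map grid
print(d)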
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from rcbm import fab
def test_calculate_fabric_heat_loss_coefficient():
"""Output is equivalent to DEAP 4.2.0 example A"""
floor_area = pd.Series([63])
roof_area = pd.Series([63])
wall_area = pd.Series([85.7])
window_area = pd.Series([29.6])
door_area = pd.Series([1.85])
floor_uvalue = pd.Series([0.14])
roof_uvalue = pd.Series([0.11])
wall_uvalue = pd.Series([0.13])
window_uvalue = pd.Series([0.87])
door_uvalue = pd.Series([1.5])
thermal_bridging_factor = pd.Series([0.05])
expected_output =
|
pd.Series([68], dtype="int64")
|
pandas.Series
|
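A hedged sketch of the comparison pattern in the test above: an expected pandas.Series built with an explicit dtype and checked element-wise with assert_series_equal; the value 68 is reused only as a placeholder.
import pandas as pd
from pandas.testing import assert_series_equal

expected = pd.Series([68], dtype="int64")
actual = pd.Series([67]) + 1              # assumes the default inferred integer dtype is int64
assert_series_equal(actual, expected)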
"""
License
-------
Copyright (C) 2021 - <NAME>
This file is part of the ADMiniSter package. You can use it, redistribute
it, and/or modify it under the terms of the Creative Commons Attribution
4.0 International Public License.
Summary
-------
This module aims to provide tools to manage, locate, and process
large amounts of data files simply and efficiently. At the same time,
it seeks to work out of the box on most systems. To this end, it is built on
top of standard Python modules such as pandas and NumPy.
Specifically, this module achieves those goals by implementing a set of
functions that leverage the capabilities of a so-called file index. The file
index is a table that relates the file paths with some attributes characteristic of
each file. Each row contains a filename column, which is the table's primary key,
and defines the location of the data file within the file system. The rest of
the columns correspond to the data file attributes, which depend on the context
and are for the user to define (see next section). Since the file index contains
paths to the data files but not the data itself, the file index is typically lightweight and fast.
Thus, the file index is saved and re-loaded as a plain-text CSV file, and a
pandas DataFrame is used to manage it.
When a file index has been created, we can leverage it. Thus, with the tools
provided next, we can efficiently locate data files with queries based on their attributes using
the locate function. We can call the apply or
the group_and_apply functions to launch parallel analyses on the located data files.
File attributes
---------------
To relate each file with its attributes, the user must define an attributes loader
function that, given a file path, returns its attributes. The attributes
loader is passed to the file index build (or update) function, which does
the rest of the work.
A typical example of file attributes is, e.g., metadata contained within the
files, such as the model parameters used for generating the data. In this case,
an attributes loader function would load the data file, read the metadata, and
return it as a dictionary. Another typical scenario is when the file attributes
correspond to the results of some analyses. In this case, the attributes loader
would load the file, analyse the data, and return a dictionary with the names
of each analysis and their results.
For the specific case of the data format defined in the companion module 'csv_with_metada',
the file attributes might be e.g. extracted from the header. Let's consider a header with a
section named 'params'. In this case, a suitable attributes loader function would be:
>>> attributes_loader = lambda filename : csv_with_metada.parse_header(filename)['params']
which returns the header's 'params' section as a dictionary for each input filename.
In this way, we would create a file index, relating each file path with the parameters
stored in their headers.
Example
-------
Let's consider a set of plain text files with names following the pattern 'A_*+B_*+C_*.dat',
where the * is the wildcard for the values of the parameters 'A', 'B', and 'C'. In this case,
we are not interested in the contents of such files, just in how to index them.
We can leverage the fact that the file names follow a regular pattern to extract the values
of 'A', 'B', and 'C' and use them to build the index. (This approach is desirable
since parsing the file names proves to be far faster than opening files to read some sort
of metadata, such as header information.)
>>> from ADMiniSter import file_index
>>> import glob
>>>
>>> files = glob.glob('A*+B*+C*.dat')
>>> files
['A_3+B_1.7+C_-2.dat',
'A_4+B_1.1+C_-7.dat',
'A_1+B_1.7+C_-5.dat',
'A_1+B_1.7+C_-2.dat',
'A_2+B_1.1+C_-5.dat',
'A_1+B_1.1+C_-7.dat',
...
Let's define an attributes loader function that parses the filenames as '/tmp/A_4+B_1.1.dat' -> {'A': 4.0, 'B': 1.1}.
This function does the job:
>>> attrs_loader = lambda filename: {k:float(v) for k,v in [s.split('_') for s in filename.strip('.dat').split('+')]}
Now we build the index
>>> df = file_index.build(files, attrs_loader)
>>> df
A B C filename
0 3.0 1.7 -2.0 A_3+B_1.7+C_-2.dat
1 4.0 1.1 -7.0 A_4+B_1.1+C_-7.dat
2 1.0 1.7 -5.0 A_1+B_1.7+C_-5.dat
3 1.0 1.7 -2.0 A_1+B_1.7+C_-2.dat
4 2.0 1.1 -5.0 A_2+B_1.1+C_-5.dat
5 1.0 1.1 -7.0 A_1+B_1.1+C_-7.dat
...
And we can write it to a file
>>> file_index.write(df, 'index.csv')
From now on, we don't need to build the index anymore (maybe update it, see update function). All
we need to do is to load it every time we need it. For that, we do
>>> df = file_index.load('index.csv')
Within the index, we can locate files matching some desired attributes values. For
example, let's look for all those files with A=2. and B=1.5
>>> sub_df = file_index.locate(df, {'A':2, 'B':1.5})
>>> sub_df
A B C filename
17 2.0 1.5 -7.0 A_2+B_1.5+C_-7.dat
24 2.0 1.5 -5.0 A_2+B_1.5+C_-5.dat
39 2.0 1.5 -2.0 A_2+B_1.5+C_-2.dat
If we were interested in analyzing those files, we could use pandas' apply
function over sub_df. However, let's illustrate a more demanding situation, where
we want to analyze all the existing data files, which are potentially very numerous
and heavy. In that case, a parallel analysis is desirable. The functions apply
and group_and_apply defined in this module do that. Let's use the apply function.
The first thing to do is define the target function that we want to apply
in parallel to each different data file. In this case, for the sake of the
example, we don't define any complex function. Instead, we set the results
of some hypothetical analyses using random numbers.
>>> def target_func(row):
>>> filenames = row['filename']
>>> # here, the file would be loaded and its data analysised
>>> row['analysis_1'] = np.random.uniform()
>>> row['analysis_2'] = np.random.uniform()
>>> row['analysis_3'] = np.random.uniform()
>>> return row
Now, let's run the analysis,
>>> results = file_index.apply(df, target_func)
The call returns a list of single-row DataFrames. We can create a new results
DataFrame with them as
>>> results_df = pd.DataFrame(results)
>>> results_df
A B C filename analysis_1 analysis_2 analysis_3
0 3.0 1.7 -2.0 A_3+B_1.7+C_-2.dat 0.193416 0.448960 0.982408
1 4.0 1.1 -7.0 A_4+B_1.1+C_-7.dat 0.702925 0.956540 0.825651
2 1.0 1.7 -5.0 A_1+B_1.7+C_-5.dat 0.235057 0.823497 0.334244
3 1.0 1.7 -2.0 A_1+B_1.7+C_-2.dat 0.345587 0.632414 0.788807
4 2.0 1.1 -5.0 A_2+B_1.1+C_-5.dat 0.408646 0.144957 0.179882
5 1.0 1.1 -7.0 A_1+B_1.1+C_-7.dat 0.734338 0.655969 0.596402
...
The function group_and_apply works similarly. However, it creates groups
of data files and passes those groups to the target function to analyze several
data files simultaneously. This is useful if, e.g., the data
files of each group correspond to different realizations of the same random process.
"""
import pandas as pd
import numpy as np
from joblib import Parallel, delayed
from progressbar import progressbar
import os
def load(index_filename):
"""
Load an existing file index from a CSV file.
Parameters
----------
- index_filename: the path to the file.
Returns
-------
A pandas DataFrame with the file index.
"""
return pd.read_csv(index_filename, engine="c")
def write(df, index_filename):
"""
Write a file index contained in a pandas DataFrame to a text CSV file.
Parameters
----------
- df: the DataFrame with the file index.
- index_filename: the path to the file.
"""
df.to_csv(index_filename, index=False)
print("-> {} successfully written".format(index_filename))
return
def build(files, attrs_loader):
"""
Build a new file index.
Parameters
----------
- files: the list of files to be indexed.
- attrs_loader: a user-defined function that returns the attributes of a data file. The
attributes loader function must take as input the path to a datafile
and return a dictionary relating the names of the attributes (as keys)
to their respective values. It is the responsibility of the attrs_loader
function to handle exceptions when reading the files. In the case that
no attributes are loaded, None must be returned.
Returns
-------
The file index as a pandas DataFrame.
"""
print("-> Reading {} files...".format(len(files)))
if len(files) == 0:
print("--> Nothing to do")
new_data = list()
problems = list()
for filename in progressbar(files):
attrs = attrs_loader(filename)
if attrs is not None:
attrs["filename"] = filename
new_data.append(attrs)
else:
problems.append(filename)
if len(problems) > 0:
print("-> Problems reading {} files".format(len(problems)))
for f in problems:
print("-->", f)
if len(new_data) == 0:
raise Exception("No file was read")
df =
|
pd.DataFrame(new_data)
|
pandas.DataFrame
|
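A hedged, self-contained sketch of the build() step completed above: a list of per-file attribute dicts turned into the index DataFrame; the filenames and attributes are invented.
import pandas as pd

new_data = [
    {"A": 1.0, "B": 1.7, "filename": "A_1+B_1.7.dat"},   # invented attribute rows
    {"A": 2.0, "B": 1.1, "filename": "A_2+B_1.1.dat"},
]
df = pd.DataFrame(new_data)
print(df)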
"""
Data structure for 1-dimensional cross-sectional and time series data
"""
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
import itertools
import operator
import sys
import warnings
from numpy import nan, ndarray
import numpy as np
from pandas.core.common import (isnull, notnull, _ensure_index,
_is_bool_indexer, _default_index)
from pandas.core.daterange import DateRange
from pandas.core.generic import PandasObject
from pandas.core.index import Index, MultiIndex
from pandas.core.indexing import _SeriesIndexer, _maybe_droplevels
import pandas.core.datetools as datetools
import pandas._tseries as _tseries
__all__ = ['Series', 'TimeSeries']
def _numpy_lt_151():
return np.__version__ < '1.5.1'
#-------------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _arith_method(op, name):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
from pandas.core.frame import DataFrame
if isinstance(other, Series):
if self.index.equals(other.index):
return Series(op(self.values, other.values), index=self.index)
new_index = self.index + other.index
this_reindexed = self.reindex(new_index)
other_reindexed = other.reindex(new_index)
arr = op(this_reindexed.values, other_reindexed.values)
return Series(arr, index=new_index)
elif isinstance(other, DataFrame):
return NotImplemented
else:
# scalars
return Series(op(self.values, other), index=self.index)
return wrapper
def _flex_method(op, name):
def f(self, other, fill_value=None):
return self._binop(other, op, fill_value=fill_value)
f.__doc__ = """
Binary operator %s with support to substitute a fill_value for missing data
in one of the inputs
Parameters
----------
other: Series or scalar value
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both Series are
missing, the result will be missing
Returns
-------
result : Series
""" % name
f.__name__ = name
return f
#-------------------------------------------------------------------------------
# Series class
class Series(np.ndarray, PandasObject):
"""
Generic indexed (labeled) vector, including time series
Contains values in a numpy-ndarray with an optional bound index
(also an array of dates, strings, or whatever you want the 'row
names' of your series to be)
Rows can be retrieved by index value (date, string, etc.) or
relative position in the underlying array.
Operations between Series (+, -, /, *, **) align values based on
their associated index values-- they need not be the same length.
Parameters
----------
data : array-like, dict, or scalar value
Contains data stored in Series
index : array-like
Index object (or other iterable of same length as data)
Must be input if first argument is not a dict. If both a dict
and index sequence are used, the index will override the keys
found in the dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
Notes
-----
If you combine two series, all values for an index position must
be present or the value for that index position will be nan. The
new index is the sorted union of the two Series indices.
Data is *not* copied from input arrays by default
"""
_AXIS_NUMBERS = {
'index' : 0
}
_AXIS_NAMES = dict((v, k) for k, v in _AXIS_NUMBERS.iteritems())
def __new__(cls, data, index=None, dtype=None, name=None, copy=False):
if isinstance(data, Series):
if index is None:
index = data.index
elif isinstance(data, dict):
if index is None:
index = Index(sorted(data.keys()))
data = [data[idx] for idx in index]
# Create array, do *not* copy data by default, infer type
try:
subarr = np.array(data, dtype=dtype, copy=copy)
except ValueError:
if dtype:
raise
subarr = np.array(data, dtype=object)
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!
if isinstance(value, basestring) and dtype is None:
dtype = np.object_
if dtype is None:
subarr = np.empty(len(index), dtype=type(value))
else:
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
else:
return subarr.item()
elif subarr.ndim > 1:
raise Exception('Data must be 1-dimensional')
if index is None:
index = _default_index(len(subarr))
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, basestring):
subarr = np.array(data, dtype=object, copy=copy)
# Change the class of the array to be the subclass type.
subarr = subarr.view(cls)
subarr.index = index
subarr.name = name
if subarr.index.is_all_dates():
subarr = subarr.view(TimeSeries)
return subarr
def __init__(self, *args, **kwargs):
pass
def __hash__(self):
raise TypeError('unhashable type')
_index = None
def _get_index(self):
return self._index
def _set_index(self, index):
indexTypes = ndarray, Index, list, tuple
if not isinstance(index, indexTypes):
raise TypeError("Expected index to be in %s; was %s."
% (indexTypes, type(index)))
if len(self) != len(index):
raise AssertionError('Lengths of index and values did not match!')
self._index = _ensure_index(index)
index = property(fget=_get_index, fset=_set_index)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self._index = getattr(obj, '_index', None)
def toDict(self):
return dict(self.iteritems())
def to_sparse(self, kind='block', fill_value=None):
"""
Convert Series to SparseSeries
Parameters
----------
kind : {'block', 'integer'}
fill_value : float, defaults to NaN (missing)
Returns
-------
sp : SparseSeries
"""
from pandas.core.sparse import SparseSeries
return SparseSeries(self, kind=kind, fill_value=fill_value)
def __contains__(self, key):
return key in self.index
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(ndarray.__reduce__(self))
subclass_state = (self.index, )
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
nd_state, own_state = state
ndarray.__setstate__(self, nd_state)
index, = own_state
self.index = index
def __getitem__(self, key):
"""
Returns item(s) for requested index/sequence, overrides default behavior
for series[key].
Logic is as follows:
- If key is in the index, return the value corresponding
to that index
- Otherwise, use key (presumably one integer or a sequence
of integers) to obtain values from the series. In the case
of a sequence, a 'slice' of the series (with corresponding dates)
will be returned, otherwise a single value.
"""
try:
if isinstance(self.index, MultiIndex):
return self._multilevel_index(key)
else:
values = self.values
try:
return values[self.index.get_loc(key)]
except KeyError:
if isinstance(key, (int, np.integer)):
return values[key]
raise
except TypeError:
pass
def _index_with(indexer):
return Series(self.values[indexer],
index=self.index[indexer])
# special handling of boolean data with NAs stored in object
# arrays. Sort of an elaborate hack since we can't represent boolean
# NA. Hmm
if _is_bool_indexer(key):
self._check_bool_indexer(key)
key = np.asarray(key, dtype=bool)
return _index_with(key)
# TODO: [slice(0, 5, None)] will break if you convert to ndarray,
# e.g. as requested by np.median
try:
return _index_with(key)
except Exception:
key = np.asarray(key)
return _index_with(key)
def _multilevel_index(self, key):
values = self.values
try:
loc = self.index.get_loc(key)
if isinstance(loc, slice):
# TODO: what if a level contains tuples??
new_index = self.index[loc]
new_index = _maybe_droplevels(new_index, key)
return Series(values[loc], index=new_index)
else:
return values[loc]
except KeyError:
if isinstance(key, (int, np.integer)):
return values[key]
raise Exception('Requested index not in this series!')
def get(self, key, default=None):
"""
Returns value occupying requested index, default to specified
missing value if not present
Parameters
----------
key : object
Index value looking for
default : object, optional
Value to return if key not in index
Returns
-------
y : scalar
"""
if key in self.index:
return self._get_val_at(self.index.get_loc(key))
else:
return default
# help out SparseSeries
_get_val_at = ndarray.__getitem__
def __getslice__(self, i, j):
"""
Returns a slice of the Series.
Note that the underlying values are COPIES.
The reason that the getslice returns copies is that otherwise you
will have a reference to the original series which could be
inadvertently changed
"""
return Series(self.values[i:j].copy(), index=self.index[i:j])
def __setitem__(self, key, value):
values = self.values
try:
loc = self.index.get_loc(key)
values[loc] = value
return
except KeyError:
if isinstance(key, (int, np.integer)):
values[key] = value
return
raise Exception('Requested index not in this series!')
except TypeError:
# Could not hash item
pass
self._check_bool_indexer(key)
# special handling of boolean data with NAs stored in object
# arrays. Sort of an elaborate hack since we can't represent boolean
# NA. Hmm
if isinstance(key, np.ndarray) and key.dtype == np.object_:
mask = isnull(key)
if mask.any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
if set([True, False]).issubset(set(key)):
key = np.asarray(key, dtype=bool)
values[key] = value
return
values[key] = value
def _check_bool_indexer(self, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
if isinstance(key, Series) and key.dtype == np.bool_:
if not key.index.equals(self.index):
raise Exception('can only boolean index with like-indexed '
'Series or raw ndarrays')
def __setslice__(self, i, j, value):
"""Set slice equal to given value(s)"""
ndarray.__setslice__(self, i, j, value)
def __repr__(self):
"""Clean string representation of a Series"""
if len(self.index) > 500:
return self._make_repr(50)
elif len(self.index) > 0:
return _seriesRepr(self.index, self.values)
else:
return '%s' % ndarray.__repr__(self)
def _make_repr(self, max_vals=50):
vals = self.values
index = self.index
num = max_vals // 2
head = _seriesRepr(index[:num], vals[:num])
tail = _seriesRepr(index[-(max_vals - num):], vals[-(max_vals - num):])
return head + '\n...\n' + tail + '\nlength: %d' % len(vals)
def toString(self, buffer=sys.stdout, nanRep='NaN'):
print >> buffer, _seriesRepr(self.index, self.values,
nanRep=nanRep)
def __str__(self):
return repr(self)
def __iter__(self):
return iter(self.values)
def copy(self):
return Series(self.values.copy(), index=self.index)
#-------------------------------------------------------------------------------
# Arithmetic operators
__add__ = _arith_method(operator.add, '__add__')
__sub__ = _arith_method(operator.sub, '__sub__')
__mul__ = _arith_method(operator.mul, '__mul__')
__div__ = _arith_method(operator.div, '__div__')
__truediv__ = _arith_method(operator.truediv, '__truediv__')
__pow__ = _arith_method(operator.pow, '__pow__')
__truediv__ = _arith_method(operator.truediv, '__truediv__')
__radd__ = _arith_method(operator.add, '__add__')
__rmul__ = _arith_method(operator.mul, '__mul__')
__rsub__ = _arith_method(lambda x, y: y - x, '__sub__')
__rdiv__ = _arith_method(lambda x, y: y / x, '__div__')
__rtruediv__ = _arith_method(lambda x, y: y / x, '__truediv__')
__rpow__ = _arith_method(lambda x, y: y ** x, '__pow__')
# Inplace operators
__iadd__ = __add__
__isub__ = __sub__
__imul__ = __mul__
__idiv__ = __div__
__ipow__ = __pow__
#-------------------------------------------------------------------------------
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
def count(self):
"""
Return number of observations of Series.
Returns
-------
nobs : int
"""
return notnull(self.values).sum()
def sum(self, axis=None, dtype=None, out=None):
"""
Sum of non-null values
"""
return self._ndarray_statistic('sum')
def mean(self, axis=None, dtype=None, out=None):
"""
Mean of non-null values
"""
return self._ndarray_statistic('mean')
def _ndarray_statistic(self, funcname):
arr = self.values
retVal = getattr(arr, funcname)()
if isnull(retVal):
arr = remove_na(arr)
if len(arr) == 0:
return np.nan
retVal = getattr(arr, funcname)()
return retVal
def quantile(self, q=0.5):
"""
Return value at the given quantile
Parameters
----------
q : quantile
0 <= q <= 1
Returns
-------
q : float
"""
from scipy.stats import scoreatpercentile
return scoreatpercentile(self.valid().values, q * 100)
def describe(self):
"""
Generate various summary statistics of columns, excluding NaN values
Returns
-------
DataFrame
"""
names = ['count', 'mean', 'std', 'min',
'10%', '50%', '90%', 'max']
data = [self.count(), self.mean(), self.std(), self.min(),
self.quantile(.1), self.median(), self.quantile(.9),
self.max()]
return Series(data, index=names)
def min(self, axis=None, out=None):
"""
Minimum of non-null values
"""
arr = self.values.copy()
if not issubclass(arr.dtype.type, np.int_):
arr[isnull(arr)] = np.inf
return arr.min()
def max(self, axis=None, out=None):
"""
Maximum of non-null values
"""
arr = self.values.copy()
if not issubclass(arr.dtype.type, np.int_):
arr[isnull(arr)] = -np.inf
return arr.max()
def std(self, axis=None, dtype=None, out=None, ddof=1):
"""
Unbiased standard deviation of non-null values
"""
nona = remove_na(self.values)
if len(nona) < 2:
return nan
return ndarray.std(nona, axis, dtype, out, ddof)
def var(self, axis=None, dtype=None, out=None, ddof=1):
"""
Unbiased variance of non-null values
"""
nona = remove_na(self.values)
if len(nona) < 2:
return nan
return ndarray.var(nona, axis, dtype, out, ddof)
def skew(self):
"""
Unbiased skewness of the non-null values
Returns
-------
skew : float
"""
y = np.array(self.values)
mask = notnull(y)
count = mask.sum()
np.putmask(y, -mask, 0)
A = y.sum() / count
B = (y**2).sum() / count - A**2
C = (y**3).sum() / count - A**3 - 3*A*B
return (np.sqrt((count**2-count))*C) / ((count-2)*np.sqrt(B)**3)
def cumsum(self, axis=0, dtype=None, out=None):
"""
Cumulative sum of values. Preserves NaN values
Extra parameters are to preserve ndarray interface.
Returns
-------
"""
arr = self.values.copy()
do_mask = not issubclass(self.dtype.type, np.int_)
if do_mask:
mask = isnull(arr)
np.putmask(arr, mask, 0.)
result = arr.cumsum()
if do_mask:
np.putmask(result, mask, np.nan)
return Series(result, index=self.index)
def cumprod(self, axis=0, dtype=None, out=None):
"""
Overriding numpy's built-in cumprod functionality
"""
arr = self.values.copy()
do_mask = not issubclass(self.dtype.type, np.int_)
if do_mask:
mask = isnull(arr)
np.putmask(arr, mask, 1.)
result = arr.cumprod()
if do_mask:
np.putmask(result, mask, np.nan)
return Series(result, index=self.index)
def median(self):
"""
Compute median value of non-null values
"""
arr = self.values
if arr.dtype != np.float_:
arr = arr.astype(float)
arr = arr[notnull(arr)]
return _tseries.median(arr)
def corr(self, other):
"""
Compute correlation between two Series, excluding missing values
Parameters
----------
other : Series object
Returns
-------
correlation : float
"""
commonIdx = self.valid().index.intersection(other.valid().index)
if len(commonIdx) == 0:
return nan
this = self.reindex(commonIdx)
that = other.reindex(commonIdx)
return np.corrcoef(this, that)[0, 1]
def diff(self):
"""
1st discrete difference of object
Returns
-------
TimeSeries
"""
return (self - self.shift(1))
def autocorr(self):
"""
Lag-1 autocorrelation
Returns
-------
TimeSeries
"""
return self.corr(self.shift(1))
def clip(self, upper=None, lower=None):
"""
Trim values at input threshold(s)
Parameters
----------
lower : float, default None
upper : float, default None
Returns
-------
y : Series
"""
result = self
if lower is not None:
result = result.clip_lower(lower)
if upper is not None:
result = result.clip_upper(upper)
return result
def clip_upper(self, threshold):
"""Return copy of series with values above given value truncated"""
return np.where(self > threshold, threshold, self)
def clip_lower(self, threshold):
"""Return copy of series with values below given value truncated"""
return np.where(self < threshold, threshold, self)
#-------------------------------------------------------------------------------
# Iteration
def keys(self):
"Alias for Series index"
return self.index
@property
def values(self):
"""
Return Series as ndarray
Returns
-------
arr : numpy.ndarray
"""
return self.view(ndarray)
def iteritems(self):
"""
Lazily iterate over (index, value) tuples
"""
return itertools.izip(iter(self.index), iter(self))
#-------------------------------------------------------------------------------
# Combination
def append(self, other):
"""
Concatenate two Series. The indices should not overlap
Parameters
----------
other : Series
Returns
-------
y : Series
"""
new_index = np.concatenate((self.index, other.index))
new_index = Index(new_index)
new_index._verify_integrity()
new_values = np.concatenate((self, other))
return Series(new_values, index=new_index)
def _binop(self, other, func, fill_value=None):
"""
Parameters
----------
other : Series
Returns
-------
combined : Series
"""
# TODO: docstring
assert(isinstance(other, Series))
new_index = self.index
this = self
if not self.index.equals(other.index):
new_index = self.index + other.index
this = self.reindex(new_index)
other = other.reindex(new_index)
this_vals = this.values
other_vals = other.values
if fill_value is not None:
this_mask = isnull(this_vals)
other_mask = isnull(other_vals)
this_vals = this_vals.copy()
other_vals = other_vals.copy()
# one but not both
mask = this_mask ^ other_mask
this_vals[this_mask & mask] = fill_value
other_vals[other_mask & mask] = fill_value
result = func(this_vals, other_vals)
return Series(result, index=new_index)
add = _flex_method(operator.add, 'add')
sub = _flex_method(operator.sub, 'subtract')
mul = _flex_method(operator.mul, 'multiply')
div = _flex_method(operator.div, 'divide')
def combine(self, other, func, fill_value=nan):
"""
Perform elementwise binary operation on two Series using given function
with optional fill value when an index is missing from one Series or the
other
Parameters
----------
other : Series or scalar value
func : function
fill_value : scalar value
Returns
-------
result : Series
"""
if isinstance(other, Series):
new_index = self.index + other.index
new_values = np.empty(len(new_index), dtype=self.dtype)
for i, idx in enumerate(new_index):
new_values[i] = func(self.get(idx, fill_value),
other.get(idx, fill_value))
else:
new_index = self.index
new_values = func(self.values, other)
return Series(new_values, index=new_index)
def combineFirst(self, other):
"""
Combine Series values, choosing calling Series's values first.
Parameters
----------
other : Series
Returns
-------
y : Series
formed as union of two Series
"""
if self.index.equals(other.index):
new_index = self.index
# save ourselves the copying in this case
this = self
else:
new_index = self.index + other.index
this = self.reindex(new_index)
other = other.reindex(new_index)
result = Series(np.where(isnull(this), other, this), index=new_index)
return result
#----------------------------------------------------------------------
# Reindexing, sorting
def sort(self, axis=0, kind='quicksort', order=None):
"""
Overridden NumPy sort, taking care with missing values
"""
sortedSeries = self.order(na_last=True)
self[:] = sortedSeries
self.index = sortedSeries.index
def argsort(self, axis=0, kind='quicksort', order=None):
"""
Overriding numpy's built-in argsort functionality, taking care with missing values
"""
values = self.values
mask =
|
isnull(values)
|
pandas.core.common.isnull
|
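A hedged note on the pandas.core.common.isnull call labelled above: in this very old Series code it is an internal import, while modern pandas exposes the same check publicly as pandas.isnull / pandas.isna; a minimal modern equivalent follows, with illustrative values.
import numpy as np
import pandas as pd

values = np.array([1.0, np.nan, 3.0])   # illustrative array
mask = pd.isnull(values)
print(mask)    # [False  True False]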
import os
import sys
from multiprocessing import Pool, cpu_count
from functools import partial
from operator import is_not
import pandas as pd
import numpy as np
levels = np.arange(0.05, .55, 0.05)
blast_cols = {"query_name": "str",
"subject_name": "str",
"pident": "float",
"nident": "int",
"sub_len": "int"}
def pll_maxident(filename):
df = pd.read_table(filename, comment="#", names=[*blast_cols], dtype=blast_cols)
if not len(df.index):
return None
max_pident = df.pident.max() / 100
max_nident = df.nident.max()
name = df.query_name[0]
return name, max_pident, max_nident
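# Illustrative helper (an addition for clarity, not in the original script):
# bin the fractional identity returned by pll_maxident into the 5%-wide
# thresholds defined by `levels` above. The function name is hypothetical.
def identity_level(max_pident):
    # number of thresholds in `levels` that the identity reaches
    return int(np.searchsorted(levels, max_pident, side="right"))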
def main():
blast_dir = sys.argv[1]
in_ds = sys.argv[2]
out_dir = sys.argv[3]
os.makedirs(out_dir, exist_ok=True)
in_df = pd.read_csv(in_ds, sep=";", names=["name", "seq", "gos"], dtype="str").set_index("name")
blast_results = [os.path.join(blast_dir, f) for f in os.listdir(blast_dir) if os.path.isfile(os.path.join(blast_dir, f))]
with Pool(cpu_count()) as pool:
maxs = pool.map(pll_maxident, blast_results)
maxs = list(filter(partial(is_not, None), maxs))
max_df =
|
pd.DataFrame(maxs, columns=["name", "local_ident", "nident"])
|
pandas.DataFrame
|
# Python File containing program to register names, and made for the register button of main window.
# Importing necessary libraries
print('Importing necessary libraries for register button...')
from Classes import *
from tkinter import ttk
from tkinter import *
import pandas as pd
import time
import string
import threading
from tkcalendar import DateEntry
try:
table =
|
pd.read_excel('Students_Records.xlsx')
|
pandas.read_excel
|
"""
Split-out gevent helpers.
This module holds the non-one-off data that needs to be downloaded with the help of gevent coroutines.
"""
import gevent
from gevent.pool import Group
from gevent.queue import Queue
import socket
import pandas as pd
from stock.technical import (get_sh_margin_details, get_sz_margin_details,
get_tick_data, get_k_data)
from stock.fundamental import get_stock_basics
from stock.news import get_notices
def _load_sz_margin_details(date, output_list):
"""
Fetch the Shenzhen margin trading and short selling details for a given day
Parameters
--------
date:string
    date, format:YYYY-MM-DD
output_list:list
    list that collects the results
Return
------
None
"""
output_list.append(get_sz_margin_details(date=date))
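# Illustrative sketch (added for clarity, not part of the original module):
# the same Group/spawn/join fan-out pattern used by load_margin_details below,
# shown with a trivial worker. All names here are hypothetical.
def _demo_parallel_collect(items):
    results = []
    group = Group()
    for item in items:
        group.add(gevent.spawn(results.append, item))
    group.join()
    return results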
def load_margin_details(code, start, end):
"""
Fetch the margin trading and short selling details
Parameters
--------
code:string
    stock code, e.g.600728
start:string
    start date, format:YYYY-MM-DD
end:string
    end date, format:YYYY-MM-DD
Return
------
DataFrame
"""
sh_details = get_sh_margin_details(start=start, end=end)
sz_list = list()
group = Group()
for date in sh_details['日期'].drop_duplicates():
group.add(gevent.spawn(_load_sz_margin_details, date, sz_list))
group.join()
if len(sz_list) == 0:
return sh_details
sz_details =
|
pd.concat(sz_list)
|
pandas.concat
|
import pytest
import numpy as np
import pandas as pd
from datetime import datetime, date
from sklearn.dummy import DummyRegressor
from sklearn_pandas import transformers
from sklearn_pandas.transformers.base import DataFrameModelTransformer, DataFrameFixColumnOrder, DataFrameFunctionApply, InferType
def test_transform_DataFrameModelTransformer():
df = pd.DataFrame({'A': [1, 2, ]})
expected_out = pd.DataFrame({'B': [0.0, 0.0, ]})
y = [0.0, 3.0, ]
model = DummyRegressor(strategy='constant', constant=0.0)
model_transform = DataFrameModelTransformer(
model, output_column_names=['B'])
model_transform.fit(df, y)
model_output = model_transform.transform(df)
pd.testing.assert_frame_equal(expected_out, model_output)
def test_identity_DataFrameFunctionApply():
df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
dffa = DataFrameFunctionApply()
expected_out = df
pd.testing.assert_frame_equal(expected_out, dffa.fit_transform(df))
def test_plus_one_prefix_DataFrameFunctionApply():
df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
dffa = DataFrameFunctionApply(func=lambda x: 2*x, prefix='dub_')
expected_out =
|
pd.DataFrame({'dub_A': [2, 4, 6], 'dub_B': [8, 10, 12]})
|
pandas.DataFrame
|
import os
import typing
import warnings
import numpy as np
import pandas as pd
import audeer
import audformat
from audinterface.core import utils
from audinterface.core.segment import Segment
from audinterface.core.typing import (
Timestamp,
Timestamps,
)
class Process:
r"""Processing interface.
Args:
process_func: processing function,
which expects the two positional arguments ``signal``
and ``sampling_rate``
and any number of additional keyword arguments
process_func_args: (keyword) arguments passed on to the processing
function
process_func_is_mono: if set to ``True`` and the input signal
has multiple channels, ``process_func`` will be applied to
every channel individually
sampling_rate: sampling rate in Hz.
If ``None`` it will call ``process_func`` with the actual
sampling rate of the signal
resample: if ``True`` enforces given sampling rate by resampling
channels: channel selection, see :func:`audresample.remix`
mixdown: apply mono mix-down on selection
segment: when a :class:`audinterface.Segment` object is provided,
it will be used to find a segmentation of the input signal.
Afterwards processing is applied to each segment
keep_nat: if the end of segment is set to ``NaT`` do not replace
with file duration in the result
num_workers: number of parallel jobs or 1 for sequential
processing. If ``None`` will be set to the number of
processors on the machine multiplied by 5 in case of
multithreading and number of processors in case of
multiprocessing
multiprocessing: use multiprocessing instead of multithreading
verbose: show debug messages
Raises:
ValueError: if ``resample = True``, but ``sampling_rate = None``
"""
def __init__(
self,
*,
process_func: typing.Callable[..., typing.Any] = None,
process_func_args: typing.Dict[str, typing.Any] = None,
process_func_is_mono: bool = False,
sampling_rate: int = None,
resample: bool = False,
channels: typing.Union[int, typing.Sequence[int]] = None,
mixdown: bool = False,
segment: Segment = None,
keep_nat: bool = False,
num_workers: typing.Optional[int] = 1,
multiprocessing: bool = False,
verbose: bool = False,
**kwargs,
):
if resample and sampling_rate is None:
raise ValueError(
'sampling_rate has to be provided for resample = True.'
)
self.sampling_rate = sampling_rate
r"""Sampling rate in Hz."""
self.resample = resample
r"""Resample signal."""
self.channels = None if channels is None else audeer.to_list(channels)
r"""Channel selection."""
self.mixdown = mixdown
r"""Mono mixdown."""
self.segment = segment
r"""Segmentation object."""
self.keep_nat = keep_nat
r"""Keep NaT in results."""
self.num_workers = num_workers
r"""Number of workers."""
self.multiprocessing = multiprocessing
r"""Use multiprocessing."""
self.verbose = verbose
r"""Show debug messages."""
if process_func is None:
def process_func(signal, _):
return signal
self.process_func = process_func
r"""Processing function."""
self.process_func_is_mono = process_func_is_mono
r"""Process channels individually."""
process_func_args = process_func_args or {}
if kwargs:
warnings.warn(
utils.kwargs_deprecation_warning,
category=UserWarning,
stacklevel=2,
)
for key, value in kwargs.items():
process_func_args[key] = value
self.process_func_args = process_func_args
r"""Additional keyword arguments to processing function."""
def _process_file(
self,
file: str,
*,
start: pd.Timedelta = None,
end: pd.Timedelta = None,
root: str = None,
) -> pd.Series:
signal, sampling_rate = utils.read_audio(
file,
start=start,
end=end,
root=root,
)
y = self._process_signal(
signal,
sampling_rate,
file=file,
)
if start is None or pd.isna(start):
start = y.index.levels[1][0]
if end is None or (pd.isna(end) and not self.keep_nat):
end = y.index.levels[2][0] + start
y.index = y.index.set_levels(
[[start], [end]],
level=[1, 2],
)
return y
def process_file(
self,
file: str,
*,
start: Timestamp = None,
end: Timestamp = None,
root: str = None,
) -> pd.Series:
r"""Process the content of an audio file.
Args:
file: file path
start: start processing at this position.
If value is a float or integer it is treated as seconds
end: end processing at this position.
If value is a float or integer it is treated as seconds
root: root folder to expand relative file path
Returns:
Series with processed file conform to audformat_
Raises:
RuntimeError: if sampling rates do not match
RuntimeError: if channel selection is invalid
.. _audformat: https://audeering.github.io/audformat/data-format.html
"""
start = utils.to_timedelta(start)
end = utils.to_timedelta(end)
if self.segment is not None:
index = self.segment.process_file(
file,
start=start,
end=end,
root=root,
)
return self._process_index_wo_segment(index, root)
else:
return self._process_file(file, start=start, end=end, root=root)
def process_files(
self,
files: typing.Sequence[str],
*,
starts: Timestamps = None,
ends: Timestamps = None,
root: str = None,
) -> pd.Series:
r"""Process a list of files.
Args:
files: list of file paths
starts: segment start positions.
Time values given as float or integers are treated as seconds.
If a scalar is given, it is applied to all files
ends: segment end positions.
Time values given as float or integers are treated as seconds
If a scalar is given, it is applied to all files
root: root folder to expand relative file paths
Returns:
Series with processed files conform to audformat_
Raises:
RuntimeError: if sampling rates do not match
RuntimeError: if channel selection is invalid
.. _audformat: https://audeering.github.io/audformat/data-format.html
"""
if isinstance(starts, (type(None), float, int, str, pd.Timedelta)):
starts = [starts] * len(files)
if isinstance(ends, (type(None), float, int, str, pd.Timedelta)):
ends = [ends] * len(files)
starts = utils.to_timedelta(starts)
ends = utils.to_timedelta(ends)
params = [
(
(file, ),
{
'start': start,
'end': end,
'root': root,
},
) for file, start, end in zip(files, starts, ends)
]
verbose = self.verbose
self.verbose = False # avoid nested progress bar
y = audeer.run_tasks(
self.process_file,
params,
num_workers=self.num_workers,
multiprocessing=self.multiprocessing,
progress_bar=verbose,
task_description=f'Process {len(files)} files',
)
self.verbose = verbose
return pd.concat(y)
def process_folder(
self,
root: str,
*,
filetype: str = 'wav',
) -> pd.Series:
r"""Process files in a folder.
.. note:: At the moment does not scan in sub-folders!
Args:
root: root folder
filetype: file extension
Returns:
Series with processed files conform to audformat_
Raises:
RuntimeError: if sampling rates do not match
RuntimeError: if channel selection is invalid
.. _audformat: https://audeering.github.io/audformat/data-format.html
"""
files = audeer.list_file_names(root, filetype=filetype)
files = [os.path.join(root, os.path.basename(f)) for f in files]
return self.process_files(files)
def _process_index_wo_segment(
self,
index: pd.Index,
root: typing.Optional[str],
) -> pd.Series:
r"""Like process_index, but does not apply segmentation."""
if index.empty:
return pd.Series(None, index=index, dtype=float)
params = [
(
(file, ),
{
'start': start,
'end': end,
'root': root,
},
)
for file, start, end in index
]
y = audeer.run_tasks(
self._process_file,
params,
num_workers=self.num_workers,
multiprocessing=self.multiprocessing,
progress_bar=self.verbose,
task_description=f'Process {len(index)} segments',
)
return pd.concat(y)
def process_index(
self,
index: pd.Index,
*,
root: str = None,
) -> pd.Series:
r"""Process from an index conform to audformat_.
Args:
index: index with segment information
root: root folder to expand relative file paths
Returns:
Series with processed segments conform to audformat_
Raises:
RuntimeError: if sampling rates do not match
RuntimeError: if channel selection is invalid
.. _audformat: https://audeering.github.io/audformat/data-format.html
"""
index = audformat.utils.to_segmented_index(index)
if self.segment is not None:
index = self.segment.process_index(index, root=root)
return self._process_index_wo_segment(index, root)
def _process_signal(
self,
signal: np.ndarray,
sampling_rate: int,
*,
file: str = None,
start: pd.Timedelta = None,
end: pd.Timedelta = None,
) -> pd.Series:
signal = np.atleast_2d(signal)
# Find start and end index
if start is None or pd.isna(start):
start = pd.to_timedelta(0)
if end is None or (pd.isna(end) and not self.keep_nat):
end = pd.to_timedelta(signal.shape[-1] / sampling_rate, unit='s')
start_i, end_i = utils.segment_to_indices(
signal, sampling_rate, start, end,
)
# Trim and process signal
y = self(signal[:, start_i:end_i], sampling_rate)
# Create index
if file is not None:
index = audformat.segmented_index(file, start, end)
else:
index = utils.signal_index(start, end)
return pd.Series([y], index)
def process_signal(
self,
signal: np.ndarray,
sampling_rate: int,
*,
file: str = None,
start: Timestamp = None,
end: Timestamp = None,
) -> typing.Any:
r"""Process audio signal and return result.
.. note:: If a ``file`` is given, the index of the returned frame
has levels ``file``, ``start`` and ``end``. Otherwise,
it consists only of ``start`` and ``end``.
Args:
signal: signal values
sampling_rate: sampling rate in Hz
file: file path
start: start processing at this position.
If value is a float or integer it is treated as seconds
end: end processing at this position.
If value is a float or integer it is treated as seconds
Returns:
Series with processed signal conform to audformat_
Raises:
RuntimeError: if sampling rates do not match
RuntimeError: if channel selection is invalid
.. _audformat: https://audeering.github.io/audformat/data-format.html
"""
start = utils.to_timedelta(start)
end = utils.to_timedelta(end)
if self.segment is not None:
index = self.segment.process_signal(
signal,
sampling_rate,
file=file,
start=start,
end=end,
)
return self._process_signal_from_index_wo_segment(
signal,
sampling_rate,
index,
)
else:
return self._process_signal(
signal,
sampling_rate,
file=file,
start=start,
end=end,
)
def _process_signal_from_index_wo_segment(
self,
signal: np.ndarray,
sampling_rate: int,
index: pd.Index,
) -> pd.Series:
r"""Like process_signal_from_index, but does not apply segmentation."""
if index.empty:
return pd.Series(None, index=index, dtype=float)
if isinstance(index, pd.MultiIndex) and len(index.levels) == 2:
params = [
(
(signal, sampling_rate),
{'start': start, 'end': end},
) for start, end in index
]
else:
index = audformat.utils.to_segmented_index(index)
params = [
(
(signal, sampling_rate),
{'file': file, 'start': start, 'end': end},
) for file, start, end in index
]
y = audeer.run_tasks(
self._process_signal,
params,
num_workers=self.num_workers,
multiprocessing=self.multiprocessing,
progress_bar=self.verbose,
task_description=f'Process {len(index)} segments',
)
return pd.concat(y)
def process_signal_from_index(
self,
signal: np.ndarray,
sampling_rate: int,
index: pd.Index,
) -> pd.Series:
r"""Split a signal into segments and process each segment.
Args:
signal: signal values
sampling_rate: sampling rate in Hz
index: a segmented index conform to audformat_
or a :class:`pandas.MultiIndex` with two levels
named `start` and `end` that hold start and end
positions as :class:`pandas.Timedelta` objects.
See also :func:`audinterface.utils.signal_index`
Returns:
Series with processed segments conform to audformat_
Raises:
RuntimeError: if sampling rates do not match
RuntimeError: if channel selection is invalid
ValueError: if index contains duplicates
.. _audformat: https://audeering.github.io/audformat/data-format.html
"""
utils.assert_index(index)
if index.empty:
return pd.Series(None, index=index, dtype=float)
if self.segment is not None:
index = self.segment.process_signal_from_index(
signal,
sampling_rate,
index,
)
return self._process_signal_from_index_wo_segment(
signal,
sampling_rate,
index,
)
def __call__(
self,
signal: np.ndarray,
sampling_rate: int,
) -> typing.Any:
r"""Apply processing to signal.
This function processes the signal **without** transforming the output
into a :class:`pd.Series`. Instead it will return the raw processed
signal. However, if channel selection, mixdown and/or resampling
is enabled, the signal will be first remixed and resampled if the
input sampling rate does not fit the expected sampling rate.
Args:
signal: signal values
sampling_rate: sampling rate in Hz
Returns:
Processed signal
Raises:
RuntimeError: if sampling rates do not match
RuntimeError: if channel selection is invalid
"""
signal, sampling_rate = utils.preprocess_signal(
signal,
sampling_rate,
self.sampling_rate,
self.resample,
self.channels,
self.mixdown,
)
if self.process_func_is_mono:
return [
self.process_func(
np.atleast_2d(channel),
sampling_rate,
**self.process_func_args,
) for channel in signal
]
return self.process_func(
signal,
sampling_rate,
**self.process_func_args,
)
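# Illustrative usage sketch (an addition, not part of audinterface itself):
# build a Process interface that reduces each segment to a scalar and apply it
# to a dummy mono signal. The RMS function and the 8 kHz rate are arbitrary
# choices made for this example.
def _example_process_rms() -> pd.Series:
    interface = Process(
        process_func=lambda signal, sampling_rate: float(
            np.sqrt(np.mean(signal ** 2))
        ),
    )
    signal = np.zeros((1, 8000), dtype=np.float32)
    # returns a series with a single entry indexed by (start, end)
    return interface.process_signal(signal, 8000)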
class ProcessWithContext:
r"""Alternate processing interface that provides signal context.
In contrast to :class:`Process` this interface does not look at segments
in isolation, but passes the complete signal together with a list of
segments to the processing function. By doing so, it becomes possible to
process segments in context, e.g. by taking into account surrounding
signal values or other segments.
Args:
process_func: processing function, which expects four positional
arguments:
* ``signal``
* ``sampling_rate``
* ``starts`` sequence with start indices
* ``ends`` sequence with end indices
and any number of additional keyword arguments.
Must return a sequence of results for every segment
process_func_args: (keyword) arguments passed on to the processing
function
sampling_rate: sampling rate in Hz.
If ``None`` it will call ``process_func`` with the actual
sampling rate of the signal
resample: if ``True`` enforces given sampling rate by resampling
channels: channel selection, see :func:`audresample.remix`
mixdown: apply mono mix-down on selection
verbose: show debug messages
Raises:
ValueError: if ``resample = True``, but ``sampling_rate = None``
"""
def __init__(
self,
*,
process_func: typing.Callable[
...,
typing.Sequence[typing.Any]
] = None,
process_func_args: typing.Dict[str, typing.Any] = None,
sampling_rate: int = None,
resample: bool = False,
channels: typing.Union[int, typing.Sequence[int]] = None,
mixdown: bool = False,
verbose: bool = False,
**kwargs,
):
if resample and sampling_rate is None:
raise ValueError(
'sampling_rate has to be provided for resample = True.'
)
self.sampling_rate = sampling_rate
r"""Sampling rate in Hz."""
self.resample = resample
r"""Resample signal."""
self.channels = None if channels is None else audeer.to_list(channels)
r"""Channel selection."""
self.mixdown = mixdown
r"""Mono mixdown."""
self.verbose = verbose
r"""Show debug messages."""
if process_func is None:
def process_func(signal, _, starts, ends):
return [
signal[:, start:end] for start, end in zip(starts, ends)
]
self.process_func = process_func
r"""Process function."""
process_func_args = process_func_args or {}
if kwargs:
warnings.warn(
utils.kwargs_deprecation_warning,
category=UserWarning,
stacklevel=2,
)
for key, value in kwargs.items():
process_func_args[key] = value
self.process_func_args = process_func_args
r"""Additional keyword arguments to processing function."""
def process_index(
self,
index: pd.Index,
*,
root: str = None,
) -> pd.Series:
r"""Process from a segmented index conform to audformat_.
Args:
index: index with segment information
root: root folder to expand relative file paths
Returns:
Series with processed segments conform to audformat_
Raises:
RuntimeError: if sampling rates do not match
RuntimeError: if channel selection is invalid
.. _audformat: https://audeering.github.io/audformat/data-format.html
"""
index = audformat.utils.to_segmented_index(index)
if index.empty:
return
|
pd.Series(index=index, dtype=float)
|
pandas.Series
|
import xml.etree.ElementTree as ET
import math
import numpy
import numpy as np
import pandas as pd
def raw_file_converter(rawfilepath,rawdata):
tree=ET.parse(rawfilepath)
root=tree.getroot()
read_limit = len(root)
data=[]
for i in range(read_limit):
if root[i].tag=='a':
data.append(root[i])
elif root[i].tag=='g':
data.append(root[i])
elif root[i].tag=='m':
data.append(root[i])
root=data
read_limit=len(root)
if len(root) < 1:
print('Xml file is empty.')
exit(-1)
a_st = {}
g_st = {}
m_st = {}
max_st = 0
for i in range(read_limit):
st = int((float(root[i].attrib['st'])-float(root[0].attrib['st']))/1e6)
if root[i].tag == 'a':
a_st[st] = i
if st > max_st:
max_st = st
elif root[i].tag == 'g':
g_st[st] = i
if st > max_st:
max_st = st
elif root[i].tag == 'm':
m_st[st] = i
if st > max_st:
max_st = st
st=[]
ax=[]
ay=[]
az=[]
gx=[]
gy=[]
gz=[]
mx=[]
my=[]
mz=[]
for i in range(max_st+1):
st = numpy.append(st, i, axis=None)
if i in a_st:
ax = numpy.append(ax, float(root[a_st[i]].attrib['x']), axis=None)
ay = numpy.append(ay, float(root[a_st[i]].attrib['y']), axis=None)
az = numpy.append(az, float(root[a_st[i]].attrib['z']), axis=None)
else:
ax = numpy.append(ax, numpy.NaN, axis=None)
ay = numpy.append(ay, numpy.NaN, axis=None)
az = numpy.append(az, numpy.NaN, axis=None)
if i in g_st:
gx = numpy.append(gx, float(root[g_st[i]].attrib['x']), axis=None)
gy = numpy.append(gy, float(root[g_st[i]].attrib['y']), axis=None)
gz = numpy.append(gz, float(root[g_st[i]].attrib['z']), axis=None)
else:
gx = numpy.append(gx, numpy.NaN, axis=None)
gy = numpy.append(gy, numpy.NaN, axis=None)
gz = numpy.append(gz, numpy.NaN, axis=None)
if i in m_st:
mx = numpy.append(mx, float(root[m_st[i]].attrib['x']), axis=None)
my = numpy.append(my, float(root[m_st[i]].attrib['y']), axis=None)
mz = numpy.append(mz, float(root[m_st[i]].attrib['z']), axis=None)
else:
mx = numpy.append(mx, numpy.NaN, axis=None)
my = numpy.append(my, numpy.NaN, axis=None)
mz = numpy.append(mz, numpy.NaN, axis=None)
df = pd.DataFrame(data=st,columns=['st'])
df['ax'] = ax
df['ay'] = ay
df['az'] = az
df['gx'] = gx
df['gy'] = gy
df['gz'] = gz
df['mx'] = mx
df['my'] = my
df['mz'] = mz
df=df.drop_duplicates(subset='st', keep='first', inplace=False)
df['AccTotal'] = numpy.sqrt(df['ax']**2+df['ay']**2+df['az']**2)
df['GyrTotal'] = numpy.sqrt(df['gx']**2+df['gy']**2+df['gz']**2)
df['MagTotal'] = numpy.sqrt(df['mx']**2+df['my']**2+df['mz']**2)
df=df.interpolate(method='linear', axis=0, limit=None, inplace=False, limit_direction='forward', limit_area=None, downcast=None)
for i in range(1000):
if not math.isnan(df['gx'][i]):
df['gx'][0]=df['gx'][i]
df['gy'][0]=df['gy'][i]
df['gz'][0]=df['gz'][i]
df['GyrTotal'][0]=df['GyrTotal'][i]
break
for j in range(1000):
if not math.isnan(df['ax'][j]):
df['ax'][0]=df['ax'][j]
df['ay'][0]=df['ay'][j]
df['az'][0]=df['az'][j]
df['AccTotal'][0]=df['AccTotal'][j]
break
for k in range(1000):
if not math.isnan(df['mx'][k]):
df['mx'][0]=df['mx'][k]
df['my'][0]=df['my'][k]
df['mz'][0]=df['mz'][k]
df['MagTotal'][0]=df['MagTotal'][k]
break
df=df.interpolate(method='linear', axis=0, limit=None, inplace=False, limit_direction='forward', limit_area=None, downcast=None)
df.to_csv(rawdata)
#return df
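# Illustrative call (an added sketch; the file names are hypothetical):
#   raw_file_converter('session.xml', 'raw.csv')
# parses the <a>/<g>/<m> sensor tags of the XML log, aligns accelerometer,
# gyroscope and magnetometer samples on a common integer time step with
# linear interpolation, and writes the merged table to raw.csv.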
def data_generator(time_step, sensor_data, rawdata, timerfilepath):
df = pd.read_csv(rawdata)
point = pd.read_csv(timerfilepath)
point['Time']=point['Time']*1000
df['lat']=numpy.NaN
df['lng']=numpy.NaN
sequcnce=0
s=0.0
sl=0.0
l_s=0
for i in range (len(df)):
if sequcnce >len(point)-1:
break
if df['st'][i]==point['Time'][sequcnce]:
df['lat'][i]=point['lat'][sequcnce]
df['lng'][i]=point['Lng'][sequcnce]
diff=(point['lat'][sequcnce] - s)/(i-l_s)
difflng=(point['Lng'][sequcnce] - sl)/(i-l_s)
counter=1
sum=s
suml=sl
for j in range (l_s+1,i):
if counter%time_step==0:
sum=sum+diff*time_step
suml=suml+difflng*time_step
df['lat'][j]=sum
df['lng'][j]=suml
counter=counter+1
s=point['lat'][sequcnce]
sl=point['Lng'][sequcnce]
sequcnce=sequcnce+1
l_s=i
df=df.drop(df[df.st < point['Time'][0]].index)
df=df.drop(df[df.st > point['Time'][len(point)-1]].index)
df.to_csv(sensor_data)
def overlap_generator(cover_range,over_lapping, overlap_data, rawdata,timerfilepath):
dftest=pd.read_csv(rawdata)
Acct=[None] * cover_range
Gyrt=[None] * cover_range
Magt=[None] * cover_range
Ax=[None] * cover_range
Ay=[None] * cover_range
Az=[None] * cover_range
Gx=[None] * cover_range
Gy=[None] * cover_range
Gz=[None] * cover_range
Mx=[None] * cover_range
My=[None] * cover_range
Mz=[None] * cover_range
time=[None] * 1
drop_num=abs((len(dftest)+over_lapping)//cover_range*cover_range-len(dftest))
if drop_num==0:
dftest=dftest
else:
dftest=dftest[:-drop_num]
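# Added explanatory note (worked example, not in the original code): the loop
# below advances by cover_range - over_lapping rows per step, so with
# cover_range=100 and over_lapping=50 the windows start at rows 0, 50, 100, ...
# and consecutive windows share 50 rows.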
for i in range (0, len(dftest), cover_range-over_lapping):
slide_window_acc=dftest['AccTotal'][i:i+cover_range]
slide_window_gyr=dftest['GyrTotal'][i:i+cover_range]
slide_window_mag=dftest['MagTotal'][i:i+cover_range]
slide_window_ax=dftest['ax'][i:i+cover_range]
slide_window_ay=dftest['ay'][i:i+cover_range]
slide_window_az=dftest['az'][i:i+cover_range]
slide_window_gx=dftest['gx'][i:i+cover_range]
slide_window_gy=dftest['gy'][i:i+cover_range]
slide_window_gz=dftest['gz'][i:i+cover_range]
slide_window_mx=dftest['mx'][i:i+cover_range]
slide_window_my=dftest['my'][i:i+cover_range]
slide_window_mz=dftest['mz'][i:i+cover_range]
cur_time=i
if not slide_window_acc.shape[0]==cover_range:
break
Acct = numpy.row_stack((Acct,slide_window_acc))
Gyrt = numpy.row_stack((Gyrt,slide_window_gyr))
Magt = numpy.row_stack((Magt,slide_window_mag))
Ax = numpy.row_stack((Ax,slide_window_ax))
Ay = numpy.row_stack((Ay,slide_window_ay))
Az = numpy.row_stack((Az,slide_window_az))
Gx = numpy.row_stack((Gx,slide_window_gx))
Gy = numpy.row_stack((Gy,slide_window_gy))
Gz = numpy.row_stack((Gz,slide_window_gz))
Mx = numpy.row_stack((Mx,slide_window_mx))
My = numpy.row_stack((My,slide_window_my))
Mz = numpy.row_stack((Mz,slide_window_mz))
time = numpy.row_stack((time,cur_time))
abc=np.concatenate((Acct,Gyrt,Magt,Ax,Ay,Az,Gx,Gy,Gz,Mx,My,Mz),axis=1)
abc=np.delete(abc, 0, 0)  # drop the first placeholder row of None values that was only used to initialize the arrays
abc=pd.DataFrame(abc)
point =
|
pd.read_csv(timerfilepath)
|
pandas.read_csv
|
import pandas as pd
from product_list.data.company_api import CompanyApi
from product_list.data.products_api import ProductApi
from product_list.models.company import Company
from product_list.models.product import Product
from typing import Mapping, Sequence
import uuid
import product_list.core.config
df =
|
pd.read_excel('assets/bpdprielist.xlsx')
|
pandas.read_excel
|
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.preprocessing import Imputer
from sklearn import linear_model
from sklearn.ensemble import BaggingRegressor, RandomForestRegressor
from sklearn import svm
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import make_scorer, r2_score
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import tflearn
import tensorflow as tf
import warnings
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore')
def lets_try(train, labels):
results = {}
def test_model(clf):
cv = KFold(n_splits=5, shuffle=True, random_state=45)
r2 = make_scorer(r2_score)
r2_val_score = cross_val_score(clf, train, labels, cv=cv, scoring=r2)
scores = [r2_val_score.mean()]
return scores
clf = linear_model.LinearRegression()
results["Linear"] = test_model(clf)
clf = linear_model.Ridge()
results["Ridge"] = test_model(clf)
clf = linear_model.BayesianRidge()
results["Bayesian Ridge"] = test_model(clf)
clf = linear_model.HuberRegressor()
results["Hubber"] = test_model(clf)
clf = linear_model.Lasso(alpha=1e-4)
results["Lasso"] = test_model(clf)
clf = BaggingRegressor()
results["Bagging"] = test_model(clf)
clf = RandomForestRegressor()
results["RandomForest"] = test_model(clf)
clf = AdaBoostRegressor()
results["AdaBoost"] = test_model(clf)
clf = svm.SVR()
results["SVM RBF"] = test_model(clf)
clf = svm.SVR(kernel="linear")
results["SVM Linear"] = test_model(clf)
results = pd.DataFrame.from_dict(results, orient='index')
results.columns = ["R Square Score"]
# results = results.sort(columns=["R Square Score"], ascending=False)
results.plot(kind="bar", title="Model Scores")
axes = plt.gca()
axes.set_ylim([0.5, 1])
return results
'''
Pre-processing is adapted from: https://www.kaggle.com/miguelangelnieto/pca-and-regression/notebook/notebook
'''
train = pd.read_csv('../train.csv')
test = pd.read_csv('../test.csv')
labels = train["SalePrice"]
data = pd.concat([train, test], ignore_index=True)
print(data.shape)
print(data.dtypes.value_counts())
print(labels.describe())
print(data.head())
print(data.tail())
data = data.drop("SalePrice", 1)
ids = test['Id']
# Remove id and columns with more than a thousand missing values
data = data.drop("Id", 1)
data = data.drop("Alley", 1)
data = data.drop("Fence", 1)
data = data.drop("MiscFeature", 1)
data = data.drop("PoolQC", 1)
data = data.drop("FireplaceQu", 1)
# Count the column types
all_columns = data.columns.values
non_categorical = ["LotFrontage", "LotArea", "MasVnrArea", "BsmtFinSF1", "BsmtFinSF2", "BsmtUnfSF", "TotalBsmtSF",
"1stFlrSF", "2ndFlrSF", "LowQualFinSF", "GrLivArea", "GarageArea", "WoodDeckSF", "OpenPorchSF",
"EnclosedPorch", "3SsnPorch", "ScreenPorch","PoolArea", "MiscVal"]
categorical = [value for value in all_columns if value not in non_categorical]
# One Hot Encoding and nan transformation
data =
|
pd.get_dummies(data)
|
pandas.get_dummies
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
df =
|
pd.read_csv("../input/timesData.csv")
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import requests
# The class name must match the file name, e.g. class SHORTSALE_MARGINPURCHASE_RATIO lives in SHORTSALE_MARGINPURCHASE_RATIO.py
class SHORTSALE_MARGINPURCHASE_RATIO:
def __init__(self,
stock_price,
ShortSaleMarginPurchaseTodayRatioThreshold=0.3,
**kwargs, ):
# -------------------------------------------------------------------
# Do not modify this block
stock_price = stock_price.sort_values('date')
# stock price
self.stock_price = stock_price
# margin purchase / short sale data
self.MarginPurchaseShortSale = kwargs.get("MarginPurchaseShortSale",
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import py2neo
import pandas as pd
import networkx as nx
from scipy import sparse
DATA_DIR = "data/mag"
def get_db():
username = "neo4j"
password = "<PASSWORD>"
uri = "http://localhost:7474"
graph = py2neo.Graph(uri=uri, user=username, password=password)
return graph
def construct_adjacency_matrix(nodes, edges):
# Compute the mapping from node ids to id
max_node_id = np.max(nodes)
node2id = -np.ones(max_node_id + 1)
node2id[nodes] = np.arange(nodes.size)
max_edge_id = np.max(edges[:, :2])
# Remove edges that do not exist in nodes list
edges = edges[(np.max(edges[:, :2], axis=1) <= max_node_id), :]
edges[:, :2] = node2id[edges[:, :2].reshape(-1)].reshape((edges.shape[0], 2))
# Remove edges that do not exist in nodes list again
edges = edges[(np.min(edges[:, :2], axis=1) >= 0), :]
# Adjacency matrix
N = len(nodes)
A = sparse.csr_matrix((edges[:, 2], (edges[:, 0], edges[:, 1])), shape=(N, N))
return A
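# Illustrative example (an addition; the numbers are made up): with
#   nodes = np.array([3, 5, 9])
#   edges = np.array([[3, 5, 1], [5, 9, 2]])  # columns: source, target, weight
# construct_adjacency_matrix(nodes, edges) relabels 3->0, 5->1, 9->2 and
# returns a 3x3 sparse matrix with A[0, 1] = 1 and A[1, 2] = 2.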
def load_network(years, net_data_dir=None):
if hasattr(years, "__len__") == False:
years = [years]
if net_data_dir is None:
net_data_dir = "%s/networks/" % DATA_DIR
# Load the node and edge files
df_nodes = []
df_edges = []
df_raw_edges = []
for year in years:
node_file = "{root}/nodes-{year}.csv".format(
root=net_data_dir, year=year
)
edge_file = "{root}/edges-{year}.csv".format(
root=net_data_dir, year=year
)
raw_edge_file = "{root}/raw-edges-{year}.csv".format(
root=net_data_dir, year=year
)
_df_nodes = pd.read_csv(node_file, sep="\t")
_df_edges = pd.read_csv(edge_file, sep="\t")
_df_raw_edges =
|
pd.read_csv(raw_edge_file, sep="\t")
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 26 09:12:51 2017
@author: RunNing
"""
import pandas as pd
from pandas import DataFrame as df
def join_data(dataset='train'):
"""
Join data for training/testing
Join data from different csv files for training/testing
and write joined data to local disk.
Parameters
----------
dataset: {'train', 'test'}
Raises
------
ValueError
when `dataset` is not in ['train', 'test'].
"""
if (dataset != 'train' and dataset != 'test'):
raise ValueError("`dataset` must be 'train' or 'test'")
if dataset == 'train':
ori_data = pd.read_csv('../data/train.csv')
else:
ori_data = pd.read_csv('../data/test.csv')
user = pd.read_csv('../data/user.csv')
position = pd.read_csv('../data/position.csv')
ad =
|
pd.read_csv('../data/ad.csv')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
import pdb,importlib,inspect,time,datetime,json
# from PyFin.api import advanceDateByCalendar
# from data.polymerize import DBPolymerize
from data.storage_engine import StorageEngine
import time
import pandas as pd
import numpy as np
from datetime import datetime
from financial import factor_cash_flow
from data.model import BalanceTTM
from data.model import CashFlowTTM, CashFlowReport
from data.model import IncomeReport, IncomeTTM
from vision.table.valuation import Valuation
from vision.db.signletion_engine import *
from data.sqlengine import sqlEngine
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# from ultron.cluster.invoke.cache_data import cache_data
class CalcEngine(object):
def __init__(self, name, url, methods=[{'packet':'financial.factor_cash_flow','class':'FactorCashFlow'},]):
self._name = name
self._methods = methods
self._url = url
def _func_sets(self, method):
# filter out private and protected functions
return list(filter(lambda x: not x.startswith('_') and callable(getattr(method,x)), dir(method)))
def loading_data(self, trade_date):
"""
Fetch the base data.
Fetch, per trading day, the base data of every stock for that day.
:param trade_date: trading day
:return:
"""
# convert the date format
time_array = datetime.strptime(trade_date, "%Y-%m-%d")
trade_date = datetime.strftime(time_array, '%Y%m%d')
# read the factors currently involved
columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']
# report data
engine = sqlEngine()
cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowReport,
[CashFlowReport.MANANETR, # net cash flow from operating activities
CashFlowReport.LABORGETCASH, # cash received from selling goods and rendering services
], dates=[trade_date]).drop(columns, axis=1)
income_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeReport,
[IncomeReport.BIZINCO, # operating revenue
IncomeReport.BIZTOTCOST, # total operating cost
IncomeReport.BIZTOTINCO, # total operating revenue
], dates=[trade_date]).drop(columns, axis=1)
tp_cash_flow = pd.merge(cash_flow_sets, income_sets, on="security_code")
tp_cash_flow = tp_cash_flow.rename(columns={'MANANETR': 'net_operate_cash_flow', # net cash flow from operating activities
'LABORGETCASH': 'goods_sale_and_service_render_cash', # cash received from selling goods and rendering services
'BIZINCO': 'operating_revenue', # operating revenue
'BIZTOTINCO': 'total_operating_revenue', # total operating revenue
'BIZTOTCOST': 'total_operating_cost', # total operating cost
})
# ttm data
balance_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(BalanceTTM,
[BalanceTTM.TOTLIAB, # total liabilities
BalanceTTM.SHORTTERMBORR, # short-term borrowings
BalanceTTM.LONGBORR, # long-term borrowings
BalanceTTM.TOTALCURRLIAB, # total current liabilities
BalanceTTM.TOTCURRASSET, # total current assets
BalanceTTM.TOTASSET, # total assets
],
dates=[trade_date]).drop(columns, axis=1)
cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(CashFlowTTM,
[CashFlowTTM.MANANETR, # net cash flow from operating activities
CashFlowTTM.FINALCASHBALA, # cash and cash equivalents at end of period
CashFlowTTM.LABORGETCASH, # cash received from selling goods and rendering services
],
dates=[trade_date]).drop(columns, axis=1)
income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(IncomeTTM,
[IncomeTTM.BIZTOTCOST, # total operating cost
IncomeTTM.BIZINCO, # operating revenue
IncomeTTM.BIZTOTINCO, # total operating revenue
IncomeTTM.NETPROFIT, # net profit
IncomeTTM.PARENETP, # net profit attributable to owners of the parent company
IncomeTTM.PERPROFIT, # operating profit
],
dates=[trade_date]).drop(columns, axis=1)
ttm_cash_flow = pd.merge(balance_ttm_sets, cash_flow_ttm_sets, on="security_code")
ttm_cash_flow = pd.merge(income_ttm_sets, ttm_cash_flow, on="security_code")
ttm_cash_flow = ttm_cash_flow.rename(columns={'MANANETR': 'net_operate_cash_flow', # net cash flow from operating activities
'BIZINCO': 'operating_revenue', # operating revenue
'BIZTOTINCO': 'total_operating_revenue', # total operating revenue
'BIZTOTCOST': 'total_operating_cost', # total operating cost
'NETPROFIT': 'net_profit', # net profit
'PARENETP': 'np_parent_company_owners', # net profit attributable to owners of the parent company
'TOTLIAB': 'total_liability', # total liabilities
'SHORTTERMBORR': 'shortterm_loan', # short-term borrowings
'LONGBORR': 'longterm_loan', # long-term borrowings
'TOTALCURRLIAB': 'total_current_liability', # total current liabilities
'LABORGETCASH': 'goods_sale_and_service_render_cash', # cash received from selling goods and rendering services
# 'NDEBT':'net_liability', # net liabilities
'TOTCURRASSET': 'total_current_assets', # total current assets
'TOTASSET': 'total_assets', # total assets
'FINALCASHBALA': 'cash_and_equivalents_at_end', # cash and cash equivalents at end of period
'PERPROFIT': 'operating_profit', # operating profit
})
column = ['trade_date']
valuation_sets = get_fundamentals(query(Valuation.security_code,
Valuation.trade_date,
Valuation.market_cap, )
.filter(Valuation.trade_date.in_([trade_date]))).drop(column, axis=1)
ttm_cash_flow =
|
pd.merge(ttm_cash_flow, valuation_sets, how='outer', on='security_code')
|
pandas.merge
|
from functools import reduce
from time import time
import logging
import numpy as np
import pandas as pd
from nltk.tokenize import word_tokenize, RegexpTokenizer
from nltk.stem.snowball import RussianStemmer
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
#from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
import joblib
def read_input_data_set(filename):
# read CSV into a DF
df = pd.read_csv("test.csv")
# make DF bigger: 3 rows -> 15 rows
return pd.concat([df], ignore_index=True)
# tokenize and stem text
def normailize_text(
data,
tok=RegexpTokenizer(r'\w[\w\/\-]+'),
stemmers=[RussianStemmer(ignore_stopwords=True), PorterStemmer()]
):
# tokenize text into words
# sequentially apply all stemmers to tokenized words
# join stemmed words back to sentences
return [' '.join([reduce(lambda v,f: f.stem(v), stemmers, w) for w in tok.tokenize(line)])
for line in data]
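# Illustrative call (an added sketch): normailize_text(['Платежи по картам',
# 'Card payments']) tokenizes each line, runs the Russian and Porter stemmers
# over every token, and returns the stemmed tokens re-joined into strings.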
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# read CSV into a DF
df = pd.read_csv("test.csv")
print('Input Data Set:')
print(df)
print()
# word tokenizer
tok = RegexpTokenizer(r'\w[\w\/\-]+')
en = PorterStemmer()
ru = RussianStemmer(ignore_stopwords=True)
data = normailize_text(df['Name'].values.tolist(), tok=tok, stemmers=[ru,en])
pipeline = Pipeline([
('tfidf', TfidfVectorizer()),
('clf', SGDClassifier()),
])
parameters = {
#'tfidf__max_df': (0.5, 0.75, 1.0),
'tfidf__max_features': (None, 10000, 50000, 100000),
#'tfidf__stop_words': ['russian','english'],
'tfidf__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
'clf__alpha': np.logspace(-7, 2, 10),
'clf__penalty': ('l2', 'elasticnet'),
'clf__max_iter': (1000, 5000, 10000, 100000),
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, cv=3, verbose=1)
# train model
t0 = time()
grid_search.fit(data, df['Code'])
print("done in %0.3fs" % (time() - t0))
print()
print('best parameters:')
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
print()
best_parameters = grid_search.best_estimator_.get_params()
joblib.dump(grid_search, 'gs_object.pkl')
test =
|
pd.DataFrame({'Name':['Как активировать карту', 'заблокировать карту', 'платежи по карте']})
|
pandas.DataFrame
|
from io import StringIO
import re
from string import ascii_uppercase as uppercase
import sys
import textwrap
import numpy as np
import pytest
from pandas.compat import (
IS64,
PYPY,
)
from pandas import (
CategoricalIndex,
DataFrame,
MultiIndex,
Series,
date_range,
option_context,
)
@pytest.fixture
def duplicate_columns_frame():
"""Dataframe with duplicate column names."""
return DataFrame(np.random.randn(1500, 4), columns=["a", "a", "b", "b"])
def test_info_empty():
# GH #45494
df = DataFrame()
buf = StringIO()
df.info(buf=buf)
result = buf.getvalue()
expected = textwrap.dedent(
"""\
<class 'pandas.core.frame.DataFrame'>
Index: 0 entries
Empty DataFrame\n"""
)
assert result == expected
def test_info_categorical_column_smoke_test():
n = 2500
df = DataFrame({"int64": np.random.randint(100, size=n)})
df["category"] = Series(
np.array(list("abcdefghij")).take(np.random.randint(0, 10, size=n))
).astype("category")
df.isna()
buf = StringIO()
df.info(buf=buf)
df2 = df[df["category"] == "d"]
buf = StringIO()
df2.info(buf=buf)
@pytest.mark.parametrize(
"fixture_func_name",
[
"int_frame",
"float_frame",
"datetime_frame",
"duplicate_columns_frame",
],
)
def test_info_smoke_test(fixture_func_name, request):
frame = request.getfixturevalue(fixture_func_name)
buf = StringIO()
frame.info(buf=buf)
result = buf.getvalue().splitlines()
assert len(result) > 10
@pytest.mark.parametrize(
"num_columns, max_info_columns, verbose",
[
(10, 100, True),
(10, 11, True),
(10, 10, True),
(10, 9, False),
(10, 1, False),
],
)
def test_info_default_verbose_selection(num_columns, max_info_columns, verbose):
frame = DataFrame(np.random.randn(5, num_columns))
with option_context("display.max_info_columns", max_info_columns):
io_default = StringIO()
frame.info(buf=io_default)
result = io_default.getvalue()
io_explicit = StringIO()
frame.info(buf=io_explicit, verbose=verbose)
expected = io_explicit.getvalue()
assert result == expected
def test_info_verbose_check_header_separator_body():
buf = StringIO()
size = 1001
start = 5
frame = DataFrame(np.random.randn(3, size))
frame.info(verbose=True, buf=buf)
res = buf.getvalue()
header = " # Column Dtype \n--- ------ ----- "
assert header in res
frame.info(verbose=True, buf=buf)
buf.seek(0)
lines = buf.readlines()
assert len(lines) > 0
for i, line in enumerate(lines):
if i >= start and i < start + size:
line_nr = f" {i - start} "
assert line.startswith(line_nr)
@pytest.mark.parametrize(
"size, header_exp, separator_exp, first_line_exp, last_line_exp",
[
(
4,
" # Column Non-Null Count Dtype ",
"--- ------ -------------- ----- ",
" 0 0 3 non-null float64",
" 3 3 3 non-null float64",
),
(
11,
" # Column Non-Null Count Dtype ",
"--- ------ -------------- ----- ",
" 0 0 3 non-null float64",
" 10 10 3 non-null float64",
),
(
101,
" # Column Non-Null Count Dtype ",
"--- ------ -------------- ----- ",
" 0 0 3 non-null float64",
" 100 100 3 non-null float64",
),
(
1001,
" # Column Non-Null Count Dtype ",
"--- ------ -------------- ----- ",
" 0 0 3 non-null float64",
" 1000 1000 3 non-null float64",
),
(
10001,
" # Column Non-Null Count Dtype ",
"--- ------ -------------- ----- ",
" 0 0 3 non-null float64",
" 10000 10000 3 non-null float64",
),
],
)
def test_info_verbose_with_counts_spacing(
size, header_exp, separator_exp, first_line_exp, last_line_exp
):
"""Test header column, spacer, first line and last line in verbose mode."""
frame = DataFrame(np.random.randn(3, size))
with StringIO() as buf:
frame.info(verbose=True, show_counts=True, buf=buf)
all_lines = buf.getvalue().splitlines()
# Here table would contain only header, separator and table lines
# dframe repr, index summary, memory usage and dtypes are excluded
table = all_lines[3:-2]
header, separator, first_line, *rest, last_line = table
assert header == header_exp
assert separator == separator_exp
assert first_line == first_line_exp
assert last_line == last_line_exp
def test_info_memory():
# https://github.com/pandas-dev/pandas/issues/21056
df = DataFrame({"a": Series([1, 2], dtype="i8")})
buf = StringIO()
df.info(buf=buf)
result = buf.getvalue()
bytes = float(df.memory_usage().sum())
expected = textwrap.dedent(
f"""\
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2 entries, 0 to 1
Data columns (total 1 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 2 non-null int64
dtypes: int64(1)
memory usage: {bytes} bytes
"""
)
assert result == expected
def test_info_wide():
io = StringIO()
df = DataFrame(np.random.randn(5, 101))
df.info(buf=io)
io = StringIO()
df.info(buf=io, max_cols=101)
result = io.getvalue()
assert len(result.splitlines()) > 100
expected = result
with option_context("display.max_info_columns", 101):
io = StringIO()
df.info(buf=io)
result = io.getvalue()
assert result == expected
def test_info_duplicate_columns_shows_correct_dtypes():
# GH11761
io = StringIO()
frame = DataFrame([[1, 2.0]], columns=["a", "a"])
frame.info(buf=io)
lines = io.getvalue().splitlines(True)
assert " 0 a 1 non-null int64 \n" == lines[5]
assert " 1 a 1 non-null float64\n" == lines[6]
def test_info_shows_column_dtypes():
dtypes = [
"int64",
"float64",
"datetime64[ns]",
"timedelta64[ns]",
"complex128",
"object",
"bool",
]
data = {}
n = 10
for i, dtype in enumerate(dtypes):
data[i] = np.random.randint(2, size=n).astype(dtype)
df = DataFrame(data)
buf = StringIO()
df.info(buf=buf)
res = buf.getvalue()
header = (
" # Column Non-Null Count Dtype \n"
"--- ------ -------------- ----- "
)
assert header in res
for i, dtype in enumerate(dtypes):
name = f" {i:d} {i:d} {n:d} non-null {dtype}"
assert name in res
def test_info_max_cols():
df = DataFrame(np.random.randn(10, 5))
for len_, verbose in [(5, None), (5, False), (12, True)]:
# For verbose always ^ setting ^ summarize ^ full output
with option_context("max_info_columns", 4):
buf = StringIO()
df.info(buf=buf, verbose=verbose)
res = buf.getvalue()
assert len(res.strip().split("\n")) == len_
for len_, verbose in [(12, None), (5, False), (12, True)]:
# max_cols not exceeded
with option_context("max_info_columns", 5):
buf = StringIO()
df.info(buf=buf, verbose=verbose)
res = buf.getvalue()
assert len(res.strip().split("\n")) == len_
for len_, max_cols in [(12, 5), (5, 4)]:
# setting truncates
with option_context("max_info_columns", 4):
buf = StringIO()
df.info(buf=buf, max_cols=max_cols)
res = buf.getvalue()
assert len(res.strip().split("\n")) == len_
# setting wouldn't truncate
with option_context("max_info_columns", 5):
buf = StringIO()
df.info(buf=buf, max_cols=max_cols)
res = buf.getvalue()
assert len(res.strip().split("\n")) == len_
def test_info_memory_usage():
# Ensure memory usage is displayed, when asserted, on the last line
dtypes = [
"int64",
"float64",
"datetime64[ns]",
"timedelta64[ns]",
"complex128",
"object",
"bool",
]
data = {}
n = 10
for i, dtype in enumerate(dtypes):
data[i] = np.random.randint(2, size=n).astype(dtype)
df = DataFrame(data)
buf = StringIO()
# display memory usage case
df.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
assert "memory usage: " in res[-1]
# do not display memory usage case
df.info(buf=buf, memory_usage=False)
res = buf.getvalue().splitlines()
assert "memory usage: " not in res[-1]
df.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
# memory usage is a lower bound, so print it as XYZ+ MB
assert re.match(r"memory usage: [^+]+\+", res[-1])
df.iloc[:, :5].info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
# excluded column with object dtype, so estimate is accurate
assert not re.match(r"memory usage: [^+]+\+", res[-1])
# Test a DataFrame with duplicate columns
dtypes = ["int64", "int64", "int64", "float64"]
data = {}
n = 100
for i, dtype in enumerate(dtypes):
data[i] = np.random.randint(2, size=n).astype(dtype)
df = DataFrame(data)
df.columns = dtypes
df_with_object_index = DataFrame({"a": [1]}, index=["foo"])
df_with_object_index.info(buf=buf, memory_usage=True)
res = buf.getvalue().splitlines()
assert re.match(r"memory usage: [^+]+\+", res[-1])
df_with_object_index.info(buf=buf, memory_usage="deep")
res = buf.getvalue().splitlines()
assert re.match(r"memory usage: [^+]+$", res[-1])
# Ensure df size is as expected
# (cols * rows * bytes) + index size
df_size = df.memory_usage().sum()
exp_size = len(dtypes) * n * 8 + df.index.nbytes
assert df_size == exp_size
# Ensure number of cols in memory_usage is the same as df
size_df = np.size(df.columns.values) + 1 # index=True; default
assert size_df == np.size(df.memory_usage())
# assert deep works only on object
assert df.memory_usage().sum() == df.memory_usage(deep=True).sum()
# test for validity
DataFrame(1, index=["a"], columns=["A"]).memory_usage(index=True)
|
DataFrame(1, index=["a"], columns=["A"])
|
pandas.DataFrame
|
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import datetime
from datetime import timedelta
from functools import partial
from textwrap import dedent
from copy import deepcopy
import logbook
import toolz
from logbook import TestHandler, WARNING
from parameterized import parameterized
from six import iteritems, itervalues, string_types
from six.moves import range
from testfixtures import TempDirectory
import numpy as np
import pandas as pd
import pytz
from pandas.errors import PerformanceWarning
from trading_calendars import get_calendar, register_calendar
import zipline.api
from zipline.api import FixedSlippage
from zipline.assets import Equity, Future, Asset
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.assets.synthetic import (
make_jagged_equity_info,
make_simple_equity_info,
)
from zipline.errors import (
AccountControlViolation,
CannotOrderDelistedAsset,
IncompatibleSlippageModel,
RegisterTradingControlPostInit,
ScheduleFunctionInvalidCalendar,
SetCancelPolicyPostInit,
SymbolNotFound,
TradingControlViolation,
UnsupportedCancelPolicy,
UnsupportedDatetimeFormat,
ZeroCapitalError
)
from zipline.finance.commission import PerShare, PerTrade
from zipline.finance.execution import LimitOrder
from zipline.finance.order import ORDER_STATUS
from zipline.finance.trading import SimulationParameters
from zipline.finance.asset_restrictions import (
Restriction,
HistoricalRestrictions,
StaticRestrictions,
RESTRICTION_STATES,
)
from zipline.finance.controls import AssetDateBounds
from zipline.testing import (
FakeDataPortal,
create_daily_df_for_asset,
create_data_portal_from_trade_history,
create_minute_df_for_asset,
make_test_handler,
make_trade_data_for_asset_info,
parameter_space,
str_to_seconds,
to_utc,
)
from zipline.testing import RecordBatchBlotter
import zipline.testing.fixtures as zf
from zipline.test_algorithms import (
access_account_in_init,
access_portfolio_in_init,
api_algo,
api_get_environment_algo,
api_symbol_algo,
handle_data_api,
handle_data_noop,
initialize_api,
initialize_noop,
noop_algo,
record_float_magic,
record_variables,
call_with_kwargs,
call_without_kwargs,
call_with_bad_kwargs_current,
call_with_bad_kwargs_history,
bad_type_history_assets,
bad_type_history_fields,
bad_type_history_bar_count,
bad_type_history_frequency,
bad_type_history_assets_kwarg_list,
bad_type_current_assets,
bad_type_current_fields,
bad_type_can_trade_assets,
bad_type_is_stale_assets,
bad_type_history_assets_kwarg,
bad_type_history_fields_kwarg,
bad_type_history_bar_count_kwarg,
bad_type_history_frequency_kwarg,
bad_type_current_assets_kwarg,
bad_type_current_fields_kwarg,
call_with_bad_kwargs_get_open_orders,
call_with_good_kwargs_get_open_orders,
call_with_no_kwargs_get_open_orders,
empty_positions,
no_handle_data,
)
from zipline.testing.predicates import assert_equal
from zipline.utils.api_support import ZiplineAPI
from zipline.utils.context_tricks import CallbackManager, nop_context
from zipline.utils.events import (
date_rules,
time_rules,
Always,
ComposedRule,
Never,
OncePerDay,
)
import zipline.utils.factory as factory
# Because test cases appear to reuse some resources.
_multiprocess_can_split_ = False
class TestRecord(zf.WithMakeAlgo, zf.ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (133,)
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
def test_record_incr(self):
def initialize(self):
self.incr = 0
def handle_data(self, data):
self.incr += 1
self.record(incr=self.incr)
name = 'name'
self.record(name, self.incr)
zipline.api.record(name, self.incr, 'name2', 2, name3=self.incr)
output = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
np.testing.assert_array_equal(output['incr'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name2'].values,
[2] * len(output))
np.testing.assert_array_equal(output['name3'].values,
range(1, len(output) + 1))
class TestMiscellaneousAPI(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'minute'
sids = 1, 2
# FIXME: Pass a benchmark source instead of this.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
return pd.concat((
make_simple_equity_info(cls.sids, '2002-02-1', '2007-01-01'),
pd.DataFrame.from_dict(
{3: {'symbol': 'PLAY',
'start_date': '2002-01-01',
'end_date': '2004-01-01',
'exchange': 'TEST'},
4: {'symbol': 'PLAY',
'start_date': '2005-01-01',
'end_date': '2006-01-01',
'exchange': 'TEST'}},
orient='index',
),
))
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
5: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'exchange': 'TEST'
},
6: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC'),
'exchange': 'TEST',
},
7: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC'),
'exchange': 'TEST',
},
8: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC'),
'exchange': 'TEST',
}
},
orient='index',
)
def test_cancel_policy_outside_init(self):
code = """
from zipline.api import cancel_policy, set_cancel_policy
def initialize(algo):
pass
def handle_data(algo, data):
set_cancel_policy(cancel_policy.NeverCancel())
"""
algo = self.make_algo(script=code)
with self.assertRaises(SetCancelPolicyPostInit):
algo.run()
def test_cancel_policy_invalid_param(self):
code = """
from zipline.api import set_cancel_policy
def initialize(algo):
set_cancel_policy("foo")
def handle_data(algo, data):
pass
"""
algo = self.make_algo(script=code)
with self.assertRaises(UnsupportedCancelPolicy):
algo.run()
def test_zipline_api_resolves_dynamically(self):
# Make a dummy algo.
algo = self.make_algo(
initialize=lambda context: None,
handle_data=lambda context, data: None,
)
# Verify that api methods get resolved dynamically by patching them out
# and then calling them
for method in algo.all_api_methods():
name = method.__name__
sentinel = object()
def fake_method(*args, **kwargs):
return sentinel
setattr(algo, name, fake_method)
with ZiplineAPI(algo):
self.assertIs(sentinel, getattr(zipline.api, name)())
def test_sid_datetime(self):
algo_text = """
from zipline.api import sid, get_datetime
def initialize(context):
pass
def handle_data(context, data):
aapl_dt = data.current(sid(1), "last_traded")
assert_equal(aapl_dt, get_datetime())
"""
self.run_algorithm(
script=algo_text,
namespace={'assert_equal': self.assertEqual},
)
def test_datetime_bad_params(self):
algo_text = """
from zipline.api import get_datetime
from pytz import timezone
def initialize(context):
pass
def handle_data(context, data):
get_datetime(timezone)
"""
algo = self.make_algo(script=algo_text)
with self.assertRaises(TypeError):
algo.run()
@parameterized.expand([
(-1000, 'invalid_base'),
(0, 'invalid_base'),
])
def test_invalid_capital_base(self, cap_base, name):
"""
Test that the appropriate error is being raised and orders aren't
filled for algos with capital base <= 0
"""
algo_text = """
def initialize(context):
pass
def handle_data(context, data):
order(sid(24), 1000)
"""
sim_params = SimulationParameters(
start_session=pd.Timestamp("2006-01-04", tz='UTC'),
end_session=pd.Timestamp("2006-01-06", tz='UTC'),
capital_base=cap_base,
data_frequency="minute",
trading_calendar=self.trading_calendar
)
with self.assertRaises(ZeroCapitalError) as exc:
# make_algo will trace to TradingAlgorithm,
# where the exception will be raised
self.make_algo(script=algo_text, sim_params=sim_params)
# Make sure the correct error was raised
error = exc.exception
self.assertEqual(str(error),
'initial capital base must be greater than zero')
def test_get_environment(self):
expected_env = {
'arena': 'backtest',
'data_frequency': 'minute',
'start': pd.Timestamp('2006-01-04 14:31:00+0000', tz='utc'),
'end': pd.Timestamp('2006-01-05 21:00:00+0000', tz='utc'),
'capital_base': 100000.0,
'platform': 'zipline'
}
def initialize(algo):
self.assertEqual('zipline', algo.get_environment())
self.assertEqual(expected_env, algo.get_environment('*'))
def handle_data(algo, data):
pass
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_get_open_orders(self):
def initialize(algo):
algo.minute = 0
def handle_data(algo, data):
if algo.minute == 0:
# Should be filled by the next minute
algo.order(algo.sid(1), 1)
# Won't be filled because the price is too low.
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [1, 2])
self.assertEqual(all_orders[1], algo.get_open_orders(1))
self.assertEqual(len(all_orders[1]), 1)
self.assertEqual(all_orders[2], algo.get_open_orders(2))
self.assertEqual(len(all_orders[2]), 3)
if algo.minute == 1:
# First order should have filled.
# Second order should still be open.
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [2])
self.assertEqual([], algo.get_open_orders(1))
orders_2 = algo.get_open_orders(2)
self.assertEqual(all_orders[2], orders_2)
self.assertEqual(len(all_orders[2]), 3)
for order_ in orders_2:
algo.cancel_order(order_)
all_orders = algo.get_open_orders()
self.assertEqual(all_orders, {})
algo.minute += 1
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_schedule_function_custom_cal(self):
# run a simulation on the XSHG cal, and schedule a function
# using the CN_EQUITIES cal
algotext = """
from zipline.api import (
schedule_function, get_datetime, time_rules, date_rules, calendars,
)
def initialize(context):
schedule_function(
func=log_nyse_open,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
calendar=calendars.CN_EQUITIES,
)
schedule_function(
func=log_nyse_close,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_close(),
calendar=calendars.CN_EQUITIES,
)
context.nyse_opens = []
context.nyse_closes = []
def log_nyse_open(context, data):
context.nyse_opens.append(get_datetime())
def log_nyse_close(context, data):
context.nyse_closes.append(get_datetime())
"""
algo = self.make_algo(
script=algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("XSHG"),
)
)
algo.run()
nyse = get_calendar("XSHG")
for minute in algo.nyse_opens:
# each minute should be a nyse session open
session_label = nyse.minute_to_session_label(minute)
session_open = nyse.session_open(session_label)
self.assertEqual(session_open, minute)
for minute in algo.nyse_closes:
# each minute should be a minute before a nyse session close
session_label = nyse.minute_to_session_label(minute)
session_close = nyse.session_close(session_label)
self.assertEqual(session_close - timedelta(minutes=1), minute)
# Test that passing an invalid calendar parameter raises an error.
erroring_algotext = dedent(
"""
from zipline.api import schedule_function
from trading_calendars import get_calendar
def initialize(context):
schedule_function(func=my_func, calendar=get_calendar('XNYS'))
def my_func(context, data):
pass
"""
)
algo = self.make_algo(
script=erroring_algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("CMES"),
),
)
with self.assertRaises(ScheduleFunctionInvalidCalendar):
algo.run()
def test_schedule_function(self):
us_eastern = pytz.timezone('US/Eastern')
def incrementer(algo, data):
algo.func_called += 1
curdt = algo.get_datetime().tz_convert(pytz.utc)
self.assertEqual(
curdt,
us_eastern.localize(
datetime.datetime.combine(
curdt.date(),
datetime.time(9, 31)
),
),
)
def initialize(algo):
algo.func_called = 0
algo.days = 1
algo.date = None
algo.schedule_function(
func=incrementer,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
)
def handle_data(algo, data):
if not algo.date:
algo.date = algo.get_datetime().date()
if algo.date < algo.get_datetime().date():
algo.days += 1
algo.date = algo.get_datetime().date()
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
)
algo.run()
self.assertEqual(algo.func_called, algo.days)
def test_event_context(self):
expected_data = []
collected_data_pre = []
collected_data_post = []
function_stack = []
def pre(data):
function_stack.append(pre)
collected_data_pre.append(data)
def post(data):
function_stack.append(post)
collected_data_post.append(data)
def initialize(context):
context.add_event(Always(), f)
context.add_event(Always(), g)
def handle_data(context, data):
function_stack.append(handle_data)
expected_data.append(data)
def f(context, data):
function_stack.append(f)
def g(context, data):
function_stack.append(g)
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
create_event_context=CallbackManager(pre, post),
)
algo.run()
self.assertEqual(len(expected_data), 480)
self.assertEqual(collected_data_pre, expected_data)
self.assertEqual(collected_data_post, expected_data)
self.assertEqual(
len(function_stack),
2400,
'Incorrect number of functions called: %s != 2400' %
len(function_stack),
)
expected_functions = [pre, handle_data, f, g, post] * 480
for n, (f, g) in enumerate(zip(function_stack, expected_functions)):
self.assertEqual(
f,
g,
'function at position %d was incorrect, expected %s but got %s'
% (n, g.__name__, f.__name__),
)
@parameterized.expand([
('daily',),
('minute',),
])
def test_schedule_function_rule_creation(self, mode):
def nop(*args, **kwargs):
return None
self.sim_params.data_frequency = mode
algo = self.make_algo(
initialize=nop,
handle_data=nop,
sim_params=self.sim_params,
)
# Schedule something for NOT Always.
# Compose two rules to ensure calendar is set properly.
algo.schedule_function(nop, time_rule=Never() & Always())
event_rule = algo.event_manager._events[1].rule
self.assertIsInstance(event_rule, OncePerDay)
self.assertEqual(event_rule.cal, algo.trading_calendar)
inner_rule = event_rule.rule
self.assertIsInstance(inner_rule, ComposedRule)
self.assertEqual(inner_rule.cal, algo.trading_calendar)
first = inner_rule.first
second = inner_rule.second
composer = inner_rule.composer
self.assertIsInstance(first, Always)
self.assertEqual(first.cal, algo.trading_calendar)
self.assertEqual(second.cal, algo.trading_calendar)
if mode == 'daily':
self.assertIsInstance(second, Always)
else:
self.assertIsInstance(second, ComposedRule)
self.assertIsInstance(second.first, Never)
self.assertEqual(second.first.cal, algo.trading_calendar)
self.assertIsInstance(second.second, Always)
self.assertEqual(second.second.cal, algo.trading_calendar)
self.assertIs(composer, ComposedRule.lazy_and)
def test_asset_lookup(self):
algo = self.make_algo()
# this date doesn't matter
start_session = pd.Timestamp("2000-01-01", tz="UTC")
# Test before either PLAY existed
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2001-12-01', tz='UTC')
)
with self.assertRaises(SymbolNotFound):
algo.symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.symbols('PLAY')
# Test when first PLAY exists
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2002-12-01', tz='UTC')
)
list_result = algo.symbols('PLAY')
self.assertEqual(3, list_result[0])
# Test after first PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2004-12-01', tz='UTC')
)
self.assertEqual(3, algo.symbol('PLAY'))
# Test after second PLAY begins
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2005-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
# Test after second PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2006-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
list_result = algo.symbols('PLAY')
self.assertEqual(4, list_result[0])
# Test lookup SID
self.assertIsInstance(algo.sid(3), Equity)
self.assertIsInstance(algo.sid(4), Equity)
# Supplying a non-string argument to symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.symbol(1)
with self.assertRaises(TypeError):
algo.symbol((1,))
with self.assertRaises(TypeError):
algo.symbol({1})
with self.assertRaises(TypeError):
algo.symbol([1])
with self.assertRaises(TypeError):
algo.symbol({'foo': 'bar'})
def test_future_symbol(self):
""" Tests the future_symbol API function.
"""
algo = self.make_algo()
algo.datetime = pd.Timestamp('2006-12-01', tz='UTC')
# Check that we get the correct fields for the CLG06 symbol
cl = algo.future_symbol('CLG06')
self.assertEqual(cl.sid, 5)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
algo.future_symbol('')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('FOOBAR')
# Supplying a non-string argument to future_symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.future_symbol(1)
with self.assertRaises(TypeError):
algo.future_symbol((1,))
with self.assertRaises(TypeError):
algo.future_symbol({1})
with self.assertRaises(TypeError):
algo.future_symbol([1])
with self.assertRaises(TypeError):
algo.future_symbol({'foo': 'bar'})
class TestSetSymbolLookupDate(zf.WithMakeAlgo, zf.ZiplineTestCase):
# January 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30 31
START_DATE = pd.Timestamp('2006-01-03', tz='UTC')
END_DATE = pd.Timestamp('2006-01-06', tz='UTC')
SIM_PARAMS_START_DATE = pd.Timestamp('2006-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
BENCHMARK_SID = 3
@classmethod
def make_equity_info(cls):
dates = pd.date_range(cls.START_DATE, cls.END_DATE)
assert len(dates) == 4, "Expected four dates."
# Two assets with the same ticker, ending on days[1] and days[3], plus
# a benchmark that spans the whole period.
cls.sids = [1, 2, 3]
cls.asset_starts = [dates[0], dates[2]]
cls.asset_ends = [dates[1], dates[3]]
return pd.DataFrame.from_records([
{'symbol': 'DUP',
'start_date': cls.asset_starts[0],
'end_date': cls.asset_ends[0],
'exchange': 'TEST',
'asset_name': 'FIRST'},
{'symbol': 'DUP',
'start_date': cls.asset_starts[1],
'end_date': cls.asset_ends[1],
'exchange': 'TEST',
'asset_name': 'SECOND'},
{'symbol': 'BENCH',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE,
'exchange': 'TEST',
'asset_name': 'BENCHMARK'},
], index=cls.sids)
def test_set_symbol_lookup_date(self):
"""
Test the set_symbol_lookup_date API method.
"""
set_symbol_lookup_date = zipline.api.set_symbol_lookup_date
def initialize(context):
set_symbol_lookup_date(self.asset_ends[0])
self.assertEqual(zipline.api.symbol('DUP').sid, self.sids[0])
set_symbol_lookup_date(self.asset_ends[1])
self.assertEqual(zipline.api.symbol('DUP').sid, self.sids[1])
with self.assertRaises(UnsupportedDatetimeFormat):
set_symbol_lookup_date('foobar')
self.run_algorithm(initialize=initialize)
class TestPositions(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2020-09-01', tz='utc')
END_DATE = pd.Timestamp('2020-09-04', tz='utc')
SIM_PARAMS_CAPITAL_BASE = 1000
ASSET_FINDER_EQUITY_SIDS = (1, 133)
SIM_PARAMS_DATA_FREQUENCY = 'daily'
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
frame = pd.DataFrame(
{
'open': [90, 95, 100, 105],
'high': [90, 95, 100, 105],
'low': [90, 95, 100, 105],
'close': [90, 95, 100, 105],
'volume': 100,
},
index=cls.equity_daily_bar_days,
)
return ((sid, frame) for sid in sids)
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
1000: {
'symbol': 'CLF06',
'root_symbol': 'CL',
'start_date': cls.START_DATE,
'end_date': cls.END_DATE,
'auto_close_date': cls.END_DATE + cls.trading_calendar.day,
'exchange': 'CMES',
'multiplier': 100,
},
},
orient='index',
)
@classmethod
def make_future_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Future]
sids = cls.asset_finder.futures_sids
minutes = trading_calendar.minutes_for_sessions_in_range(
cls.future_minute_bar_days[0],
cls.future_minute_bar_days[-1],
)
frame = pd.DataFrame(
{
'open': 2.0,
'high': 2.0,
'low': 2.0,
'close': 2.0,
'volume': 100,
},
index=minutes,
)
return ((sid, frame) for sid in sids)
def test_portfolio_exited_position(self):
# This test ensures that 'phantom' positions do not appear in
# context.portfolio.positions in the case that a position has been
# entered and fully exited.
def initialize(context, sids):
context.ordered = False
context.exited = False
context.sids = sids
def handle_data(context, data):
if not context.ordered:
for s in context.sids:
context.order(context.sid(s), 1)
context.ordered = True
if not context.exited:
amounts = [pos.amount for pos
in itervalues(context.portfolio.positions)]
if (
len(amounts) > 0 and
all([(amount == 1) for amount in amounts])
):
for stock in context.portfolio.positions:
context.order(context.sid(stock), -1)
context.exited = True
# Should be 0 when all positions are exited.
context.record(num_positions=len(context.portfolio.positions))
result = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
sids=self.ASSET_FINDER_EQUITY_SIDS,
)
expected_position_count = [
0, # Before entering the first position
2, # After entering, exiting on this date
0, # After exiting
0,
]
for i, expected in enumerate(expected_position_count):
self.assertEqual(result.iloc[i,:]['num_positions'], expected)
def test_noop_orders(self):
asset = self.asset_finder.retrieve_asset(1)
# Algorithm that tries to buy with extremely low stops/limits and tries
# to sell with extremely high versions of same. Should not end up with
# any positions for reasonable data.
def handle_data(algo, data):
########
# Buys #
########
# Buy with low limit, shouldn't trigger.
algo.order(asset, 100, limit_price=1)
# But with high stop, shouldn't trigger
algo.order(asset, 100, stop_price=10000000)
# Buy with high limit (should trigger) but also high stop (should
# prevent trigger).
algo.order(asset, 100, limit_price=10000000, stop_price=10000000)
# Buy with low stop (should trigger), but also low limit (should
# prevent trigger).
algo.order(asset, 100, limit_price=1, stop_price=1)
#########
# Sells #
#########
# Sell with high limit, shouldn't trigger.
algo.order(asset, -100, limit_price=1000000)
# Sell with low stop, shouldn't trigger.
algo.order(asset, -100, stop_price=1)
# Sell with low limit (should trigger), but also high stop (should
# prevent trigger).
algo.order(asset, -100, limit_price=1000000, stop_price=1000000)
# Sell with low limit (should trigger), but also low stop (should
# prevent trigger).
algo.order(asset, -100, limit_price=1, stop_price=1)
###################
# Rounding Checks #
###################
algo.order(asset, 100, limit_price=.00000001)
algo.order(asset, -100, stop_price=.00000001)
daily_stats = self.run_algorithm(handle_data=handle_data)
# Verify that positions are empty for all dates.
empty_positions = daily_stats.positions.map(lambda x: len(x) == 0)
self.assertTrue(empty_positions.all())
def test_position_weights(self):
sids = (1, 133, 1000)
equity_1, equity_133, future_1000 = \
self.asset_finder.retrieve_all(sids)
def initialize(algo, sids_and_amounts, *args, **kwargs):
algo.ordered = False
algo.sids_and_amounts = sids_and_amounts
algo.set_commission(
us_equities=PerTrade(0), us_futures=PerTrade(0),
)
algo.set_slippage(
us_equities=FixedSlippage(0),
us_futures=FixedSlippage(0),
)
def handle_data(algo, data):
if not algo.ordered:
for s, amount in algo.sids_and_amounts:
algo.order(algo.sid(s), amount)
algo.ordered = True
algo.record(
position_weights=algo.portfolio.current_portfolio_weights,
)
daily_stats = self.run_algorithm(
sids_and_amounts=zip(sids, [2, -1, 1]),
initialize=initialize,
handle_data=handle_data,
)
expected_position_weights = [
# No positions held on the first day.
pd.Series({}),
# Each equity's position value is its price times the number of
# shares held. In this example, we hold a long position in 2 shares
# of equity_1 so its weight is (95.0 * 2) = 190.0 divided by the
# total portfolio value. The total portfolio value is the sum of
# cash ($905.00) plus the value of all equity positions.
#
# For a futures contract, its weight is the unit price times number
# of shares held times the multiplier. For future_1000, this is
# (2.0 * 1 * 100) = 200.0 divided by total portfolio value.
pd.Series({
equity_1: 190.0 / (190.0 - 95.0 + 905.0),
equity_133: -95.0 / (190.0 - 95.0 + 905.0),
future_1000: 200.0 / (190.0 - 95.0 + 905.0),
}),
pd.Series({
equity_1: 200.0 / (200.0 - 100.0 + 905.0),
equity_133: -100.0 / (200.0 - 100.0 + 905.0),
future_1000: 200.0 / (200.0 - 100.0 + 905.0),
}),
pd.Series({
equity_1: 210.0 / (210.0 - 105.0 + 905.0),
equity_133: -105.0 / (210.0 - 105.0 + 905.0),
future_1000: 200.0 / (210.0 - 105.0 + 905.0),
}),
]
for i, expected in enumerate(expected_position_weights):
assert_equal(daily_stats.iloc[i]['position_weights'], expected)
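# --- Illustrative aside, not part of the original test: a minimal sketch of the
# weight arithmetic described in the comments above, assuming day-2 prices of
# 95.0, cash of 905.0 and a futures multiplier of 100 (all taken from the
# expected values above, nothing new).
equity_price, future_unit_price, cash = 95.0, 2.0, 905.0
portfolio_value = cash + 2 * equity_price - 1 * equity_price      # 1000.0
long_weight = (2 * equity_price) / portfolio_value                # 190 / 1000 = 0.19
short_weight = (-1 * equity_price) / portfolio_value              # -95 / 1000 = -0.095
future_weight = (future_unit_price * 1 * 100) / portfolio_value   # 200 / 1000 = 0.20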
class TestBeforeTradingStart(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2016-01-06', tz='utc')
END_DATE = pd.Timestamp('2016-01-07', tz='utc')
SIM_PARAMS_CAPITAL_BASE = 10000
SIM_PARAMS_DATA_FREQUENCY = 'minute'
EQUITY_DAILY_BAR_LOOKBACK_DAYS = EQUITY_MINUTE_BAR_LOOKBACK_DAYS = 1
DATA_PORTAL_FIRST_TRADING_DAY = pd.Timestamp("2016-01-05", tz='UTC')
EQUITY_MINUTE_BAR_START_DATE = pd.Timestamp("2016-01-05", tz='UTC')
FUTURE_MINUTE_BAR_START_DATE = pd.Timestamp("2016-01-05", tz='UTC')
data_start = ASSET_FINDER_EQUITY_START_DATE = pd.Timestamp(
'2016-01-05',
tz='utc',
)
SPLIT_ASSET_SID = 3
ASSET_FINDER_EQUITY_SIDS = 1, 2, SPLIT_ASSET_SID
@classmethod
def make_equity_minute_bar_data(cls):
asset_minutes = \
cls.trading_calendar.minutes_in_range(
cls.data_start,
cls.END_DATE,
)
minutes_count = len(asset_minutes)
minutes_arr = np.arange(minutes_count) + 1
split_data = pd.DataFrame(
{
'open': minutes_arr + 1,
'high': minutes_arr + 2,
'low': minutes_arr - 1,
'close': minutes_arr,
'volume': 100 * minutes_arr,
},
index=asset_minutes,
)
split_data.iloc[480:] = split_data.iloc[480:] / 2.0
for sid in (1, 8554):
yield sid, create_minute_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
)
yield 2, create_minute_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
50,
)
yield cls.SPLIT_ASSET_SID, split_data
@classmethod
def make_splits_data(cls):
return pd.DataFrame.from_records([
{
'effective_date': str_to_seconds('2016-01-07'),
'ratio': 0.5,
'sid': cls.SPLIT_ASSET_SID,
}
])
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
for sid in sids:
yield sid, create_daily_df_for_asset(
cls.trading_calendar,
cls.data_start,
cls.END_DATE,
)
def test_data_in_bts_minute(self):
algo_code = dedent("""
from zipline.api import record, sid
def initialize(context):
context.history_values = []
def before_trading_start(context, data):
record(the_price1=data.current(sid(1), "price"))
record(the_high1=data.current(sid(1), "high"))
record(the_price2=data.current(sid(2), "price"))
record(the_high2=data.current(sid(2), "high"))
context.history_values.append(data.history(
[sid(1), sid(2)],
["price", "high"],
60,
"1m"
))
def handle_data(context, data):
pass
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# fetching data at midnight gets us the previous market minute's data
self.assertEqual(240, results.iloc[0].the_price1)
self.assertEqual(242, results.iloc[0].the_high1)
# make sure that price is ffilled, but not other fields
self.assertEqual(350, results.iloc[0].the_price2)
self.assertTrue(np.isnan(results.iloc[0].the_high2))
# 60-minute history
# asset1 day1 price should be 331-390
np.testing.assert_array_equal(
range(331, 391), algo.history_values[0]["price"][1]
)
# asset1 day1 high should be 333-392
np.testing.assert_array_equal(
range(333, 393), algo.history_values[0]["high"][1]
)
# asset2 day1 price should be 19 300s, then 40 350s
np.testing.assert_array_equal(
[300] * 19, algo.history_values[0]["price"][2][0:19]
)
np.testing.assert_array_equal(
[350] * 40, algo.history_values[0]["price"][2][20:]
)
# asset2 day1 high should be all NaNs except for the 19th item
# = 2016-01-05 20:20:00+00:00
np.testing.assert_array_equal(
np.full(19, np.nan), algo.history_values[0]["high"][2][0:19]
)
self.assertEqual(352, algo.history_values[0]["high"][2][19])
np.testing.assert_array_equal(
np.full(40, np.nan), algo.history_values[0]["high"][2][20:]
)
def test_data_in_bts_daily(self):
algo_code = dedent("""
from zipline.api import record, sid
def initialize(context):
context.history_values = []
def before_trading_start(context, data):
record(the_price1=data.current(sid(1), "price"))
record(the_high1=data.current(sid(1), "high"))
record(the_price2=data.current(sid(2), "price"))
record(the_high2=data.current(sid(2), "high"))
context.history_values.append(data.history(
[sid(1), sid(2)],
["price", "high"],
1,
"1d",
))
def handle_data(context, data):
pass
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
self.assertEqual(392, results.the_high1[0])
self.assertEqual(390, results.the_price1[0])
# nan because asset2 only trades every 50 minutes
self.assertTrue(np.isnan(results.the_high2[0]))
self.assertEqual(350, results.the_price2[0])
self.assertEqual(392, algo.history_values[0]["high"][1][0])
self.assertEqual(390, algo.history_values[0]["price"][1][0])
self.assertEqual(352, algo.history_values[0]["high"][2][0])
self.assertEqual(350, algo.history_values[0]["price"][2][0])
def test_portfolio_bts(self):
algo_code = dedent("""
from zipline.api import order, sid, record
def initialize(context):
context.ordered = False
context.hd_portfolio = context.portfolio
def before_trading_start(context, data):
bts_portfolio = context.portfolio
# Assert that the portfolio in BTS is the same as the last
# portfolio in handle_data
assert (context.hd_portfolio == bts_portfolio)
record(pos_value=bts_portfolio.positions_value)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_portfolio = context.portfolio
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# Asset starts with price 1 on 1/05 and increases by 1 every minute.
# Simulation starts on 1/06, where the price in bts is 390, and
# positions_value is 0. On 1/07, price is 780, and after buying one
# share on the first bar of 1/06, positions_value is 780
self.assertEqual(results.pos_value.iloc[0], 0)
self.assertEqual(results.pos_value.iloc[1], 780)
def test_account_bts(self):
algo_code = dedent("""
from zipline.api import order, sid, record, set_slippage, slippage
def initialize(context):
context.ordered = False
context.hd_account = context.account
set_slippage(slippage.VolumeShareSlippage())
def before_trading_start(context, data):
bts_account = context.account
# Assert that the account in BTS is the same as the last account
# in handle_data
assert (context.hd_account == bts_account)
record(port_value=context.account.equity_with_loan)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_account = context.account
""")
algo = self.make_algo(script=algo_code)
results = algo.run()
# Starting portfolio value is 10000. Order for the asset fills on the
# second bar of 1/06, where the price is 392, and costs the default
# commission of 0. On 1/07, the price is 780, and the increase in
# portfolio value is 780-392-0
self.assertEqual(results.port_value.iloc[0], 10000)
self.assertAlmostEqual(results.port_value.iloc[1],
10000 + 780 - 392 - 0,
places=2)
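# --- Illustrative aside, not part of the original test: a minimal sketch of the
# equity-with-loan arithmetic asserted above -- buy one share at 392 on 1/06,
# mark it at 780 on 1/07, zero commission.
starting_equity = 10000.0
fill_price, mark_price = 392.0, 780.0
ending_equity = starting_equity - fill_price + mark_price   # 10388.0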
def test_portfolio_bts_with_overnight_split(self):
algo_code = dedent("""
from zipline.api import order, sid, record
def initialize(context):
context.ordered = False
context.hd_portfolio = context.portfolio
def before_trading_start(context, data):
bts_portfolio = context.portfolio
# Assert that the portfolio in BTS is the same as the last
# portfolio in handle_data, except for the positions
for k in bts_portfolio.__dict__:
if k != 'positions':
assert (context.hd_portfolio.__dict__[k]
== bts_portfolio.__dict__[k])
record(pos_value=bts_portfolio.positions_value)
record(pos_amount=bts_portfolio.positions[sid(3)].amount)
record(
last_sale_price=bts_portfolio.positions[sid(3)].last_sale_price
)
def handle_data(context, data):
if not context.ordered:
order(sid(3), 1)
context.ordered = True
context.hd_portfolio = context.portfolio
""")
results = self.run_algorithm(script=algo_code)
# On 1/07, positions value should be 780, same as without split
self.assertEqual(results.pos_value.iloc[0], 0)
self.assertEqual(results.pos_value.iloc[1], 780)
# On 1/07, after applying the split, 1 share becomes 2
self.assertEqual(results.pos_amount.iloc[0], 0)
self.assertEqual(results.pos_amount.iloc[1], 2)
# On 1/07, after applying the split, last sale price is halved
self.assertEqual(results.last_sale_price.iloc[0], 0)
self.assertEqual(results.last_sale_price.iloc[1], 390)
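# --- Illustrative aside, not part of the original test: a minimal sketch of the
# split adjustment checked above -- a 0.5 split doubles the share count and
# halves the last sale price, so position value is unchanged.
ratio = 0.5
shares_before, last_sale_before = 1, 780.0
shares_after = int(shares_before / ratio)    # 2 shares
last_sale_after = last_sale_before * ratio   # 390.0
assert shares_after * last_sale_after == shares_before * last_sale_before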
def test_account_bts_with_overnight_split(self):
algo_code = dedent("""
from zipline.api import order, sid, record, set_slippage, slippage
def initialize(context):
context.ordered = False
context.hd_account = context.account
set_slippage(slippage.VolumeShareSlippage())
def before_trading_start(context, data):
bts_account = context.account
# Assert that the account in BTS is the same as the last account
# in handle_data
assert (context.hd_account == bts_account)
record(port_value=bts_account.equity_with_loan)
def handle_data(context, data):
if not context.ordered:
order(sid(1), 1)
context.ordered = True
context.hd_account = context.account
""")
results = self.run_algorithm(script=algo_code)
# On 1/07, portfolio value is the same as without split
self.assertEqual(results.port_value.iloc[0], 10000)
self.assertAlmostEqual(results.port_value.iloc[1],
10000 + 780 - 392 - 0, places=2)
class TestAlgoScript(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='utc')
END_DATE = pd.Timestamp('2006-12-31', tz='utc')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
EQUITY_DAILY_BAR_LOOKBACK_DAYS = 5 # max history window length
STRING_TYPE_NAMES = [s.__name__ for s in string_types]
STRING_TYPE_NAMES_STRING = ', '.join(STRING_TYPE_NAMES)
ASSET_TYPE_NAME = Asset.__name__
CONTINUOUS_FUTURE_NAME = ContinuousFuture.__name__
ASSET_OR_STRING_TYPE_NAMES = ', '.join([ASSET_TYPE_NAME] +
STRING_TYPE_NAMES)
ASSET_OR_STRING_OR_CF_TYPE_NAMES = ', '.join([ASSET_TYPE_NAME,
CONTINUOUS_FUTURE_NAME] +
STRING_TYPE_NAMES)
ARG_TYPE_TEST_CASES = (
('history__assets', (bad_type_history_assets,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history__fields', (bad_type_history_fields,
STRING_TYPE_NAMES_STRING,
True)),
('history__bar_count', (bad_type_history_bar_count, 'int', False)),
('history__frequency', (bad_type_history_frequency,
STRING_TYPE_NAMES_STRING,
False)),
('current__assets', (bad_type_current_assets,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('current__fields', (bad_type_current_fields,
STRING_TYPE_NAMES_STRING,
True)),
('is_stale__assets', (bad_type_is_stale_assets, 'Asset', True)),
('can_trade__assets', (bad_type_can_trade_assets, 'Asset', True)),
('history_kwarg__assets',
(bad_type_history_assets_kwarg,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history_kwarg_bad_list__assets',
(bad_type_history_assets_kwarg_list,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('history_kwarg__fields',
(bad_type_history_fields_kwarg, STRING_TYPE_NAMES_STRING, True)),
('history_kwarg__bar_count',
(bad_type_history_bar_count_kwarg, 'int', False)),
('history_kwarg__frequency',
(bad_type_history_frequency_kwarg, STRING_TYPE_NAMES_STRING, False)),
('current_kwarg__assets',
(bad_type_current_assets_kwarg,
ASSET_OR_STRING_OR_CF_TYPE_NAMES,
True)),
('current_kwarg__fields',
(bad_type_current_fields_kwarg, STRING_TYPE_NAMES_STRING, True)),
)
sids = 0, 1, 3, 133
# FIXME: Pass a benchmark explicitly here.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
register_calendar("TEST", get_calendar("NYSE"), force=True)
data = make_simple_equity_info(
cls.sids,
cls.START_DATE,
cls.END_DATE,
)
data.loc[3, 'symbol'] = 'TEST'
return data
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
cal = cls.trading_calendars[Equity]
sessions = cal.sessions_in_range(cls.START_DATE, cls.END_DATE)
frame = pd.DataFrame({
'close': 10., 'high': 10.5, 'low': 9.5, 'open': 10., 'volume': 100,
}, index=sessions)
for sid in sids:
yield sid, frame
def test_noop(self):
self.run_algorithm(
initialize=initialize_noop,
handle_data=handle_data_noop,
)
def test_noop_string(self):
self.run_algorithm(script=noop_algo)
def test_no_handle_data(self):
self.run_algorithm(script=no_handle_data)
def test_api_calls(self):
self.run_algorithm(
initialize=initialize_api,
handle_data=handle_data_api,
)
def test_api_calls_string(self):
self.run_algorithm(script=api_algo)
def test_api_get_environment(self):
platform = 'zipline'
algo = self.make_algo(
script=api_get_environment_algo,
platform=platform,
)
algo.run()
self.assertEqual(algo.environment, platform)
def test_api_symbol(self):
self.run_algorithm(script=api_symbol_algo)
def test_fixed_slippage(self):
# verify order -> transaction -> portfolio position.
# --------------
test_algo = self.make_algo(
script="""
from zipline.api import (slippage,
commission,
set_slippage,
set_commission,
order,
record,
sid)
def initialize(context):
model = slippage.FixedSlippage(spread=0.10)
set_slippage(model)
set_commission(commission.PerTrade(100.00))
context.count = 1
context.incr = 0
def handle_data(context, data):
if context.incr < context.count:
order(sid(0), -1000)
record(price=data.current(sid(0), "price"))
context.incr += 1""",
)
results = test_algo.run()
# flatten the list of txns
all_txns = [val for sublist in results["transactions"].tolist()
for val in sublist]
self.assertEqual(len(all_txns), 1)
txn = all_txns[0]
expected_spread = 0.05
expected_price = test_algo.recorded_vars["price"] - expected_spread
self.assertEqual(expected_price, txn['price'])
# make sure that the $100 commission was applied to our cash
# the txn was for -1000 shares at 9.95, means -9.95k. our capital_used
# for that day was therefore 9.95k, but after the $100 commission,
# it should be 9.85k.
self.assertEqual(9850, results.capital_used[1])
self.assertEqual(100, results["orders"].iloc[1][0]["commission"])
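# --- Illustrative aside, not part of the original test: a minimal sketch of the
# cash accounting asserted above, assuming the fixture's quoted price of 10.0.
# FixedSlippage(spread=0.10) fills a sell half a spread below the quote, and
# PerTrade(100.00) charges a flat commission per order.
quoted_price = 10.0
fill_price = quoted_price - 0.10 / 2        # 9.95 for a sell
proceeds = 1000 * fill_price                # 9950.0 received for -1000 shares
cash_after_commission = proceeds - 100.00   # 9850.0, the capital_used checked above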
@parameterized.expand(
[
('no_minimum_commission', 0,),
('default_minimum_commission', 0,),
('alternate_minimum_commission', 2,),
]
)
def test_volshare_slippage(self, name, minimum_commission):
tempdir = TempDirectory()
try:
if name == "default_minimum_commission":
commission_line = "set_commission(commission.PerShare(0.02))"
else:
commission_line = \
"set_commission(commission.PerShare(0.02, " \
"min_trade_cost={0}))".format(minimum_commission)
# verify order -> transaction -> portfolio position.
# --------------
# XXX: This is the last remaining consumer of
# create_daily_trade_source.
trades = factory.create_daily_trade_source(
[0], self.sim_params, self.asset_finder, self.trading_calendar
)
data_portal = create_data_portal_from_trade_history(
self.asset_finder, self.trading_calendar, tempdir,
self.sim_params, {0: trades}
)
test_algo = self.make_algo(
data_portal=data_portal,
script="""
from zipline.api import *
def initialize(context):
model = slippage.VolumeShareSlippage(
volume_limit=.3,
price_impact=0.05
)
set_slippage(model)
{0}
context.count = 2
context.incr = 0
def handle_data(context, data):
if context.incr < context.count:
# order small lots to be sure the
# order will fill in a single transaction
order(sid(0), 5000)
record(price=data.current(sid(0), "price"))
record(volume=data.current(sid(0), "volume"))
record(incr=context.incr)
context.incr += 1
""".format(commission_line),
)
results = test_algo.run()
all_txns = [
val for sublist in results["transactions"].tolist()
for val in sublist]
self.assertEqual(len(all_txns), 67)
# all_orders are all the incremental versions of the
# orders as each new fill comes in.
all_orders = list(toolz.concat(results['orders']))
if minimum_commission == 0:
# for each incremental version of each order, the commission
# should be its filled amount * 0.02
for order_ in all_orders:
self.assertAlmostEqual(
order_["filled"] * 0.02,
order_["commission"]
)
else:
# the commission should be at least the min_trade_cost
for order_ in all_orders:
if order_["filled"] > 0:
self.assertAlmostEqual(
max(order_["filled"] * 0.02, minimum_commission),
order_["commission"]
)
else:
self.assertEqual(0, order_["commission"])
finally:
tempdir.cleanup()
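# --- Illustrative aside, not part of the original test: a minimal sketch of the
# commission rule the assertions above exercise -- 0.02 per filled share, but
# never less than min_trade_cost once anything has filled.
def expected_commission(filled_shares, cost_per_share=0.02, min_trade_cost=0.0):
    # No fill yet -> no commission; otherwise at least the minimum.
    if filled_shares == 0:
        return 0.0
    return max(filled_shares * cost_per_share, min_trade_cost)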
def test_incorrectly_set_futures_slippage_model(self):
code = dedent(
"""
from zipline.api import set_slippage, slippage
class MySlippage(slippage.FutureSlippageModel):
def process_order(self, data, order):
return data.current(order.asset, 'price'), order.amount
def initialize(context):
set_slippage(MySlippage())
"""
)
test_algo = self.make_algo(script=code)
with self.assertRaises(IncompatibleSlippageModel):
# Passing a futures slippage model as the first argument, which is
# for setting equity models, should fail.
test_algo.run()
def test_algo_record_vars(self):
test_algo = self.make_algo(script=record_variables)
results = test_algo.run()
for i in range(1, 252):
self.assertEqual(results.iloc[i-1]["incr"], i)
def test_algo_record_nan(self):
test_algo = self.make_algo(script=record_float_magic % 'nan')
results = test_algo.run()
for i in range(1, 252):
self.assertTrue(np.isnan(results.iloc[i-1]["data"]))
def test_batch_market_order_matches_multiple_manual_orders(self):
share_counts = pd.Series([50, 100])
multi_blotter = RecordBatchBlotter()
multi_test_algo = self.make_algo(
script=dedent("""\
from collections import OrderedDict
from six import iteritems
from zipline.api import sid, order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
it = zip(context.assets, {share_counts})
for asset, shares in it:
order(asset, shares)
context.placed = True
""").format(share_counts=list(share_counts)),
blotter=multi_blotter,
)
multi_stats = multi_test_algo.run()
self.assertFalse(multi_blotter.order_batch_called)
batch_blotter = RecordBatchBlotter()
batch_test_algo = self.make_algo(
script=dedent("""\
import pandas as pd
from zipline.api import sid, batch_market_order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
orders = batch_market_order(pd.Series(
index=context.assets, data={share_counts}
))
assert len(orders) == 2, \
"len(orders) was %s but expected 2" % len(orders)
for o in orders:
assert o is not None, "An order is None"
context.placed = True
""").format(share_counts=list(share_counts)),
blotter=batch_blotter,
)
batch_stats = batch_test_algo.run()
self.assertTrue(batch_blotter.order_batch_called)
for stats in (multi_stats, batch_stats):
stats.orders = stats.orders.apply(
lambda orders: [toolz.dissoc(o, 'id') for o in orders]
)
stats.transactions = stats.transactions.apply(
lambda txns: [toolz.dissoc(txn, 'order_id') for txn in txns]
)
assert_equal(multi_stats, batch_stats)
def test_batch_market_order_filters_null_orders(self):
share_counts = [50, 0]
batch_blotter = RecordBatchBlotter()
batch_test_algo = self.make_algo(
script=dedent("""\
import pandas as pd
from zipline.api import sid, batch_market_order
def initialize(context):
context.assets = [sid(0), sid(3)]
context.placed = False
def handle_data(context, data):
if not context.placed:
orders = batch_market_order(pd.Series(
index=context.assets, data={share_counts}
))
assert len(orders) == 1, \
"len(orders) was %s but expected 1" % len(orders)
for o in orders:
assert o is not None, "An order is None"
context.placed = True
""").format(share_counts=share_counts),
blotter=batch_blotter,
)
batch_test_algo.run()
self.assertTrue(batch_blotter.order_batch_called)
def test_order_dead_asset(self):
# after asset 0 is dead
params = SimulationParameters(
start_session=pd.Timestamp("2007-01-03", tz='UTC'),
end_session=pd.Timestamp("2007-01-05", tz='UTC'),
trading_calendar=self.trading_calendar,
)
# order method shouldn't blow up
self.run_algorithm(
script="""
from zipline.api import order, sid
def initialize(context):
pass
def handle_data(context, data):
order(sid(0), 10)
""",
)
# order_value and order_percent should blow up
for order_str in ["order_value", "order_percent"]:
test_algo = self.make_algo(
script="""
from zipline.api import order_percent, order_value, sid
def initialize(context):
pass
def handle_data(context, data):
{0}(sid(0), 10)
""".format(order_str),
sim_params=params,
)
with self.assertRaises(CannotOrderDelistedAsset):
test_algo.run()
def test_portfolio_in_init(self):
"""
Test that accessing portfolio in init doesn't break.
"""
self.run_algorithm(script=access_portfolio_in_init)
def test_account_in_init(self):
"""
Test that accessing account in init doesn't break.
"""
self.run_algorithm(script=access_account_in_init)
def test_without_kwargs(self):
"""
Test that api methods on the data object can be called with positional
arguments.
"""
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(sim_params=params, script=call_without_kwargs)
def test_good_kwargs(self):
"""
Test that api methods on the data object can be called with keyword
arguments.
"""
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(script=call_with_kwargs, sim_params=params)
@parameterized.expand([('history', call_with_bad_kwargs_history),
('current', call_with_bad_kwargs_current)])
def test_bad_kwargs(self, name, algo_text):
"""
Test that api methods on the data object called with bad kwargs return
a meaningful TypeError that we create, rather than an unhelpful cython
error
"""
algo = self.make_algo(script=algo_text)
with self.assertRaises(TypeError) as cm:
algo.run()
self.assertEqual("%s() got an unexpected keyword argument 'blahblah'"
% name, cm.exception.args[0])
@parameterized.expand(ARG_TYPE_TEST_CASES)
def test_arg_types(self, name, inputs):
keyword = name.split('__')[1]
algo = self.make_algo(script=inputs[0])
with self.assertRaises(TypeError) as cm:
algo.run()
expected = "Expected %s argument to be of type %s%s" % (
keyword,
'or iterable of type ' if inputs[2] else '',
inputs[1]
)
self.assertEqual(expected, cm.exception.args[0])
def test_empty_asset_list_to_history(self):
params = SimulationParameters(
start_session=pd.Timestamp("2006-01-10", tz='UTC'),
end_session=pd.Timestamp("2006-01-11", tz='UTC'),
trading_calendar=self.trading_calendar,
)
self.run_algorithm(
script=dedent("""
def initialize(context):
pass
def handle_data(context, data):
data.history([], "price", 5, '1d')
"""),
sim_params=params,
)
@parameterized.expand(
[('bad_kwargs', call_with_bad_kwargs_get_open_orders),
('good_kwargs', call_with_good_kwargs_get_open_orders),
('no_kwargs', call_with_no_kwargs_get_open_orders)]
)
def test_get_open_orders_kwargs(self, name, script):
algo = self.make_algo(script=script)
if name == 'bad_kwargs':
with self.assertRaises(TypeError) as cm:
algo.run()
self.assertEqual('Keyword argument `sid` is no longer '
'supported for get_open_orders. Use `asset` '
'instead.', cm.exception.args[0])
else:
algo.run()
def test_empty_positions(self):
"""
Test that when we try context.portfolio.positions[stock] on a stock
for which we have no positions, we return a Position with values 0
(but more importantly, we don't crash) and don't save this Position
to the user-facing dictionary PositionTracker._positions_store
"""
results = self.run_algorithm(script=empty_positions)
num_positions = results.num_positions
amounts = results.amounts
self.assertTrue(all(num_positions == 0))
self.assertTrue(all(amounts == 0))
def test_schedule_function_time_rule_positionally_misplaced(self):
"""
Test that when a user specifies a time rule for the date_rule argument,
but no rule in the time_rule argument
(e.g. schedule_function(func, <time_rule>)), we assume that means
assign a time rule but no date rule
"""
sim_params = factory.create_simulation_parameters(
start=pd.Timestamp('2006-01-12', tz='UTC'),
end=pd.Timestamp('2006-01-13', tz='UTC'),
data_frequency='minute'
)
algocode = dedent("""
from zipline.api import time_rules, schedule_function
def do_at_open(context, data):
context.done_at_open.append(context.get_datetime())
def do_at_close(context, data):
context.done_at_close.append(context.get_datetime())
def initialize(context):
context.done_at_open = []
context.done_at_close = []
schedule_function(do_at_open, time_rules.market_open())
schedule_function(do_at_close, time_rules.market_close())
def handle_data(algo, data):
pass
""")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore", PerformanceWarning)
algo = self.make_algo(script=algocode, sim_params=sim_params)
algo.run()
self.assertEqual(len(w), 2)
for i, warning in enumerate(w):
self.assertIsInstance(warning.message, UserWarning)
self.assertEqual(
warning.message.args[0],
'Got a time rule for the second positional argument '
'date_rule. You should use keyword argument '
'time_rule= when calling schedule_function without '
'specifying a date_rule'
)
# The warnings come from line 13 and 14 in the algocode
self.assertEqual(warning.lineno, 13 + i)
self.assertEqual(
algo.done_at_open,
[pd.Timestamp('2006-01-12 14:31:00', tz='UTC'),
pd.Timestamp('2006-01-13 14:31:00', tz='UTC')]
)
self.assertEqual(
algo.done_at_close,
[pd.Timestamp('2006-01-12 20:59:00', tz='UTC'),
pd.Timestamp('2006-01-13 20:59:00', tz='UTC')]
)
class TestCapitalChanges(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-09', tz='UTC')
# XXX: This suite only has daily data for sid 0 and only has minutely data
# for sid 1.
sids = ASSET_FINDER_EQUITY_SIDS = (0, 1)
DAILY_SID = 0
MINUTELY_SID = 1
# FIXME: Pass a benchmark source explicitly here.
BENCHMARK_SID = None
@classmethod
def make_equity_minute_bar_data(cls):
minutes = cls.trading_calendar.minutes_in_range(
cls.START_DATE,
cls.END_DATE,
)
closes = np.arange(100, 100 + len(minutes), 1)
opens = closes
highs = closes + 5
lows = closes - 5
frame = pd.DataFrame(
index=minutes,
data={
'open': opens,
'high': highs,
'low': lows,
'close': closes,
'volume': 10000,
},
)
yield cls.MINUTELY_SID, frame
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
days = cls.trading_calendar.sessions_in_range(
cls.START_DATE,
cls.END_DATE,
)
closes = np.arange(10.0, 10.0 + len(days), 1.0)
opens = closes
highs = closes + 0.5
lows = closes - 0.5
frame = pd.DataFrame(
index=days,
data={
'open': opens,
'high': highs,
'low': lows,
'close': closes,
'volume': 10000,
},
)
yield cls.DAILY_SID, frame
@parameterized.expand([
('target', 151000.0), ('delta', 50000.0)
])
def test_capital_changes_daily_mode(self, change_type, value):
capital_changes = {
pd.Timestamp('2006-01-06', tz='UTC'):
{'type': change_type, 'value': value}
}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(0), 1000)
"""
algo = self.make_algo(
script=algocode,
capital_changes=capital_changes,
sim_params=SimulationParameters(
start_session=self.START_DATE,
end_session=self.END_DATE,
trading_calendar=self.nyse_calendar,
)
)
# We call get_generator rather than `run()` here because we care about
# the raw capital change packets.
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), 1)
self.assertEqual(
capital_change_packets[0],
{'date': pd.Timestamp('2006-01-06', tz='UTC'),
'type': 'cash',
'target': 151000.0 if change_type == 'target' else None,
'delta': 50000.0})
# 1/03: price = 10, place orders
# 1/04: orders execute at price = 11, place orders
# 1/05: orders execute at price = 12, place orders
# 1/06: +50000 capital change,
# orders execute at price = 13, place orders
# 1/09: orders execute at price = 14, place orders
expected_daily = {}
expected_capital_changes = np.array([
0.0, 0.0, 0.0, 50000.0, 0.0
])
# Day 1, no transaction. Day 2, we transact, but the price of our stock
# does not change. Day 3, we start getting returns
expected_daily['returns'] = np.array([
0.0,
0.0,
# 1000 shares * gain of 1
(100000.0 + 1000.0) / 100000.0 - 1.0,
# 2000 shares * gain of 1, capital change of +50000
(151000.0 + 2000.0) / 151000.0 - 1.0,
# 3000 shares * gain of 1
(153000.0 + 3000.0) / 153000.0 - 1.0,
])
expected_daily['pnl'] = np.array([
0.0,
0.0,
1000.00, # 1000 shares * gain of 1
2000.00, # 2000 shares * gain of 1
3000.00, # 3000 shares * gain of 1
])
expected_daily['capital_used'] = np.array([
0.0,
-11000.0, # 1000 shares at price = 11
-12000.0, # 1000 shares at price = 12
-13000.0, # 1000 shares at price = 13
-14000.0, # 1000 shares at price = 14
])
expected_daily['ending_cash'] = \
np.array([100000.0] * 5) + \
np.cumsum(expected_capital_changes) + \
np.cumsum(expected_daily['capital_used'])
expected_daily['starting_cash'] = \
expected_daily['ending_cash'] - \
expected_daily['capital_used']
expected_daily['starting_value'] = np.array([
0.0,
0.0,
11000.0, # 1000 shares at price = 11
24000.0, # 2000 shares at price = 12
39000.0, # 3000 shares at price = 13
])
expected_daily['ending_value'] = \
expected_daily['starting_value'] + \
expected_daily['pnl'] - \
expected_daily['capital_used']
expected_daily['portfolio_value'] = \
expected_daily['ending_value'] + \
expected_daily['ending_cash']
stats = [
'returns', 'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value'
]
expected_cumulative = {
'returns': np.cumprod(expected_daily['returns'] + 1) - 1,
'pnl': np.cumsum(expected_daily['pnl']),
'capital_used': np.cumsum(expected_daily['capital_used']),
'starting_cash':
np.repeat(expected_daily['starting_cash'][0:1], 5),
'ending_cash': expected_daily['ending_cash'],
'starting_value':
np.repeat(expected_daily['starting_value'][0:1], 5),
'ending_value': expected_daily['ending_value'],
'portfolio_value': expected_daily['portfolio_value'],
}
for stat in stats:
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in daily_perf]),
expected_daily[stat],
err_msg='daily ' + stat,
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat],
err_msg='cumulative ' + stat,
)
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-06', tz='UTC'): 50000.0}
)
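# --- Illustrative aside, not part of the original test: a minimal sketch of the
# 1/06 return above -- the +50000 capital change is added to the starting value
# before the day's return is measured (2000 shares held, each gaining 1.0).
starting_value_with_change = 101000.0 + 50000.0   # prior portfolio value + capital change
day_pnl = 2000 * 1.0                              # price moves 12 -> 13
day_return = (starting_value_with_change + day_pnl) / starting_value_with_change - 1.0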
@parameterized.expand([
('interday_target', [('2006-01-05', 2388.0)]),
('interday_delta', [('2006-01-05', 1000.0)]),
('intraday_target', [('2006-01-05 17:00', 2184.0),
('2006-01-05 18:00', 2804.0)]),
('intraday_delta', [('2006-01-05 17:00', 500.0),
('2006-01-05 18:00', 500.0)]),
])
def test_capital_changes_minute_mode_daily_emission(self, change, values):
change_loc, change_type = change.split('_')
sim_params = SimulationParameters(
start_session=pd.Timestamp('2006-01-04', tz='UTC'),
end_session=pd.Timestamp('2006-01-05', tz='UTC'),
data_frequency='minute',
capital_base=1000.0,
trading_calendar=self.nyse_calendar,
)
capital_changes = {
pd.Timestamp(datestr, tz='UTC'): {
'type': change_type,
'value': value
}
for datestr, value in values
}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(1), 1)
"""
algo = self.make_algo(
script=algocode,
sim_params=sim_params,
capital_changes=capital_changes
)
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), len(capital_changes))
expected = [
{'date': pd.Timestamp(val[0], tz='UTC'),
'type': 'cash',
'target': val[1] if change_type == 'target' else None,
'delta': 1000.0 if len(values) == 1 else 500.0}
for val in values]
self.assertEqual(capital_change_packets, expected)
# 1/03: place orders at price = 100, execute at 101
# 1/04: place orders at price = 490, execute at 491,
# +500 capital change at 17:00 and 18:00 (intraday)
# or +1000 at 00:00 (interday),
# 1/05: place orders at price = 880, execute at 881
expected_daily = {}
expected_capital_changes = np.array([0.0, 1000.0, 0.0])
if change_loc == 'intraday':
# Fills at 491, +500 capital change comes at 638 (17:00) and
# 698 (18:00), ends day at 879
day2_return = (
(1388.0 + 149.0 + 147.0) / 1388.0 *
(2184.0 + 60.0 + 60.0) / 2184.0 *
(2804.0 + 181.0 + 181.0) / 2804.0 - 1.0
)
else:
# Fills at 491, ends day at 879, capital change +1000
day2_return = (2388.0 + 390.0 + 388.0) / 2388.0 - 1
expected_daily['returns'] = np.array([
# Fills at 101, ends day at 489
(1000.0 + 489 - 101) / 1000.0 - 1.0,
day2_return,
# Fills at 881, ends day at 1269
(3166.0 + 390.0 + 390.0 + 388.0) / 3166.0 - 1.0,
])
expected_daily['pnl'] = np.array([
388.0,
390.0 + 388.0,
390.0 + 390.0 + 388.0,
])
expected_daily['capital_used'] = np.array([
-101.0, -491.0, -881.0
])
expected_daily['ending_cash'] = \
np.array([1000.0] * 3) + \
np.cumsum(expected_capital_changes) + \
np.cumsum(expected_daily['capital_used'])
expected_daily['starting_cash'] = \
expected_daily['ending_cash'] - \
expected_daily['capital_used']
if change_loc == 'intraday':
# Capital changes come after day start
expected_daily['starting_cash'] -= expected_capital_changes
expected_daily['starting_value'] = np.array([
0.0, 489.0, 879.0 * 2
])
expected_daily['ending_value'] = \
expected_daily['starting_value'] + \
expected_daily['pnl'] - \
expected_daily['capital_used']
expected_daily['portfolio_value'] = \
expected_daily['ending_value'] + \
expected_daily['ending_cash']
stats = [
'returns', 'pnl', 'capital_used', 'starting_cash', 'ending_cash',
'starting_value', 'ending_value', 'portfolio_value'
]
expected_cumulative = {
'returns': np.cumprod(expected_daily['returns'] + 1) - 1,
'pnl': np.cumsum(expected_daily['pnl']),
'capital_used': np.cumsum(expected_daily['capital_used']),
'starting_cash':
np.repeat(expected_daily['starting_cash'][0:1], 3),
'ending_cash': expected_daily['ending_cash'],
'starting_value':
np.repeat(expected_daily['starting_value'][0:1], 3),
'ending_value': expected_daily['ending_value'],
'portfolio_value': expected_daily['portfolio_value'],
}
for stat in stats:
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in daily_perf]),
expected_daily[stat]
)
np.testing.assert_array_almost_equal(
np.array([perf[stat] for perf in cumulative_perf]),
expected_cumulative[stat]
)
if change_loc == 'interday':
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-05', tz='UTC'): 1000.0}
)
else:
self.assertEqual(
algo.capital_change_deltas,
{pd.Timestamp('2006-01-05 17:00', tz='UTC'): 500.0,
pd.Timestamp('2006-01-05 18:00', tz='UTC'): 500.0}
)
@parameterized.expand([
('interday_target', [('2006-01-05', 2388.0)]),
('interday_delta', [('2006-01-05', 1000.0)]),
('intraday_target', [('2006-01-05 17:00', 2184.0),
('2006-01-05 18:00', 2804.0)]),
('intraday_delta', [('2006-01-05 17:00', 500.0),
('2006-01-05 18:00', 500.0)]),
])
def test_capital_changes_minute_mode_minute_emission(self, change, values):
change_loc, change_type = change.split('_')
sim_params = SimulationParameters(
start_session=pd.Timestamp('2006-01-04', tz='UTC'),
end_session=pd.Timestamp('2006-01-05', tz='UTC'),
data_frequency='minute',
emission_rate='minute',
capital_base=1000.0,
trading_calendar=self.nyse_calendar,
)
capital_changes = {pd.Timestamp(val[0], tz='UTC'): {
'type': change_type, 'value': val[1]} for val in values}
algocode = """
from zipline.api import set_slippage, set_commission, slippage, commission, \
schedule_function, time_rules, order, sid
def initialize(context):
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(0, 0))
schedule_function(order_stuff, time_rule=time_rules.market_open())
def order_stuff(context, data):
order(sid(1), 1)
"""
algo = self.make_algo(
script=algocode,
sim_params=sim_params,
capital_changes=capital_changes
)
gen = algo.get_generator()
results = list(gen)
cumulative_perf = \
[r['cumulative_perf'] for r in results if 'cumulative_perf' in r]
minute_perf = [r['minute_perf'] for r in results if 'minute_perf' in r]
daily_perf = [r['daily_perf'] for r in results if 'daily_perf' in r]
capital_change_packets = \
[r['capital_change'] for r in results if 'capital_change' in r]
self.assertEqual(len(capital_change_packets), len(capital_changes))
expected = [
{'date':
|
pd.Timestamp(val[0], tz='UTC')
|
pandas.Timestamp
|
import pandas as pd
#import geoglows
import os
import requests
import datetime as dt
#regions = ['central_america-geoglows', 'south_america-geoglows', 'north_america-geoglows', 'australia-geoglows',
# 'africa-geoglows','central_asia-geoglows', 'east_asia-geoglows', 'europe-geoglows',
# 'islands-geoglows', 'japan-geoglows', 'middle_east-geoglows', 'south_asia-geoglows',
# 'west_asia-geoglows']
watersheds = ['africa']
subbasins = ['geoglows']
#for region in regions:
for watershed, subbasin in zip(watersheds, subbasins):
region = watershed+'-'+subbasin
df = pd.read_csv('/Volumes/BYU_HD/Streamflow_Prediction_Tool/Shapes/{0}-drainageline_2.csv'.format(region))
COMIDs = df['COMID'].tolist()
for comid in COMIDs:
print(region, '-', comid)
'''Get Era_5 Data'''
#df1 = geoglows.streamflow.historic_simulation(comid, forcing='era_5', return_format='csv')
#df1[df1 < 0] = 0
#df1.index = df1.index.to_series().dt.strftime("%Y-%m-%d")
#df1.index = pd.to_datetime(df1.index)
#pairs = [list(a) for a in zip(df1.index, df1.iloc[:,0].values)]
#era5_df = pd.DataFrame(pairs, columns=['Datetime', 'era5 Streamflow (m3/s)'])
#era5_df.set_index('Datetime', inplace=True)
#era5_df.index = pd.to_datetime(era5_df.index)
text = os.popen('curl -X GET "https://tethys2.byu.edu/localsptapi/api/HistoricSimulation/?reach_id={0}&return_format=csv" -H "accept: text/csv"'.format(comid)).read()
df1 = pd.DataFrame([x.split(',') for x in text.split('\n')])
df1.drop(df1.index[0], inplace=True)
df1 = df1[:-1]
pairs = [list(a) for a in zip(df1.iloc[:, 0].values, df1.iloc[:, 1].values)]
era5_df = pd.DataFrame(pairs, columns=['Datetime', 'era5 Streamflow (m3/s)'])
era5_df.set_index('Datetime', inplace=True)
era5_df.index = pd.to_datetime(era5_df.index)
era5_df.index = era5_df.index.to_series().dt.strftime("%Y-%m-%d")
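# Illustrative alternative, not part of the original script: the manual
# split-on-newlines parsing above could also use pandas' CSV reader directly,
# assuming the endpoint returns a header row plus datetime,flow rows (kept
# commented out like the other alternatives in this script):
#import io
#era5_alt = pd.read_csv(io.StringIO(text), index_col=0, parse_dates=True)
#era5_alt.columns = ['era5 Streamflow (m3/s)']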
'''Get Era_Interim Data'''
#df2 = geoglows.streamflow.historic_simulation(comid, forcing='era_interim', return_format='csv')
#df2[df2 < 0] = 0
#df2.index = df2.index.to_series().dt.strftime("%Y-%m-%d")
#df2.index = pd.to_datetime(df2.index)
#pairs = [list(a) for a in zip(df2.index, df2.iloc[:, 0].values)]
#eraI_df = pd.DataFrame(pairs, columns=['Datetime', 'erai Streamflow (m3/s)'])
#eraI_df.set_index('Datetime', inplace=True)
#eraI_df.index = pd.to_datetime(eraI_df.index)
request_params = dict(watershed_name=watershed, subbasin_name=subbasin, reach_id=comid, return_format='csv')
request_headers = dict(Authorization='Token <PASSWORD>3552705cd86ac681f3717510b6937f6')
era_res = requests.get('https://tethys2.byu.edu/apps/streamflow-prediction-tool/api/GetHistoricData/',params=request_params, headers=request_headers)
era_pairs = era_res.content.splitlines()
era_pairs.pop(0)
era_dates = []
era_values = []
for era_pair in era_pairs:
era_pair = era_pair.decode('utf-8')
era_dates.append(dt.datetime.strptime(era_pair.split(',')[0], '%Y-%m-%d %H:%M:%S'))
era_values.append(float(era_pair.split(',')[1]))
pairs = [list(a) for a in zip(era_dates, era_values)]
eraI_df = pd.DataFrame(pairs, columns=['Datetime', 'erai Streamflow (m3/s)'])
eraI_df.set_index('Datetime', inplace=True)
eraI_df.index =
|
pd.to_datetime(eraI_df.index)
|
pandas.to_datetime
|
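# A minimal standalone sketch of the pandas.to_datetime completion above:
# converting a string index into a DatetimeIndex (hypothetical dates/values).
import pandas as pd
flows = pd.DataFrame({"Streamflow (m3/s)": [1.2, 3.4]},
                     index=["1979-01-01", "1979-01-02"])
flows.index = pd.to_datetime(flows.index)
print(flows.index.dtype)  # datetime64[ns]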
import json
import os
import pandas as pd
import sys
sys.path.append("..")
sys.path.append("../../column_matching")
import column_matching.column_match as cm
import data_build_scripts.helpers as hlp
def main():
print("got to main madden build")
local_path = os.path.dirname(os.path.abspath(__file__))
f = open(os.path.join(local_path, "madden_build.json"))
data = json.load(f)
matching = hlp.return_matching_dict() # get global matching dictionary
two_up = os.path.abspath(os.path.join(local_path, "../.."))
df = pd.DataFrame(columns=data['columns'])
source_dir = os.path.join(two_up, data['source']) # should work in both mac and windows
target_dir = os.path.join(two_up, data['target'])
counter = 0 # first one will not be matched
for file in data['file_list']:
source = os.path.join(source_dir, file['folder'], file['file'])
temp_df = pd.read_csv(source)
temp_df.rename(columns=data['column_rename'], inplace=True)
temp_df['year'] = data['year'][file['file']] # add year
temp_df['position_group'] = temp_df['position'].map(matching['position_groups'])
temp_df = temp_df[data['columns']] # cut all extra columns
new_column_name = str(data['year'][file['file']]) + "_madden_rating"
temp_df[new_column_name] = temp_df['madden_rating']
if counter == 0:
df = df.append(temp_df)
else:
df_1 = cm.fuzzy_merge(df, temp_df, ['first_name', 'last_name', 'position_group'],
['first_name', 'last_name', 'position_group'], threshold=95, limit=1) # inner join
df_2 = pd.concat([temp_df, df_1])
df = pd.concat([df, df_2])
df = df.drop_duplicates(subset=['first_name', 'last_name', 'position_group'], keep='last')
counter += 1
df['section'] = df['position_group'].map(matching['section'])
df.rename(columns=data['column_rename'], inplace=True)
print(df.columns)
df = df[data['column_order']]
target_folder = os.path.join(target_dir, data['output_folder'])
hlp.make_folder_if_not_exists(target_folder)
target = os.path.join(target_folder, data['output_file'])
df.to_csv(target, index=False)
def add_espn_id():
local_path = os.path.dirname(os.path.abspath(__file__))
f = open(os.path.join(local_path, "madden_build.json"))
data = json.load(f)
two_up = os.path.abspath(os.path.join(local_path, "../.."))
source_dir = os.path.join(two_up, data['target']) # should work in both mac and windows
target_dir = os.path.join(two_up, data['target'])
source = os.path.join(source_dir, data['output_folder'], data['output_file'])
df =
|
pd.read_csv(source)
|
pandas.read_csv
|
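# A minimal standalone sketch of the pandas.read_csv completion above, reading
# CSV text from an in-memory buffer instead of a real file path:
import io
import pandas as pd
csv_text = "first_name,last_name,madden_rating\nTom,Brady,99\n"
players = pd.read_csv(io.StringIO(csv_text))
print(players.shape)  # (1, 3)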
"""
test date_range, bdate_range construction from the convenience range functions
"""
from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
from pytz import timezone
from pandas._libs.tslibs import timezones
from pandas._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mapping
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
import pandas._testing as tm
from pandas.core.arrays.datetimes import generate_range
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestTimestampEquivDateRange:
# Older tests in TestTimeSeries constructed their `stamp` objects
# using `date_range` instead of the `Timestamp` constructor.
# TestTimestampEquivDateRange checks that these are equivalent in the
# pertinent cases.
def test_date_range_timestamp_equiv(self):
rng = date_range("20090415", "20090519", tz="US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_dateutil(self):
rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="dateutil/US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_explicit_pytz(self):
rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"), freq="D")
assert ts == stamp
@td.skip_if_windows_python_3
def test_date_range_timestamp_equiv_explicit_dateutil(self):
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
rng = date_range("20090415", "20090519", tz=gettz("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=gettz("US/Eastern"), freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_from_datetime_instance(self):
datetime_instance = datetime(2014, 3, 4)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]
ts = Timestamp(datetime_instance, freq="D")
assert ts == timestamp_instance
def test_date_range_timestamp_equiv_preserve_frequency(self):
timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
ts = Timestamp("2014-03-05", freq="D")
assert timestamp_instance == ts
class TestDateRanges:
def test_date_range_nat(self):
# GH#11587
msg = "Neither `start` nor `end` can be NaT"
with pytest.raises(ValueError, match=msg):
date_range(start="2016-01-01", end=pd.NaT, freq="D")
with pytest.raises(ValueError, match=msg):
date_range(start=pd.NaT, end="2016-01-01", freq="D")
def test_date_range_multiplication_overflow(self):
# GH#24255
# check that overflows in calculating `addend = periods * stride`
# are caught
with tm.assert_produces_warning(None):
# we should _not_ be seeing a overflow RuntimeWarning
dti = date_range(start="1677-09-22", periods=213503, freq="D")
assert dti[0] == Timestamp("1677-09-22")
assert len(dti) == 213503
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("1969-05-04", periods=200000000, freq="30000D")
def test_date_range_unsigned_overflow_handling(self):
# GH#24255
# case where `addend = periods * stride` overflows int64 bounds
# but not uint64 bounds
dti = date_range(start="1677-09-22", end="2262-04-11", freq="D")
dti2 = date_range(start=dti[0], periods=len(dti), freq="D")
assert dti2.equals(dti)
dti3 = date_range(end=dti[-1], periods=len(dti), freq="D")
assert dti3.equals(dti)
def test_date_range_int64_overflow_non_recoverable(self):
# GH#24255
# case with start later than 1970-01-01, overflow int64 but not uint64
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(start="1970-02-01", periods=106752 * 24, freq="H")
# case with end before 1970-01-01, overflow int64 but not uint64
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1969-11-14", periods=106752 * 24, freq="H")
def test_date_range_int64_overflow_stride_endpoint_different_signs(self):
# cases where stride * periods overflow int64 and stride/endpoint
# have different signs
start = Timestamp("2262-02-23")
end = Timestamp("1969-11-14")
expected = date_range(start=start, end=end, freq="-1H")
assert expected[0] == start
assert expected[-1] == end
dti = date_range(end=end, periods=len(expected), freq="-1H")
tm.assert_index_equal(dti, expected)
start2 = Timestamp("1970-02-01")
end2 = Timestamp("1677-10-22")
expected2 = date_range(start=start2, end=end2, freq="-1H")
assert expected2[0] == start2
assert expected2[-1] == end2
dti2 = date_range(start=start2, periods=len(expected2), freq="-1H")
tm.assert_index_equal(dti2, expected2)
def test_date_range_out_of_bounds(self):
# GH#14187
msg = "Cannot generate range"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("2016-01-01", periods=100000, freq="D")
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1763-10-12", periods=100000, freq="D")
def test_date_range_gen_error(self):
rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min")
assert len(rng) == 4
@pytest.mark.parametrize("freq", ["AS", "YS"])
def test_begin_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],
freq=freq,
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["A", "Y"])
def test_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["BA", "BY"])
def test_business_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq
)
tm.assert_index_equal(rng, exp)
def test_date_range_negative_freq(self):
# GH 11018
rng = date_range("2011-12-31", freq="-2A", periods=3)
exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2A"
rng = date_range("2011-01-31", freq="-2M", periods=3)
exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2M"
def test_date_range_bms_bug(self):
# #1645
rng = date_range("1/1/2000", periods=10, freq="BMS")
ex_first = Timestamp("2000-01-03")
assert rng[0] == ex_first
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq="2D")
offset = timedelta(2)
values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset)
tm.assert_index_equal(rng, values)
rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B")
the_time = time(8, 15)
for val in rng:
assert val.time() == the_time
def test_date_range_fy5252(self):
dr = date_range(
start="2013-01-01",
periods=2,
freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest"),
)
assert dr[0] == Timestamp("2013-01-31")
assert dr[1] == Timestamp("2014-01-30")
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start, end, periods=10, freq="s")
def test_date_range_convenience_periods(self):
# GH 20808
result = date_range("2018-04-24", "2018-04-27", periods=3)
expected = DatetimeIndex(
["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"],
freq=None,
)
tm.assert_index_equal(result, expected)
# Test if spacing remains linear if tz changes to dst in range
result = date_range(
"2018-04-01 01:00:00",
"2018-04-01 04:00:00",
tz="Australia/Sydney",
periods=3,
)
expected = DatetimeIndex(
[
Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"),
Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"),
Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"),
]
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start,end,result_tz",
[
["20180101", "20180103", "US/Eastern"],
[datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"],
[Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
"US/Eastern",
],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
None,
],
],
)
def test_date_range_linspacing_tz(self, start, end, result_tz):
# GH 20983
result = date_range(start, end, periods=3, tz=result_tz)
expected = date_range("20180101", periods=3, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected)
def test_date_range_businesshour(self):
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(["2014-07-04 16:00", "2014-07-07 09:00"], freq="BH")
rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
"2014-07-07 09:00",
"2014-07-07 10:00",
"2014-07-07 11:00",
"2014-07-07 12:00",
"2014-07-07 13:00",
"2014-07-07 14:00",
"2014-07-07 15:00",
"2014-07-07 16:00",
"2014-07-08 09:00",
"2014-07-08 10:00",
"2014-07-08 11:00",
"2014-07-08 12:00",
"2014-07-08 13:00",
"2014-07-08 14:00",
"2014-07-08 15:00",
"2014-07-08 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
def test_range_misspecified(self):
# GH #1095
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(periods=10)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(periods=10, freq="H")
with pytest.raises(ValueError, match=msg):
date_range()
def test_compat_replace(self):
# https://github.com/statsmodels/statsmodels/issues/3349
# replace should take ints/longs for compat
result = date_range(
Timestamp("1960-04-01 00:00:00", freq="QS-JAN"), periods=76, freq="QS-JAN"
)
assert len(result) == 76
def test_catch_infinite_loop(self):
offset = offsets.DateOffset(minute=5)
# blow up, don't loop forever
msg = "Offset <DateOffset: minute=5> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset)
@pytest.mark.parametrize("periods", (1, 2))
def test_wom_len(self, periods):
# https://github.com/pandas-dev/pandas/issues/20517
res = date_range(start="20110101", periods=periods, freq="WOM-1MON")
assert len(res) == periods
def test_construct_over_dst(self):
# GH 20854
pre_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=True
)
pst_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=False
)
expect_data = [
Timestamp("2010-11-07 00:00:00", tz="US/Pacific"),
pre_dst,
pst_dst,
]
expected = DatetimeIndex(expect_data, freq="H")
result = date_range(start="2010-11-7", periods=3, freq="H", tz="US/Pacific")
tm.assert_index_equal(result, expected)
def test_construct_with_different_start_end_string_format(self):
# GH 12064
result = date_range(
"2013-01-01 00:00:00+09:00", "2013/01/01 02:00:00+09:00", freq="H"
)
expected = DatetimeIndex(
[
Timestamp("2013-01-01 00:00:00+09:00"),
Timestamp("2013-01-01 01:00:00+09:00"),
Timestamp("2013-01-01 02:00:00+09:00"),
],
freq="H",
)
tm.assert_index_equal(result, expected)
def test_error_with_zero_monthends(self):
msg = r"Offset <0 \* MonthEnds> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range("1/1/2000", "1/1/2001", freq=MonthEnd(0))
def test_range_bug(self):
# GH #770
offset = DateOffset(months=3)
result = date_range("2011-1-1", "2012-1-31", freq=offset)
start = datetime(2011, 1, 1)
expected = DatetimeIndex([start + i * offset for i in range(5)], freq=offset)
tm.assert_index_equal(result, expected)
def test_range_tz_pytz(self):
# see gh-2906
tz = timezone("US/Eastern")
start = tz.localize(datetime(2011, 1, 1))
end = tz.localize(datetime(2011, 1, 3))
dr = date_range(start=start, periods=3)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
dr = date_range(end=end, periods=3)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
dr = date_range(start=start, end=end)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
@pytest.mark.parametrize(
"start, end",
[
[
Timestamp(datetime(2014, 3, 6), tz="US/Eastern"),
Timestamp(datetime(2014, 3, 12), tz="US/Eastern"),
],
[
Timestamp(datetime(2013, 11, 1), tz="US/Eastern"),
Timestamp(datetime(2013, 11, 6), tz="US/Eastern"),
],
],
)
def test_range_tz_dst_straddle_pytz(self, start, end):
dr = date_range(start, end, freq="D")
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
dr = date_range(start, end, freq="D", tz="US/Eastern")
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
dr = date_range(
start.replace(tzinfo=None),
end.replace(tzinfo=None),
freq="D",
tz="US/Eastern",
)
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
def test_range_tz_dateutil(self):
# see gh-2906
# Use maybe_get_tz to fix filename in tz under dateutil.
from pandas._libs.tslibs.timezones import maybe_get_tz
tz = lambda x: maybe_get_tz("dateutil/" + x)
start = datetime(2011, 1, 1, tzinfo=tz("US/Eastern"))
end = datetime(2011, 1, 3, tzinfo=tz("US/Eastern"))
dr = date_range(start=start, periods=3)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
dr = date_range(end=end, periods=3)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
dr = date_range(start=start, end=end)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
@pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"])
def test_range_closed(self, freq):
begin = datetime(2011, 1, 1)
end = datetime(2014, 1, 1)
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
def test_range_closed_with_tz_aware_start_end(self):
# GH12409, GH12684
begin = Timestamp("2011/1/1", tz="US/Eastern")
end = Timestamp("2014/1/1", tz="US/Eastern")
for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
begin = Timestamp("2011/1/1")
end = Timestamp("2014/1/1")
begintz = Timestamp("2011/1/1", tz="US/Eastern")
endtz = Timestamp("2014/1/1", tz="US/Eastern")
for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq, tz="US/Eastern")
left =
|
date_range(begin, end, closed="left", freq=freq, tz="US/Eastern")
|
pandas.date_range
|
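# A minimal standalone sketch of the pandas.date_range call completed above;
# the closed= keyword matches the older pandas API exercised by this test file
# (newer pandas spells it inclusive=).
import pandas as pd
rng = pd.date_range("2011-01-01", "2014-01-01", freq="3D",
                    tz="US/Eastern", closed="left")
print(rng[0], rng[-1])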
from erddapy import ERDDAP
from joblib import Parallel, delayed
import multiprocessing
import pandas as pd
import xarray as xr
import logging
import os
import re
import numpy as np
import pathlib
import ocean_data_gateway as odg
# Capture warnings in log
logging.captureWarnings(True)
# formatting for logfile
formatter = logging.Formatter('%(asctime)s %(message)s','%a %b %d %H:%M:%S %Z %Y')
log_name = 'reader_erddap'
loglevel=logging.WARNING
path_logs_reader = odg.path_logs.joinpath(f'{log_name}.log')
# set up logger file
handler = logging.FileHandler(path_logs_reader)
handler.setFormatter(formatter)
logger_erd = logging.getLogger(log_name)
logger_erd.setLevel(loglevel)
logger_erd.addHandler(handler)
# this can be queried with
# search.ErddapReader.reader
reader = 'erddap'
class ErddapReader:
def __init__(self, known_server='ioos', protocol=None, server=None, parallel=True):
# # run checks for KW
# self.kw = kw
self.parallel = parallel
# either select a known server or input protocol and server string
if known_server == 'ioos':
protocol = 'tabledap'
server = 'http://erddap.sensors.ioos.us/erddap'
elif known_server == 'coastwatch':
protocol = 'griddap'
server = 'http://coastwatch.pfeg.noaa.gov/erddap'
elif known_server is not None:
statement = 'either select a known server or input protocol and server string'
assert (protocol is not None) & (server is not None), statement
else:
known_server = server.strip('/erddap').strip('http://').replace('.','_')
statement = 'either select a known server or input protocol and server string'
assert (protocol is not None) & (server is not None), statement
self.known_server = known_server
self.e = ERDDAP(server=server)
self.e.protocol = protocol
self.e.server = server
# columns for metadata
self.columns = ['geospatial_lat_min', 'geospatial_lat_max',
'geospatial_lon_min', 'geospatial_lon_max',
'time_coverage_start', 'time_coverage_end',
'defaultDataQuery', 'subsetVariables', # first works for timeseries sensors, 2nd for gliders
'keywords', # for hf radar
'id', 'infoUrl', 'institution', 'featureType', 'source', 'sourceUrl']
# name
self.name = f'erddap_{known_server}'
self.reader = 'ErddapReader'
# # self.data_type = data_type
# self.standard_names = standard_names
# # DOESN'T CURRENTLY LIMIT WHICH VARIABLES WILL BE FOUND ON EACH SERVER
@property
def dataset_ids(self):
'''Find dataset_ids for server.'''
if not hasattr(self, '_dataset_ids'):
# This should be a region search
if self.approach == 'region':
# find all the dataset ids which we will use to get the data
# This limits the search to our keyword arguments in kw which should
# have min/max lon/lat/time values
dataset_ids = []
if self.variables is not None:
for variable in self.variables:
# find and save all dataset_ids associated with variable
search_url = self.e.get_search_url(response="csv", **self.kw,
variableName=variable,
items_per_page=10000)
try:
search = pd.read_csv(search_url)
dataset_ids.extend(search["Dataset ID"])
except Exception as e:
logger_erd.exception(e)
logger_erd.warning(f"variable {variable} was not found in the search")
logger_erd.warning(f'search_url: {search_url}')
else:
# find and save all dataset_ids associated with variable
search_url = self.e.get_search_url(response="csv", **self.kw,
items_per_page=10000)
try:
search = pd.read_csv(search_url)
dataset_ids.extend(search["Dataset ID"])
except Exception as e:
logger_erd.exception(e)
logger_erd.warning(f"nothing found in the search")
logger_erd.warning(f'search_url: {search_url}')
# only need a dataset id once since we will check them each for all standard_names
self._dataset_ids = list(set(dataset_ids))
# This should be a search for the station names
elif self.approach == 'stations':
# elif self._stations is not None:
# search by station name for each of stations
dataset_ids = []
for station in self._stations:
                    # if the station name has more than one word, AND is placed between
                    # the words so the search matches all terms together
url = self.e.get_search_url(response="csv", items_per_page=5, search_for=station)
try:
df = pd.read_csv(url)
except Exception as e:
logger_erd.exception(e)
logger_erd.warning(f'search url {url} did not work for station {station}.')
continue
# first try for exact station match
try:
dataset_id = [dataset_id for dataset_id in df['Dataset ID'] if station.lower() in dataset_id.lower().split('_')][0]
                    # if that doesn't work, try a more general match and just take the first returned option
except Exception as e:
logger_erd.exception(e)
logger_erd.warning('When searching for a dataset id to match station name %s, the first attempt to match the id did not work.' % (station))
dataset_id = df.iloc[0]['Dataset ID']
# if 'tabs' in org_id: # don't split
# axiom_id = [axiom_id for axiom_id in df['Dataset ID'] if org_id.lower() == axiom_id.lower()]
# else:
# axiom_id = [axiom_id for axiom_id in df['Dataset ID'] if org_id.lower() in axiom_id.lower().split('_')][0]
# except:
# dataset_id = None
dataset_ids.append(dataset_id)
self._dataset_ids = list(set(dataset_ids))
else:
logger_erd.warning('Neither stations nor region approach were used in function dataset_ids.')
return self._dataset_ids
def meta_by_dataset(self, dataset_id):
info_url = self.e.get_info_url(response="csv", dataset_id=dataset_id)
try:
info = pd.read_csv(info_url)
except Exception as e:
logger_erd.exception(e)
logger_erd.warning(f'Could not read info from {info_url}')
return {dataset_id: []}
items = []
for col in self.columns:
try:
item = info[info['Attribute Name'] == col]['Value'].values[0]
dtype = info[info['Attribute Name'] == col]['Data Type'].values[0]
except:
if col == 'featureType':
                    # this column is not present in HF Radar metadata, but we want it
                    # to map to data_type, so use 'grid' in that case.
item = 'grid'
else:
item = 'NA'
if dtype == 'String':
pass
elif dtype == 'double':
item = float(item)
elif dtype == 'int':
item = int(item)
items.append(item)
# if self.standard_names is not None:
# # In case the variable is named differently from the standard names,
# # we back out the variable names here for each dataset. This also only
# # returns those names for which there is data in the dataset.
# varnames = self.e.get_var_by_attr(
# dataset_id=dataset_id,
# standard_name=lambda v: v in self.standard_names
# )
# else:
# varnames = None
## include download link ##
self.e.dataset_id = dataset_id
if self.e.protocol == 'tabledap':
if self.variables is not None:
self.e.variables = ["time","longitude", "latitude", "station"] + self.variables
            # set the same time constraints as before
self.e.constraints = {'time<=': self.kw['max_time'], 'time>=': self.kw['min_time'],}
download_url = self.e.get_download_url(response='csvp')
elif self.e.protocol == 'griddap':
# the search terms that can be input for tabledap do not work for griddap
# in erddapy currently. Instead, put together an opendap link and then
# narrow the dataset with xarray.
# get opendap link
download_url = self.e.get_download_url(response='opendap')
# add erddap server name
return {dataset_id: [self.e.server, download_url] + items + [self.variables]}
@property
def meta(self):
if not hasattr(self, '_meta'):
if self.parallel:
# get metadata for datasets
# run in parallel to save time
num_cores = multiprocessing.cpu_count()
downloads = Parallel(n_jobs=num_cores)(
delayed(self.meta_by_dataset)(dataset_id) for dataset_id in self.dataset_ids
)
else:
downloads = []
for dataset_id in self.dataset_ids:
downloads.append(self.meta_by_dataset(dataset_id))
# make dict from individual dicts
from collections import ChainMap
meta = dict(ChainMap(*downloads))
# Make dataframe of metadata
# variable names are the column names for the dataframe
self._meta = pd.DataFrame.from_dict(meta, orient='index',
columns=['database','download_url'] \
+ self.columns + ['variable names'])
return self._meta
def data_by_dataset(self, dataset_id):
download_url = self.meta.loc[dataset_id, 'download_url']
# data variables in ds that are not the variables we searched for
# varnames = self.meta.loc[dataset_id, 'variable names']
if self.e.protocol == 'tabledap':
try:
                # the download_url was looked up from the metadata (fetched above
                # if not already present); use it to read the data
dd = pd.read_csv(download_url, index_col=0, parse_dates=True)
# Drop cols and rows that are only NaNs.
dd = dd.dropna(axis='index', how='all').dropna(axis='columns', how='all')
if self.variables is not None:
# check to see if there is any actual data
# this is a bit convoluted because the column names are the variable names
# plus units so can't match 1 to 1.
datacols = 0 # number of columns that represent data instead of metadata
for col in dd.columns:
datacols += [varname in col for varname in self.variables].count(True)
# if no datacols, we can skip this one.
if datacols == 0:
dd = None
except Exception as e:
logger_erd.exception(e)
logger_erd.warning('no data to be read in for %s' % dataset_id)
dd = None
elif self.e.protocol == 'griddap':
try:
dd = xr.open_dataset(download_url, chunks='auto').sel(time=slice(self.kw['min_time'],self.kw['max_time']))
if ('min_lat' in self.kw) and ('max_lat' in self.kw):
dd = dd.sel(latitude=slice(self.kw['min_lat'],self.kw['max_lat']))
if ('min_lon' in self.kw) and ('max_lon' in self.kw):
dd = dd.sel(longitude=slice(self.kw['min_lon'],self.kw['max_lon']))
                # use variable names to drop other variables (should I do this?)
if self.variables is not None:
l = set(dd.data_vars) - set(self.variables)
dd = dd.drop_vars(l)
except Exception as e:
logger_erd.exception(e)
logger_erd.warning('no data to be read in for %s' % dataset_id)
dd = None
return (dataset_id, dd)
# @property
def data(self):
# if not hasattr(self, '_data'):
if self.parallel:
num_cores = multiprocessing.cpu_count()
downloads = Parallel(n_jobs=num_cores)(
delayed(self.data_by_dataset)(dataset_id) for dataset_id in self.dataset_ids
)
else:
downloads = []
for dataset_id in self.dataset_ids:
downloads.append(self.data_by_dataset(dataset_id))
# if downloads is not None:
dds = {dataset_id: dd for (dataset_id, dd) in downloads}
# else:
# dds = None
return dds
# self._data = dds
# return self._data
    def count(self, url):
try:
return len(pd.read_csv(url))
except:
return np.nan
def all_variables(self):
'''Return a list of all possible variables.'''
path_name_counts = odg.path_variables.joinpath(f'erddap_variable_list_{self.known_server}.csv')
if path_name_counts.is_file():
return
|
pd.read_csv(path_name_counts, index_col='variable')
|
pandas.read_csv
|
import pandas as pd
class Converter(object):
"""Used to covert a list of DB objects to a DataFrame and vice versa"""
def __init__(self, df=None, obj_list=None):
self.df = df
self.obj_list = obj_list
self.__is_valid_obj_list = None
self.__is_valid_df = None
def validate_obj_list(self):
return (self.obj_list is not None) and (type(self.obj_list) == list) and (self.obj_list != [])
def __set_is_valid_obj_list(self):
self.__is_valid_obj_list = self.validate_obj_list()
def __get_df(self):
# Decide columns
columns = list(self.obj_list[0].__dict__)
columns.remove('_state')
# Initialize empty df
df =
|
pd.DataFrame(columns=columns)
|
pandas.DataFrame
|
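# A minimal standalone sketch of the pandas.DataFrame completion above:
# creating an empty frame with a predefined (hypothetical) column list.
import pandas as pd
columns = ["id", "name", "score"]
df = pd.DataFrame(columns=columns)
print(df.empty, list(df.columns))  # True ['id', 'name', 'score']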
import os
import pandas as pd
import warnings
_ROOT = os.path.abspath(os.path.dirname(__file__))
def get_data(path):
return os.path.join(_ROOT, 'data', path)
def get_plot(path):
return os.path.join(_ROOT, 'plots', path)
def get_thesis_figure(chapter, file):
d = "/Users/Jason/Dropbox/DropboxDocuments/University/Oxford/Reports/Thesis/figures"
path = os.path.join(d, chapter, file)
return path
class ThesisHDF5Writer:
def __init__(self, path):
self.path = path
output_dir = os.path.dirname(path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print("Created directory: {}".format(output_dir))
self.store = pd.HDFStore(
path, mode='w', complevel=9, complib='blosc:blosclz'
)
print("HDF5 Created: {}".format(self.path))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.store.close()
def write(self, **kwargs):
for key, value in kwargs.items():
self.store[key] = value
def write_mapping(self, mapping):
self.store['mapping'] = mapping
mapping_meta = mapping.metadata
self.store.get_storer('mapping').attrs.metadata = mapping_meta
def write_metadata(self, **metadata):
self.store['metadata'] =
|
pd.DataFrame()
|
pandas.DataFrame
|
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
tm.assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
tm.assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box):
# GH#18824, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box_df_fail):
# GH#18849
box = box_df_fail
box2 = Series if box in [pd.Index, tm.to_array] else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox,
box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# TODO: Moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])
def test_timedelta64_conversions(self, m, unit):
startdate = Series(pd.date_range('2013-01-01', '2013-01-03'))
enddate = Series(pd.date_range('2013-03-01', '2013-03-03'))
ser = enddate - startdate
ser[2] = np.nan
# op
expected = Series([x / np.timedelta64(m, unit) for x in ser])
result = ser / np.timedelta64(m, unit)
tm.assert_series_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x
for x in ser])
result = np.timedelta64(m, unit) / ser
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
expected = tm.box_expected(expected, xbox)
result = idx * Series(rng5f + 1.0)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_with_array):
box = box_with_array
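        # the expected container may be upcast when boxes are mixed (e.g. Index * Series returns a Series)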
xbox = get_upcast_box(box, other)
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
expected._data.freq = None
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
def test_td64arr_div_nat_invalid(self, box_with_array):
# don't allow division by NaT (maybe could in the future)
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError,
match="'?true_divide'? cannot use operands"):
rng / pd.NaT
with pytest.raises(TypeError, match='Cannot divide NaTType by'):
pd.NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
rng = timedelta_range('1 days', '10 days',)
rng = tm.box_expected(rng, box_with_array)
other = np.timedelta64('NaT')
expected = np.array([np.nan] * 10)
expected = tm.box_expected(expected, box_with_array)
result = rng / other
tm.assert_equal(result, expected)
result = other / rng
tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx / 1
tm.assert_equal(result, idx)
with pytest.raises(TypeError, match='Cannot divide'):
# GH#23829
1 / idx
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
# GH#20088, GH#22163 ensure DataFrame returns correct dtype
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours,
box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_td64_ndarray(self, box_with_array):
# GH#22631
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
expected = pd.Float64Index([12, np.nan, 24])
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
other = np.array([2, 4, 2], dtype='m8[h]')
result = rng / other
tm.assert_equal(result, expected)
result = rng / tm.box_expected(other, box_with_array)
tm.assert_equal(result, expected)
result = rng / other.astype(object)
tm.assert_equal(result, expected)
result = rng / list(other)
tm.assert_equal(result, expected)
# reversed op
expected = 1 / expected
result = other / rng
tm.assert_equal(result, expected)
result = tm.box_expected(other, box_with_array) / rng
tm.assert_equal(result, expected)
result = other.astype(object) / rng
tm.assert_equal(result, expected)
result = list(other) / rng
tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
with pytest.raises(ValueError):
rng / other
with pytest.raises(ValueError):
other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = td1 // scalar_td
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = scalar_td // td1
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array,
scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx // 1
tm.assert_equal(result, idx)
pattern = ('floor_divide cannot use operands|'
'Cannot divide int by Timedelta*')
with pytest.raises(TypeError, match=pattern):
1 // idx
def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi // two_hours
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
# GH#19125
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# mod, divmod
# TODO: operations with timedelta-like arrays, numeric arrays,
# reversed ops
def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range('1 Day', '9 days')
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(['1 Day', '2 Days', '0 Days'] * 3)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % three_days
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, three_days)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // three_days)
def test_td64arr_mod_int(self, box_with_array):
tdi = timedelta_range('1 ns', '10 ns', periods=10)
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(['1 ns', '0 ns'] * 5)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % 2
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
2 % tdarr
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, 2)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // 2)
def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range('1 Day', '9 days')
tdarr = tm.box_expected(tdi, box_with_array)
expected = ['0 Days', '1 Day', '0 Days'] + ['3 Days'] * 6
expected = TimedeltaIndex(expected)
expected = tm.box_expected(expected, box_with_array)
result = three_days % tdarr
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(three_days, tdarr)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], three_days // tdarr)
# ------------------------------------------------------------------
# Operations with invalid others
def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with pytest.raises(TypeError, match=pattern):
td1 * scalar_td
with pytest.raises(TypeError, match=pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx[:3]
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
expected = tm.box_expected(expected, box_with_array)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box_with_array, two):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser / two
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match='Cannot divide'):
two / tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_rmul_numeric_array(self, box_with_array, vector, dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser * vector
tm.assert_equal(result, expected)
result = vector * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_div_numeric_array(self, box_with_array, vector, dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
vector = vector.astype(dtype)
expected = Series(['2.95D', '1D 23H 12m', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser / vector
tm.assert_equal(result, expected)
pattern = ('true_divide cannot use operands|'
'cannot perform __div__|'
'cannot perform __truediv__|'
'unsupported operand|'
'Cannot divide')
with pytest.raises(TypeError, match=pattern):
vector / tdser
if not isinstance(vector, pd.Index):
# Index.__rdiv__ won't try to operate elementwise, just raises
result = tdser / vector.astype(object)
if box_with_array is pd.DataFrame:
expected = [tdser.iloc[0, n] / vector[n]
for n in range(len(vector))]
else:
expected = [tdser[n] / vector[n] for n in range(len(tdser))]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match=pattern):
vector.astype(object) / tdser
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_mul_int_series(self, box_df_fail, names):
# GH#19042 test for correct name attachment
box = box_df_fail # broadcasts along wrong axis, but doesn't raise
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
# TODO: Should we be parametrizing over types for `ser` too?
ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
expected = Series(['0days', '1day', '4days', '9days', '16days'],
dtype='timedelta64[ns]',
name=names[2])
tdi = tm.box_expected(tdi, box)
box = Series if (box is pd.Index and type(ser) is Series) else box
expected =
|
tm.box_expected(expected, box)
|
pandas.util.testing.box_expected
|
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
from puget.recordlinkage import link_records
def test_linkage():
link_list = [{'block_variable': 'lname',
'match_variables': {"fname": "string",
"ssn_as_str": "string",
"dob": "date"}},
{'block_variable': 'fname',
'match_variables': {"lname": "string",
"ssn_as_str": "string",
"dob": "date"}},
{'block_variable': 'ssn_as_str',
'match_variables': {"fname": "string",
"lname": "string",
"dob": "date"}}]
# Simplest case - both items are identical in all respects:
prelink_ids = pd.DataFrame(data = {'pid0':["PHA0_1", "HMIS0_1"],
'ssn_as_str':['123456789', '123456789'],
'lname':["QWERT", "QWERT"],
'fname':["QWERT", "QWERT"],
'dob':["1990-02-01", "1990-02-01"]})
prelink_ids["dob"] = pd.to_datetime(prelink_ids["dob"])
test_df = prelink_ids.copy()
linked = link_records(prelink_ids, link_list)
test_df["linkage_PID"] = [1, 1]
pdt.assert_frame_equal(test_df, linked)
# Items differ completely in their last name
prelink_ids = pd.DataFrame(data={'pid0': ["PHA0_1", "HMIS0_1"],
'ssn_as_str': ['123456789', '123456789'],
'lname': ["ASDF", "QWERT"],
'fname': ["QWERT", "QWERT"],
'dob': ["1990-02-01", "1990-02-01"]})
prelink_ids["dob"] = pd.to_datetime(prelink_ids["dob"])
test_df = prelink_ids.copy()
linked = link_records(prelink_ids, link_list)
test_df["linkage_PID"] = [1, 1]
pdt.assert_frame_equal(test_df, linked)
# Items differ completely in their first name
prelink_ids = pd.DataFrame(data={'pid0': ["PHA0_1", "HMIS0_1"],
'ssn_as_str': ['123456789', '123456789'],
'lname': ["QWERT", "QWERT"],
'fname': ["ASDF", "QWERT"],
'dob': ["1990-02-01", "1990-02-01"]})
prelink_ids["dob"] = pd.to_datetime(prelink_ids["dob"])
test_df = prelink_ids.copy()
linked = link_records(prelink_ids, link_list)
test_df["linkage_PID"] = [1, 1]
pdt.assert_frame_equal(test_df, linked)
# Items differ completely in their SSN
prelink_ids = pd.DataFrame(data={'pid0': ["PHA0_1", "HMIS0_1"],
'ssn_as_str': ['123456789', '246801357'],
'lname': ["QWERT", "QWERT"],
'fname': ["QWERT", "QWERT"],
'dob': ["1990-02-01", "1990-02-01"]})
prelink_ids["dob"] = pd.to_datetime(prelink_ids["dob"])
test_df = prelink_ids.copy()
linked = link_records(prelink_ids, link_list)
test_df["linkage_PID"] = [1, 1]
pdt.assert_frame_equal(test_df, linked)
# Items differ by permutation in their last name
prelink_ids = pd.DataFrame(data={'pid0': ["PHA0_1", "HMIS0_1"],
'ssn_as_str': ['123456789', '123456789'],
'lname': ["QERWT", "QWERT"],
'fname': ["QWERT", "QWERT"],
'dob': ["1990-02-01", "1990-02-01"]})
prelink_ids["dob"] = pd.to_datetime(prelink_ids["dob"])
test_df = prelink_ids.copy()
linked = link_records(prelink_ids, link_list)
test_df["linkage_PID"] = [1, 1]
pdt.assert_frame_equal(test_df, linked)
# Items differ by permutation in their first name
prelink_ids = pd.DataFrame(data={'pid0': ["PHA0_1", "HMIS0_1"],
'ssn_as_str': ['123456789', '123456789'],
'lname': ["QWERT", "QWERT"],
'fname': ["QWERT", "QEWRT"],
'dob': ["1990-02-01", "1990-02-01"]})
prelink_ids["dob"] = pd.to_datetime(prelink_ids["dob"])
test_df = prelink_ids.copy()
linked = link_records(prelink_ids, link_list)
test_df["linkage_PID"] = [1, 1]
pdt.assert_frame_equal(test_df, linked)
# Items differ by permutation in their dob day/month
prelink_ids = pd.DataFrame(data={'pid0': ["PHA0_1", "HMIS0_1"],
'ssn_as_str': ['123456789', '123456789'],
'lname': ["QWERT", "QWERT"],
'fname': ["QWERT", "QEWRT"],
'dob': ["1990-01-02", "1990-02-01"]})
prelink_ids["dob"] = pd.to_datetime(prelink_ids["dob"])
test_df = prelink_ids.copy()
linked = link_records(prelink_ids, link_list)
test_df["linkage_PID"] = [1, 1]
|
pdt.assert_frame_equal(test_df, linked)
|
pandas.util.testing.assert_frame_equal
|
import pandas as pd
def getQueryString(string):
return string.split("_")[0]
data = pd.read_csv("../../results/AnalyticsBenchmark.csv", index_col=False, names=["data", "query", "language", "lazy",
"ts_1_min", "ts_1_max", "ts_1_avg",
"ts_2_min", "ts_2_max", "ts_2_avg"])
js = data[data["language"] == "js"]
js = js[["ts_2_avg","query"]].reset_index()
js = js.rename(columns={"ts_2_avg": "js_ts"})
java = data[data["language"] == "java"]
java = java[["ts_2_avg"]].reset_index()
java = java.rename(columns={"ts_2_avg": "java_ts"})
python = data[data["language"] == "python"]
python = python[["ts_2_avg"]].reset_index()
python = python.rename(columns={"ts_2_avg": "python_ts"})
typer = pd.read_csv("../hand.out", names=["query", "hand_ts"])
result =
|
pd.concat([js, java, python, typer], axis=1, sort=False)
|
pandas.concat
|
# -*- coding: utf-8 -*-
'''
* finance4py
* Based on Python Data Analysis Library.
* 2016/03/22 by <NAME> <<EMAIL>>
* Copyright (c) 2016, finance4py team
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to
endorse or promote products derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
try:
# Python 3
from urllib.request import urlopen
from urllib.parse import urlencode
from urllib.request import Request
except ImportError:
# Python 2
from urllib import urlencode
from urllib2 import urlopen
from urllib2 import Request
import json
import datetime
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as dates
from pandas import Series
from pandas import DatetimeIndex
from pandas_datareader import data as pda
from pandas.io.json import json_normalize
from matplotlib.dates import date2num
from matplotlib.dates import num2date
from matplotlib.patches import Rectangle
from matplotlib.lines import Line2D
class Stock(object):
def __init__(self, stock_id, start_date = None, end_date = None):
self.df = None
self.sid = stock_id
self.start = start_date
self.end = end_date
try:
self.df = pda.get_data_yahoo(stock_id + u'.TW', start_date, end_date)
except:
pass
if self.df is None:
try:
self.df = pda.get_data_yahoo(stock_id + u'.TWO', start_date, end_date)
except:
pass
if self.df is None:
try:
self.df = pda.get_data_yahoo(stock_id, start_date, end_date)
except:
print("KEY_ERROR: Wrong stock id.")
raise
self.df = self.df[self.df.Volume != 0]
def __getitem__(self, key):
return self.df[key]
def __setitem__(self, key, value):
self.df[key] = value
def __repr__(self):
return repr(self.df)
def MA(self, window = 5):
return self.df['Close'].rolling(window, center=False).mean()
def MA_Volume(self, window = 5):
return self.df['Volume'].rolling(window, center=False).mean()
def KD(self, window = 9):
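        # stochastic oscillator: RSV = (Close - lowest Low) / (highest High - lowest Low) * 100,
        # then K and D are smoothed with weights 1/3 (new value) and 2/3 (previous value)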
df_min = self.df['Low'].rolling(window, center=False).min()
df_max = self.df['High'].rolling(window, center=False).max()
df_RSV = (self.df['Close'] - df_min) / (df_max - df_min) * 100
K = []
curr_K = 50
for rsv in df_RSV:
if pd.isnull(rsv):
K.append(rsv)
continue
curr_K = rsv * (1.0/3) + curr_K * (2.0/3)
K.append(curr_K)
df_K = Series(K, df_RSV.index)
D = []
curr_D = 50
for k in df_K:
if pd.isnull(k):
D.append(k)
continue
curr_D = k * (1.0/3) + curr_D * (2.0/3)
D.append(curr_D)
df_D =
|
Series(D, df_RSV.index)
|
pandas.Series
|
# Best Buy AMD Ryzen Processor Webscraper
# Python - Beautiful Soup
import requests
from bs4 import BeautifulSoup
from requests.models import Response
import pandas as pd
def extract():
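    # request the Best Buy search results for "ryzen processor" and return the parsed page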
url = 'https://www.bestbuy.com/site/searchpage.jsp?cp=1&id=pcat17071&qp=category_facet%3Dname~abcat0507010&st=ryzen+processor'
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15'}
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.content, 'html.parser')
return soup
def transform(soup):
products = soup.find_all('li', class_='sku-item')
#print(len(products))
for item in products:
title = item.find('h4', class_ = 'sku-header').text
#print(title)
        price = item.find('div', class_='priceView-hero-price')
        # the visually-hidden 'sr-only' span holds the readable price text; its last word is the dollar amount
        dollars = price.find('span', class_='sr-only').text
        #print(dollars)
words = dollars.split(' ')
currency = (words[-1])
#print(currency)
try:
status = item.find('button', class_='c-button').text
#print(status)
except Exception:
status = 'Unavailable Nearby'
#print(status)
link = item.find('a', href=True)
product_url = 'http://www.bestbuy.com/' + link['href']
#print(product_url)
cpu = {
'title': title,
'price': currency,
'status': status,
'link': product_url,
}
CPUs.append(cpu)
return
CPUs = []
c = extract()
transform(c)
df =
|
pd.DataFrame(CPUs)
|
pandas.DataFrame
|
import matplotlib.pyplot as plt
import csv
from binance.client import Client
import config
from binance.enums import *
from datetime import date, datetime
import time
import pandas as pd
import os
import numpy as np
client = Client(config.API_KEY, config.API_SECRET)
# this will only be run to get the data if the server crashes
def get_data_onStart():
# from the last data point selected we update the data
# then we start the stream
file = '../ETH_hourly_data.csv'
    # if the data file exists, delete it and re-fetch the data
if(os.path.exists(file) and os.path.isfile(file)):
os.remove(file)
print("file deleted")
else:
print("file not found")
# today = date.today().strftime('%Y-%m-%d')
# TODO: this only needs to go back like 50 days max for now
# candlesticks = client.get_historical_klines(
# "ETHUSDT", Client.KLINE_INTERVAL_1HOUR, 'April 1 2021')
candlesticks = client.get_historical_klines(
"ETHUSDT", Client.KLINE_INTERVAL_1DAY, 'January 1 2020')
processed_candlesticks = []
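    # keep only the open time (converted from ms to seconds) and the closing price of each candle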
for data in candlesticks:
candlestick = {
"time": data[0]/1000,
# "open": data[1],
# "high": data[2],
# "low": data[3],
"close": data[4],
}
processed_candlesticks.append(candlestick)
df =
|
pd.DataFrame(processed_candlesticks)
|
pandas.DataFrame
|
#%%
import itertools
import warnings
from collections import defaultdict
from multiprocessing import Pool
import joblib
import numpy as np
import numpyro
import pandas as pd
from logger_tt import logger
from metaDMG.errors import BadDataError
from metaDMG.fit import bayesian, fit_utils, frequentist
numpyro.enable_x64()
#%%
timeout_first_fit = 5 * 60 # 5 minutes, very first fit
timeout_subsequent_fits = 60 # 1 minute
#%%
def get_groupby(df_mismatches):
return df_mismatches.groupby("tax_id", sort=False, observed=True)
def group_to_numpyro_data(config, group):
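    # build the arrays the fits expect: positions x, mismatch counts k (the "CT"/"GA" columns)
    # and reference counts N (the "C"/"G" columns); in the two-strand case the reverse positions are negated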
x = np.array(group.iloc[: config["max_position"]]["position"], dtype=int)
if config["forward_only"]:
forward = "CT"
forward_ref = forward[0]
k = np.array(group.iloc[: config["max_position"]][forward], dtype=int)
N = np.array(group.iloc[: config["max_position"]][forward_ref], dtype=int)
data = {"x": x, "k": k, "N": N}
return data
else:
forward = "CT"
forward_ref = forward[0]
reverse = "GA"
reverse_ref = reverse[0]
k_forward = np.array(group.iloc[: config["max_position"]][forward], dtype=int)
N_forward = np.array(
group.iloc[: config["max_position"]][forward_ref], dtype=int
)
k_reverse = np.array(group.iloc[-config["max_position"] :][reverse], dtype=int)
N_reverse = np.array(
group.iloc[-config["max_position"] :][reverse_ref], dtype=int
)
data = {
"x": np.concatenate([x, -x]),
"k": np.concatenate([k_forward, k_reverse]),
"N": np.concatenate([N_forward, N_reverse]),
}
return data
#%%
def add_count_information(fit_result, config, group, data):
if config["forward_only"]:
fit_result["N_x=1_forward"] = data["N"][0]
# fit_result["N_x=1_reverse"] = np.nan
fit_result["N_sum_total"] = data["N"].sum()
fit_result["N_sum_forward"] = fit_result["N_sum_total"]
# fit_result["N_sum_reverse"] = np.nan
fit_result["N_min"] = data["N"].min()
fit_result["k_sum_total"] = data["k"].sum()
fit_result["k_sum_forward"] = fit_result["k_sum_total"]
# fit_result["k_sum_reverse"] = np.nan
else:
fit_result["N_x=1_forward"] = data["N"][0]
fit_result["N_x=1_reverse"] = data["N"][config["max_position"]]
fit_result["N_sum_total"] = data["N"].sum()
fit_result["N_sum_forward"] = data["N"][: config["max_position"]].sum()
fit_result["N_sum_reverse"] = data["N"][config["max_position"] :].sum()
fit_result["N_min"] = data["N"].min()
fit_result["k_sum_total"] = data["k"].sum()
fit_result["k_sum_forward"] = data["k"][: config["max_position"]].sum()
fit_result["k_sum_reverse"] = data["k"][config["max_position"] :].sum()
#%%
# def timer_fit_MAP(config, data):
# # data = group_to_numpyro_data(config, group)
# # %timeit timer_fit_MAP(config, data)
# fit_result = {}
# with warnings.catch_warnings():
# warnings.filterwarnings("ignore")
# fit_all, fit_forward, fit_reverse = frequentist.make_fits(fit_result, data)
# def timer_fit_bayesian(config, data, mcmc_PMD, mcmc_null):
# # data = group_to_numpyro_data(config, group)
# # %timeit timer_fit_bayesian(config, data, mcmc_PMD, mcmc_null)
# fit_result = {}
# bayesian.make_fits(fit_result, data, mcmc_PMD, mcmc_null)
#%%
def fit_single_group(
config,
group,
mcmc_PMD=None,
mcmc_null=None,
):
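    # run the frequentist fits for one tax_id group and, when MCMC objects are supplied,
    # the Bayesian fits as well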
fit_result = {}
data = group_to_numpyro_data(config, group)
sample = config["sample"]
tax_id = group["tax_id"].iloc[0]
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
frequentist.make_fits(
config,
fit_result,
data,
sample,
tax_id,
) # fit_all, fit_forward, fit_reverse
add_count_information(fit_result, config, group, data)
if mcmc_PMD is not None and mcmc_null is not None:
bayesian.make_fits(fit_result, data, mcmc_PMD, mcmc_null)
return fit_result
from tqdm import tqdm
def compute_fits_seriel(config, df_mismatches, with_progressbar=False):
# Do not initialise MCMC if config["bayesian"] is False
mcmc_PMD, mcmc_null = bayesian.init_mcmcs(config)
groupby = get_groupby(df_mismatches)
if with_progressbar:
groupby = tqdm(groupby, total=len(groupby))
d_fit_results = {}
for tax_id, group in groupby:
# break
if with_progressbar:
groupby.set_description(f"Fitting Tax ID {tax_id}")
d_fit_results[tax_id] = fit_single_group(
config,
group,
mcmc_PMD,
mcmc_null,
)
return d_fit_results
def compute_fits_parallel_worker(df_mismatches_config):
df_mismatches, config, with_progressbar = df_mismatches_config
return compute_fits_seriel(
config=config,
df_mismatches=df_mismatches,
with_progressbar=with_progressbar,
)
def grouper(iterable, n):
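    # yield successive chunks of at most n items from the iterable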
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
def use_progressbar(config, position):
if config["bayesian"]:
return False
if config["parallel_samples"] == 1 or len(config["samples"]) == 1:
if position == 0:
return True
return False
def get_list_of_groups(config, df_mismatches, N_in_each_group=100):
cores_per_sample = config["cores_per_sample"]
tax_ids = df_mismatches["tax_id"].unique()
if not config["bayesian"]:
N_splits = cores_per_sample
else:
# make splits, each with N_in_each_group groups in them
N_splits = len(tax_ids) // N_in_each_group + 1
tax_id_list = np.array_split(tax_ids, N_splits)
dfs = []
for position, tax_ids in enumerate(tax_id_list):
dfs.append(
(
df_mismatches.query(f"tax_id in {list(tax_ids)}"),
config,
use_progressbar(config, position),
)
)
return dfs
def compute_fits_parallel(config, df_mismatches, N_in_each_group=100):
cores_per_sample = config["cores_per_sample"]
dfs = get_list_of_groups(
config,
df_mismatches,
N_in_each_group=N_in_each_group,
)
d_fit_results = {}
with Pool(processes=cores_per_sample) as pool:
for d_fit_results_ in pool.imap_unordered(
compute_fits_parallel_worker,
dfs,
):
d_fit_results.update(d_fit_results_)
return d_fit_results
def compute_fits_parallel_Bayesian(config, df_mismatches, N_in_each_group=100):
cores_per_sample = config["cores_per_sample"]
dfs = get_list_of_groups(
config=config,
df_mismatches=df_mismatches,
N_in_each_group=N_in_each_group,
)
do_progressbar = config["parallel_samples"] == 1 or len(config["samples"]) == 1
it = grouper(dfs, cores_per_sample)
if do_progressbar:
it = tqdm(
grouper(dfs, cores_per_sample),
total=len(dfs) // cores_per_sample,
unit="chunks",
)
d_fit_results = {}
for dfs_ in it:
# break
if do_progressbar:
size = dfs_[0][0]["tax_id"].nunique()
it.set_description(f"Fitting in chunks of size {size}")
with Pool(processes=cores_per_sample) as pool:
for d_fit_results_ in pool.imap_unordered(
compute_fits_parallel_worker,
dfs_,
):
d_fit_results.update(d_fit_results_)
return d_fit_results
#%%
def match_tax_id_order_in_df_fit_results(df_fit_results, df_mismatches):
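    # reorder the fit results to follow the tax_id order of the mismatch table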
tax_ids_all = pd.unique(df_mismatches["tax_id"])
ordered = [tax_id for tax_id in tax_ids_all if tax_id in df_fit_results.index]
return df_fit_results.loc[ordered]
def make_df_fit_results_from_fit_results(config, d_fit_results, df_mismatches):
df_fit_results =
|
pd.DataFrame.from_dict(d_fit_results, orient="index")
|
pandas.DataFrame.from_dict
|
from io import StringIO
import pandas as pd
import numpy as np
import pytest
import bioframe
import bioframe.core.checks as checks
# import pyranges as pr
# def bioframe_to_pyranges(df):
# pydf = df.copy()
# pydf.rename(
# {"chrom": "Chromosome", "start": "Start", "end": "End"},
# axis="columns",
# inplace=True,
# )
# return pr.PyRanges(pydf)
# def pyranges_to_bioframe(pydf):
# df = pydf.df
# df.rename(
# {"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
# axis="columns",
# inplace=True,
# )
# return df
# def pyranges_overlap_to_bioframe(pydf):
# ## convert the df output by pyranges join into a bioframe-compatible format
# df = pydf.df.copy()
# df.rename(
# {
# "Chromosome": "chrom_1",
# "Start": "start_1",
# "End": "end_1",
# "Start_b": "start_2",
# "End_b": "end_2",
# },
# axis="columns",
# inplace=True,
# )
# df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
# df["chrom_2"] = df["chrom_1"].values
# return df
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
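    # random intervals on two chromosomes, sorted by (chrom, start)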
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX:4-6"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
### select with non-standard column names
region1 = "chrX:4-6"
new_names = ["chr", "chrstart", "chrend"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=new_names,
)
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]],
columns=new_names,
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
region1 = "chrX"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
### select from a DataFrame with NaNs
colnames = ["chrom", "start", "end", "view_region"]
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_result = pd.DataFrame(
[["chr1", -6, 12, "chr1p"]],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
region1 = "chr1:0-1"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df, region1).reset_index(drop=True)
)
def test_trim():
### trim with view_df
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 32, 36, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 26, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
with pytest.raises(ValueError):
bioframe.trim(df, view_df=view_df)
# df_view_col already exists, so need to specify it:
pd.testing.assert_frame_equal(
df_trimmed, bioframe.trim(df, view_df=view_df, df_view_col="view_region")
)
### trim with view_df interpreted from dictionary for chromsizes
chromsizes = {"chr1": 20, "chrX_0": 5}
df = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX_0", 1, 8],
],
columns=["chrom", "startFunky", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 20],
["chrX_0", 1, 5],
],
columns=["chrom", "startFunky", "end"],
).astype({"startFunky": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(
df,
view_df=chromsizes,
cols=["chrom", "startFunky", "end"],
return_view_columns=False,
),
)
### trim with default limits=None and negative values
df = pd.DataFrame(
[
["chr1", -4, 12],
["chr1", 13, 26],
["chrX", -5, -1],
],
columns=["chrom", "start", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX", 0, 0],
],
columns=["chrom", "start", "end"],
)
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim when there are NaN intervals
df = pd.DataFrame(
[
["chr1", -4, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", -5, -1, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 0, 0, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim with view_df and NA intervals
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12],
["chr1", 0, 12],
[pd.NA, pd.NA, pd.NA],
["chrX", 1, 20],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, pd.NA],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
# infer df_view_col with assign_view and ignore NAs
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(df, view_df=view_df, df_view_col=None, return_view_columns=True)[
["chrom", "start", "end", "view_region"]
],
)
def test_expand():
d = """chrom start end
0 chr1 1 5
1 chr1 50 55
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+")
expand_bp = 10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 -9 15
1 chr1 40 65
2 chr2 90 210"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with negative pad
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 110 190"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp, side="left")
d = """chrom start end
0 chr1 3 5
1 chr1 52 55
2 chr2 110 200"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with multiplicative pad
mult = 0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 150 150"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
mult = 2.0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 -1 7
1 chr1 48 58
2 chr2 50 250"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with NA and non-integer multiplicative pad
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
mult = 1.10
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 95 205"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df, fake_expanded)
def test_overlap():
### test consistency of overlap(how='inner') with pyranges.join ###
    ### note: this does not test the overlap_start or overlap_end columns of bioframe.overlap
df1 = mock_bioframe()
df2 = mock_bioframe()
assert df1.equals(df2) == False
# p1 = bioframe_to_pyranges(df1)
# p2 = bioframe_to_pyranges(df2)
# pp = pyranges_overlap_to_bioframe(p1.join(p2, how=None))[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# bb = bioframe.overlap(df1, df2, how="inner")[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# pp = pp.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# bb = bb.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# pd.testing.assert_frame_equal(bb, pp, check_dtype=False, check_exact=False)
# print("overlap elements agree")
    ### test overlap on=[] ###
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[["chr1", 6, 10, "+", "dog"], ["chrX", 7, 10, "-", "dog"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 3
b = bioframe.overlap(
df1,
df2,
on=["strand"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 2
b = bioframe.overlap(
df1,
df2,
on=None,
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 0
### test overlap 'left', 'outer', and 'right'
b = bioframe.overlap(
df1,
df2,
on=None,
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 5
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="inner",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 0
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="right",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 2
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
### test keep_order and NA handling
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+"],
[pd.NA, pd.NA, pd.NA, "-"],
["chrX", 1, 8, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, "+"], [pd.NA, pd.NA, pd.NA, "-"], ["chrX", 7, 10, "-"]],
columns=["chrom2", "start2", "end2", "strand"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=True, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
    assert not df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=False, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chrX", 1, 8, pd.NA, pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, pd.NA, "tiger"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert (
bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
keep_order=False,
).shape
== (3, 12)
)
### result of overlap should still have bedframe-like properties
overlap_df = bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
overlap_df = bioframe.overlap(
df1,
df2,
how="innter",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
    # test that keep_order is incompatible with how != 'left'
with pytest.raises(ValueError):
bioframe.overlap(
df1,
df2,
how="outer",
on=["animal"],
cols2=["chrom2", "start2", "end2"],
keep_order=True,
)
def test_cluster():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 1])
).all() # the last interval does not overlap the first three
df_annotated = bioframe.cluster(df1, min_dist=2)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 0])
).all() # all intervals part of the same cluster
df_annotated = bioframe.cluster(df1, min_dist=None)
assert (
df_annotated["cluster"].values == np.array([0, 0, 1, 2])
).all() # adjacent intervals not clustered
df1.iloc[0, 0] = "chrX"
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([2, 0, 0, 1])
).all() # do not cluster intervals across chromosomes
# test consistency with pyranges (which automatically sorts df upon creation and uses 1-based indexing for clusters)
# assert (
# (bioframe_to_pyranges(df1).cluster(count=True).df["Cluster"].values - 1)
# == bioframe.cluster(df1.sort_values(["chrom", "start"]))["cluster"].values
# ).all()
# test on=[] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert (
bioframe.cluster(df1, on=["animal"])["cluster"].values == np.array([0, 1, 0, 2])
).all()
assert (
bioframe.cluster(df1, on=["strand"])["cluster"].values == np.array([0, 1, 1, 2])
).all()
assert (
bioframe.cluster(df1, on=["location", "animal"])["cluster"].values
== np.array([0, 2, 1, 3])
).all()
### test cluster with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.cluster(df1)["cluster"].max() == 3
assert bioframe.cluster(df1, on=["strand"])["cluster"].max() == 4
pd.testing.assert_frame_equal(df1, bioframe.cluster(df1)[df1.columns])
assert checks.is_bedframe(
bioframe.cluster(df1, on=["strand"]),
cols=["chrom", "cluster_start", "cluster_end"],
)
assert checks.is_bedframe(
bioframe.cluster(df1), cols=["chrom", "cluster_start", "cluster_end"]
)
assert checks.is_bedframe(bioframe.cluster(df1))
def test_merge():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
# the last interval does not overlap the first three with default min_dist=0
assert (bioframe.merge(df1)["n_intervals"].values == np.array([3, 1])).all()
    # adjacent intervals are not clustered with min_dist=None
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values == np.array([2, 1, 1])
).all()
# all intervals part of one cluster
assert (
bioframe.merge(df1, min_dist=2)["n_intervals"].values == np.array([4])
).all()
df1.iloc[0, 0] = "chrX"
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values
== np.array([1, 1, 1, 1])
).all()
assert (
bioframe.merge(df1, min_dist=0)["n_intervals"].values == np.array([2, 1, 1])
).all()
# total number of intervals should equal length of original dataframe
mock_df = mock_bioframe()
assert np.sum(bioframe.merge(mock_df, min_dist=0)["n_intervals"].values) == len(
mock_df
)
# # test consistency with pyranges
# pd.testing.assert_frame_equal(
# pyranges_to_bioframe(bioframe_to_pyranges(df1).merge(count=True)),
# bioframe.merge(df1),
# check_dtype=False,
# check_exact=False,
# )
# test on=['chrom',...] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert len(bioframe.merge(df1, on=None)) == 2
assert len(bioframe.merge(df1, on=["strand"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location", "animal"])) == 4
d = """ chrom start end animal n_intervals
0 chr1 3 10 cat 2
1 chr1 3 8 dog 1
2 chrX 6 10 cat 1"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.merge(df1, on=["animal"]),
check_dtype=False,
)
# merge with repeated indices
df = pd.DataFrame(
{"chrom": ["chr1", "chr2"], "start": [100, 400], "end": [110, 410]}
)
df.index = [0, 0]
pd.testing.assert_frame_equal(
df.reset_index(drop=True), bioframe.merge(df)[["chrom", "start", "end"]]
)
# test merge with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.merge(df1).shape[0] == 4
assert bioframe.merge(df1)["start"].iloc[0] == 1
assert bioframe.merge(df1)["end"].iloc[0] == 12
assert bioframe.merge(df1, on=["strand"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[1] == df1.shape[1] + 1
assert checks.is_bedframe(bioframe.merge(df1, on=["strand", "animal"]))
def test_complement():
### complementing a df with no intervals in chrX by a view with chrX should return entire chrX region
df1 = pd.DataFrame(
[["chr1", 1, 5], ["chr1", 3, 8], ["chr1", 8, 10], ["chr1", 12, 14]],
columns=["chrom", "start", "end"],
)
df1_chromsizes = {"chr1": 100, "chrX": 100}
df1_complement = pd.DataFrame(
[
["chr1", 0, 1, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with two chromosomes ###
df1.iloc[0, 0] = "chrX"
df1_complement = pd.DataFrame(
[
["chr1", 0, 3, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 1, "chrX:0-100"],
["chrX", 5, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with no view_df and a negative interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-9223372036854775807"],
["chr1", 20, np.iinfo(np.int64).max, "chr1:0-9223372036854775807"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1), df1_complement)
### test complement with an overhanging interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
chromsizes = {"chr1": 15}
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-15"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=chromsizes, view_name_col="VR"), df1_complement
)
### test complement where an interval from df overlaps two different regions from view
df1 = pd.DataFrame([["chr1", 5, 15]], columns=["chrom", "start", "end"])
chromsizes = [("chr1", 0, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
df1_complement = pd.DataFrame(
[["chr1", 0, 5, "chr1p"], ["chr1", 15, 20, "chr1q"]],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
### test complement with NAs
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 5, 15], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
with pytest.raises(ValueError): # no NAs allowed in chromsizes
bioframe.complement(
df1, [("chr1", pd.NA, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
)
assert checks.is_bedframe(bioframe.complement(df1, chromsizes))
def test_closest():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 4, 8], ["chr1", 10, 11]], columns=["chrom", "start", "end"]
)
### closest(df1,df2,k=1) ###
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 4 8 0"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
    ### closest(df1, df2, ignore_overlaps=True) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True)
)
### closest(df1,df2,k=2) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 4 8 0
1 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), k=2)
)
### closest(df2,df1) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 4 8 chr1 1 5 0
1 chr1 10 11 chr1 1 5 5 """
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df2, df1, suffixes=("_1", "_2")))
### change first interval to new chrom ###
df2.iloc[0, 0] = "chrA"
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### test other return arguments ###
df2.iloc[0, 0] = "chr1"
d = """
index index_ have_overlap overlap_start overlap_end distance
0 0 0 True 4 5 0
1 0 1 False <NA> <NA> 5
"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.closest(
df1,
df2,
k=2,
return_overlap=True,
return_index=True,
return_input=False,
return_distance=True,
),
check_dtype=False,
)
# closest should ignore empty groups (e.g. from categorical chrom)
df = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
d = """ chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chrX 1 8 chrX 2 10 0
1 chrX 2 10 chrX 1 8 0"""
df_closest = pd.read_csv(StringIO(d), sep=r"\s+")
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df = df.astype({"chrom": df_cat})
pd.testing.assert_frame_equal(
df_closest,
bioframe.closest(df, suffixes=("_1", "_2")),
check_dtype=False,
check_categorical=False,
)
# closest should ignore null rows: code will need to be modified
# as for overlap if an on=[] option is added
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_1": pd.Int64Dtype(),
"end_1": pd.Int64Dtype(),
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True, k=5)
)
with pytest.raises(ValueError): # inputs must be valid bedFrames
df1.iloc[0, 0] = "chr10"
bioframe.closest(df1, df2)
def test_coverage():
#### coverage does not exceed length of original interval
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chr1", 2, 10]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of interval on different chrom returns zero for coverage and n_overlaps
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chrX", 3, 8]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 0 """
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### when a second overlap starts within the first
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8]], columns=["chrom", "start", "end"]
)
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of NA interval returns zero for coverage
df1 = pd.DataFrame(
[
["chr1", 10, 20],
[pd.NA, pd.NA, pd.NA],
["chr1", 3, 8],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
)
df1 = bioframe.sanitize_bedframe(df1)
df2 = bioframe.sanitize_bedframe(df2)
df_coverage = pd.DataFrame(
[
["chr1", 10, 20, 0],
[pd.NA, pd.NA, pd.NA, 0],
["chr1", 3, 8, 5],
[pd.NA, pd.NA, pd.NA, 0],
],
columns=["chrom", "start", "end", "coverage"],
).astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype(), "coverage": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df_coverage, bioframe.coverage(df1, df2))
    ### coverage without return_input returns a single-column DataFrame
assert (
bioframe.coverage(df1, df2, return_input=False)["coverage"].values
== np.array([0, 0, 5, 0])
).all()
def test_subtract():
### no intervals should be left after self-subtraction
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
assert len(bioframe.subtract(df1, df1)) == 0
### no intervals on chrX should remain after subtracting a longer interval
### interval on chr1 should be split.
### additional column should be propagated to children.
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 5, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
### no intervals on chrX should remain after subtracting a longer interval
df2 = pd.DataFrame(
[["chrX", 0, 4], ["chr1", 6, 6], ["chrX", 4, 9]],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 6, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
    ### subtracting dataframes with funny column names
funny_cols = ["C", "chromStart", "chromStop"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=funny_cols,
)
df1["strand"] = "+"
assert len(bioframe.subtract(df1, df1, cols1=funny_cols, cols2=funny_cols)) == 0
funny_cols2 = ["chr", "st", "e"]
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=funny_cols2,
)
df_result = pd.DataFrame(
[["chr1", 4, 5, "+"], ["chr1", 6, 7, "+"]],
columns=funny_cols + ["strand"],
)
df_result = df_result.astype(
{funny_cols[1]: pd.Int64Dtype(), funny_cols[2]: pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2, cols1=funny_cols, cols2=funny_cols2)
.sort_values(funny_cols)
.reset_index(drop=True),
)
# subtract should ignore empty groups
df1 = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 1, 8],
],
columns=["chrom", "start", "end"],
)
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df1 = df1.astype({"chrom": df_cat})
df_subtracted = pd.DataFrame(
[
["chrX", 8, 10],
],
columns=["chrom", "start", "end"],
)
assert bioframe.subtract(df1, df1).empty
pd.testing.assert_frame_equal(
df_subtracted.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2),
check_dtype=False,
check_categorical=False,
)
## test transferred from deprecated bioframe.split
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 4],
["chr1", 5],
],
columns=["chrom", "pos"],
)
df2["start"] = df2["pos"]
df2["end"] = df2["pos"]
df_result = (
pd.DataFrame(
[
["chrX", 1, 4],
["chrX", 3, 4],
["chrX", 4, 5],
["chrX", 4, 8],
["chr1", 5, 7],
["chr1", 4, 5],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
.astype({"start": pd.Int64Dtype(), "end":
|
pd.Int64Dtype()
|
pandas.Int64Dtype
|
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserWarning
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timestamp, concat
import pandas._testing as tm
@pytest.mark.parametrize("dtype", [str, object])
@pytest.mark.parametrize("check_orig", [True, False])
def test_dtype_all_columns(all_parsers, dtype, check_orig):
# see gh-3795, gh-6607
parser = all_parsers
df = DataFrame(
np.random.rand(5, 2).round(4),
columns=list("AB"),
index=["1A", "1B", "1C", "1D", "1E"],
)
with tm.ensure_clean("__passing_str_as_dtype__.csv") as path:
df.to_csv(path)
result = parser.read_csv(path, dtype=dtype, index_col=0)
if check_orig:
expected = df.copy()
result = result.astype(float)
else:
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
def test_dtype_all_columns_empty(all_parsers):
# see gh-12048
parser = all_parsers
result = parser.read_csv(StringIO("A,B"), dtype=str)
expected = DataFrame({"A": [], "B": []}, index=[], dtype=str)
tm.assert_frame_equal(result, expected)
def test_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
expected = DataFrame(
[[1, "2.5"], [2, "3.5"], [3, "4.5"], [4, "5.5"]], columns=["one", "two"]
)
expected["one"] = expected["one"].astype(np.float64)
expected["two"] = expected["two"].astype(object)
result = parser.read_csv(StringIO(data), dtype={"one": np.float64, 1: str})
tm.assert_frame_equal(result, expected)
def test_invalid_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
with pytest.raises(TypeError, match="data type [\"']foo[\"'] not understood"):
parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})
@pytest.mark.parametrize(
"dtype",
[
"category",
CategoricalDtype(),
{"a": "category", "b": "category", "c": CategoricalDtype()},
],
)
def test_categorical_dtype(all_parsers, dtype):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["a", "a", "b"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("dtype", [{"b": "category"}, {1: "category"}])
def test_categorical_dtype_single(all_parsers, dtype):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = DataFrame(
{"a": [1, 1, 2], "b": Categorical(["a", "a", "b"]), "c": [3.4, 3.4, 4.5]}
)
actual = parser.read_csv(StringIO(data), dtype=dtype)
|
tm.assert_frame_equal(actual, expected)
|
pandas._testing.assert_frame_equal
|
import numpy as np
import pandas as pd
import keras
import keras.utils
import tensorflow as tf
import time
import os
import neptune
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import *
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from src.AAUFile import *
from getpass import getpass
import smbclient as smbc
import trainers.topKmetrics as trainerTop
#from trainers.topKmetrics import topKMetrics
start_time = time.time()
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
print ('Loading dataset..')
# path = "/home/user/data/"
# all_files = glob.glob(os.path.join(path, "data_*.csv"))
# df_from_each_file = (pd.read_csv(f, sep=',') for f in all_files)
# df_merged = pd.concat(df_from_each_file, ignore_index=True)
# df_merged.to_csv( "merged.csv")
# def crossValidation(filenames, k, learningRate, optimiser, loss, epoch, embNum, batchSize):
# #Load the files for cross-validation.
# dataSets = []
# username = input("username:")
# psw = getpass()
# print("Loading files")
# for filename in filenames:
# dataSets.append(gfData(filename, username, psw))
# print("Loading done.")
# #getting all unique users id and materials id
# usersId = []
# matId = []
# datas = []
# for dataSet in dataSets:
# usersId.append(pd.Series(dataSet["usersId"]))
# matId.append(pd.Series(dataSet["materialsId"]))
# datas.append(dataSet["ratings"])
# usersId = pd.unique(pd.concat(usersId))
# matId = pd.unique(pd.concat(matId))
# dataSets = datas
# #cross-validation
# res = []
# for i in range(len(dataSets)):
# print("cross validation it: " + str(i) + "/" + str(len(dataSets)))
# #creating test set and training set
# testData = dataSets.pop(0)
# testSet = tf.data.Dataset.from_tensor_slices(dict(testData))
# trainSet = tf.data.Dataset.from_tensor_slices(dict(pd.concat(dataSets, ignore_index=True)))
# with smbc.open_file((r""), mode="r", username=input("username: "), password=getpass()) as f:
# train = pd.read_csv(f, header=0, names=['customer_id', 'normalized_customer_id', 'material', 'product_id', 'rating_type'])
# with smbc.open_file((r""), mode="r", username=input("username: "), password=getpass()) as f:
# test = pd.read_csv(f, header=0, names=['customer_id', 'normalized_customer_id', 'material', 'product_id', 'rating_type'])
#dataset = pd.read_csv('D:/ML/dataset/100kNew.csv', header=0, names=['customer_id', 'normalized_customer_id', 'material', 'product_id', 'rating_type'])
train =
|
pd.read_csv('', header=0, names=['customer_id', 'normalized_customer_id', 'material', 'product_id', 'rating_type'])
|
pandas.read_csv
|
import sys
import os
import math
import datetime
import itertools
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.tsa.stattools import grangercausalitytests
import scipy.stats as stats
from mesa.batchrunner import BatchRunner, BatchRunnerMP
from mesa.datacollection import DataCollector
from project_material.model.network import HostNetwork
class CustomBatchRunner(BatchRunner):
def run_model(self, model):
while model.schedule.steps < self.max_steps:
model.step()
def track_params(model):
return (
model.num_nodes,
model.avg_node_degree,
model.initial_outbreak_size,
model.prob_spread_virus_gamma_shape,
model.prob_spread_virus_gamma_scale,
model.prob_spread_virus_gamma_loc,
model.prob_spread_virus_gamma_magnitude_multiplier,
model.prob_recover_gamma_shape,
model.prob_recover_gamma_scale,
model.prob_recover_gamma_loc,
model.prob_recover_gamma_magnitude_multiplier,
model.prob_virus_kill_host_gamma_shape,
model.prob_virus_kill_host_gamma_scale,
model.prob_virus_kill_host_gamma_loc,
model.prob_virus_kill_host_gamma_magnitude_multiplier,
model.prob_infectious_no_to_mild_symptom_gamma_shape,
model.prob_infectious_no_to_mild_symptom_gamma_scale,
model.prob_infectious_no_to_mild_symptom_gamma_loc,
model.prob_infectious_no_to_mild_symptom_gamma_magnitude_multiplier,
model.prob_infectious_no_to_severe_symptom_gamma_shape,
model.prob_infectious_no_to_severe_symptom_gamma_scale,
model.prob_infectious_no_to_severe_symptom_gamma_loc,
model.prob_infectious_no_to_severe_symptom_gamma_magnitude_multiplier,
model.prob_infectious_no_to_critical_symptom_gamma_shape,
model.prob_infectious_no_to_critical_symptom_gamma_scale,
model.prob_infectious_no_to_critical_symptom_gamma_loc,
model.prob_infectious_no_to_critical_symptom_gamma_magnitude_multiplier,
model.prob_infectious_mild_to_no_symptom_gamma_shape,
model.prob_infectious_mild_to_no_symptom_gamma_scale,
model.prob_infectious_mild_to_no_symptom_gamma_loc,
model.prob_infectious_mild_to_no_symptom_gamma_magnitude_multiplier,
model.prob_infectious_mild_to_severe_symptom_gamma_shape,
model.prob_infectious_mild_to_severe_symptom_gamma_scale,
model.prob_infectious_mild_to_severe_symptom_gamma_loc,
model.prob_infectious_mild_to_severe_symptom_gamma_magnitude_multiplier,
model.prob_infectious_mild_to_critical_symptom_gamma_shape,
model.prob_infectious_mild_to_critical_symptom_gamma_scale,
model.prob_infectious_mild_to_critical_symptom_gamma_loc,
model.prob_infectious_mild_to_critical_symptom_gamma_magnitude_multiplier,
model.prob_infectious_severe_to_no_symptom_gamma_shape,
model.prob_infectious_severe_to_no_symptom_gamma_scale,
model.prob_infectious_severe_to_no_symptom_gamma_loc,
model.prob_infectious_severe_to_no_symptom_gamma_magnitude_multiplier,
model.prob_infectious_severe_to_mild_symptom_gamma_shape,
model.prob_infectious_severe_to_mild_symptom_gamma_scale,
model.prob_infectious_severe_to_mild_symptom_gamma_loc,
model.prob_infectious_severe_to_mild_symptom_gamma_magnitude_multiplier,
model.prob_infectious_severe_to_critical_symptom_gamma_shape,
model.prob_infectious_severe_to_critical_symptom_gamma_scale,
model.prob_infectious_severe_to_critical_symptom_gamma_loc,
model.prob_infectious_severe_to_critical_symptom_gamma_magnitude_multiplier,
model.prob_infectious_critical_to_no_symptom_gamma_shape,
model.prob_infectious_critical_to_no_symptom_gamma_scale,
model.prob_infectious_critical_to_no_symptom_gamma_loc,
model.prob_infectious_critical_to_no_symptom_gamma_magnitude_multiplier,
model.prob_infectious_critical_to_mild_symptom_gamma_shape,
model.prob_infectious_critical_to_mild_symptom_gamma_scale,
model.prob_infectious_critical_to_mild_symptom_gamma_loc,
model.prob_infectious_critical_to_mild_symptom_gamma_magnitude_multiplier,
model.prob_infectious_critical_to_severe_symptom_gamma_shape,
model.prob_infectious_critical_to_severe_symptom_gamma_scale,
model.prob_infectious_critical_to_severe_symptom_gamma_loc,
model.prob_infectious_critical_to_severe_symptom_gamma_magnitude_multiplier,
model.prob_recovered_no_to_mild_complication,
model.prob_recovered_no_to_severe_complication,
model.prob_recovered_mild_to_no_complication,
model.prob_recovered_mild_to_severe_complication,
model.prob_recovered_severe_to_no_complication,
model.prob_recovered_severe_to_mild_complication,
model.prob_gain_immunity,
model.hospital_bed_capacity_as_percent_of_population,
model.hospital_bed_cost_per_day,
model.icu_bed_capacity_as_percent_of_population,
model.icu_bed_cost_per_day,
model.ventilator_capacity_as_percent_of_population,
model.ventilator_cost_per_day,
model.drugX_capacity_as_percent_of_population,
model.drugX_cost_per_day,
)
def track_run(model):
return model.uid
class BatchHostNetwork(HostNetwork):
# id generator to track run number in batch run data
id_gen = itertools.count(1)
def __init__(self, num_nodes, avg_node_degree, initial_outbreak_size,
prob_spread_virus_gamma_shape,
prob_spread_virus_gamma_scale,
prob_spread_virus_gamma_loc,
prob_spread_virus_gamma_magnitude_multiplier,
prob_recover_gamma_shape,
prob_recover_gamma_scale,
prob_recover_gamma_loc,
prob_recover_gamma_magnitude_multiplier,
prob_virus_kill_host_gamma_shape,
prob_virus_kill_host_gamma_scale,
prob_virus_kill_host_gamma_loc,
prob_virus_kill_host_gamma_magnitude_multiplier,
prob_infectious_no_to_mild_symptom_gamma_shape,
prob_infectious_no_to_mild_symptom_gamma_scale,
prob_infectious_no_to_mild_symptom_gamma_loc,
prob_infectious_no_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_no_to_severe_symptom_gamma_shape,
prob_infectious_no_to_severe_symptom_gamma_scale,
prob_infectious_no_to_severe_symptom_gamma_loc,
prob_infectious_no_to_severe_symptom_gamma_magnitude_multiplier,
prob_infectious_no_to_critical_symptom_gamma_shape,
prob_infectious_no_to_critical_symptom_gamma_scale,
prob_infectious_no_to_critical_symptom_gamma_loc,
prob_infectious_no_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_no_symptom_gamma_shape,
prob_infectious_mild_to_no_symptom_gamma_scale,
prob_infectious_mild_to_no_symptom_gamma_loc,
prob_infectious_mild_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_severe_symptom_gamma_shape,
prob_infectious_mild_to_severe_symptom_gamma_scale,
prob_infectious_mild_to_severe_symptom_gamma_loc,
prob_infectious_mild_to_severe_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_critical_symptom_gamma_shape,
prob_infectious_mild_to_critical_symptom_gamma_scale,
prob_infectious_mild_to_critical_symptom_gamma_loc,
prob_infectious_mild_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_no_symptom_gamma_shape,
prob_infectious_severe_to_no_symptom_gamma_scale,
prob_infectious_severe_to_no_symptom_gamma_loc,
prob_infectious_severe_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_mild_symptom_gamma_shape,
prob_infectious_severe_to_mild_symptom_gamma_scale,
prob_infectious_severe_to_mild_symptom_gamma_loc,
prob_infectious_severe_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_critical_symptom_gamma_shape,
prob_infectious_severe_to_critical_symptom_gamma_scale,
prob_infectious_severe_to_critical_symptom_gamma_loc,
prob_infectious_severe_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_no_symptom_gamma_shape,
prob_infectious_critical_to_no_symptom_gamma_scale,
prob_infectious_critical_to_no_symptom_gamma_loc,
prob_infectious_critical_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_mild_symptom_gamma_shape,
prob_infectious_critical_to_mild_symptom_gamma_scale,
prob_infectious_critical_to_mild_symptom_gamma_loc,
prob_infectious_critical_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_severe_symptom_gamma_shape,
prob_infectious_critical_to_severe_symptom_gamma_scale,
prob_infectious_critical_to_severe_symptom_gamma_loc,
prob_infectious_critical_to_severe_symptom_gamma_magnitude_multiplier,
prob_recovered_no_to_mild_complication,
prob_recovered_no_to_severe_complication,
prob_recovered_mild_to_no_complication,
prob_recovered_mild_to_severe_complication,
prob_recovered_severe_to_no_complication,
prob_recovered_severe_to_mild_complication,
prob_gain_immunity,
hospital_bed_capacity_as_percent_of_population,
hospital_bed_cost_per_day,
icu_bed_capacity_as_percent_of_population,
icu_bed_cost_per_day,
ventilator_capacity_as_percent_of_population,
ventilator_cost_per_day,
drugX_capacity_as_percent_of_population,
drugX_cost_per_day,
):
super().__init__(
num_nodes, avg_node_degree, initial_outbreak_size,
prob_spread_virus_gamma_shape,
prob_spread_virus_gamma_scale,
prob_spread_virus_gamma_loc,
prob_spread_virus_gamma_magnitude_multiplier,
prob_recover_gamma_shape,
prob_recover_gamma_scale,
prob_recover_gamma_loc,
prob_recover_gamma_magnitude_multiplier,
prob_virus_kill_host_gamma_shape,
prob_virus_kill_host_gamma_scale,
prob_virus_kill_host_gamma_loc,
prob_virus_kill_host_gamma_magnitude_multiplier,
prob_infectious_no_to_mild_symptom_gamma_shape,
prob_infectious_no_to_mild_symptom_gamma_scale,
prob_infectious_no_to_mild_symptom_gamma_loc,
prob_infectious_no_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_no_to_severe_symptom_gamma_shape,
prob_infectious_no_to_severe_symptom_gamma_scale,
prob_infectious_no_to_severe_symptom_gamma_loc,
prob_infectious_no_to_severe_symptom_gamma_magnitude_multiplier,
prob_infectious_no_to_critical_symptom_gamma_shape,
prob_infectious_no_to_critical_symptom_gamma_scale,
prob_infectious_no_to_critical_symptom_gamma_loc,
prob_infectious_no_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_no_symptom_gamma_shape,
prob_infectious_mild_to_no_symptom_gamma_scale,
prob_infectious_mild_to_no_symptom_gamma_loc,
prob_infectious_mild_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_severe_symptom_gamma_shape,
prob_infectious_mild_to_severe_symptom_gamma_scale,
prob_infectious_mild_to_severe_symptom_gamma_loc,
prob_infectious_mild_to_severe_symptom_gamma_magnitude_multiplier,
prob_infectious_mild_to_critical_symptom_gamma_shape,
prob_infectious_mild_to_critical_symptom_gamma_scale,
prob_infectious_mild_to_critical_symptom_gamma_loc,
prob_infectious_mild_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_no_symptom_gamma_shape,
prob_infectious_severe_to_no_symptom_gamma_scale,
prob_infectious_severe_to_no_symptom_gamma_loc,
prob_infectious_severe_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_mild_symptom_gamma_shape,
prob_infectious_severe_to_mild_symptom_gamma_scale,
prob_infectious_severe_to_mild_symptom_gamma_loc,
prob_infectious_severe_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_severe_to_critical_symptom_gamma_shape,
prob_infectious_severe_to_critical_symptom_gamma_scale,
prob_infectious_severe_to_critical_symptom_gamma_loc,
prob_infectious_severe_to_critical_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_no_symptom_gamma_shape,
prob_infectious_critical_to_no_symptom_gamma_scale,
prob_infectious_critical_to_no_symptom_gamma_loc,
prob_infectious_critical_to_no_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_mild_symptom_gamma_shape,
prob_infectious_critical_to_mild_symptom_gamma_scale,
prob_infectious_critical_to_mild_symptom_gamma_loc,
prob_infectious_critical_to_mild_symptom_gamma_magnitude_multiplier,
prob_infectious_critical_to_severe_symptom_gamma_shape,
prob_infectious_critical_to_severe_symptom_gamma_scale,
prob_infectious_critical_to_severe_symptom_gamma_loc,
prob_infectious_critical_to_severe_symptom_gamma_magnitude_multiplier,
prob_recovered_no_to_mild_complication,
prob_recovered_no_to_severe_complication,
prob_recovered_mild_to_no_complication,
prob_recovered_mild_to_severe_complication,
prob_recovered_severe_to_no_complication,
prob_recovered_severe_to_mild_complication,
prob_gain_immunity,
hospital_bed_capacity_as_percent_of_population,
hospital_bed_cost_per_day,
icu_bed_capacity_as_percent_of_population,
icu_bed_cost_per_day,
ventilator_capacity_as_percent_of_population,
ventilator_cost_per_day,
drugX_capacity_as_percent_of_population,
drugX_cost_per_day,
)
self.model_reporters_dict.update({'Model params': track_params, 'Run': track_run})
self.datacollector = DataCollector(model_reporters=self.model_reporters_dict)
# parameter lists for each parameter to be tested in batch run
br_params = {
'num_nodes': [500],
'avg_node_degree': [10],
'initial_outbreak_size': [2],
'prob_spread_virus_gamma_shape': [1],
'prob_spread_virus_gamma_scale': [3],
'prob_spread_virus_gamma_loc': [0],
'prob_spread_virus_gamma_magnitude_multiplier': [0.25],
'prob_recover_gamma_shape': [7],
'prob_recover_gamma_scale': [3],
'prob_recover_gamma_loc': [0],
'prob_recover_gamma_magnitude_multiplier': [0.75],
'prob_virus_kill_host_gamma_shape': [5.2],
'prob_virus_kill_host_gamma_scale': [3.2],
'prob_virus_kill_host_gamma_loc': [0],
'prob_virus_kill_host_gamma_magnitude_multiplier': [0.069],
'prob_infectious_no_to_mild_symptom_gamma_shape': [4.1],
'prob_infectious_no_to_mild_symptom_gamma_scale': [1],
'prob_infectious_no_to_mild_symptom_gamma_loc': [0],
'prob_infectious_no_to_mild_symptom_gamma_magnitude_multiplier': [0.75],
'prob_infectious_no_to_severe_symptom_gamma_shape': [1],
'prob_infectious_no_to_severe_symptom_gamma_scale': [2],
'prob_infectious_no_to_severe_symptom_gamma_loc': [0],
'prob_infectious_no_to_severe_symptom_gamma_magnitude_multiplier': [0.1],
'prob_infectious_no_to_critical_symptom_gamma_shape': [1],
'prob_infectious_no_to_critical_symptom_gamma_scale': [2.8],
'prob_infectious_no_to_critical_symptom_gamma_loc': [0],
'prob_infectious_no_to_critical_symptom_gamma_magnitude_multiplier': [0.15],
'prob_infectious_mild_to_no_symptom_gamma_shape': [3],
'prob_infectious_mild_to_no_symptom_gamma_scale': [3],
'prob_infectious_mild_to_no_symptom_gamma_loc': [0],
'prob_infectious_mild_to_no_symptom_gamma_magnitude_multiplier': [0.25],
'prob_infectious_mild_to_severe_symptom_gamma_shape': [4.9],
'prob_infectious_mild_to_severe_symptom_gamma_scale': [2.2],
'prob_infectious_mild_to_severe_symptom_gamma_loc': [0],
'prob_infectious_mild_to_severe_symptom_gamma_magnitude_multiplier': [0.11],
'prob_infectious_mild_to_critical_symptom_gamma_shape': [3.3],
'prob_infectious_mild_to_critical_symptom_gamma_scale': [3.1],
'prob_infectious_mild_to_critical_symptom_gamma_loc': [0],
'prob_infectious_mild_to_critical_symptom_gamma_magnitude_multiplier': [0.11],
'prob_infectious_severe_to_no_symptom_gamma_shape': [3],
'prob_infectious_severe_to_no_symptom_gamma_scale': [2],
'prob_infectious_severe_to_no_symptom_gamma_loc': [0],
'prob_infectious_severe_to_no_symptom_gamma_magnitude_multiplier': [0.001],
'prob_infectious_severe_to_mild_symptom_gamma_shape': [5],
'prob_infectious_severe_to_mild_symptom_gamma_scale': [3],
'prob_infectious_severe_to_mild_symptom_gamma_loc': [0],
'prob_infectious_severe_to_mild_symptom_gamma_magnitude_multiplier': [0.001],
'prob_infectious_severe_to_critical_symptom_gamma_shape': [7],
'prob_infectious_severe_to_critical_symptom_gamma_scale': [3],
'prob_infectious_severe_to_critical_symptom_gamma_loc': [0],
'prob_infectious_severe_to_critical_symptom_gamma_magnitude_multiplier': [0.01],
'prob_infectious_critical_to_no_symptom_gamma_shape': [7],
'prob_infectious_critical_to_no_symptom_gamma_scale': [1],
'prob_infectious_critical_to_no_symptom_gamma_loc': [0],
'prob_infectious_critical_to_no_symptom_gamma_magnitude_multiplier': [0.001],
'prob_infectious_critical_to_mild_symptom_gamma_shape': [4],
'prob_infectious_critical_to_mild_symptom_gamma_scale': [2],
'prob_infectious_critical_to_mild_symptom_gamma_loc': [0],
'prob_infectious_critical_to_mild_symptom_gamma_magnitude_multiplier': [0.001],
'prob_infectious_critical_to_severe_symptom_gamma_shape': [5],
'prob_infectious_critical_to_severe_symptom_gamma_scale': [2],
'prob_infectious_critical_to_severe_symptom_gamma_loc': [0],
'prob_infectious_critical_to_severe_symptom_gamma_magnitude_multiplier': [0.25],
'prob_recovered_no_to_mild_complication': [0.016],
'prob_recovered_no_to_severe_complication': [0],
'prob_recovered_mild_to_no_complication': [0.02],
'prob_recovered_mild_to_severe_complication': [0.02],
'prob_recovered_severe_to_no_complication': [0.001],
'prob_recovered_severe_to_mild_complication': [0.001],
'prob_gain_immunity': [0.005],
'hospital_bed_capacity_as_percent_of_population': [0.10],
'hospital_bed_cost_per_day': [2000],
'icu_bed_capacity_as_percent_of_population': [0.10],
'icu_bed_cost_per_day': [3000],
'ventilator_capacity_as_percent_of_population': [0.1],
'ventilator_cost_per_day': [100],
'drugX_capacity_as_percent_of_population': [0.1],
'drugX_cost_per_day': [20],
}
start_date = datetime.datetime(2020, 2, 20) # Setting
num_iterations = 1 # Setting
num_max_steps_in_reality = 95 # Setting
num_max_steps_in_simulation = 165 # Setting
end_date_in_reality = start_date + datetime.timedelta(days=num_max_steps_in_reality) # 2020-05-25
end_date_in_simulation = start_date + datetime.timedelta(days=num_max_steps_in_simulation) # 2020-09-22 if num_max_steps_in_simulation == 215
try:
br = BatchRunnerMP(BatchHostNetwork,
br_params,
iterations=num_iterations,
max_steps=num_max_steps_in_simulation,
model_reporters={'Data Collector': lambda m: m.datacollector})
except Exception as e:
print('Multiprocessing batch run not applied, reason as:', e)
br = CustomBatchRunner(BatchHostNetwork,
br_params,
iterations=num_iterations,
max_steps=num_max_steps_in_simulation,
model_reporters={'Data Collector': lambda m: m.datacollector})
def main(on_switch=False, graph_switch=False, stats_test_switch=False, save_switch=False,
realworld_prediction_switch=False, filename_tag=''):
if on_switch:
br.run_all()
br_df = br.get_model_vars_dataframe()
br_step_data = pd.DataFrame()
for i in range(len(br_df['Data Collector'])):
if isinstance(br_df['Data Collector'][i], DataCollector):
print('>>>>> Run #{}'.format(i))
i_run_data = br_df['Data Collector'][i].get_model_vars_dataframe()
i_run_data['Date'] = i_run_data.apply(lambda row: convert_time_to_date(row, 'Time', start_date), axis=1)
br_step_data = br_step_data.append(i_run_data, ignore_index=True)
model_param = i_run_data['Model params'][0]
df_real = prepare_realworld_data().copy()
df_real['date_formatted'] = pd.to_datetime(df_real['date_formatted'])
df_real.sort_values(by=['date_formatted'])
df_sim = i_run_data.copy()
df_sim['Date'] = pd.to_datetime(df_sim['Date'])
df_sim.sort_values(by=['Date'])
df_merged = pd.merge(df_real, df_sim, how='outer', left_on=['date_formatted'],
right_on=['Date'])
if graph_switch:
print('>> For graphs')
print('Model param:', model_param)
graphing(df=df_merged)
if stats_test_switch:
print('>> For statistical tests')
print('Model param:', model_param)
df_merged_sliced = df_merged[(df_merged['date_formatted'] >= start_date)
& (df_merged['date_formatted'] <= end_date_in_reality)]
statistical_test_validation(df=df_merged_sliced)
if realworld_prediction_switch:
print('>> For real-world predictions')
print('Model param:', model_param)
df_merged = predict_by_percent_change_of_another_col(
df=df_merged,
predicted_col='cumulative_cases',
feature_col='Cumulative test-confirmed infectious'
)
df_merged = predict_by_percent_change_of_another_col(
df=df_merged,
predicted_col='cumulative_deaths',
feature_col='Cumulative test-confirmed dead'
)
df_merged = predict_by_percent_change_of_another_col(
df=df_merged,
predicted_col='active_cases',
feature_col='Test-confirmed infectious'
)
br_step_data['File ID'] = filename_tag
if save_switch:
br_step_data.to_csv(os.getcwd() +
'\\project_result\\disease_model_step_data{}_p{}.csv'.format(filename_tag, i),
index=False)
df_merged.to_csv(os.getcwd() +
'\\project_result\\disease_model_merged_data{}_p{}.csv'.format(filename_tag, i),
index=False)
# Helper functions
curr_dir = os.getcwd()
covid19_dir = '\\data\Covid19Canada'
covid19_timeseries_prov_dir = covid19_dir+'\\timeseries_prov'
cases_timeseries_filename = 'cases_timeseries_prov.csv'
mortality_timeseries_filename = 'mortality_timeseries_prov.csv'
overall_timeseries_filename = 'active_timeseries_prov.csv'
testing_timeseries_filename = 'testing_timeseries_prov.csv'
project_result_dir = '\\project_result'
output_real_data_filename = 'realworldCovid19_step_data_processed.csv'
popn_factor = 1000000 # Setting
def convert_time_to_date(row, var, start_date):
current_date = start_date + datetime.timedelta(days=(int(row[var]-1)))
return current_date
def get_realworld_data():
path_overall = curr_dir+covid19_timeseries_prov_dir+'\\'+overall_timeseries_filename
path_testing = curr_dir+covid19_timeseries_prov_dir+'\\'+testing_timeseries_filename
df_overall = pd.read_csv(path_overall, encoding='utf-8', low_memory=False)
df_overall.rename(columns={'date_active': 'date'}, inplace=True)
df_testing = pd.read_csv(path_testing, encoding='utf-8', low_memory=False)
df_testing.rename(columns={'date_testing': 'date'}, inplace=True)
df_merged = pd.merge(df_overall, df_testing, on=['province', 'date'], how='outer')
df_merged['testing'].fillna(0, inplace=True)
df_merged['cumulative_testing'].fillna(0, inplace=True)
del df_merged['testing_info']
return df_merged
def prepare_realworld_data():
df_canada = get_realworld_data().copy()
# Restrict location
prov = 'Alberta'
if prov == 'Alberta':
prov_popn = 4.41 * 1000000 # Source: https://economicdashboard.alberta.ca/Population
df = df_canada[df_canada['province'] == 'Alberta']
# Restrict date range
df['date_formatted'] =
|
pd.to_datetime(df['date'], format='%d-%m-%Y')
|
pandas.to_datetime
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/11/25 15:47
Desc: Jin10 Data websocket real-time data interface - news
https://www.jin10.com/
wss://wss-flash-1.jin10.com/
# TODO This interface has issues on Ubuntu 18.04
"""
import pandas as pd
import requests
def js_news(indicator: str = '最新资讯') -> pd.DataFrame:
"""
    Jin10 Data - latest news
https://www.jin10.com/
    :param indicator: choice of {'最新资讯', '最新数据'} (i.e. 'latest news' or 'latest data')
:type indicator: str
    :return: Jin10 data
:rtype: pandas.DataFrame
"""
url = 'https://m.jin10.com/flash'
r = requests.get(url)
text_data = r.json()
text_data = [item.strip() for item in text_data]
big_df = pd.DataFrame()
try:
temp_df_part_one = pd.DataFrame([item.split("#") for item in text_data if item.startswith('0#1#')]).iloc[:, [2, 3]]
except IndexError:
temp_df_part_one = pd.DataFrame()
try:
temp_df_part_two = pd.DataFrame([item.split('#') for item in text_data if item.startswith('0#0#')]).iloc[:, [2, 3]]
except IndexError:
temp_df_part_two =
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import os.path as osp
import os
import shutil
import glob
import re
from RLA.easy_log.const import *
from RLA.const import DEFAULT_X_NAME
import stat
import distutils.dir_util
import yaml
import pandas as pd
import csv
class Filter(object):
ALL = 'all'
SMALL_TIMESTEP = 'small_ts'
def config(self, type, timstep_bound):
self.type = type
self.timstep_bound = timstep_bound
class BasicLogTool(object):
def __init__(self, optional_log_type=None):
self.log_types = default_log_types.copy()
if optional_log_type is not None:
self.log_types.extend(optional_log_type)
def _find_small_timestep_log(self, proj_root, task_table_name, regex, timstep_upper_bound=np.inf, timestep_lower_bound=0):
small_timestep_regs = []
root_dir_regex = osp.join(proj_root, LOG, task_table_name, regex)
for root_dir in glob.glob(root_dir_regex):
print("searching dirs", root_dir)
if os.path.exists(root_dir):
for file_list in os.walk(root_dir):
if re.search(r'\d{4}/\d{2}/\d{2}/\d{2}-\d{2}-\d{2}-\d{6}', file_list[0]):
target_reg = re.search(r'\d{4}/\d{2}/\d{2}/\d{2}-\d{2}-\d{2}-\d{6}', file_list[0]).group(0)
else:
target_reg = None
if target_reg is not None:
if LOG in root_dir_regex:
try:
print(
re.search(r'\d{4}/\d{2}/\d{2}/\d{2}-\d{2}-\d{2}-\d{6}', file_list[0]).group(1))
raise RuntimeError("found repeated timestamp")
except IndexError as e:
pass
progress_csv_file = file_list[0] + '/progress.csv'
if file_list[1] == ['tb'] or os.path.exists(progress_csv_file): # in root of logdir
if not os.path.exists(progress_csv_file) or os.path.getsize(progress_csv_file) == 0:
print("[delete] find an experiment without progress.csv.", file_list[0])
if timestep_lower_bound <= 0:
small_timestep_regs.append([target_reg, file_list[0]])
else:
try:
reader = pd.read_csv(progress_csv_file, chunksize=100000, quoting=csv.QUOTE_NONE,
encoding='utf-8', index_col=False, comment='#')
raw_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
# Copyright 2021 The Funnel Rocket Maintainers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import shutil
from contextlib import contextmanager
from dataclasses import dataclass
from enum import auto
from typing import List
import numpy as np
import pytest
from pandas import RangeIndex, Series, DataFrame
from frocket.common.dataset import DatasetPartsInfo, DatasetId, DatasetPartId, PartNamingMethod, DatasetInfo, \
DatasetColumnType, DatasetShortSchema
from frocket.common.serializable import AutoNamedEnum
from frocket.worker.runners.part_loader import shared_part_loader
from tests.utils.base_test_utils import temp_filename, TEMP_DIR, DisablePyTestCollectionMixin
from tests.utils.mock_s3_utils import SKIP_S3_TESTS, new_mock_s3_bucket
class TestColumn(DisablePyTestCollectionMixin, str, AutoNamedEnum):
int_64_userid = auto()
int_64_ts = auto()
int_u32 = auto()
float_64_ts = auto()
float_all_none = auto()
float_32 = auto()
float_category = auto()
str_userid = auto()
str_and_none = auto()
str_all_none = auto()
str_object_all_none = auto()
str_category_userid = auto()
str_category_few = auto()
str_category_many = auto()
bool = auto()
unsupported_datetimes = auto()
unsupported_lists = auto()
DEFAULT_GROUP_COUNT = 200
DEFAULT_ROW_COUNT = 1000
DEFAULT_GROUP_COLUMN = TestColumn.int_64_userid.value
DEFAULT_TIMESTAMP_COLUMN = TestColumn.int_64_ts.value
BASE_TIME = 1609459200000 # Start of 2021, UTC
BASE_USER_ID = 100000
TIME_SHIFT = 10000
UNSUPPORTED_COLUMN_DTYPES = {TestColumn.unsupported_datetimes: 'datetime64[ns]',
TestColumn.unsupported_lists: 'object'}
STR_AND_NONE_VALUES = ["1", "2", "3"]
STR_CAT_FEW_WEIGHTS = [0.9, 0.07, 0.02, 0.01]
STR_CAT_MANY_WEIGHTS = [0.5, 0.2] + [0.01] * 30
def test_colname_to_coltype(name: str) -> DatasetColumnType:
prefix_to_type = {
'int': DatasetColumnType.INT,
'float': DatasetColumnType.FLOAT,
'str': DatasetColumnType.STRING,
'bool': DatasetColumnType.BOOL,
'unsupported': None
}
coltype = prefix_to_type[name.split('_')[0]]
return coltype
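# A minimal illustration of the mapping above (values follow directly from the prefix table; for reference only):
# test_colname_to_coltype(TestColumn.int_64_userid.value) -> DatasetColumnType.INT
# test_colname_to_coltype(TestColumn.unsupported_lists.value) -> None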
def datafile_schema(part: int = 0) -> DatasetShortSchema:
# noinspection PyUnresolvedReferences
result = DatasetShortSchema(
min_timestamp=float(BASE_TIME),
max_timestamp=float(BASE_TIME + TIME_SHIFT),
source_categoricals=[TestColumn.str_category_userid, TestColumn.str_category_many],
potential_categoricals=[TestColumn.str_and_none, TestColumn.str_category_few],
columns={col.value: test_colname_to_coltype(col)
for col in TestColumn
if test_colname_to_coltype(col)})
# print(f"Test dataset short schema is:\n{result.to_json(indent=2)}")
return result
def weighted_list(size: int, weights: list) -> list:
res = []
for idx, w in enumerate(weights):
v = str(idx)
vlen = size * w
res += [v] * int(vlen)
assert len(res) == size
return res
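# Illustrative call (hypothetical arguments; the assert requires the weights to fill the size exactly):
# weighted_list(10, [0.5, 0.3, 0.2]) -> ['0', '0', '0', '0', '0', '1', '1', '1', '2', '2']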
def str_and_none_column_values(part: int = 0, with_none: bool = True) -> List[str]:
result = [*STR_AND_NONE_VALUES, f"part-{part}"]
if with_none:
result.append(None)
return result
def create_datafile(part: int = 0, size: int = DEFAULT_ROW_COUNT, filename: str = None) -> str:
# First, prepare data for columns
# Each part has a separate set of user (a.k.a. group) IDs
initial_user_id = BASE_USER_ID * part
min_user_id = initial_user_id
max_user_id = initial_user_id + DEFAULT_GROUP_COUNT - 1
    # For each test, ensure that each user ID appears in the file at least once by including the whole range,
# then add random IDs in the range
int64_user_ids = \
list(range(min_user_id, max_user_id + 1)) + \
random.choices(range(min_user_id, max_user_id + 1), k=size - DEFAULT_GROUP_COUNT)
# And also represent as strings in another column
str_user_ids = [str(uid) for uid in int64_user_ids]
# Timestamp: each part has a range of values of size TIME_SHIFT
min_ts = BASE_TIME + (TIME_SHIFT * part)
max_ts = BASE_TIME + (TIME_SHIFT * (part + 1))
# Ensure that min & max timestamps appear exactly once, and fill the rest randomly in the range
int_timestamps = \
[min_ts, max_ts] + \
random.choices(range(min_ts + 1, max_ts), k=size-2)
# Now as floats and as (incorrect!) datetimes (datetimes currently unsupported)
float_timestamps = [ts + random.random() for ts in int_timestamps]
# More test columns
int_u32_values = random.choices(range(100), k=size)
float_32_values = [np.nan, *[random.random() for _ in range(size - 2)], np.nan]
str_and_none_values = random.choices(str_and_none_column_values(part), k=size)
bool_values = random.choices([True, False], k=size)
# For yet-unsupported columns below
lists_values = [[1, 2, 3]] * size
datetimes = [ts * 1000000 for ts in float_timestamps]
# Now create all series
idx = RangeIndex(size)
columns = {
TestColumn.int_64_userid: Series(data=int64_user_ids),
TestColumn.int_64_ts: Series(data=int_timestamps),
TestColumn.int_u32: Series(data=int_u32_values, dtype='uint32'),
TestColumn.float_64_ts: Series(data=float_timestamps),
TestColumn.float_all_none: Series(data=None, index=idx, dtype='float64'),
TestColumn.float_32: Series(data=float_32_values, dtype='float32'),
TestColumn.float_category:
|
Series(data=float_timestamps, index=idx, dtype='category')
|
pandas.Series
|
import argparse
import json
import os
import boto3
import pandas as pd
def sr2df(response):
data = [[sr['SpotInstanceRequestId'], sr.get('InstanceId'), sr['State'], sr['Status']['Code']]
for sr in response['SpotInstanceRequests']]
return pd.DataFrame(data, columns=['spot_req_id', 'ec2_id', 'state', 'status'])
def ins2df(response):
data = [[i['InstanceType'], i['State']['Name'], i.get('PublicDnsName'), i['SpotInstanceRequestId']]
for res in response['Reservations'] for i in res['Instances']]
return pd.DataFrame(data, columns=['type', 'state', 'host', 'spot_req_id'])
def cancel2df(response):
data = [[sr['SpotInstanceRequestId'], sr['State']] for sr in response['CancelledSpotInstanceRequests']]
return pd.DataFrame(data, columns=['spot_req_id', 'state'])
def terminate2df(response):
data = [[sr['InstanceId'], sr['CurrentState']['Name']] for sr in response['TerminatingInstances']]
return pd.DataFrame(data, columns=['ec2_id', 'state'])
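# A hedged sketch of the response shape these converters expect (hypothetical values; the keys
# simply mirror those accessed in sr2df above):
# sr2df({'SpotInstanceRequests': [{'SpotInstanceRequestId': 'sir-1', 'InstanceId': 'i-1',
#                                  'State': 'active', 'Status': {'Code': 'fulfilled'}}]})
# -> one-row DataFrame: spot_req_id='sir-1', ec2_id='i-1', state='active', status='fulfilled'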
class Ec2Client:
def __init__(self, ec2):
self.ec2 = ec2
def list_instances(self):
response = self.ec2.describe_instances()
return ins2df(response)
def list_spot_requests(self):
response = self.ec2.describe_spot_instance_requests()
return sr2df(response)
    def launch_spot(self, spec):
        response = self.ec2.request_spot_instances(**spec)
        # the response also carries a 'SpotInstanceRequests' list, so reuse sr2df so the caller can display it
        return sr2df(response)
def cancel_spot(self, ids):
response = self.ec2.cancel_spot_instance_requests(SpotInstanceRequestIds=ids)
return cancel2df(response)
def terminate_instances(self, ids):
response = self.ec2.terminate_instances(InstanceIds=ids)
return terminate2df(response)
def _create_ec2():
return Ec2Client(boto3.client("ec2"))
def _load_specs(settings_path):
with open(settings_path) as f:
return json.load(f)
def _display_instances(df):
    print(df.head())
def _display_df(df):
if df.shape[0] == 0:
print("No items")
return
print(df.to_string(index=False))
def ls_instances():
ec2 = _create_ec2()
df = ec2.list_instances()
_display_df(df)
def ls_spot_req():
ec2 = _create_ec2()
df = ec2.list_spot_requests()
_display_df(df)
def launch_ec2():
ec2 = _create_ec2()
specs = _load_specs(CONFIG_NAME)
data = ec2.launch_spot(specs)
return _display_df(data)
def ls_msk():
pass
def launch_msk():
pass
def stop_msk():
pass
def stop_all():
ec2 = _create_ec2()
df = ec2.list_spot_requests()
active = df[df.state == 'active']
sr_ids = list(set(active.spot_req_id))
vm_ids = list(set(active.ec2_id))
if sr_ids:
df = ec2.cancel_spot(sr_ids)
_display_df(df)
if vm_ids:
df = ec2.terminate_instances(vm_ids)
_display_df(df)
def _scan_settings():
return [f for f in os.listdir('.') if f.endswith('json')]
def main():
global CONFIG_NAME
commands = {
"ls": ls_instances,
"ls-sr": ls_spot_req,
"launch": launch_ec2,
"msk" : launch_msk,
"stop-all": stop_all
}
parser = argparse.ArgumentParser()
parser.add_argument("command", choices=commands.keys())
parser.add_argument("--config", choices=_scan_settings(), default='ingest.json')
args = parser.parse_args()
CONFIG_NAME = args.config
command = commands[args.command]
|
pd.option_context('display.max_rows', None, 'display.max_columns', None)
|
pandas.option_context
|
import itertools as itt
import pathlib as pl
from configparser import ConfigParser
import joblib as jl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as sst
import seaborn as sns
from statannot import add_stat_annotation
from src.visualization import fancy_plots as fplt
from src.data.cache import set_name
"""
2020-05-??
Used an exponential decay to model the evolution of contextual effects over time. Here the fitted parameters (tau and
y intercept r0) are compared across different treatments (probes, transition_pairs), between single cell and population
analysis (dPCA, LDA) and finally between fitting the dprime or its profile of significance.
tau is selected from the fitted significance profile, and r0 from the fitted dprime.
"""
config = ConfigParser()
config.read_file(open(pl.Path(__file__).parents[2] / 'config' / 'settings.ini'))
# analysis should be created and cached with trp_batch_dprime.py beforehand, using the same meta parameters
meta = {'reliability': 0.1, # r value
'smoothing_window': 0, # ms
'raster_fs': 30,
'transitions': ['silence', 'continuous', 'similar', 'sharp'],
'montecarlo': 1000,
'zscore': True,
'dprime_absolute': None}
# transferable plotting parameters
plt.rcParams['svg.fonttype'] = 'none'
sup_title_size = 30
sub_title_size = 20
ax_lab_size = 15
ax_val_size = 11
full_screen = [19.2, 9.83]
sns.set_style("ticks")
########################################################################################################################
########################################################################################################################
# data frame containing all the important summary data, i.e. exponential decay fits for dprime and significance, for
# all combinations of transition pairs and probes, for the means across probes, transition pairs or both, and
# for the single cell analysis or the dPCA projections
summary_DF_file = pl.Path(config['paths']['analysis_cache']) / 'DF_summary' / set_name(meta)
print('loading cached summary DataFrame')
DF = jl.load(summary_DF_file)
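# based on the filters used below, the cached DF is expected to contain at least the columns:
# ['analysis', 'region', 'siteid', 'cellid', 'probe', 'transition_pair', 'parameter', 'source', 'value']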
########################################################################################################################
# SC
########################################################################################################################
# compare tau between different probe means
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe != 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'tau'
ff_source = DF.source == 'significance'
ff_outliers = DF.value < 1000
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_outliers,
['cellid', 'probe', 'value']]
pivoted = filtered.pivot(index='cellid', columns='probe', values='value').dropna().reset_index()
molten = pivoted.melt(id_vars='cellid', var_name='probe')
fig, ax = plt.subplots()
# ax = sns.violinplot(x='probe', y='value', data=molten, ax=ax, color='gray', cut=0)
ax = sns.swarmplot(x='probe', y='value', data=molten, ax=ax, color='gray')
sns.despine(ax=ax)
# no significant comparisons
box_pairs = list(itt.combinations(filtered.probe.unique(), 2))
# box_pairs = [('probe_2', 'probe_3'), ('probe_3', 'probe_5')]
stat_resutls = add_stat_annotation(ax, data=molten, x='probe', y='value', test='Wilcoxon',
box_pairs=box_pairs, comparisons_correction=None)
ax.set_ylabel(f'tau (ms)', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'summary significance-tau comparison between probes'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
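# the filter -> pivot -> melt pattern above is reused throughout this script; a toy sketch of the
# reshape (hypothetical values, illustration only):
# toy = pd.DataFrame({'cellid': ['c1', 'c1', 'c2', 'c2'],
#                     'probe': ['probe_2', 'probe_3', 'probe_2', 'probe_3'],
#                     'value': [10., 20., 30., 40.]})
# wide = toy.pivot(index='cellid', columns='probe', values='value')  # one row per cell, one column per probe
# back = wide.dropna().reset_index().melt(id_vars='cellid', var_name='probe')  # long format again,
# keeping only cells that have a value for every probe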
########################################################################################################################
# compare tau between different transition pair means
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair != 'mean'
ff_param = DF.parameter == 'tau'
ff_source = DF.source == 'significance'
ff_outliers = DF.value < 1000
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_outliers,
['cellid', 'transition_pair', 'value']]
pivoted = filtered.pivot(index='cellid', columns='transition_pair', values='value').dropna().reset_index()
molten = pivoted.melt(id_vars='cellid', var_name='transition_pair')
fig, ax = plt.subplots()
# ax = sns.violinplot(x='transition_pair', y='value', data=molten, ax=ax, color='gray', cut=0)
ax = sns.swarmplot(x='transition_pair', y='value', data=molten, ax=ax, color='gray')
sns.despine(ax=ax)
# box_pairs = list(itt.combinations(filtered.transition_pair.unique(), 2))
box_pairs = [('continuous_sharp', 'continuous_similar'), ('continuous_similar', 'silence_continuous'),
('continuous_similar', 'silence_sharp'), ('continuous_similar', 'silence_similar'),
('continuous_similar', 'similar_sharp')]
stat_resutls = add_stat_annotation(ax, data=molten, x='transition_pair', y='value', test='Wilcoxon',
box_pairs=box_pairs, comparisons_correction=None)
ax.set_ylabel(f'tau (ms)', fontsize=ax_lab_size)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'summary significance-tau comparison between transitions'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# compare r0 between different probe means
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe != 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'r0'
ff_source = DF.source == 'dprime'
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source,
['cellid', 'probe', 'value']]
pivoted = filtered.pivot(index='cellid', columns='probe', values='value').dropna().reset_index()
molten = pivoted.melt(id_vars='cellid', var_name='probe')
fig, ax = plt.subplots()
# ax = sns.violinplot(x='probe', y='value', data=molten, ax=ax, color='gray', cut=0)
ax = sns.swarmplot(x='probe', y='value', data=molten, ax=ax, color='gray')
sns.despine(ax=ax)
box_pairs = list(itt.combinations(filtered.probe.unique(), 2))
# box_pairs = [('probe_2', 'probe_3')]
stat_resutls = add_stat_annotation(ax, data=molten, x='probe', y='value', test='Wilcoxon',
box_pairs=box_pairs, comparisons_correction=None)
ax.set_ylabel(f'amplitude (z-score)', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'summary dprime-r0 comparison between probes'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# compare r0 between different transition pair means
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair != 'mean'
ff_param = DF.parameter == 'r0'
ff_source = DF.source == 'dprime'
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source,
['cellid', 'transition_pair', 'value']]
pivoted = filtered.pivot(index='cellid', columns='transition_pair', values='value').dropna().reset_index()
molten = pivoted.melt(id_vars='cellid', var_name='transition_pair')
fig, ax = plt.subplots()
# ax = sns.violinplot(x='transition_pair', y='value', data=molten, ax=ax, color='gray', cut=0)
ax = sns.swarmplot(x='transition_pair', y='value', data=molten, ax=ax, color='gray')
sns.despine(ax=ax)
box_pairs = list(itt.combinations(filtered.transition_pair.unique(), 2))
# box_pairs = [('continuous_sharp', 'continuous_similar'), ('continuous_sharp', 'silence_continuous'),
# ('continuous_sharp', 'silence_sharp'), ('continuous_sharp', 'silence_similar'),
# ('continuous_similar', 'silence_continuous'), ('continuous_similar', 'silence_sharp'),
# ('continuous_similar', 'silence_similar'), ('continuous_similar', 'similar_sharp'),
# ('silence_similar', 'similar_sharp')]
stat_resutls = add_stat_annotation(ax, data=molten, x='transition_pair', y='value', test='Wilcoxon',
box_pairs=box_pairs, comparisons_correction=None)
ax.set_ylabel(f'amplitude (z-score)', fontsize=ax_lab_size)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'summary dprime-r0 comparison between transitions'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# Distribution of cells in r0 tau space
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'r0'
ff_source = DF.source == 'dprime'
R0 = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source,
['region', 'siteid', 'cellid', 'parameter', 'value']]
ff_param = DF.parameter == 'tau'
ff_source = DF.source == 'significance'
ff_outliers = DF.value < 2000
Tau = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_outliers,
['region', 'siteid', 'cellid', 'parameter', 'value']]
filtered = pd.concat([R0, Tau])
pivoted = filtered.pivot_table(index=['region', 'siteid', 'cellid'],
columns='parameter', values='value').dropna().reset_index()
fig, ax = plt.subplots()
# ax = sns.scatterplot(x='r0', y='tau', data=pivoted, color='black')
ax = sns.regplot(x='r0', y='tau', data=pivoted, color='black')
sns.despine(ax=ax)
ax.set_ylabel(f'tau (ms)', fontsize=ax_lab_size)
ax.set_xlabel('amplitude (z-score)', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
_, _, r2, _, _ = sst.linregress(pivoted.r0, pivoted.tau)  # third value returned by linregress is the correlation coefficient r, not r squared
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'all cell summary parameter space r={r2:.3f}'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
#########################################################
# cells in parameter space colored by site
fig, ax = plt.subplots()
# ax = sns.scatterplot(x='r0', y='tau', data=pivoted, color='black')
ax = sns.scatterplot(x='r0', y='tau', hue='siteid', data=pivoted, legend='full')
ax.legend(loc='upper right', fontsize='large', markerscale=1, frameon=False)
sns.despine(ax=ax)
ax.set_ylabel(f'tau (ms)', fontsize=ax_lab_size)
ax.set_xlabel('amplitude (z-score)', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'cells in parameter space by site'
fig.suptitle(title, fontsize=sub_title_size)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
#########################################################
# cells in parameter space colored by region
fig, ax = plt.subplots()
# ax = sns.scatterplot(x='r0', y='tau', data=pivoted, color='black')
ax = sns.scatterplot(x='r0', y='tau', hue='region', data=pivoted, legend='full')
ax.legend(loc='upper right', fontsize='large', markerscale=1, frameon=False)
sns.despine(ax=ax)
ax.set_ylabel(f'tau (ms)', fontsize=ax_lab_size)
ax.set_xlabel('amplitude (z-score)', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'cells in parameter space by region'
fig.suptitle(title, fontsize=sub_title_size)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# single cell comparison between regions and parameters
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'r0'
ff_source = DF.source == 'dprime'
R0 = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source,
['region', 'siteid', 'cellid', 'parameter', 'value']]
ff_param = DF.parameter == 'tau'
ff_source = DF.source == 'significance'
ff_outliers = DF.value < 2000
Tau = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_outliers,
['region', 'siteid', 'cellid', 'parameter', 'value']]
filtered = pd.concat([R0, Tau])
# molten = pivoted.melt(id_vars='cellid', var_name='transition_pair')
g = sns.catplot(x='region', y='value', col='parameter', data=filtered, kind="violin", cut=0,
sharex=True, sharey=False)
sns.despine()
# add significance
for ax, param in zip(np.ravel(g.axes), filtered.parameter.unique()):
sub_filtered = filtered.loc[filtered.parameter == param, :]
box_pairs = [('PEG', 'A1')]
stat_resutls = add_stat_annotation(ax, data=sub_filtered, x='region', y='value', test='Mann-Whitney',
box_pairs=box_pairs, comparisons_correction=None)
if param == 'r0':
param = 'z-score'
elif param == 'tau':
param = 'ms'
ax.set_ylabel(f'{param}', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'SC parameter comparison between regions'
fig.suptitle(title, fontsize=sub_title_size)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# Compares tau between dprime and significance
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter.isin(['tau', 'r0'])
ff_outliers = DF.value < 10000
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_outliers,
['cellid', 'source', 'parameter', 'value']]
pivoted = filtered.pivot_table(index=['cellid', 'parameter'], columns='source', values='value').dropna().reset_index()
facet_grid = sns.lmplot(x='dprime', y='significance', col='parameter', data=pivoted,
sharex=False, sharey=False, scatter_kws={'color': 'black'}, line_kws={'color': 'black'})
# draws unit line, formats ax
for ax in np.ravel(facet_grid.axes):
_ = fplt.unit_line(ax)
ax.xaxis.label.set_size(ax_lab_size)
ax.yaxis.label.set_size(ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((16, 8))
title = f'significance vs dprime fitted params comparison'
fig.suptitle(title, fontsize=20)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# dCPA dPCA
########################################################################################################################
# dPCA compare tau between different probe means
ff_anal = DF.analysis == 'dPCA'
ff_probe = DF.probe != 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'tau'
ff_source = DF.source == 'significance'
ff_outliers = DF.value < 2000
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_outliers,
['siteid', 'probe', 'value']]
pivoted = filtered.pivot(index='siteid', columns='probe', values='value').dropna().reset_index()
molten = pivoted.melt(id_vars='siteid', var_name='probe')
fig, ax = plt.subplots()
# ax = sns.violinplot(x='probe', y='value', data=molten, ax=ax, color='gray', cut=0)
ax = sns.swarmplot(x='probe', y='value', data=molten, ax=ax, color='gray')
sns.despine(ax=ax)
# box_pairs = list(itt.combinations(filtered.probe.unique(), 2))
# box_pairs = [('probe_2', 'probe_3'), ('probe_3', 'probe_5')]
# stat_resutls = add_stat_annotation(ax, data=molten, x='probe', y='value', test='Wilcoxon',
# box_pairs=box_pairs, comparisons_correction=None)
ax.set_ylabel(f'tau (ms)', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'dPCA summary significance-tau comparison between probes'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# dPCA compare tau between different transition pair means
ff_anal = DF.analysis == 'dPCA'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair != 'mean'
ff_param = DF.parameter == 'tau'
ff_source = DF.source == 'significance'
ff_outliers = DF.value < 2000
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_outliers,
['siteid', 'transition_pair', 'value']]
pivoted = filtered.pivot(index='siteid', columns='transition_pair', values='value').dropna().reset_index()
molten = pivoted.melt(id_vars='siteid', var_name='transition_pair')
fig, ax = plt.subplots()
# ax = sns.violinplot(x='transition_pair', y='value', data=molten, ax=ax, color='gray', cut=0)
ax = sns.swarmplot(x='transition_pair', y='value', data=molten, ax=ax, color='gray')
sns.despine(ax=ax)
# box_pairs = list(itt.combinations(filtered.transition_pair.unique(), 2))
box_pairs = [('continuous_sharp', 'continuous_similar'), ('continuous_similar', 'silence_continuous')]
stat_resutls = add_stat_annotation(ax, data=molten, x='transition_pair', y='value', test='Wilcoxon',
box_pairs=box_pairs, comparisons_correction=None)
ax.set_ylabel(f'tau (ms)', fontsize=ax_lab_size)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'dPCA summary significance-tau comparison between transitions'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# dPCA compare r0 between different probe means
ff_anal = DF.analysis == 'dPCA'
ff_probe = DF.probe != 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'r0'
ff_source = DF.source == 'dprime'
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source,
['siteid', 'probe', 'value']]
pivoted = filtered.pivot(index='siteid', columns='probe', values='value').dropna().reset_index()
molten = pivoted.melt(id_vars='siteid', var_name='probe')
fig, ax = plt.subplots()
# ax = sns.violinplot(x='probe', y='value', data=molten, ax=ax, color='gray', cut=0)
ax = sns.swarmplot(x='probe', y='value', data=molten, ax=ax, color='gray')
sns.despine(ax=ax)
# box_pairs = list(itt.combinations(filtered.probe.unique(), 2))
box_pairs = [('probe_2', 'probe_3')]
stat_resutls = add_stat_annotation(ax, data=molten, x='probe', y='value', test='Wilcoxon',
box_pairs=box_pairs, comparisons_correction=None)
ax.set_ylabel(f'amplitude (z-score)', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'dPCA summary dprime-r0 comparison between probes'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# dPCA compare r0 between different transition pair means
ff_anal = DF.analysis == 'dPCA'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair != 'mean'
ff_param = DF.parameter == 'r0'
ff_source = DF.source == 'dprime'
filtered = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source,
['siteid', 'transition_pair', 'value']]
pivoted = filtered.pivot(index='siteid', columns='transition_pair', values='value').dropna().reset_index()
molten = pivoted.melt(id_vars='siteid', var_name='transition_pair')
fig, ax = plt.subplots()
# ax = sns.violinplot(x='transition_pair', y='value', data=molten, ax=ax, color='gray', cut=0)
ax = sns.swarmplot(x='transition_pair', y='value', data=molten, ax=ax, color='gray')
sns.despine(ax=ax)
# box_pairs = list(itt.combinations(filtered.transition_pair.unique(), 2))
box_pairs = [('continuous_sharp', 'continuous_similar')]
stat_resutls = add_stat_annotation(ax, data=molten, x='transition_pair', y='value', test='Wilcoxon',
box_pairs=box_pairs, comparisons_correction=None)
ax.set_ylabel(f'amplitude (z-score)', fontsize=ax_lab_size)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'dPCA summary dprime-r0 comparison between transitions'
fig.suptitle(title)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# dPCA comparison between regions and parameters
ff_anal = DF.analysis == 'dPCA'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'r0'
ff_source = DF.source == 'dprime'
R0 = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source,
['region', 'siteid', 'cellid', 'parameter', 'value']]
ff_param = DF.parameter == 'tau'
ff_source = DF.source == 'significance'
ff_outliers = DF.value < 2000
Tau = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_outliers,
['region', 'siteid', 'cellid', 'parameter', 'value']]
filtered = pd.concat([R0, Tau])
# g = sns.catplot(x='region', y='value', col='parameter', data=filtered, kind="violin", cut=0,
# sharex=True, sharey=False)
g = sns.catplot(x='region', y='value', col='parameter', data=filtered, kind="swarm",
sharex=True, sharey=False)
sns.despine()
# add significance
for ax, param in zip(np.ravel(g.axes), filtered.parameter.unique()):
sub_filtered = filtered.loc[filtered.parameter == param, :]
box_pairs = [('PEG', 'A1')]
stat_resutls = add_stat_annotation(ax, data=sub_filtered, x='region', y='value', test='Mann-Whitney',
box_pairs=box_pairs, comparisons_correction=None)
if param == 'r0':
param = 'z-score'
elif param == 'tau':
param = 'ms'
ax.set_ylabel(f'{param}', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
ax.set_xlabel('', fontsize=ax_lab_size)
ax.tick_params(labelsize=ax_val_size)
fig = ax.figure
fig.set_size_inches((6, 6))
title = f'dPCA parameter comparison between regions'
fig.suptitle(title, fontsize=sub_title_size)
fig.tight_layout(rect=(0, 0, 1, 0.95))
fplt.savefig(fig, 'wip3_figures', title)
########################################################################################################################
# SC vs dPCA taus, filtering SC with r0 of dPCA
ff_anal = DF.analysis == 'SC'
ff_probe = DF.probe == 'mean'
ff_trans = DF.transition_pair == 'mean'
ff_param = DF.parameter == 'tau'
ff_source = DF.source == 'significance'
ff_outliers = DF.value < 2000
sing = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_outliers,
['region', 'siteid', 'cellid', 'parameter', 'value']]
sing_pivot = sing.pivot(index='siteid', columns='cellid', values='value')
sing_pivot['max'] = sing_pivot.mean(axis=1)  # note: the column is named 'max' but holds the mean tau across cells
ff_anal = DF.analysis == 'dPCA'
pops = DF.loc[ff_anal & ff_probe & ff_trans & ff_param & ff_source & ff_outliers,
['region', 'siteid', 'cellid', 'parameter', 'value']]
pops = pops.set_index('siteid')
toplot =
|
pd.concat((pops.loc[:, ['region', 'value']], sing_pivot.loc[:, 'max']), axis=1)
|
pandas.concat
|
import os
import yaml
import re
import json
import importlib
import unidecode
import datetime as dt
import numpy as np
import pandas as pd
from typing import Union, Any, Optional
from app.src.logger import logger
def listfloat(a: list) -> list:
"""Given a list in input of string expressed as numbers, transform the number inside the list in float and return the list transformed.
Args:
a: list of string (e.g. ['1', '2', '3'])
Returns:
the list transformed
"""
if "" in a:
a.remove("")
return [float(x) for x in a]
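# Illustrative call (hypothetical input): listfloat(['1', '', '2.5']) -> [1.0, 2.5]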
# Conversions
def underscore_to_camelcase(text: str) -> str:
"""Converts a name or test with underscore to camelcase
Args:
text (str): string with underscore
Returns:
str: string with camelcase
"""
return "".join(x.capitalize() for x in text.split("_"))
def millis2date(ms: int) -> Optional[dt.datetime]:
"""Converts a number of milliseconds elapsed since January 1st, 1970 00:00:00 UTC to a datatime object (ms=0 <=> January 1st, 1970 00:00:00 UTC)
Args:
ms (int): number of ms elapsed since January 1, 1970 00:00:00 UTC
Returns:
dt.datetime: datetime corresponding to the given number of milliseconds
"""
return dt.datetime.fromtimestamp(ms / 1000.0) if
|
pd.notnull(ms)
|
pandas.notnull
|
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Soft sensing via XGBoost on UCI Wastewater Treatment Plant data
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% read data
import pandas as pd
data_raw = pd.read_csv('water-treatment.data', header=None,na_values="?" ) # dataset uses '?' to denote missing value
X_raw = data_raw.iloc[:,1:23]
y_raw = data_raw.iloc[:,29]
#%% handle missing data
# generate a dataframe from inputs dataframe and output series
data =
|
pd.concat([X_raw, y_raw], axis=1)
|
pandas.concat
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
data = pd.read_csv('dataset/train.csv')
data.columns
print("Dimensions before removing null values")
data.shape
print("Null values?")
print(data.isnull().any())
data.head()
data.info()
dept = data.iloc[:,[1,12]].copy()
dept_per = dept.copy()
print(dept_per)
plt.figure(figsize=(15,4))
ax = sns.countplot(x="department",data=data, palette="viridis",hue="is_promoted", order = data['department'].value_counts().index)
ax.grid(False)
plt.suptitle("Department")
plt.show()
plt.figure(figsize=(15,20))
ax = sns.countplot(y="region",data=data,
palette="viridis", order = data['region'].value_counts().index)
ax.grid(False)
sns.set(style="whitegrid")
plt.suptitle("Region")
plt.show()
plt.figure(figsize=(6,4))
ax = sns.countplot(x="gender",data=data, palette="viridis",hue="is_promoted", order=data['gender'].value_counts().index)
sns.set(style="whitegrid")
ax.grid(False)
plt.suptitle("Gender")
plt.show()
plt.figure(figsize=(6,4))
ax = sns.countplot(x="recruitment_channel",data=data, palette="viridis",hue="is_promoted", order=data['recruitment_channel'].value_counts().index)
ax.grid(False)
sns.set(style="whitegrid")
plt.suptitle("Recruitment Channel")
plt.show()
data.is_promoted.value_counts(normalize=True)
data.is_promoted.value_counts()
data_test =
|
pd.read_csv('dataset/test.csv')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
        # account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
        # unicode option should not affect Categorical, as it doesn't care
        # about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
        # deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
        # all "pointers" to '4' must be changed from 3 to 0,...
        c = c.set_categories([4, 3, 2, 1])
        # positions are changed
        self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3]))
        # categories are now in new order
        self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1]))
        # output is the same
        self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1]))
        self.assertEqual(c.min(), 4)
        self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
        # value to remove is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
|
tm.assert_index_equal(sliced.categories, expected.categories)
|
pandas.util.testing.assert_index_equal
|
import numpy as np
import pandas as pd
from numpy import inf, nan
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pandas import DataFrame, Series, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry.point import Point
from pymove import MoveDataFrame
from pymove.utils import integration
from pymove.utils.constants import (
ADDRESS,
CITY,
DATETIME,
DIST_EVENT,
DIST_HOME,
DIST_POI,
EVENT_ID,
EVENT_TYPE,
GEOMETRY,
HOME,
ID_POI,
LATITUDE,
LONGITUDE,
NAME_POI,
POI,
TRAJ_ID,
TYPE_POI,
VIOLATING,
)
list_random_banks = [
[39.984094, 116.319236, 1, 'bank'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bancos_postos'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'bancos_PAE'],
[39.984710, 116.319865, 6, 'bancos_postos'],
[39.984674, 116.319810, 7, 'bancos_agencias'],
[39.984623, 116.319773, 8, 'bancos_filiais'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
]
list_random_bus_station = [
[39.984094, 116.319236, 1, 'transit_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'transit_station'],
[39.984211, 116.319389, 4, 'pontos_de_onibus'],
[39.984217, 116.319422, 5, 'transit_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_random_bar_restaurant = [
[39.984094, 116.319236, 1, 'restaurant'],
[39.984198, 116.319322, 2, 'restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar'],
[39.984217, 116.319422, 5, 'bar'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
]
list_random_parks = [
[39.984094, 116.319236, 1, 'pracas_e_parques'],
[39.984198, 116.319322, 2, 'park'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'park'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'pracas_e_parques'],
]
list_random_police = [
[39.984094, 116.319236, 1, 'distritos_policiais'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'distritos_policiais'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
]
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'), 2],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3],
]
list_pois = [
[39.984094, 116.319236, 1, 'policia', 'distrito_pol_1'],
[39.991013, 116.326384, 2, 'policia', 'policia_federal'],
[40.01, 116.312615, 3, 'comercio', 'supermercado_aroldo'],
[40.013821, 116.306531, 4, 'show', 'forro_tropykalia'],
[40.008099, 116.31771100000002, 5, 'risca-faca',
'rinha_de_galo_world_cup'],
[39.985704, 116.326877, 6, 'evento', 'adocao_de_animais'],
[39.979393, 116.3119, 7, 'show', 'dia_do_municipio']
]
# Union tests
def test_union_poi_bank():
pois_df = DataFrame(
data=list_random_banks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'banks'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'banks'],
[39.984211, 116.319389, 4, 'randomvalue'],
[39.984217, 116.319422, 5, 'banks'],
[39.984710, 116.319865, 6, 'banks'],
[39.984674, 116.319810, 7, 'banks'],
[39.984623, 116.319773, 8, 'banks'],
[39.984606, 116.319732, 9, 'banks'],
[39.984555, 116.319728, 10, 'banks']
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
integration.union_poi_bank(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bus_station():
pois_df = DataFrame(
data=list_random_bus_station,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bus_station'],
[39.984198, 116.319322, 2, 'randomvalue'],
[39.984224, 116.319402, 3, 'bus_station'],
[39.984211, 116.319389, 4, 'bus_station'],
[39.984217, 116.319422, 5, 'bus_station'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, 'bus_station'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bus_station(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_bar_restaurant():
pois_df = DataFrame(
data=list_random_bar_restaurant,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'bar-restaurant'],
[39.984198, 116.319322, 2, 'bar-restaurant'],
[39.984224, 116.319402, 3, 'randomvalue'],
[39.984211, 116.319389, 4, 'bar-restaurant'],
[39.984217, 116.319422, 5, 'bar-restaurant'],
[39.984710, 116.319865, 6, 'bar-restaurant'],
[39.984674, 116.319810, 7, 'random123'],
[39.984623, 116.319773, 8, '123'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_bar_restaurant(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_parks():
pois_df = DataFrame(
data=list_random_parks,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'parks'],
[39.984198, 116.319322, 2, 'parks'],
[39.984224, 116.319402, 3, 'parks'],
[39.984211, 116.319389, 4, 'random'],
[39.984217, 116.319422, 5, '123'],
[39.984710, 116.319865, 6, 'parks'],
[39.984674, 116.319810, 7, 'parks'],
[39.984623, 116.319773, 8, 'parks'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_parks(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_union_poi_police():
pois_df = DataFrame(
data=list_random_police,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, 1, 'police'],
[39.984198, 116.319322, 2, 'police'],
[39.984224, 116.319402, 3, 'police'],
[39.984211, 116.319389, 4, 'police'],
[39.984217, 116.319422, 5, 'random'],
[39.984710, 116.319865, 6, 'randomvalue'],
[39.984674, 116.319810, 7, '123'],
[39.984623, 116.319773, 8, 'bus_station'],
],
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7]
)
integration.union_poi_police(pois_df, TYPE_POI, inplace=True)
assert_frame_equal(pois_df, expected)
def test_join_colletive_areas():
move_df = MoveDataFrame(
data=list_move,
)
move_df['geometry'] = move_df.apply(lambda x: Point(x['lon'], x['lat']), axis=1)
expected = move_df.copy()
indexes_ac = np.linspace(0, move_df.shape[0], 5, dtype=int)
area_c = move_df[move_df.index.isin(indexes_ac)].copy()
integration.join_collective_areas(move_df, area_c, inplace=True)
expected[VIOLATING] = [True, False, True, False, True, False, True, False, False]
assert_frame_equal(move_df, expected)
def test__reset_and_creates_id_and_lat_lon():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, True
)
)
id_expected = np.full(9, '', dtype='object_')
tag_expected = np.full(9, '', dtype='object_')
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
lat_expected = np.full(7, np.Infinity, dtype=np.float64)
lon_expected = np.full(7, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, True, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, True
)
)
lat_expected = np.full(9, np.Infinity, dtype=np.float64)
lon_expected = np.full(9, np.Infinity, dtype=np.float64)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
dists, ids, tags, lats, lons = (
integration._reset_and_creates_id_and_lat_lon(
move_df, pois, False, False
)
)
assert_array_almost_equal(dists, dist_expected)
assert_array_equal(ids, id_expected)
assert_array_equal(tags, tag_expected)
assert_array_almost_equal(lats, lat_expected)
assert_array_almost_equal(lons, lon_expected)
def test__reset_set_window__and_creates_event_id_type():
list_events = [
[39.984094, 116.319236, 1,
Timestamp('2008-10-24 01:57:57'), 'show do tropykalia'],
[39.991013, 116.326384, 2,
Timestamp('2008-10-24 00:22:01'), 'evento da prefeitura'],
[40.01, 116.312615, 3,
Timestamp('2008-10-25 00:21:01'), 'show do seu joao'],
[40.013821, 116.306531, 4,
Timestamp('2008-10-26 00:22:01'), 'missa']
]
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
list_win_start = [
'2008-10-22T17:23:05.000000000', '2008-10-22T22:07:26.000000000',
'2008-10-22T22:20:16.000000000', '2008-10-22T22:33:06.000000000',
'2008-10-22T23:28:33.000000000', '2008-10-23T11:20:45.000000000',
'2008-10-23T11:32:14.000000000', '2008-10-23T11:52:01.000000000',
'2008-10-23T13:27:57.000000000'
]
win_start_expected = Series(pd.to_datetime(list_win_start), name=DATETIME)
list_win_end = [
'2008-10-23T18:23:05.000000000', '2008-10-23T23:07:26.000000000',
'2008-10-23T23:20:16.000000000', '2008-10-23T23:33:06.000000000',
'2008-10-24T00:28:33.000000000', '2008-10-24T12:20:45.000000000',
'2008-10-24T12:32:14.000000000', '2008-10-24T12:52:01.000000000',
'2008-10-24T14:27:57.000000000'
]
win_end_expected = Series(pd.to_datetime(list_win_end), name=DATETIME)
dist_expected = np.full(
9, np.Infinity, dtype=np.float64
)
type_expected = np.full(9, '', dtype='object_')
id_expected = np.full(9, '', dtype='object_')
window_starts, window_ends, current_distances, event_id, event_type = (
integration._reset_set_window__and_creates_event_id_type(
move_df, pois, 45000, DATETIME
)
)
assert_series_equal(window_starts, win_start_expected)
assert_series_equal(window_ends, win_end_expected)
assert_array_almost_equal(current_distances, dist_expected)
assert_array_equal(event_id, id_expected)
assert_array_equal(event_type, type_expected)
def test_reset_set_window_and_creates_event_id_type_all():
list_move = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'), 1],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'), 1],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'), 1],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'), 2],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3],
]
move_df = MoveDataFrame(list_move)
list_events = [
[39.984094, 116.319236, 1, Timestamp('2008-10-24 01:57:57'),
'show do tropykalia'],
[39.991013, 116.326384, 2, Timestamp('2008-10-24 00:22:01'),
'evento da prefeitura'],
[40.01, 116.312615, 3, Timestamp('2008-10-25 00:21:01'),
'show do seu joao'],
[40.013821, 116.306531, 4, Timestamp('2008-10-26 00:22:01'),
'missa']
]
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
list_win_start = [
'2008-10-23T03:53:05.000000000', '2008-10-23T08:37:26.000000000',
'2008-10-23T08:50:16.000000000', '2008-10-23T09:03:06.000000000',
'2008-10-23T09:58:33.000000000', '2008-10-23T21:50:45.000000000',
'2008-10-23T22:02:14.000000000', '2008-10-23T22:22:01.000000000',
'2008-10-23T23:57:57.000000000'
]
win_start_expected = Series(pd.to_datetime(list_win_start), name=DATETIME)
list_win_end = [
'2008-10-23T07:53:05.000000000', '2008-10-23T12:37:26.000000000',
'2008-10-23T12:50:16.000000000', '2008-10-23T13:03:06.000000000',
'2008-10-23T13:58:33.000000000', '2008-10-24T01:50:45.000000000',
'2008-10-24T02:02:14.000000000', '2008-10-24T02:22:01.000000000',
'2008-10-24T03:57:57.000000000'
]
win_end_expected = Series(pd.to_datetime(list_win_end), name=DATETIME)
dist_expected = np.full(9, None, dtype=np.ndarray)
type_expected = np.full(9, None, dtype=np.ndarray)
id_expected = np.full(9, None, dtype=np.ndarray)
window_starts, window_ends, current_distances, event_id, event_type = (
integration._reset_set_window_and_creates_event_id_type_all(
move_df, pois, 7200, DATETIME
)
)
assert_series_equal(window_starts, win_start_expected)
assert_series_equal(window_ends, win_end_expected)
assert_array_equal(current_distances, dist_expected)
assert_array_equal(event_id, id_expected)
assert_array_equal(event_type, type_expected)
def test_join_with_pois():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 1,
0.0, 'distrito_pol_1'],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'),
1, 6, 128.24869775642176, 'adocao_de_animais'],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'),
1, 5, 663.0104596559174, 'rinha_de_galo_world_cup'],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'),
1, 4, 286.3387434682031, 'forro_tropykalia'],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2, 4,
0.9311014399622559, 'forro_tropykalia'],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2, 3,
211.06912863495492, 'supermercado_aroldo'],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'),
2, 2, 279.6712398549538, 'policia_federal'],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3, 6,
792.7526066105717, 'adocao_de_animais'],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3, 7,
270.7018856738821, 'dia_do_municipio']
],
columns=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID, ID_POI, DIST_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8]
)
integration.join_with_pois(move_df, pois, inplace=True)
assert_frame_equal(move_df, expected, check_dtype=False)
def test_join_with_pois_by_category():
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_pois,
columns=[LATITUDE, LONGITUDE, TRAJ_ID, TYPE_POI, NAME_POI],
index=[0, 1, 2, 3, 4, 5, 6]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 1,
0.0, 3, 2935.3102772960456, 7, 814.8193850933852, 5,
2672.393533820207, 6, 675.1730686007362],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'),
1, 1, 637.6902157810676, 3, 3072.6963790707114, 7,
1385.3649632111096, 5, 2727.1360691122813, 6, 128.24869775642176],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'),
1, 2, 1385.0871812075436, 3, 1094.8606633486436, 4,
1762.0085654338782, 5, 663.0104596559174, 6, 1965.702358742657],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'),
1, 2, 3225.288830967221, 3, 810.5429984051405, 4,
286.3387434682031, 5, 1243.8915481769327, 6, 3768.0652637796675],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2, 2,
3047.8382223981853, 3, 669.9731550451877, 4, 0.9311014399622559,
5, 1145.172578151837, 6, 3574.252994707609],
[40.009735, 116.315069, Timestamp('2008-10-23 23:50:45'), 2, 2,
2294.0758201547073, 3, 211.06912863495492, 4, 857.4175399672413,
5, 289.35378153627966, 6, 2855.1657930463994],
[39.993527, 116.32648300000001, Timestamp('2008-10-24 00:02:14'),
2, 2, 279.6712398549538, 3, 2179.5701631051966, 7,
2003.4096341742952, 5, 1784.3132149978549, 6, 870.5252810680124],
[39.978575, 116.326975, Timestamp('2008-10-24 00:22:01'), 3, 1,
900.7798955139455, 3, 3702.2394204188754, 7, 1287.7039084016499,
5, 3376.4438614084356, 6, 792.7526066105717],
[39.981668, 116.310769, Timestamp('2008-10-24 01:57:57'), 3, 1,
770.188754517813, 3, 3154.296880053552, 7, 270.7018856738821, 5,
2997.898227057909, 6, 1443.9247752786023]
],
columns=[
LATITUDE, LONGITUDE, DATETIME, TRAJ_ID, 'id_policia', 'dist_policia',
'id_comercio', 'dist_comercio', 'id_show', 'dist_show', 'id_risca-faca',
'dist_risca-faca', 'id_evento', 'dist_evento'
],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8]
)
integration.join_with_pois_by_category(move_df, pois, inplace=True)
assert_frame_equal(move_df, expected, check_dtype=False)
def test_join_with_events():
list_events = [
[39.984094, 116.319236, 1,
Timestamp('2008-10-24 01:57:57'), 'show do tropykalia'],
[39.991013, 116.326384, 2,
Timestamp('2008-10-24 00:22:01'), 'evento da prefeitura'],
[40.01, 116.312615, 3,
Timestamp('2008-10-25 00:21:01'), 'show do seu joao'],
[40.013821, 116.306531, 4,
Timestamp('2008-10-26 00:22:01'), 'missa']
]
move_df = MoveDataFrame(list_move)
pois = DataFrame(
data=list_events,
columns=[LATITUDE, LONGITUDE, EVENT_ID, DATETIME, EVENT_TYPE],
index=[0, 1, 2, 3]
)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1,
'', inf, ''],
[39.984559000000004, 116.326696, Timestamp('2008-10-23 10:37:26'),
1, '', inf, ''],
[40.002899, 116.32151999999999, Timestamp('2008-10-23 10:50:16'),
1, '', inf, ''],
[40.016238, 116.30769099999999, Timestamp('2008-10-23 11:03:06'),
1, '', inf, ''],
[40.013814, 116.306525, Timestamp('2008-10-23 11:58:33'), 2, 2,
3047.8382223981853, 'evento da prefeitura'],
[40.009735, 116.315069,
|
Timestamp('2008-10-23 23:50:45')
|
pandas.Timestamp
|
import os
import re
import sys
from io import StringIO
import numpy as np
import pandas as pd
from Bio import AlignIO, SeqIO
from Bio.Align.Applications import MafftCommandline, MuscleCommandline
from Bio.Phylo.PAML import yn00
import wgdi.base as base
class ks():
def __init__(self, options):
base_conf = base.config()
self.pair_pep_file = 'pair.pep'
self.pair_cds_file = 'pair.cds'
self.prot_align_file = 'prot.aln'
self.mrtrans = 'pair.mrtrans'
self.pair_yn = 'pair.yn'
self.cds_file = 'cds'
self.pep_file = 'pep'
for k, v in base_conf:
setattr(self, str(k), v)
for k, v in options:
setattr(self, str(k), v)
print(str(k), ' = ', v)
def auto_file(self):
pairs = []
p = pd.read_csv(self.pairs_file, sep='\n', header=None, nrows=30)
p = '\n'.join(p[0])
if 'path length' in p or 'MAXIMUM GAP' in p:
collinearity = base.read_colinearscan(self.pairs_file)
pairs = [[v[0], v[2]] for k in collinearity for v in k[1]]
elif 'MATCH_SIZE' in p or '## Alignment' in p:
collinearity = base.read_mcscanx(self.pairs_file)
pairs = [[v[0], v[2]] for k in collinearity for v in k[1]]
elif '# Alignment' in p:
collinearity = base.read_coliearity(self.pairs_file)
pairs = [[v[0], v[2]] for k in collinearity for v in k[1]]
elif '###' in p:
collinearity = base.read_jcvi(self.pairs_file)
pairs = [[v[0], v[2]] for k in collinearity for v in k[1]]
elif ',' in p:
collinearity =
|
pd.read_csv(self.pairs_file, header=None)
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import re
#===============================================================================
def cleandots(x,mark):
if str(x) == mark : # The mark was a '.' in HW 3 use case.
return np.NaN
else:
return str (x)
def cleandotsincolumn (series,mark):
return series.apply (lambda x : cleandots (x,mark))
def cleandotsindataframe (mydataframe,mark):
columns = mydataframe.columns
for col in columns:
mydataframe [col] = cleandotsincolumn (mydataframe [col],mark)
return mydataframe
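# Hedged usage sketch for the cleandots helpers above. The 'score' column and
# the '.' marker are illustrative assumptions (the '.' mark matches the HW 3
# use case mentioned in cleandots).
def _cleandots_example():
    raw = pd.DataFrame({'score': ['1', '.', '3']})
    # '.' cells become np.NaN; every other cell is cast to str
    return cleandotsindataframe(raw, '.')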
#===============================================================================
def rename(df, col, name1, name2):
    # use .loc to avoid chained-assignment (SettingWithCopy) pitfalls
    df.loc[df[col] == name1, col] = name2
def WantedCols (df,wantedcol):
columns = df.columns
for col in columns:
if col not in wantedcol:
del df[col]
return df
def unWantedCols (df,unwanted):
for col in unwanted:
del df [col]
return df
#===============================================================================
def onlyNum(variable):
try :
variable = round (float (variable),6)
return [variable if type (variable) == int or type (variable) == float else np.NaN] [0]
except:
variable = np.NaN
return variable
# if type(variable) == int or type(variable) == float:
# return variable
# else:
# return np.NaN
def dfonlynum (df,*columns):
for col in columns:
df [col] = df [col].apply (onlyNum)
return df
def NumMask (df,col,x,y): # Select a set of numbers from a column. I used it for Ranks.
Numberlist= [i for i in range (x,y+1)]
mask= df [col].apply(lambda x : x in Numberlist)
return mask
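# Hedged usage sketch for NumMask. The 'Rank' column and the 1-10 range are
# illustrative assumptions only.
def _nummask_example(df):
    # True where 'Rank' equals an integer from 1 to 10 (inclusive)
    mask = NumMask(df, 'Rank', 1, 10)
    return df[mask]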
#===============================================================================
def catchpattern(df,col,mypattern):
return df [col].apply (lambda x: re.findall(mypattern,str(x)) [0])
def splitname (df,col,splitchar):
return df [col].apply (lambda x : str(x).split (splitchar) [0].rstrip()) # Removing explanations (descriptions)
#===============================================================================
def print_full_rows(x):
pd.set_option('display.max_rows', len(x))
print(x)
pd.reset_option('display.max_rows')
def print_full_col (x):
pd.set_option('display.max_columns', len(x.columns))
print(x)
pd.reset_option('display.max_columns')
def print_full (x) :
pd.set_option('display.max_rows', len(x))
pd.set_option('display.max_columns', len(x.columns))
print(x)
|
pd.reset_option('display.max_rows')
|
pandas.reset_option
|
import datetime as dt
import json
import pandas as pd
import pytest
import networkx as nx
from data_pipeline_api.file_formats import object_file
from simple_network_sim import loaders
def test_readCompartmentRatesByAge(data_api):
result = loaders.readCompartmentRatesByAge(data_api.read_table("human/compartment-transition", "compartment-transition"))
assert result == {
"70+": {
"E": {"E": pytest.approx(0.573), "A": pytest.approx(0.427)},
"A": {"A": pytest.approx(0.803), "I": pytest.approx(0.0197), "R": pytest.approx(0.1773)},
"I": {"I": pytest.approx(0.67), "D": pytest.approx(0.0165), "H": pytest.approx(0.0495), "R": pytest.approx(0.264)},
"H": {"H": pytest.approx(0.9), "D": pytest.approx(0.042), "R": pytest.approx(0.058)},
"R": {"R": pytest.approx(1.0)},
"D": {"D": pytest.approx(1.0)},
},
"[17,70)": {
"E": {"E": pytest.approx(0.573), "A": pytest.approx(0.427)},
"A": {"A": pytest.approx(0.803), "I": pytest.approx(0.0197), "R": pytest.approx(0.1773)},
"I": {"I": pytest.approx(0.67), "D": pytest.approx(0.0165), "H": pytest.approx(0.0495), "R": pytest.approx(0.264)},
"H": {"H": pytest.approx(0.9), "D": pytest.approx(0.042), "R": pytest.approx(0.058)},
"R": {"R": pytest.approx(1.0)},
"D": {"D": pytest.approx(1.0)},
},
"[0,17)": {
"E": {"E": pytest.approx(0.573), "A": pytest.approx(0.427)},
"A": {"A": pytest.approx(0.803), "I": pytest.approx(0.0197), "R": pytest.approx(0.1773)},
"I": {"I": pytest.approx(0.67), "D": pytest.approx(0.0165), "H": pytest.approx(0.0495), "R": pytest.approx(0.264)},
"H": {"H": pytest.approx(0.9), "D": pytest.approx(0.042), "R": pytest.approx(0.058)},
"R": {"R": pytest.approx(1.0)},
"D": {"D": pytest.approx(1.0)},
},
}
def test_readCompartmentRatesByAge_approximately_one():
result = loaders.readCompartmentRatesByAge(pd.DataFrame([{"age": "70+", "src": "A", "dst": "A", "rate": 0.999999999}]))
assert result == {"70+": {"A": {"A": 0.999999999}}}
def test_readParametersAgeStructured_invalid_float():
with pytest.raises(AssertionError):
df = pd.DataFrame([
{"age": "70+", "src": "A", "dst": "A", "rate": 1.5},
{"age": "70+", "src": "A", "dst": "I", "rate": -0.5},
])
loaders.readCompartmentRatesByAge(df)
def test_readParametersAgeStructured_more_than_100_percent():
with pytest.raises(AssertionError):
df = pd.DataFrame([
{"age": "70+", "src": "A", "dst": "A", "rate": 0.7},
{"age": "70+", "src": "A", "dst": "I", "rate": 0.5},
])
loaders.readCompartmentRatesByAge(df)
def test_readParametersAgeStructured_less_than_100_percent():
with pytest.raises(AssertionError):
df = pd.DataFrame([
{"age": "70+", "src": "A", "dst": "A", "rate": 0.5},
{"age": "70+", "src": "A", "dst": "I", "rate": 0.2},
])
loaders.readCompartmentRatesByAge(df)
def test_readPopulationAgeStructured(data_api):
population = loaders.readPopulationAgeStructured(data_api.read_table("human/population", "population"))
expected = {
"S08000015": {"[0,17)": 65307, "[17,70)": 245680, "70+": 58683},
"S08000016": {"[0,17)": 20237, "[17,70)": 75008, "70+": 20025},
"S08000017": {"[0,17)": 24842, "[17,70)": 96899, "70+": 27049},
"S08000019": {"[0,17)": 55873, "[17,70)": 209221, "70+": 40976},
"S08000020": {"[0,17)": 105607, "[17,70)": 404810, "70+": 74133},
"S08000022": {"[0,17)": 55711, "[17,70)": 214008, "70+": 52081},
"S08000024": {"[0,17)": 159238, "[17,70)": 635249, "70+": 103283},
"S08000025": {"[0,17)": 3773, "[17,70)": 14707, "70+": 3710},
"S08000026": {"[0,17)": 4448, "[17,70)": 15374, "70+": 3168},
"S08000028": {"[0,17)": 4586, "[17,70)": 17367, "70+": 4877},
"S08000029": {"[0,17)": 68150, "[17,70)": 250133, "70+": 53627},
"S08000030": {"[0,17)": 71822, "[17,70)": 280547, "70+": 63711},
"S08000031": {"[0,17)": 208091, "[17,70)": 829574, "70+": 137315},
"S08000032": {"[0,17)": 125287, "[17,70)": 450850, "70+": 83063},
}
assert population == expected
@pytest.mark.parametrize("total", ["-20", "NaN", "ten"])
def test_readPopulationAgeStructured_bad_total(total):
df = pd.DataFrame([
{"Health_Board": "S08000015", "Sex": "Female", "Age": "[17,70)", "Total": total},
])
with pytest.raises(ValueError):
loaders.readPopulationAgeStructured(df)
def test_readPopulationAgeStructured_aggregate_ages():
df = pd.DataFrame([
{"Health_Board": "S08000015", "Sex": "Female", "Age": "[17,70)", "Total": 100},
{"Health_Board": "S08000015", "Sex": "Male", "Age": "[17,70)", "Total": 100},
])
population = loaders.readPopulationAgeStructured(df)
assert population == {"S08000015": {"[17,70)": 200}}
def test_readInfectiousStates():
assert set(loaders.readInfectiousStates(pd.DataFrame([{"Compartment": "A"}, {"Compartment": "I"}]))) == {"A", "I"}
def test_readInfectiousStates_empty():
assert loaders.readInfectiousStates(pd.DataFrame([])) == []
@pytest.mark.parametrize("invalid_infected", ["asdf", float("NaN"), -1, float("inf")])
def test_readInitialInfections_invalid_total(invalid_infected):
df = pd.DataFrame([("S08000015", "[17,70)", invalid_infected)], columns=("Health_Board", "Age", "Infected"))
with pytest.raises(ValueError):
loaders.readInitialInfections(df)
def test_readInitialInfections():
df = pd.DataFrame(
[("S08000015", "[17,70)", 10), ("S08000015", "70+", 5), ("S08000016", "70+", 5)],
columns=("Health_Board", "Age", "Infected"),
)
infected = loaders.readInitialInfections(df)
assert infected == {"S08000015": {"[17,70)": 10.0, "70+": 5.0}, "S08000016": {"70+": 5.0}}
def test_readNodeAttributesJSON(locations):
with open(locations) as fp:
assert loaders.readNodeAttributesJSON(locations) == json.load(fp)
def test_genGraphFromContactFile(base_data_dir, data_api):
with open(str(base_data_dir / "human" / "commutes" / "1" / "data.h5"), "rb") as fp:
df = object_file.read_table(fp, "commutes")
graph = nx.convert_matrix.from_pandas_edgelist(df, edge_attr=True, create_using=nx.DiGraph)
assert nx.is_isomorphic(loaders.genGraphFromContactFile(data_api.read_table("human/commutes", "commutes")), graph)
def test_genGraphFromContactFile_negative_delta_adjustment():
df = pd.DataFrame([
{"source": "a", "target": "b", "weight": 0, "delta_adjustment": -1.0}
])
with pytest.raises(ValueError):
loaders.genGraphFromContactFile(df)
def test_genGraphFromContactFile_negative_weight():
df = pd.DataFrame([
{"source": "a", "target": "b", "weight": -30.0, "delta_adjustment": 1.0}
])
with pytest.raises(ValueError):
loaders.genGraphFromContactFile(df)
def test_genGraphFromContactFile_missing_weight():
df = pd.DataFrame([
{"source": "a", "target": "b", "delta_adjustment": 1.0}
])
with pytest.raises(ValueError):
loaders.genGraphFromContactFile(df)
def test_genGraphFromContactFile_missing_adjustmentt():
df = pd.DataFrame([
{"source": "a", "target": "b", "weight": 30.0}
])
with pytest.raises(ValueError):
loaders.genGraphFromContactFile(df)
def test_readMovementMultipliers(data_api):
ms = loaders.readMovementMultipliers(data_api.read_table("human/movement-multipliers", "movement-multipliers"))
assert ms == {
dt.date(2020, 3, 16): loaders.Multiplier(movement=1.0, contact=1.0),
dt.date(2020, 5, 5): loaders.Multiplier(movement=0.05, contact=0.05),
dt.date(2020, 5, 30): loaders.Multiplier(movement=0.3, contact=0.3),
dt.date(2020, 6, 4): loaders.Multiplier(movement=0.8, contact=0.8),
dt.date(2020, 6, 24): loaders.Multiplier(movement=0.9, contact=0.9)
}
@pytest.mark.parametrize("m", [float("NaN"), float("inf"), -1.0, "asdf"])
def test_readMovementMultipliers_bad_movement_multipliers(m):
df = pd.DataFrame([{"Date": "2020-05-05", "Movement_Multiplier": m, "Contact_Multiplier": 1.0}])
with pytest.raises(ValueError):
loaders.readMovementMultipliers(df)
@pytest.mark.parametrize("m", [float("NaN"), float("inf"), -1.0, "asdf"])
def test_readMovementMultipliers_bad_contact_multipliers(m):
df = pd.DataFrame([{"Date": "2020-05-05", "Movement_Multiplier": 1.0, "Contact_Multiplier": m}])
with pytest.raises(ValueError):
loaders.readMovementMultipliers(df)
@pytest.mark.parametrize("t", [0, "12/31/2020", "31/12/2020", "asdf"])
def test_readMovementMultipliers_bad_times(t):
df = pd.DataFrame([{"Date": t, "Movement_Multiplier": 1.0, "Contact_Multiplier": 1.0}])
with pytest.raises(ValueError):
loaders.readMovementMultipliers(df)
def test_readInfectionProbability():
df = pd.DataFrame([("2020-01-01", 0.3), ("2020-02-01", 0.7), ("2020-12-31", 1.0)], columns=["Date", "Value"])
assert loaders.readInfectionProbability(df) == {dt.date(2020, 1, 1): 0.3, dt.date(2020, 2, 1): 0.7,
dt.date(2020, 12, 31): 1.0}
def test_readInfectionProbability_empty():
with pytest.raises(ValueError):
loaders.readInfectionProbability(pd.DataFrame())
@pytest.mark.parametrize("t", [0, "12/31/2020", "31/12/2020", "asdf"])
def test_readInfectionProbability_invalid_time(t):
with pytest.raises(ValueError):
loaders.readInfectionProbability(
|
pd.DataFrame([{"Date": t, "Value": 1.0}])
|
pandas.DataFrame
|
import pandas as pd
from sqlalchemy import create_engine
from library import cf
import talib.abstract as ta
import pymysql.cursors
import numpy as np
from library.logging_pack import *
logger.debug("subindex시작!!!!")
pymysql.install_as_MySQLdb()
daily_craw_engine=create_engine(
"mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/daily_craw",
encoding='utf-8')
daily_buy_list_engine = create_engine(
"mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/daily_buy_list" ,
encoding='utf-8')
simul_engine=create_engine(
"mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/simulator11",
encoding='utf-8')
min_craw_engine = create_engine("mysql+mysqldb://" + cf.db_id + ":" + cf.db_passwd + "@" + cf.db_ip + ":" + cf.db_port + "/min_craw",
encoding='utf-8')
stand_date = '20070903'
# Data transformation
class subindex:
def __init__(self):
logger.debug("subindex 함수로 들어왔다!!")
def collecting(self):
co_sql = f"select TABLE_NAME FROM information_schema.tables WHERE table_schema = 'daily_craw'"
target_code = daily_craw_engine.execute(co_sql).fetchall()
num = len(target_code)
for i in range(num):
self.db_name = target_code[i][0]
self.db_name = self.db_name.replace("%", "%%")
self.collect_db()
            print(self.db_name, "is being fetched!")
def collect_db(self):
        # Load data
sql = "select date,code,vol10,code_name,open,close,low,high,volume from daily_craw.`%s` where Date >= %s order by Date "
rows = daily_craw_engine.execute(sql%(self.db_name,stand_date)).fetchall()
three_s = pd.DataFrame(rows, columns=['date', 'code','vol10' ,'code_name','open' ,'close', 'low', 'high', 'volume'])
three_s = three_s.fillna(0)
        # Convert data to numpy arrays
th_date = list(np.asarray(three_s['date'].tolist()))
th_date_np = np.array(th_date, dtype='f8')
th_close = list(np.asarray(three_s['close'].tolist()))
th_close_np = np.array(th_close, dtype='f8')
th_high = list(np.asarray(three_s['high'].tolist()))
th_high_np = np.array(th_high, dtype='f8')
th_low = list(np.asarray(three_s['low'].tolist()))
th_low_np = np.array(th_low, dtype='f8')
th_volume = list(np.asarray(three_s['volume'].tolist()))
th_volume_np = np.array(th_volume, dtype='f8')
th_open = list(np.asarray(three_s['open'].tolist()))
th_open_np = np.array(th_open, dtype='f8')
th_vol10 = list(np.asarray(three_s['vol10'].tolist()))
th_vol10_np = np.array(th_vol10, dtype='f8')
        # Daily high-low price range (%)
th_diff =((three_s['high']-three_s['low'])/three_s['high'])*100
        # Rolling 30-day maximum of the daily high-low range
th_diff30 = th_diff.rolling(30).max()
        # Compute technical indicators
th_cci = ta._ta_lib.CCI(th_high_np, th_low_np, th_close_np, 9)
th_cci60 = ta._ta_lib.CCI(th_high_np, th_low_np, th_close_np, 60)
##rsi
th_rsi = ta._ta_lib.RSI(th_close_np, 14)
th_rsi5 = ta._ta_lib.RSI(th_close_np, 5)
th_OBV = ta._ta_lib.OBV(th_close_np, th_volume_np)
th_macd, th_macd_signal, th_macd_hist = ta._ta_lib.MACD(th_close_np, fastperiod=12, slowperiod=26,
signalperiod=9)
th_stoch_slowk, th_stoch_slowd = ta._ta_lib.STOCH(th_high_np, th_low_np, th_close_np,
fastk_period=10, slowk_period=2, slowk_matype=0,
slowd_period=2, slowd_matype=0)
        ## changed to a 12-day-line basis, following the book
th_BBAND_U, th_BBAND_M, th_BBAND_L = ta._ta_lib.BBANDS(th_close_np, timeperiod=20, nbdevup=2, nbdevdn=2,
matype=0)
th_BBAND_U14, th_BBAND_M14, th_BBAND_L14 = ta._ta_lib.BBANDS(th_close_np, timeperiod=14, nbdevup=2, nbdevdn=2,
matype=0)
th_BBAND_WIDE = (th_BBAND_U-th_BBAND_L)/th_BBAND_M
th_BBAND_WIDE14 = (th_BBAND_U14 - th_BBAND_L14) / th_BBAND_M14
th_pb=(th_close_np-th_BBAND_L) / (th_BBAND_U-th_BBAND_L)
th_pb14 = (th_close_np - th_BBAND_L14) / (th_BBAND_U14 - th_BBAND_L14)
th_sar = ta._ta_lib.SAR(th_high_np, th_low_np,0.04,0.4)
th_ibs = (th_close_np -th_low_np)/(th_high_np-th_low_np)
th_dema5 = ta._ta_lib.DEMA(th_close_np, 5)
th_dema20 = ta._ta_lib.DEMA(th_close_np,20)
th_dema60 = ta._ta_lib.DEMA(th_close_np, 60)
th_tema5 = ta._ta_lib.TEMA(th_close_np,5)
th_tema20 = ta._ta_lib.TEMA(th_close_np, 20)
th_tema60 = ta._ta_lib.TEMA(th_close_np, 60)
        # ema = exponential moving average
th_ema5 = ta._ta_lib.EMA(th_close_np, 5)
th_ema20 = ta._ta_lib.EMA(th_close_np, 20)
th_ema60 = ta._ta_lib.EMA(th_close_np, 60)
th_ema112 = ta._ta_lib.EMA(th_close_np, 112)
th_ema224 = ta._ta_lib.EMA(th_close_np, 224)
th_ema448 = ta._ta_lib.EMA(th_close_np, 448)
th_ema448diff = ((th_close_np-th_ema448)/th_close_np * 100)
th_ema224diff = ((th_close_np-th_ema224)/th_close_np*100)
th_ema112diff = ((th_close_np-th_ema112)/th_close_np*100)
        # ma: moving average
th_ma112 = ta._ta_lib.MA(th_close_np, 112)
th_ma224 = ta._ta_lib.MA(th_close_np, 224)
th_ma448 = ta._ta_lib.MA(th_close_np, 448)
th_clo5diff = ((th_close_np - ta._ta_lib.MA(th_close_np, 5)) / th_close_np * 100)
th_clo20diff = ((th_close_np - ta._ta_lib.MA(th_close_np, 20)) / th_close_np * 100)
        # DMI values: period changed from 14 to 11
th_pdi = ta._ta_lib.PLUS_DI(th_high_np,th_low_np,th_close_np, 11)
th_mdi = ta._ta_lib.MINUS_DI(th_high_np, th_low_np, th_close_np, 11)
th_dm = ta._ta_lib.PLUS_DM(th_high_np,th_low_np, 11)
th_adx = ta._ta_lib.ADX(th_high_np,th_low_np,th_close_np, 14)
th_adxr = ta._ta_lib.ADXR(th_high_np, th_low_np, th_close_np, 14)
th_obvsig9 =ta._ta_lib.MA(ta._ta_lib.OBV(th_close_np, th_volume_np),9)
        # Williams %R
th_williumr = ta._ta_lib.WILLR(th_high_np,th_low_np,th_close_np, 14)
th_mfi = ta._ta_lib.MFI(th_high_np,th_low_np,th_close_np,th_volume_np, 14)
        # Volume oscillator formula over 10 days
th_ad = ((th_close_np-th_open_np)/(th_high_np-th_low_np) * th_volume_np / th_vol10_np*10)
        # # Intraday intensity (II)
th_ll = (2*th_close_np-th_high_np-th_low_np)/(th_high_np-th_low_np) * th_volume_np
        # Convert all NaN values to 0
np.nan_to_num(th_cci, copy=False)
np.nan_to_num(th_cci60, copy=False)
np.nan_to_num(th_rsi, copy=False)
np.nan_to_num(th_macd, copy=False)
np.nan_to_num(th_macd_signal, copy=False)
np.nan_to_num(th_macd_hist, copy=False)
np.nan_to_num(th_stoch_slowk, copy=False)
np.nan_to_num(th_stoch_slowd, copy=False)
np.nan_to_num(th_BBAND_L, copy=False)
np.nan_to_num(th_BBAND_M, copy=False)
np.nan_to_num(th_BBAND_U, copy=False)
np.nan_to_num(th_BBAND_L14, copy=False)
np.nan_to_num(th_BBAND_M14, copy=False)
np.nan_to_num(th_BBAND_U14, copy=False)
np.nan_to_num(th_OBV, copy=False)
np.nan_to_num(th_sar, copy=False)
np.nan_to_num(th_dema5, copy=False)
np.nan_to_num(th_dema20, copy=False)
np.nan_to_num(th_dema60, copy=False)
np.nan_to_num(th_tema5, copy=False)
np.nan_to_num(th_tema20, copy=False)
np.nan_to_num(th_tema60, copy=False)
np.nan_to_num(th_ema5, copy=False)
np.nan_to_num(th_ema112diff, copy=False)
np.nan_to_num(th_ema224diff, copy=False)
np.nan_to_num(th_ema448diff, copy=False)
np.nan_to_num(th_ema20, copy=False)
np.nan_to_num(th_ema60, copy=False)
np.nan_to_num(th_ema112, copy=False)
np.nan_to_num(th_ema224, copy=False)
np.nan_to_num(th_ema448, copy=False)
np.nan_to_num(th_ma112, copy=False)
np.nan_to_num(th_ma224, copy=False)
np.nan_to_num(th_ma448, copy=False)
np.nan_to_num(th_pdi, copy=False)
np.nan_to_num(th_mdi, copy=False)
np.nan_to_num(th_dm, copy=False)
np.nan_to_num(th_adx, copy=False)
np.nan_to_num(th_adxr, copy=False)
np.nan_to_num(th_williumr, copy=False)
np.nan_to_num(th_pb, copy=False)
np.nan_to_num(th_pb14, copy=False)
np.nan_to_num(th_BBAND_WIDE, copy=False)
np.nan_to_num(th_BBAND_WIDE14, copy=False)
np.nan_to_num(th_mfi, copy=False)
np.nan_to_num(th_ll, copy=False)
np.nan_to_num(th_ad, copy=False)
np.nan_to_num(th_rsi5, copy=False)
np.nan_to_num(th_ibs, copy=False)
np.nan_to_num(th_diff, copy=False)
np.nan_to_num(th_diff30, copy=False)
np.nan_to_num(th_obvsig9, copy=False)
        # Convert arrays to DataFrames
df_ad = pd.DataFrame(th_ad, columns=['ad'])
df_cci = pd.DataFrame(th_cci, columns=['cci'])
        df_cci60 = pd.DataFrame(th_cci60, columns=['cci60'])
df_rsi5 =
|
pd.DataFrame(th_rsi5, columns=['rsi5'])
|
pandas.DataFrame
|
# %%
'''----------------------------------------------------------------
This script deals with the interaction between neutrophils and B cells
----------------------------------------------------------------'''
import os
import pickle
import cv2
import pandas as pd
import numpy as np
from tqdm import tqdm
import sys
sys.path.append('..')
import objecttrack
N_PATH = 'D:\Rotation2\VideoFrame\SecondCD11a'
B_PATH = 'D:\Rotation2\VideoFrame\Exp 19-4-18 CD11a blocking'
IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS = 1002, 1002, 3
# %%
'''----------------------------------------------------------------
self.id, set, may contain multiple ids
self.hits, int, # of frames the cell has been hit
self.pat, dict, {frame # : [intensity, volume, smooth, circumference, centre,
(startX, startY, endX, endY), velocity, acceleration, displacement]}
self.cont, np.array, {frame # : contour}, contour of the detected cell
----------------------------------------------------------------'''
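# For orientation, a hedged sketch of one per-frame entry of the `pat` dict
# described in the docstring above. The numbers are invented for illustration
# only; the field order follows the documented layout.
_example_pat_entry = {
    3: [850.0,                  # intensity
        41.0,                   # volume
        0.91,                   # smooth
        78.5,                   # circumference
        (512, 640),             # centre
        (498, 622, 530, 661),   # (startX, startY, endX, endY)
        4.2,                    # velocity
        0.3,                    # acceleration
        6.1],                   # displacement
}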
N_before = pickle.load(open(os.path.join(N_PATH, 'ncellbefore.pkl'), 'rb'))
N_after = pickle.load(open(os.path.join(N_PATH, 'ncellafter.pkl'), 'rb'))
# %%
'''----------------------------------------------------------------
Read B cell information | ExcelFile data
----------------------------------------------------------------'''
def ReadBFile(B_PATH, label):
for file in os.listdir(B_PATH):
if file.endswith('.xlsx') and 'B' in file:
if label in file:
B = pd.ExcelFile(os.path.join(B_PATH, file))
return B
# %%
'''----------------------------------------------------------------
Distance from every primary cell to the closest secondary cell at every frame
Plot against the speed of primary cell
----------------------------------------------------------------'''
def NBInteraction(N, label):
B = ReadBFile(B_PATH, label)
b_position = pd.read_excel(B, sheet_name = 'Position', header = 1)
b_speed = pd.read_excel(B, sheet_name = 'Speed', header = 1)
if label == 'before':
b_position['Position X'] = b_position['Position X'] * 1.7105 - 983.6
b_position['Position Y'] = b_position['Position Y'] * (-1.7098) + 1955.9
if label == 'after':
b_position['Position X'] = b_position['Position X'] * 1.7395 - 1007.4
b_position['Position Y'] = b_position['Position Y'] * (-1.7216) + 1964.7
df_proximity = pd.DataFrame(columns = [str(column) for column in range(1, 61)])
df_speed = pd.DataFrame(columns = [str(column) for column in range(1, 61)])
for cell in tqdm(N):
if cell.hits >= 3:
if len(cell.id) == 1:
n_pattern = cell.pat
n_contour = cell.cont
df_proximity.loc[list(cell.id)[0]] = [-1] * 60
df_speed.loc[list(cell.id)[0]] = [-1] * 60
for frame in n_pattern.keys():
frame_b_position = b_position[b_position['Time'] == frame]
frame_b_speed = b_speed[b_speed['Time'] == frame]
n_speed, n_cont = n_pattern[frame][6], n_contour[frame]
b_id = []
n_b_speed = [round(n_speed, 4)]
for index, row in frame_b_position.iterrows():
b_centre = (int(row['Position X']), int(row['Position Y']))
img_b = np.zeros((IMG_HEIGHT, IMG_WIDTH), dtype = np.uint8)
img_b = cv2.circle(img_b, b_centre, int(4 / 0.481), 255, -1)
img_n = np.zeros((IMG_HEIGHT, IMG_WIDTH), dtype = np.uint8)
img_n = cv2.drawContours(img_n, [n_cont], -1, 1)
intersection = np.logical_and(img_n, img_b)
if intersection.any() == True:
b_id.append(row['TrackID'] - 1000000000)
n_b_speed.append(round(frame_b_speed.loc[index, 'Value'], 4))
df_proximity.at[list(cell.id)[0], str(frame)] = b_id
df_speed.at[list(cell.id)[0], str(frame)] = n_b_speed
return df_proximity, df_speed
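# Illustrative sketch (assumed shapes, not real measurements): the returned
# DataFrames are indexed by Neutrophil track id with one column per frame
# ('1'..'60'). A cell holds -1 where the track was absent; otherwise
# df_proximity holds the ids of touching B cells and df_speed holds
# [neutrophil_speed, b_speed_1, ...]. `_demo_proximity` below is hypothetical.
_demo_proximity = pd.DataFrame({'1': [[3, 7]], '2': [-1]}, index=[12])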
# %%
'''----------------------------------------------------------------
Main function | Gather proximity and speed data
----------------------------------------------------------------'''
proximity_before, speed_before = NBInteraction(N_before, 'before')
proximity_after, speed_after = NBInteraction(N_after, 'after')
pickle.dump(proximity_before, open(os.path.join(N_PATH, 'proximitybefore.pkl'), 'wb'))
pickle.dump(proximity_after, open(os.path.join(N_PATH, 'proximityafter.pkl'), 'wb'))
pickle.dump(speed_before, open(os.path.join(N_PATH, 'speedbefore.pkl'), 'wb'))
pickle.dump(speed_after, open(os.path.join(N_PATH, 'speedafter.pkl'), 'wb'))
# %%
'''----------------------------------------------------------------
Main function | Load proximity and speed data
----------------------------------------------------------------'''
p_before = pickle.load(open(os.path.join(N_PATH, 'proximitybefore.pkl'), 'rb'))
p_after = pickle.load(open(os.path.join(N_PATH, 'proximityafter.pkl'), 'rb'))
s_before = pickle.load(open(os.path.join(N_PATH, 'speedbefore.pkl'), 'rb'))
s_after = pickle.load(open(os.path.join(N_PATH, 'speedafter.pkl'), 'rb'))
labels = pickle.load(open(os.path.join(N_PATH, '197181.pkl'), 'rb'))
# %%
'''----------------------------------------------------------------
N-B Pairwise speed dataframe
----------------------------------------------------------------'''
import pandas as pd
def ns_vs_bs(df_speed, label):
df_speed_pair = pd.DataFrame(columns = ['N', 'B', 'Cluster'])
count = 1
row_index = 0
for index, row in df_speed.iterrows():
for i in range(1, 61):
if row[str(i)] != -1:
if len(row[str(i)]) > 1:
ns = row[str(i)][0]
bs = row[str(i)][1:]
for item in bs:
df_speed_pair.at[count, 'N'] = ns
df_speed_pair.at[count, 'B'] = item
df_speed_pair.at[count, 'Cluster'] = label[row_index]
count += 1
row_index += 1
return df_speed_pair
speed_pair_before = ns_vs_bs(s_before, labels[:197])
speed_pair_after = ns_vs_bs(s_after, labels[197:])
# %%
'''----------------------------------------------------------------
Gamma Distribution?
----------------------------------------------------------------'''
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import gamma
plt.style.use('ggplot')
def gamma_plot(cell, savefig = False):
data =
|
pd.concat([speed_pair_before, speed_pair_after])
|
pandas.concat
|
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import TemplateView
import pandas as pd
from core.models import *
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
import datetime
def make_all_client():
qs = list(ClientModel.objects.values_list('name__name', 'task__task', 'subtask__subtask', 'time_spent'))
df = pd.DataFrame(qs, columns=['name', 'task', 'subtask', 'time_spent'])
df.to_csv('stuff/all.csv', sep=';', index=None)
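# Illustrative sketch (added, values hypothetical): stuff/all.csv holds one
# row per ClientModel entry, e.g.
#   name;task;subtask;time_spent
#   Acme;Audit;Fieldwork;90
# which the view below re-aggregates per (name, task, subtask).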
class AllClientsPageView(TemplateView):
template_name = 'all_clients.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(AllClientsPageView, self).get_context_data(**kwargs)
make_all_client()
all_clients = pd.read_csv('stuff/all.csv', sep=';')
        all_clients = all_clients.groupby(['name', 'task', 'subtask']).sum().fillna(0).reset_index()
all_clients['time_spent'] = pd.to_timedelta(all_clients.time_spent, unit='m')
context.update({'df': all_clients.values})
return context
def make_all_employee():
qs = list(ClientModel.objects.values_list('dec_name',
'time_spent', 'date_added'))
df = pd.DataFrame(qs, columns=['dec_name',
'time_spent', 'date_added'])
df.date_added = df.date_added.values.astype('M8[D]')
df.to_csv('stuff/employees.csv', sep=';', index=None)
class EmployeeTabPageView(TemplateView):
template_name = 'employee_tab.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(EmployeeTabPageView, self).get_context_data(**kwargs)
make_all_employee()
today = datetime.date.today()
employees = pd.read_csv('stuff/employees.csv', sep=';')
        employees = employees.groupby(['dec_name', 'date_added']).sum().fillna(0).reset_index()
employees['time_spent'] =
|
pd.to_datetime(employees.time_spent, unit='m')
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""System operating cost plots.
This module plots figures related to the cost of operating the power system.
Plots can be broken down by cost categories, generator types etc.
@author: <NAME>
"""
import logging
import pandas as pd
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import PlotLibrary
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
"""production_cost MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The production_cost.py module contains methods that are
    related to the cost of operating the power system.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
def prod_cost(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Plots total system net revenue and cost normalized by the installed capacity of the area.
        Total revenue is made up of reserve and energy revenues, which are displayed in a stacked
        bar plot together with total generation cost. Net revenue is represented by a dot.
        Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
            end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Total_Generation_Cost", self.Scenarios),
(True, "generator_Pool_Revenue", self.Scenarios),
(True, "generator_Reserves_Revenue", self.Scenarios),
(True, "generator_Installed_Capacity", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"{self.AGG_BY} = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
Total_Installed_Capacity = self["generator_Installed_Capacity"].get(scenario)
#Check if zone has installed generation, if not skips
try:
Total_Installed_Capacity = Total_Installed_Capacity.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No installed capacity in : {zone_input}")
continue
Total_Installed_Capacity = self.df_process_gen_inputs(Total_Installed_Capacity)
Total_Installed_Capacity.reset_index(drop=True, inplace=True)
Total_Installed_Capacity = Total_Installed_Capacity.iloc[0]
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
Total_Gen_Cost = self.df_process_gen_inputs(Total_Gen_Cost)
Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)*-1
# Total_Gen_Cost = Total_Gen_Cost/Total_Installed_Capacity #Change to $/MW-year
Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
Pool_Revenues = self["generator_Pool_Revenue"].get(scenario)
Pool_Revenues = Pool_Revenues.xs(zone_input,level=self.AGG_BY)
Pool_Revenues = self.df_process_gen_inputs(Pool_Revenues)
Pool_Revenues = Pool_Revenues.sum(axis=0)
# Pool_Revenues = Pool_Revenues/Total_Installed_Capacity #Change to $/MW-year
Pool_Revenues.rename("Energy_Revenues", inplace=True)
### Might change to Net Reserve Revenue at later date
Reserve_Revenues = self["generator_Reserves_Revenue"].get(scenario)
Reserve_Revenues = Reserve_Revenues.xs(zone_input,level=self.AGG_BY)
Reserve_Revenues = self.df_process_gen_inputs(Reserve_Revenues)
Reserve_Revenues = Reserve_Revenues.sum(axis=0)
# Reserve_Revenues = Reserve_Revenues/Total_Installed_Capacity #Change to $/MW-year
Reserve_Revenues.rename("Reserve_Revenues", inplace=True)
Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost,
Pool_Revenues, Reserve_Revenues],
axis=1, sort=False)
Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
Total_Systems_Cost = Total_Systems_Cost.sum(axis=0)
Total_Systems_Cost = Total_Systems_Cost.rename(scenario)
total_cost_chunk.append(Total_Systems_Cost)
Total_Systems_Cost_Out = pd.concat(total_cost_chunk, axis=1, sort=False)
Total_Systems_Cost_Out = Total_Systems_Cost_Out.T
Total_Systems_Cost_Out.index = Total_Systems_Cost_Out.index.str.replace('_',' ')
# Total_Systems_Cost_Out = Total_Systems_Cost_Out/1000 #Change to $/kW-year
Total_Systems_Cost_Out = Total_Systems_Cost_Out/1e6 #Convert cost to millions
if pd.notna(custom_data_file_path):
Total_Systems_Cost_Out = self.insert_custom_data_columns(
Total_Systems_Cost_Out,
custom_data_file_path)
Net_Revenue = Total_Systems_Cost_Out.sum(axis=1)
#Checks if Net_Revenue contains data, if not skips zone and does not return a plot
if Net_Revenue.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# Data table of values to return to main program
Data_Table_Out = Total_Systems_Cost_Out.add_suffix(" (Million $)")
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Systems_Cost_Out.index
mplt.barplot(Total_Systems_Cost_Out, stacked=True,
custom_tick_labels=tick_labels)
ax.plot(Net_Revenue.index, Net_Revenue.values,
color='black', linestyle='None', marker='o',
label='Net Revenue')
            ax.set_ylabel('Total System Net Rev, Rev, & Cost (Million $)', color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def sys_cost(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Creates a stacked bar plot of Total Generation Cost and Cost of Unserved Energy.
Plot only shows totals and is NOT broken down into technology or cost type
specific values.
        Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
            end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Total_Generation_Cost",self.Scenarios),
(False,f"{agg}_Cost_Unserved_Energy",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"{self.AGG_BY} = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
try:
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for : {zone_input}")
continue
Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)
Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
Cost_Unserved_Energy = self[f"{agg}_Cost_Unserved_Energy"][scenario]
if Cost_Unserved_Energy.empty:
Cost_Unserved_Energy = self["generator_Total_Generation_Cost"][scenario].copy()
Cost_Unserved_Energy.iloc[:,0] = 0
Cost_Unserved_Energy = Cost_Unserved_Energy.xs(zone_input,level=self.AGG_BY)
Cost_Unserved_Energy = Cost_Unserved_Energy.sum(axis=0)
Cost_Unserved_Energy.rename("Cost_Unserved_Energy", inplace=True)
Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost, Cost_Unserved_Energy],
axis=1, sort=False)
Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
Total_Systems_Cost.rename({0:scenario}, axis='index', inplace=True)
total_cost_chunk.append(Total_Systems_Cost)
# Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
if not total_cost_chunk:
outputs[zone_input] = MissingZoneData()
continue
Total_Systems_Cost_Out = pd.concat(total_cost_chunk, axis=0, sort=False)
Total_Systems_Cost_Out = Total_Systems_Cost_Out/1000000 #Convert cost to millions
Total_Systems_Cost_Out.index = Total_Systems_Cost_Out.index.str.replace('_',' ')
#Checks if Total_Systems_Cost_Out contains data, if not skips zone and does not return a plot
if Total_Systems_Cost_Out.empty:
outputs[zone_input] = MissingZoneData()
continue
if pd.notna(custom_data_file_path):
Total_Systems_Cost_Out = self.insert_custom_data_columns(
Total_Systems_Cost_Out,
custom_data_file_path)
# Data table of values to return to main program
Data_Table_Out = Total_Systems_Cost_Out.add_suffix(" (Million $)")
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Systems_Cost_Out.index
mplt.barplot(Total_Systems_Cost_Out, stacked=True,
custom_tick_labels=tick_labels)
ax.set_ylabel('Total System Cost (Million $)',
color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
cost_totals = Total_Systems_Cost_Out.sum(axis=1) #holds total of each bar
#inserts values into bar stacks
for patch in ax.patches:
width, height = patch.get_width(), patch.get_height()
if height<=1:
continue
x, y = patch.get_xy()
ax.text(x+width/2,
y+height/2,
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=12)
#inserts total bar value above each bar
for k, patch in enumerate(ax.patches):
height = cost_totals[k]
width = patch.get_width()
x, y = patch.get_xy()
ax.text(x+width/2,
y+height + 0.05*max(ax.get_ylim()),
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=15, color='red')
if k>=len(cost_totals)-1:
break
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def detailed_gen_cost(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Creates stacked bar plot of total generation cost by cost type (fuel, emission, start cost etc.)
        Creates a more detailed system cost plot.
        Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
            end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Fuel_Cost",self.Scenarios),
(True,"generator_VO&M_Cost",self.Scenarios),
(True,"generator_Start_&_Shutdown_Cost",self.Scenarios),
(False,"generator_Emissions_Cost",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"Zone = {zone_input}")
gen_cost_out_chunks = []
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Fuel_Cost = self["generator_Fuel_Cost"].get(scenario)
# Check if Fuel_cost contains zone_input, skips if not
try:
Fuel_Cost = Fuel_Cost.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for: {zone_input}")
continue
Fuel_Cost = Fuel_Cost.sum(axis=0)
Fuel_Cost.rename("Fuel_Cost", inplace=True)
VOM_Cost = self["generator_VO&M_Cost"].get(scenario)
VOM_Cost = VOM_Cost.xs(zone_input,level=self.AGG_BY)
VOM_Cost[0].values[VOM_Cost[0].values < 0] = 0
VOM_Cost = VOM_Cost.sum(axis=0)
VOM_Cost.rename("VO&M_Cost", inplace=True)
Start_Shutdown_Cost = self["generator_Start_&_Shutdown_Cost"].get(scenario)
Start_Shutdown_Cost = Start_Shutdown_Cost.xs(zone_input,level=self.AGG_BY)
Start_Shutdown_Cost = Start_Shutdown_Cost.sum(axis=0)
Start_Shutdown_Cost.rename("Start_&_Shutdown_Cost", inplace=True)
Emissions_Cost = self["generator_Emissions_Cost"][scenario]
if Emissions_Cost.empty:
self.logger.warning(f"generator_Emissions_Cost not included in {scenario} results, Emissions_Cost will not be included in plot")
Emissions_Cost = self["generator_Start_&_Shutdown_Cost"][scenario].copy()
Emissions_Cost.iloc[:,0] = 0
Emissions_Cost = Emissions_Cost.xs(zone_input,level=self.AGG_BY)
Emissions_Cost = Emissions_Cost.sum(axis=0)
Emissions_Cost.rename("Emissions_Cost", inplace=True)
Detailed_Gen_Cost = pd.concat([Fuel_Cost, VOM_Cost, Start_Shutdown_Cost, Emissions_Cost], axis=1, sort=False)
Detailed_Gen_Cost.columns = Detailed_Gen_Cost.columns.str.replace('_',' ')
Detailed_Gen_Cost = Detailed_Gen_Cost.sum(axis=0)
Detailed_Gen_Cost = Detailed_Gen_Cost.rename(scenario)
gen_cost_out_chunks.append(Detailed_Gen_Cost)
# Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
if not gen_cost_out_chunks:
outputs[zone_input] = MissingZoneData()
continue
Detailed_Gen_Cost_Out = pd.concat(gen_cost_out_chunks, axis=1, sort=False)
Detailed_Gen_Cost_Out = Detailed_Gen_Cost_Out.T/1000000 #Convert cost to millions
Detailed_Gen_Cost_Out.index = Detailed_Gen_Cost_Out.index.str.replace('_',' ')
# Deletes columns that are all 0
Detailed_Gen_Cost_Out = Detailed_Gen_Cost_Out.loc[:, (Detailed_Gen_Cost_Out != 0).any(axis=0)]
# Checks if Detailed_Gen_Cost_Out contains data, if not skips zone and does not return a plot
if Detailed_Gen_Cost_Out.empty:
outputs[zone_input] = MissingZoneData()
continue
if pd.notna(custom_data_file_path):
                Detailed_Gen_Cost_Out = self.insert_custom_data_columns(
                    Detailed_Gen_Cost_Out,
custom_data_file_path)
# Data table of values to return to main program
Data_Table_Out = Detailed_Gen_Cost_Out.add_suffix(" (Million $)")
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Detailed_Gen_Cost_Out.index
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
mplt.barplot(Detailed_Gen_Cost_Out, stacked=True,
custom_tick_labels=tick_labels)
ax.axhline(y=0)
ax.set_ylabel('Total Generation Cost (Million $)',
color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
cost_totals = Detailed_Gen_Cost_Out.sum(axis=1) #holds total of each bar
#inserts values into bar stacks
for patch in ax.patches:
width, height = patch.get_width(), patch.get_height()
if height<=2:
continue
x, y = patch.get_xy()
ax.text(x+width/2,
y+height/2,
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=12)
#inserts total bar value above each bar
for k, patch in enumerate(ax.patches):
height = cost_totals[k]
width = patch.get_width()
x, y = patch.get_xy()
ax.text(x+width/2,
y+height + 0.05*max(ax.get_ylim()),
'{:,.0f}'.format(height),
horizontalalignment='center',
verticalalignment='center', fontsize=15, color='red')
if k>=len(cost_totals)-1:
break
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def sys_cost_type(self, start_date_range: str = None,
end_date_range: str = None, custom_data_file_path: str = None,
**_):
"""Creates stacked bar plot of total generation cost by generator technology type.
Another way to represent total generation cost, this time by tech type,
        i.e. Coal, Gas, Hydro etc.
        Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
            end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
custom_data_file_path (str, optional): Path to custom data file to concat extra
data. Index and column format should be consistent with output data csv.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
# Create Dictionary to hold Datframes for each scenario
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,"generator_Total_Generation_Cost",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
gen_cost_out_chunks = []
self.logger.info(f"Zone = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Gen_Stack = self["generator_Total_Generation_Cost"].get(scenario)
# Check if Total_Gen_Stack contains zone_input, skips if not
try:
Total_Gen_Stack = Total_Gen_Stack.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for : {zone_input}")
continue
Total_Gen_Stack = self.df_process_gen_inputs(Total_Gen_Stack)
Total_Gen_Stack = Total_Gen_Stack.sum(axis=0)
Total_Gen_Stack.rename(scenario, inplace=True)
gen_cost_out_chunks.append(Total_Gen_Stack)
# Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
if not gen_cost_out_chunks:
outputs[zone_input] = MissingZoneData()
continue
Total_Generation_Stack_Out = pd.concat(gen_cost_out_chunks, axis=1, sort=False).fillna(0)
Total_Generation_Stack_Out = self.create_categorical_tech_index(Total_Generation_Stack_Out)
Total_Generation_Stack_Out = Total_Generation_Stack_Out.T/1000000 #Convert to millions
Total_Generation_Stack_Out = Total_Generation_Stack_Out.loc[:, (Total_Generation_Stack_Out != 0).any(axis=0)]
# Checks if Total_Generation_Stack_Out contains data, if not skips zone and does not return a plot
if Total_Generation_Stack_Out.empty:
outputs[zone_input] = MissingZoneData()
continue
if pd.notna(custom_data_file_path):
Total_Generation_Stack_Out = self.insert_custom_data_columns(
Total_Generation_Stack_Out,
custom_data_file_path)
# Data table of values to return to main program
Data_Table_Out = Total_Generation_Stack_Out.add_suffix(" (Million $)")
Total_Generation_Stack_Out.index = Total_Generation_Stack_Out.index.str.replace('_',' ')
mplt = PlotLibrary()
fig, ax = mplt.get_figure()
# Set x-tick labels
if len(self.custom_xticklabels) > 1:
tick_labels = self.custom_xticklabels
else:
tick_labels = Total_Generation_Stack_Out.index
mplt.barplot(Total_Generation_Stack_Out,
color=self.PLEXOS_color_dict, stacked=True,
custom_tick_labels=tick_labels)
ax.set_ylabel('Total System Cost (Million $)', color='black', rotation='vertical')
ax.margins(x=0.01)
mplt.add_legend(reverse_legend=True)
if mconfig.parser("plot_title_as_region"):
mplt.add_main_title(zone_input)
outputs[zone_input] = {'fig': fig, 'data_table': Data_Table_Out}
return outputs
def sys_cost_diff(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates stacked barplots of Total Generation Cost and Cost of Unserved Energy relative to a base scenario.
        Barplots show the change in total generation cost relative to a base scenario.
        The default is to compare against the first scenario provided in the inputs list.
        Plot only shows totals and is NOT broken down into technology or cost type specific values.
        Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
            end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Total_Generation_Cost", self.Scenarios),
(False, f"{agg}_Cost_Unserved_Energy", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
# Checks if all data required by plot is available, if 1 in list required data is missing
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
total_cost_chunk = []
self.logger.info(f"Zone = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Systems_Cost = pd.DataFrame()
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
try:
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No Generators found for : {zone_input}")
continue
Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)
Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
Cost_Unserved_Energy = self[f"{agg}_Cost_Unserved_Energy"][scenario]
if Cost_Unserved_Energy.empty:
Cost_Unserved_Energy = self["generator_Total_Generation_Cost"][scenario].copy()
Cost_Unserved_Energy.iloc[:,0] = 0
Cost_Unserved_Energy = Cost_Unserved_Energy.xs(zone_input,level=self.AGG_BY)
Cost_Unserved_Energy = Cost_Unserved_Energy.sum(axis=0)
Cost_Unserved_Energy.rename("Cost_Unserved_Energy", inplace=True)
Total_Systems_Cost =
|
pd.concat([Total_Systems_Cost, Total_Gen_Cost, Cost_Unserved_Energy], axis=1, sort=False)
|
pandas.concat
|
import numpy as np
import pandas as pd
import rasterio
import statsmodels.formula.api as smf
from scipy.sparse import coo_matrix
import scipy.spatial
import patsy
from statsmodels.api import add_constant, OLS
from .utils import transform_coord
def test_linearity(x, y, n_knots=5, verbose=True):
"""Test linearity between two variables.
Run a linear regression of y on x, and take the residuals.
Fit the residuals with a natural spline with `n_knots` knots.
Conduct a joint F-test for all columns in the natural spline basis matrix.
Example:
>>> import numpy as np
>>> rng = np.random.default_rng(0)
>>> x = np.linspace(0., 1., 101)
>>> y = 5 * x + 3 + rng.random(size=101) / 5
>>> test_linearity(x, y, n_knots=5, verbose=False)
0.194032
"""
residuals = OLS(y, add_constant(x)).fit().resid
basis_matrix = patsy.dmatrix(
f"cr(x, df={n_knots - 1}, constraints='center') - 1", {'x': x},
return_type='dataframe')
    results = OLS(residuals, basis_matrix).fit()
    nobs = results.nobs
    f_value = results.fvalue
    p_value = np.round(results.f_pvalue, 6)
    if verbose:
        print('Test for Linearity: '
              f'N = {nobs:.0f}; df={nobs - n_knots - 1:.0f}; '
              f'F = {f_value:.3f}; p = {p_value:.6f}.')
return p_value
def winsorize(s, lower, upper, verbose=False):
"""Winsorizes a pandas series.
Args:
s (pandas.Series): the series to be winsorized
lower, upper (int): number between 0 to 100
"""
lower_value = np.nanpercentile(s.values, lower)
upper_value = np.nanpercentile(s.values, upper)
if verbose:
print(f'Winsorizing to {lower_value} - {upper_value}')
return s.clip(lower_value, upper_value)
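# Minimal usage sketch (added for illustration; `_s` is a hypothetical name).
# With numpy's default linear percentile interpolation the 80th percentile of
# [1, 2, 3, 4, 100] is 23.2, so the outlier is capped there.
if __name__ == '__main__':
    _s = pd.Series([1.0, 2.0, 3.0, 4.0, 100.0])
    print(winsorize(_s, 0, 80).tolist())  # [1.0, 2.0, 3.0, 4.0, 23.2]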
def demean(df, column, by):
"""Demean a column in a pandas DataFrame.
Args:
df (pandas.DataFrame): data
column (str): the column to be demeaned
by (list of str): the column names
"""
return (
df[column].values -
(df.loc[:, by + [column]]
.groupby(by).transform(np.nanmean).values.squeeze()))
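# Minimal usage sketch (added for illustration; `_df` is a hypothetical name):
# each value has its group mean subtracted, so group 'a' (mean 2) yields
# -1 and 1, and group 'b' (mean 12) yields -2 and 2.
if __name__ == '__main__':
    _df = pd.DataFrame({'g': ['a', 'a', 'b', 'b'], 'v': [1.0, 3.0, 10.0, 14.0]})
    print(demean(_df, 'v', ['g']))  # [-1.  1. -2.  2.]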
def load_gd_census(GPS_FILE, MASTER_FILE):
# read GPS coords + treatment status
df = pd.read_csv(
GPS_FILE,
usecols=['village_code', 'ge', 'hi_sat', 'treat',
'latitude', 'longitude', 'elevation', 'accuracy', 'eligible',
'GPS_imputed'],
dtype={
'village_code': 'Int64',
'ge': 'Int32',
'hi_sat': 'Int32',
'treat': 'Int32',
'eligible': 'Int32',
'GPS_imputed': 'Int32'})
# drop non GE households
df = df.loc[df['ge'] == 1, :].copy()
# treat x eligible = cash inflow
df.loc[:, 'treat_eligible'] = (
df.loc[:, 'treat'].values * df.loc[:, 'eligible'].values)
# read sat level identifiers
df_master = pd.read_stata(
MASTER_FILE,
columns=['village_code', 'satlevel_name']
).astype({'village_code': 'Int64'})
df_master = df_master.drop_duplicates()
# merge treatment
df = pd.merge(
df, df_master,
on='village_code', how='left')
assert df['satlevel_name'].notna().all(), (
'Missing saturation level identifier')
return df.drop(columns=['ge'])
def snap_to_grid(df, lon_col, lat_col,
min_lon, max_lon, min_lat, max_lat, step,
**kwargs):
"""Collapses variables in a data frame onto a grid.
Args:
df (pandas.DataFrame)
lon_col, lat_col (str): name of lon, lat columns
min_lon, max_lon, min_lat, max_lat, step (float)
**kwargs: passed to pandas agg() function after grouping by lat, lon
Returns:
(numpy.ndarray, numpy.ndarray): lon and lat grids
pandas.DataFrame: output data frame
"""
df_copy = df.copy()
# snap to grid
df_copy.loc[:, 'grid_lon'] = np.round(
(df[lon_col].values - min_lon - step / 2) / step
).astype(np.int32)
df_copy.loc[:, 'grid_lat'] = np.round(
(df[lat_col].values - min_lat - step / 2) / step
).astype(np.int32)
# construct the grid
grid_lon, grid_lat = np.meshgrid(
np.arange(0, np.round((max_lon - min_lon) / step).astype(np.int32)),
np.arange(0, np.round((max_lat - min_lat) / step).astype(np.int32)))
df_grid = pd.DataFrame({'grid_lon': grid_lon.flatten(),
'grid_lat': grid_lat.flatten()})
# collapse
df_output = pd.merge(
df_grid.assign(is_in_grid=True),
df_copy.groupby(['grid_lon', 'grid_lat']).agg(**kwargs),
how='outer', on=['grid_lon', 'grid_lat'])
print(f"Dropping {df_output['is_in_grid'].isna().sum()} observations;\n"
f"Keeping {df_output['is_in_grid'].notna().sum()} observations")
df_output = df_output.loc[df_output['is_in_grid'].notna(), :].copy()
return (grid_lon, grid_lat), df_output.drop(columns=['is_in_grid'])
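# Minimal usage sketch (added for illustration; names prefixed with `_demo`
# are hypothetical). Three points snapped onto a 0.1-degree grid over the
# unit square: two share a cell (count 2), empty cells come back as NaN.
if __name__ == '__main__':
    _demo_pts = pd.DataFrame({'lon': [0.15, 0.15, 0.45], 'lat': [0.25, 0.25, 0.25]})
    (_glon, _glat), _demo_grid = snap_to_grid(
        _demo_pts, 'lon', 'lat',
        min_lon=0.0, max_lon=1.0, min_lat=0.0, max_lat=1.0, step=0.1,
        n=('lon', 'count'))
    print(_demo_grid.loc[_demo_grid['n'].notna()])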
def control_for_spline(x, y, z, cr_df=3):
# handle nan's
is_na = np.any((np.isnan(x), np.isnan(y), np.isnan(z)), axis=0)
df = pd.DataFrame({'x': x[~is_na], 'y': y[~is_na], 'z': z[~is_na]})
mod = smf.ols(formula=f"z ~ 1 + cr(x, df={cr_df}) + cr(y, df={cr_df})",
data=df)
res = mod.fit()
# return nan's for cases where any one of x, y, z is nan
z_out = np.full_like(z, np.nan)
z_out[~is_na] = z[~is_na] - res.fittedvalues
return z_out
def load_nightlight_from_point(df, NL_IN_DIR, lon_col='lon', lat_col='lat'):
# extract nightlight values
ds = rasterio.open(NL_IN_DIR)
band = ds.read().squeeze(0)
idx = np.round(transform_coord(
transform=ds.transform,
to='colrow',
        xy=df.loc[:, [lon_col, lat_col]].values)).astype(int)
df.loc[:, 'nightlight'] = [band[i[1], i[0]] for i in idx]
# winsorize + normalize
# df.loc[:, 'nightlight_winsnorm'] = winsorize(
# df['nightlight'], 0, 99)
# df.loc[:, 'nightlight_winsnorm'] = (
# (df['nightlight_winsnorm'].values -
# np.nanmean(df['nightlight_winsnorm'].values)) /
# np.nanstd(df['nightlight_winsnorm'].values))
return df
# def load_nightlight_asis(input_dir):
# """Loads nightlight data, keeping its raster grid as is.
# Args:
# input_dir (str)
# Returns:
# dict {str: float}: with the following keys
# min_lon, max_lon, min_lat, max_lat, step
# pandas.DataFrame
# """
# # load satellite data
# print('Loading nightlight data')
# ds = rasterio.open(input_dir)
# band = ds.read().squeeze(0)
# # define the grid
# grid = {
# 'min_lon': ds.bounds[0],
# 'min_lat': ds.bounds[1],
# 'max_lon': ds.bounds[2],
# 'max_lat': ds.bounds[3],
# 'step': ds.transform[0],
# }
# # construct the grid
# grid_lon, grid_lat = np.meshgrid(
# np.arange(0, ds.width),
# np.arange(0, ds.height))
# # convert to data frame
# df = pd.DataFrame({
# 'grid_lon': grid_lon.flatten(),
# 'grid_lat': grid_lat[::-1].flatten(),
# 'nightlight': band.flatten(),
# })
# # recover lon, lat
# df.loc[:, 'lon'] = (
# df['grid_lon'] * grid['step'] + grid['min_lon'] + grid['step'] / 2)
# df.loc[:, 'lat'] = (
# df['grid_lat'] * grid['step'] + grid['min_lat'] + grid['step'] / 2)
# # winsorize + normalize
# df.loc[:, 'nightlight'] = winsorize(
# df['nightlight'], 0, 99)
# df.loc[:, 'nightlight'] = (
# (df['nightlight'].values -
# np.nanmean(df['nightlight'].values)) /
# np.nanstd(df['nightlight'].values))
# return grid, df
def load_building(input_dir, grid, agg=True):
"""Loads building polygons.
Args:
input_dir (str): file to load
grid (dict {str: float}): dict with the following keys:
min_lon, max_lon, min_lat, max_lat, step
agg (bool): whether to perform aggregation
Returns:
tuple of numpy.ndarray: (grid_lon, grid_lat)
pandas.DataFrame: gridded dataframe
"""
tin_roofs = [0, 1, 5]
thatched_roofs = [2, 3, 6]
# load satellite predictions
# print('Loading building polygon data')
df =
|
pd.read_csv(input_dir)
|
pandas.read_csv
|
import datetime
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Timedelta, merge_asof, read_csv, to_datetime
import pandas._testing as tm
from pandas.core.reshape.merge import MergeError
class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath("reshape", "merge", "data", name)
x = read_csv(path)
if dedupe:
x = x.drop_duplicates(["time", "ticker"], keep="last").reset_index(
drop=True
)
x.time = to_datetime(x.time)
return x
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.trades = self.read_data(datapath, "trades.csv")
self.quotes = self.read_data(datapath, "quotes.csv", dedupe=True)
self.asof = self.read_data(datapath, "asof.csv")
self.tolerance = self.read_data(datapath, "tolerance.csv")
self.allow_exact_matches = self.read_data(datapath, "allow_exact_matches.csv")
self.allow_exact_matches_and_tolerance = self.read_data(
datapath, "allow_exact_matches_and_tolerance.csv"
)
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = pd.merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.048",
"20160525 13:30:00.049",
"20160525 13:30:00.072",
"20160525 13:30:00.075",
]
),
"ticker": [
"GOOG",
"MSFT",
"MSFT",
"MSFT",
"GOOG",
"AAPL",
"GOOG",
"MSFT",
],
"bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
},
columns=["time", "ticker", "bid", "ask"],
)
pd.merge_asof(trades, quotes, on="time", by="ticker")
pd.merge_asof(
trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
"ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
},
columns=["time", "ticker", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(
trades,
quotes,
on="time",
by="ticker",
tolerance=pd.Timedelta("10ms"),
allow_exact_matches=False,
)
tm.assert_frame_equal(result, expected)
def test_examples3(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = pd.merge_asof(left, right, on="a", direction="forward")
tm.assert_frame_equal(result, expected)
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype("category")
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype("category")
expected.ticker = expected.ticker.astype("category")
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index("time")
quotes = self.quotes
result = merge_asof(
trades, quotes, left_index=True, right_on="time", by="ticker"
)
        # left-only index uses right's index, oddly
expected.index = result.index
        # time column appears after left's columns
expected = expected[result.columns]
tm.assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_on="time", right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_multi_index_on(self):
def index_by_time_then_arbitrary_new_level(df):
df = df.set_index("time")
df = pd.concat([df, df], keys=["f1", "f2"], names=["f", "time"])
return df.reorder_levels([1, 0]).sort_index()
trades = index_by_time_then_arbitrary_new_level(self.trades)
quotes = index_by_time_then_arbitrary_new_level(self.quotes)
expected = index_by_time_then_arbitrary_new_level(self.asof)
result = merge_asof(trades, quotes, on="time", by=["ticker"])
tm.assert_frame_equal(result, expected)
def test_on_and_index(self):
# "on" parameter and index together is prohibited
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, left_on="price", left_index=True, right_index=True
)
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, right_on="bid", left_index=True, right_index=True
)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(
trades, quotes, on="time", left_by="ticker", right_by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != "MSFT"]
result = merge_asof(trades, q, on="time", by="ticker")
expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
tm.assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_heterogeneous_types(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": [1, 0, 0, 0, 1, 2],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a"],
[pd.to_datetime("20160602"), 2, "a"],
[pd.to_datetime("20160603"), 1, "b"],
[pd.to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
right = pd.DataFrame(
[
[pd.to_datetime("20160502"), 1, "a", 1.0],
[pd.to_datetime("20160502"), 2, "a", 2.0],
[pd.to_datetime("20160503"), 1, "b", 3.0],
[pd.to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
expected = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a", 1.0],
[pd.to_datetime("20160602"), 2, "a", 2.0],
[pd.to_datetime("20160603"), 1, "b", 3.0],
[pd.to_datetime("20160603"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
result = pd.merge_asof(
left, right, left_index=True, right_index=True, by=["k1", "k2"]
)
tm.assert_frame_equal(expected, result)
with pytest.raises(MergeError):
pd.merge_asof(
left,
right,
left_index=True,
right_index=True,
left_by=["k1", "k2"],
right_by=["k1"],
)
def test_basic2(self, datapath):
expected = self.read_data(datapath, "asof2.csv")
trades = self.read_data(datapath, "trades2.csv")
quotes = self.read_data(datapath, "quotes2.csv", dedupe=True)
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = (
lambda x: x[x.ticker == "MSFT"]
.drop("ticker", axis=1)
.reset_index(drop=True)
)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes, on="time")
tm.assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, on=["time", "ticker"], by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, by="ticker")
def test_with_duplicates(self, datapath):
q = (
pd.concat([self.quotes, self.quotes])
.sort_values(["time", "ticker"])
.reset_index(drop=True)
)
result = merge_asof(self.trades, q, on="time", by="ticker")
expected = self.read_data(datapath, "asof.csv")
tm.assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]})
df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]})
result = merge_asof(df1, df2, on="key")
expected = pd.DataFrame(
{"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]}
)
tm.assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", allow_exact_matches="foo"
)
def test_valid_tolerance(self):
trades = self.trades
quotes = self.quotes
# dti
merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s"))
# integer
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1,
)
# incompat
with pytest.raises(MergeError):
merge_asof(trades, quotes, on="time", by="ticker", tolerance=1)
# invalid
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1.0,
)
# invalid negative
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s")
)
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=-1,
)
def test_non_sorted(self):
trades = self.trades.sort_values("time", ascending=False)
quotes = self.quotes.sort_values("time", ascending=False)
# we require that we are already sorted on time & quotes
assert not trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
trades = self.trades.sort_values("time")
assert trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
quotes = self.quotes.sort_values("time")
assert trades.time.is_monotonic
assert quotes.time.is_monotonic
# ok, though has dupes
merge_asof(trades, self.quotes, on="time", by="ticker")
@pytest.mark.parametrize(
"tolerance",
[Timedelta("1day"), datetime.timedelta(days=1)],
ids=["pd.Timedelta", "datetime.timedelta"],
)
def test_tolerance(self, tolerance):
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker", tolerance=tolerance)
expected = self.tolerance
tm.assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="forward", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_tz(self):
# GH 14844
left = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value1": np.arange(5),
}
)
right = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-01"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value2": list("ABCDE"),
}
)
result = pd.merge_asof(left, right, on="date", tolerance=pd.Timedelta("1 day"))
expected = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value1": np.arange(5),
"value2": list("BCDEE"),
}
)
tm.assert_frame_equal(result, expected)
def test_tolerance_float(self):
# GH22981
left = pd.DataFrame({"a": [1.1, 3.5, 10.9], "left_val": ["a", "b", "c"]})
right = pd.DataFrame(
{"a": [1.0, 2.5, 3.3, 7.5, 11.5], "right_val": [1.0, 2.5, 3.3, 7.5, 11.5]}
)
expected = pd.DataFrame(
{
"a": [1.1, 3.5, 10.9],
"left_val": ["a", "b", "c"],
"right_val": [1, 3.3, np.nan],
}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=0.5)
tm.assert_frame_equal(result, expected)
def test_index_tolerance(self):
# GH 15135
expected = self.tolerance.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = pd.merge_asof(
trades,
quotes,
left_index=True,
right_index=True,
by="ticker",
tolerance=pd.Timedelta("1day"),
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches(self):
result = merge_asof(
self.trades, self.quotes, on="time", by="ticker", allow_exact_matches=False
)
expected = self.allow_exact_matches
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 7, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="forward", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 3, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="nearest", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance(self):
result = merge_asof(
self.trades,
self.quotes,
on="time",
by="ticker",
tolerance=Timedelta("100ms"),
allow_exact_matches=False,
)
expected = self.allow_exact_matches_and_tolerance
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame(
{"time": pd.to_datetime(["2016-07-15 13:30:00.030"]), "username": ["bob"]}
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = pd.merge_asof(df1, df2, on="time")
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [2],
}
)
tm.assert_frame_equal(result, expected)
result = pd.merge_asof(df1, df2, on="time", allow_exact_matches=False)
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [1],
}
)
tm.assert_frame_equal(result, expected)
result = pd.merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=pd.Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [np.nan],
}
)
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
import pandas as pd
import numpy as np
import talib
class Indicators(object):
"""
    Input: price DataFrame(s), a moving-average/lookback period and, where relevant,
    standard deviation multipliers.
    Each method returns one DataFrame per output series, with one column per symbol;
    e.g. bb() returns the upper, middle and lower Bollinger Bands as three DataFrames.
"""
def bb(self, l_sym, df_price, time_period, st_dev_u, st_dev_l):
df_bb_u = pd.DataFrame(columns=l_sym, index=df_price.index)
df_bb_m = pd.DataFrame(columns=l_sym, index=df_price.index)
df_bb_l = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_bb_u[sym], df_bb_m[sym], df_bb_l[sym] = talib.BBANDS(np.asarray(df_price[sym]), timeperiod=time_period, nbdevup=st_dev_u, nbdevdn=st_dev_l)
except:
pass
return df_bb_u, df_bb_m, df_bb_l
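    # Illustrative usage sketch (added; assumes TA-Lib is installed and the
    # price history is longer than `time_period`; `_syms`/`_px` are
    # hypothetical names):
    #   _syms = ['AAA']
    #   _px = pd.DataFrame({'AAA': np.linspace(100.0, 120.0, 60)})
    #   upper, middle, lower = Indicators().bb(_syms, _px, time_period=20,
    #                                          st_dev_u=2, st_dev_l=2)
    # Each returned DataFrame has one column per symbol, aligned to _px.index.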
def ema(self, l_sym, df_price, time_period):
df_ema = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_ema[sym] = talib.EMA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_ema
def ma(self, l_sym, df_price, time_period):
df_ma = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_ma[sym] = talib.MA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_ma
def sma(self, l_sym, df_price, time_period):
df_sma = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_sma[sym] = talib.SMA(np.asarray(df_price[sym]), timeperiod=time_period)
except:
pass
return df_sma
def adx(self, l_sym, df_high, df_low, df_close, time_period):
df_adx = pd.DataFrame(columns=l_sym, index=df_high.index)
for sym in l_sym:
try:
df_adx[sym] = talib.ADX(high=np.asarray(df_high[sym]), low=np.asarray(df_low[sym]), close=np.asarray(df_close[sym]), timeperiod = time_period)
except:
pass
return df_adx
def mom(self, l_sym, df_price, time_period):
df_mom = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_mom[sym] = talib.MOM(np.asarray(df_price[sym]), timeperiod = time_period)
except:
pass
return df_mom
def atr(self, l_sym, df_high, df_low, df_close, time_period):
df_atr = pd.DataFrame(columns=l_sym, index=df_high.index)
for sym in l_sym:
try:
df_atr[sym] = talib.ATR(high=np.asarray(df_high[sym]), low=np.asarray(df_low[sym]), close=np.asarray(df_close[sym]), timeperiod=time_period)
except:
pass
return df_atr
def macd(self, l_sym, df_price, fast_period, slow_period, signal_period):
df_macd = pd.DataFrame(columns=l_sym, index=df_price.index)
df_macdsignal = pd.DataFrame(columns=l_sym, index=df_price.index)
df_macdhist = pd.DataFrame(columns=l_sym, index=df_price.index)
for sym in l_sym:
try:
df_macd[sym], df_macdsignal[sym], df_macdhist[sym] = talib.MACD(np.asarray(df_price[sym]), fastperiod=fast_period, slowperiod=slow_period, signalperiod=signal_period)
except:
pass
return df_macd, df_macdsignal, df_macdhist
def wavec(self, l_sym, df_three, df_four, df_five):
df_ca = pd.DataFrame(columns=l_sym, index=df_three.index)
df_cb = pd.DataFrame(columns=l_sym, index=df_three.index)
for sym in l_sym:
df_ca[sym] = df_four[sym] - df_five[sym]
df_cb[sym] = df_three[sym] - df_four[sym]
return df_ca, df_cb
def waveb(self, l_sym, df_two, df_three, df_four):
df_ba = pd.DataFrame(columns=l_sym, index=df_two.index)
df_bb = pd.DataFrame(columns=l_sym, index=df_two.index)
for sym in l_sym:
df_ba[sym] = df_three[sym] - df_four[sym]
df_bb[sym] = df_two[sym] - df_three[sym]
return df_ba, df_bb
def wavea(self, l_sym, df_one, df_two, df_three):
df_aa = pd.DataFrame(columns=l_sym, index=df_one.index)
df_ab = pd.DataFrame(columns=l_sym, index=df_one.index)
for sym in l_sym:
df_aa[sym] = df_two[sym] - df_three[sym]
df_ab[sym] = df_one[sym] - df_two[sym]
return df_aa, df_ab
def keltner(self, l_sym, df_high, df_low, df_close, ema_period, atr_period, multiplier):
df_kch_u = pd.DataFrame(columns=l_sym, index=df_high.index)
df_kch_l = pd.DataFrame(columns=l_sym, index=df_high.index)
df_kch_m = self.ema(l_sym, df_close, time_period=ema_period)
df_atr = self.atr(l_sym, df_high, df_low, df_close, time_period=atr_period)
for sym in l_sym:
df_kch_u[sym] = df_kch_m[sym] + (multiplier * df_atr[sym])
df_kch_l[sym] = df_kch_m[sym] - (multiplier * df_atr[sym])
return df_kch_u, df_kch_m, df_kch_l
def ichimoku(self, l_sym, df_high, df_low):
df_ichimoku_tenkan_u = pd.DataFrame(columns=l_sym, index=df_high.index)
df_ichimoku_tenkan_l = pd.DataFrame(columns=l_sym, index=df_high.index)
df_ichimoku_kijun_u = pd.DataFrame(columns=l_sym, index=df_high.index)
df_ichimoku_kijun_l = pd.DataFrame(columns=l_sym, index=df_high.index)
df_ichimoku_kijun = pd.DataFrame(columns=l_sym, index=df_high.index)
df_ichimoku_tenkan = pd.DataFrame(columns=l_sym, index=df_high.index)
for sym in l_sym:
try:
df_ichimoku_tenkan_u[sym] = pd.rolling_max(df_high[sym], min_periods=1, window=9)
df_ichimoku_tenkan_l[sym] = pd.rolling_min(df_low[sym], min_periods=1, window=9)
df_ichimoku_kijun_u[sym] = pd.rolling_max(df_high[sym], min_periods=1, window=26)
df_ichimoku_kijun_l[sym] =
|
pd.rolling_min(df_low[sym], min_periods=1, window=26)
|
pandas.rolling_min
|
import numpy as np
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
class SharedSetAxisTests:
@pytest.fixture
def obj(self):
raise NotImplementedError("Implemented by subclasses")
def test_set_axis(self, obj):
# GH14636; this tests setting index for both Series and DataFrame
new_index = list("abcd")[: len(obj)]
expected = obj.copy()
expected.index = new_index
# inplace=False
result = obj.set_axis(new_index, axis=0, inplace=False)
|
tm.assert_equal(expected, result)
|
pandas._testing.assert_equal
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 8 13:54:55 2020
@author: akurnizk
"""
import os
import hydroeval
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime #parse the datetimes we get from NOAA
from matplotlib import pylab
from scipy.optimize import fsolve
from pytides.tide import Tide
import seaborn as sns; sns.set(font_scale=2)
import matplotlib as mpl
mpl.rc('xtick', labelsize=22)
mpl.rc('ytick', labelsize=22)
mpl.rcParams['pdf.fonttype'] = 42
map_dir = r'E:\Maps' # retrieved files from https://viewer.nationalmap.gov/basic/
data_dir = os.path.join('E:\Data')
#%% Load Data
"""
Dike + Herring River All
"""
# All Measured + Discharge Calculated off Measured
HR_all_resam_1hr_df = pd.read_csv(os.path.join(data_dir,"General Dike Data","HR_All_Data_Resampled_HourlyMeans_8272017-1212020.csv"))
data_cols = HR_all_resam_1hr_df.columns.drop("datetime")
HR_all_resam_1hr_df[data_cols] = HR_all_resam_1hr_df[data_cols].apply(pd.to_numeric, errors='coerce')
HR_all_resam_1hr_df["datetime"] =
|
pd.to_datetime(HR_all_resam_1hr_df["datetime"])
|
pandas.to_datetime
|
import numpy as np
import pandas as pd
voltages = np.concatenate([
np.arange(0,21),
np.arange(20,-21,-1),
np.arange(-20,1)
])
df =
|
pd.DataFrame({'Voltages': voltages})
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import time
import os
input_file = 'nyc-2017-yellow-taxi-trips-to-airport.cvs.gz'
output_file = 'nyc-2017-yellow-taxi-trips-to-airport.pkl.gz'
def main():
"""Loads the data for taxi trips to airports from step 2, cleans it and saves the result.
If output_file already exists, the function skips.
Remove the output_file manually in that case.
The input_file is loaded in chunks of 100,000 lines.
While loading simple progress info will be displayed.
After the whole file is loaded the function clean_data is applied.
That includes a transformation to efficient datatypes.
At the end the cleaned dataset is saved as a gzipped pickle file,
so that the datatypes are not lost.
Remember: pickle files should only be used for temporary storage, since
the format is not guaranteed to be stable between different lib versions.
Keyword Arguments: -
Returns: -
"""
print('=== nyc taxi to airport - step 3 clean data')
if os.path.exists(output_file):
print("output file exists:", output_file)
print("skipping")
return
df = load_data(input_file)
df = clean_data(df)
save_as_pickle_gz(df, output_file)
print('done')
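# Entry-point sketch (an assumption; the original invocation is not shown here).
# The guard belongs at the very end of the module, after all function definitions:
#   if __name__ == '__main__':
#       main()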
cols_to_use = [
'Unnamed: 0',
'tpep_pickup_datetime',
'tpep_dropoff_datetime',
'PULocationID',
'DOLocationID',
'trip_distance',
]
data_types = {
'PULocationID': np.int16,
'DOLocationID': np.int16,
}
dates_to_parse = ['tpep_pickup_datetime', 'tpep_dropoff_datetime']
def load_data(input_file):
"""Loads the dataframe from input_file.
The file will be loaded with pandas.read_csv with a chunksize of 100_000.
Simple progress info will be displayed during loading.
To speed up, the following transformations are done while loading:
- only the columns in cols_to_use are loaded
- data types are mapped as specified in dict data_types
- the columns specified in dates_to_parse will be parsed
Keyword Arguments:
input_file -- the filepath of the input file to read
Returns: the loaded dataframe
"""
print('loading file:', input_file)
df = pd.DataFrame()
show_progress = make_show_progress()
chunk_iterator = pd.read_csv(input_file,
compression='gzip',
chunksize=100_000,
index_col=0,
usecols=cols_to_use,
dtype=data_types,
parse_dates=dates_to_parse,
infer_datetime_format=True
)
for chunk in chunk_iterator:
df =
|
pd.concat([df, chunk])
|
pandas.concat
|
import nose
import os
import string
from distutils.version import LooseVersion
from datetime import datetime, date, timedelta
from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range
from pandas.compat import range, lrange, StringIO, lmap, lzip, u, zip
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import randn
from numpy.testing import assert_array_equal
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
def _skip_if_no_scipy():
try:
import scipy
except ImportError:
raise nose.SkipTest("no scipy")
@tm.mplskip
class TestSeriesPlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
def tearDown(self):
tm.close()
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
_check_plot_works(self.ts.plot, rot=0)
_check_plot_works(self.ts.plot, style='.', logy=True)
_check_plot_works(self.ts.plot, style='.', logx=True)
_check_plot_works(self.ts.plot, style='.', loglog=True)
_check_plot_works(self.ts[:10].plot, kind='bar')
_check_plot_works(self.iseries.plot)
_check_plot_works(self.series[:5].plot, kind='bar')
_check_plot_works(self.series[:5].plot, kind='line')
_check_plot_works(self.series[:5].plot, kind='barh')
_check_plot_works(self.series[:10].plot, kind='barh')
_check_plot_works(Series(randn(10)).plot, kind='bar', color='black')
@slow
def test_plot_figsize_and_title(self):
# figsize and title
import matplotlib.pyplot as plt
ax = self.series.plot(title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
import matplotlib.colors as colors
default_colors = plt.rcParams.get('axes.color_cycle')
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(kind='bar')
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(default_colors[i % len(default_colors)])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
ax = df.plot(kind='bar', color=custom_colors)
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(custom_colors[i])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot(kind='bar', colormap='jet')
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
# Test colormap functionality
ax = df.plot(kind='bar', colormap=cm.jet)
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
df.ix[:, [0]].plot(kind='bar', color='DodgerBlue')
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot(kind='bar', linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# stacked
ax = df.plot(kind='bar', stacked=True, linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# subplots
axes = df.plot(kind='bar', linewidth=2, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot(log=True, kind='bar')
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
def test_rotation(self):
df = DataFrame(randn(5, 5))
ax = df.plot(rot=30)
for l in ax.get_xticklabels():
self.assertEqual(l.get_rotation(), 30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_hist(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, by=self.ts.index.month)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_layout(self):
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n)})
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
import matplotlib.pyplot as plt
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
_check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
tm.close()
_check_plot_works(df.height.hist, by=df.gender, layout=(1, 2))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(1, 4))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(4, 1))
tm.close()
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf, close
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with tm.assertRaises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_by_no_extra_plots(self):
import matplotlib.pyplot as plt
n = 10
df = DataFrame({'gender': tm.choice(['Male', 'Female'], size=n),
'height': random.normal(66, 4, size=n)})
axes = df.height.hist(by=df.gender)
self.assertEqual(len(plt.get_fignums()), 1)
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure, close
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with tm.assertRaises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
@slow
def test_kde(self):
_skip_if_no_scipy()
_check_plot_works(self.ts.plot, kind='kde')
_check_plot_works(self.ts.plot, kind='density')
ax = self.ts.plot(kind='kde', logy=True)
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_kwargs(self):
_skip_if_no_scipy()
from numpy import linspace
_check_plot_works(self.ts.plot, kind='kde', bw_method=.5, ind=linspace(-100,100,20))
_check_plot_works(self.ts.plot, kind='density', bw_method=.5, ind=linspace(-100,100,20))
ax = self.ts.plot(kind='kde', logy=True, bw_method=.5, ind=linspace(-100,100,20))
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_color(self):
_skip_if_no_scipy()
ax = self.ts.plot(kind='kde', logy=True, color='r')
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0].get_color(), 'r')
@slow
def test_autocorrelation_plot(self):
from pandas.tools.plotting import autocorrelation_plot
_check_plot_works(autocorrelation_plot, self.ts)
_check_plot_works(autocorrelation_plot, self.ts.values)
@slow
def test_lag_plot(self):
from pandas.tools.plotting import lag_plot
_check_plot_works(lag_plot, self.ts)
_check_plot_works(lag_plot, self.ts, lag=5)
@slow
def test_bootstrap_plot(self):
from pandas.tools.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, self.ts, size=10)
def test_invalid_plot_data(self):
s = Series(list('abcd'))
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with tm.assertRaises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@tm.mplskip
class TestDataFramePlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
def tearDown(self):
tm.close()
@slow
def test_plot(self):
df = tm.makeTimeDataFrame()
_check_plot_works(df.plot, grid=False)
_check_plot_works(df.plot, subplots=True)
_check_plot_works(df.plot, subplots=True, use_index=False)
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
self._check_plot_fails(df.plot, kind='line', blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
_check_plot_works(df.plot, subplots=True, title='blah')
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
@slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self.assertEqual(ax.xaxis.get_label().get_text(), 'a')
@slow
def test_explicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b', label='LABEL')
self.assertEqual(ax.xaxis.get_label().get_text(), 'LABEL')
@slow
def test_plot_xy(self):
import matplotlib.pyplot as plt
# columns.inferred_type == 'string'
df = tm.makeTimeDataFrame()
self._check_data(df.plot(x=0, y=1),
df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'),
df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2),
df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@slow
def test_xcompat(self):
import pandas as pd
import matplotlib.pyplot as plt
df = tm.makeTimeDataFrame()
ax = df.plot(x_compat=True)
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
tm.assert_isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with pd.plot_params.use('x_compat', True):
ax = df.plot()
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
|
tm.close()
|
pandas.util.testing.close
|
import pandas as pd
import datetime
import json
import hashlib
import random
import ast
import os
import sys
import argparse
import math
from bokeh.plotting import *
from bokeh.models import ColumnDataSource, HoverTool, Legend, DatePicker, CustomJS
from bokeh.layouts import widgetbox, row, column
from bokeh.models.tickers import FixedTicker
from bokeh.palettes import brewer
# add the 'src' directory as one where we can import modules
src_dir = os.path.join(r'C:\Users\UOU\Desktop\GJ\openbadge-analysis-master')
sys.path.append(src_dir)
import openbadge_analysis as ob
def stack_bar(participation_values, member, labels, metric, choose):
"""
Creates a stacked bar graph showing percentages of participation for each member for each day/week + hover
:param participation_values: Dict{ Member : { date : {'turns': float, 'speak, float } }, Member : ... }
:param member: Member who is viewing the report (serves as base of bar stacks). Either..
- member key if no member_names dictionary is given, or
- member name if member_names dictionary is given
:param labels: List[date1, date2, ..., 'Average']
    :param metric: 'turns' or 'speak' (whether to use speaking turns or speaking time)
:param choose: Whether or not to add Date Picker to choose dates
:return: bokeh plot
"""
colors = brewer['Set2'][len(participation_values)]
members = {} # To order members
data = {}
bottoms = {}
color_members = {}
for date in labels:
bottoms[date] = 0
if member:
# Order the members so that 'member' is always on the bottom of the stacked bar graph, and so that bars
# will always be stacked in the same order.
# members is a dictionary, e.g. {'0': 'Member1', '1': 'Member2', etc... }
# color_members is a dictionary, e.g. {'Member1': 'color_hex_value_1', 'Member2': 'color_hex_value_2', etc... }
i = 1
for member_name in participation_values:
if member_name == member:
members[str(0)] = member_name
color_members[member_name] = colors[0]
else:
members[str(i)] = member_name
color_members[member_name] = colors[i]
i += 1
else:
i = 0
for member_name in participation_values:
members[str(i)] = member_name
color_members[member_name] = colors[i]
i += 1
total_particip = {'all': 0}
for member in participation_values:
total_particip[member] = 0
for date in labels:
if date in participation_values[member]:
particip = participation_values[member][date][metric]
else:
particip = 0
total_particip[member] += particip
total_particip['all'] += particip
for member in participation_values:
participation_values[member]['Average'] = {}
participation_values[member]['Average'][metric] = total_particip[member] / total_particip['all'] * 100
x = 1
for date in labels:
data[date] = {}
data[date]['date'] = []
data[date]['x'] = []
data[date]['y'] = []
data[date][metric] = []
data[date]['member'] = []
data[date]['color'] = []
for i in range(len(members)):
member = members[str(i)]
if date in participation_values[member]:
particip = participation_values[member][date][metric]
else:
particip = 0
data[date]['color'].append(color_members[member])
data[date]['date'].append(date)
data[date]['x'].append(x)
data[date][metric].append(particip)
data[date]['y'].append(bottoms[date] + particip/2)
data[date]['member'].append(member)
bottoms[date] += particip
x += 1
src_all = {}
for date in data:
src_all[date] = ColumnDataSource(data=data[date])
source_p_values = ColumnDataSource(data=participation_values)
source_colors = ColumnDataSource(data=color_members)
source_members = ColumnDataSource(data=members)
source_labels = ColumnDataSource(data=dict(labels=labels[:-2]))
height = 500
width = 800
if metric == 'turns':
hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 17px; font-weight: bold;">@member</span>
</div>
<div>
<span style="font-size: 17px;">Date: </span>
<span style="font-size: 17px; color: purple;">@date</span>
</div>
<div>
<span style="font-size: 17px; font-weight: bold; color: green;">@turns%</span>
<span style="font-size: 17px;"> Speaking Turns</span>
</div>
</div>
"""
)
p = figure(title='Your Percentage of Participation (Based on Number of Speaking Turns)',
plot_width=width, plot_height=height, x_range=[0.5,len(labels)+0.5], y_range=[-7,101],
tools=[hover], toolbar_location='above', sizing_mode='scale_width')
p.yaxis.axis_label = 'Your Percentage of Participation (Speaking Turns)'
elif metric == 'speak':
hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 17px; font-weight: bold;">@member</span>
</div>
<div>
<span style="font-size: 17px;">Date: </span>
<span style="font-size: 17px; color: purple;">@date</span>
</div>
<div>
<span style="font-size: 17px; font-weight: bold; color: green;">@speak%</span>
<span style="font-size: 17px;"> Speaking Time</span>
</div>
</div>
"""
)
p = figure(title='Your Percentage of Participation (Based on Amount of Speaking Time)',
plot_width=width, plot_height=height, x_range=[0.5,len(labels)+0.5], y_range=[-15,101],
tools=[hover], toolbar_location='above', sizing_mode='scale_width')
p.yaxis.axis_label = 'Your Percentage of Participation (Speaking Time)'
legends = []
rects = []
texts = []
    dates = sorted(data.keys())  # sorted() rather than .sort(): dict.keys() has no .sort() in Python 3
rect_avg = None
for date in dates:
rec = p.rect(source=src_all[date], width=.8, height=metric, x='x', y='y',
fill_color='color', line_color='white')
txt = p.text(
source=ColumnDataSource(data={'date': [data[date]['date'][0]], 'x': [data[date]['x'][0]]}),
text='date', x='x', y=-8, text_align='center', angle=.785) # radians
if date == 'Average':
rect_avg = rec
else:
if date != '':
rects.append(rec)
texts.append(txt)
# For legend
for member in color_members:
sq = p.square(110, 110, size=0, color=color_members[member])
legends.append((member, [sq]))
p.grid.grid_line_alpha = 0.4
label_font_style = 'normal' # 'italic', 'bold'
p.xaxis.axis_label = 'Date'
p.xaxis.axis_label_text_font_size = str(height/50) + 'pt'
p.xaxis.major_label_text_font_size = '0pt'
p.xaxis.axis_label_text_font_style = label_font_style
p.xaxis.ticker=FixedTicker(ticks=[0])
p.yaxis.major_label_text_font_size = str(height/50) + 'pt'
p.yaxis.axis_label_text_font_size = str(height/50) + 'pt'
p.yaxis.axis_label_text_font_style = label_font_style
legend = Legend(legends=legends, location=(0, -30))
p.add_layout(legend, 'right')
if choose:
date_pickers = []
for i in range(len(labels) - 2):
source_i = ColumnDataSource(data={'i':[i]})
if metric == 'turns':
cb = CustomJS(args={'source_p_values': source_p_values,
'source_colors': source_colors, 'source_labels': source_labels,
'source_members': source_members, 'txt_source': texts[i].data_source,
'source_i': source_i, 'r_source_avg': rect_avg.data_source,
'r_source': rects[i].data_source}, code="""
var d = cb_obj.get('value');
var dMs = Date.parse(d);
var dt = new Date(dMs);
var day = dt.getDate();
day_str = day.toString();
if (day < 10){
day_str = '0' + day.toString();
};
            var month = dt.getMonth() + 1; // JS Date.getMonth() is zero-based, so add 1 for the calendar month
console.log(month);
month_str = month.toString();
if (month < 10) {
month_str = '0' + month.toString();
};
var date_str = month_str + '/' + day_str;
var labels_data = source_labels.get('data');
var i = source_i.get('data')['i'][0];
labels_data['labels'].splice(i, 1, date_str);
var labels = labels_data['labels'];
console.log(labels);
var p_data = source_p_values.get('data');
var total_turns = {'all': 0};
for (member in p_data) {
total_turns[member] = 0;
for (index in labels) {
var turns = 0;
var date = labels[index];
console.log(p_data[member]);
if (date in p_data[member]) {
turns = p_data[member][date]['turns'];
}
console.log(turns);
total_turns[member] += turns;
total_turns['all'] += turns;
console.log(total_turns[member]);
console.log(total_turns['all']);
}
}
for (member in p_data) {
p_data[member]['Average'] = {};
console.log(total_turns[member]);
p_data[member]['Average']['turns'] = total_turns[member] / total_turns['all'] * 100;
}
var colors = source_colors.get('data');
var members = source_members.get('data');
new_data = {}
bottom = 0
new_data['date'] = []
new_data['y'] = []
new_data['turns'] = []
new_data['member'] = []
new_data['color'] = []
for (i=0; i<Object.keys(members).length; i++){
member = members[i.toString()];
var turns = 0;
if (date_str in p_data[member]) {
turns = p_data[member][date_str]['turns'];
};
new_data['color'].push(colors[member]);
new_data['date'].push(date_str);
new_data['turns'].push(turns);
new_data['y'].push(bottom + turns/2);
new_data['member'].push(member);
bottom += turns;
}
new_avg_data = {}
bottom = 0
new_avg_data['date'] = []
new_avg_data['y'] = []
new_avg_data['turns'] = []
new_avg_data['member'] = []
new_avg_data['color'] = []
for (i=0; i<Object.keys(members).length; i++){
member = members[i.toString()];
turns = p_data[member]['Average']['turns'];
new_avg_data['color'].push(colors[member]);
new_avg_data['date'].push('Average');
new_avg_data['turns'].push(turns);
new_avg_data['y'].push(bottom + turns/2);
new_avg_data['member'].push(member);
bottom += turns;
}
var r_data = r_source.get('data');
var r_avg_data = r_source_avg.get('data');
var txt_data = txt_source.get('data');
for (key in new_data) {
r_data[key] = new_data[key];
txt_data[key] = new_data[key];
r_avg_data[key] = new_avg_data[key];
}
console.log(r_avg_data);
r_source.trigger('change');
r_source_avg.trigger('change');
txt_source.trigger('change');
"""
)
elif metric == 'speak':
cb = CustomJS(args={'source_p_values': source_p_values,
'source_colors': source_colors, 'source_labels': source_labels,
'source_members': source_members, 'txt_source': texts[i].data_source,
'source_i': source_i, 'r_source_avg': rect_avg.data_source,
'r_source': rects[i].data_source}, code="""
var d = cb_obj.get('value');
var dMs = Date.parse(d);
var dt = new Date(dMs);
var day = dt.getDate();
day_str = day.toString();
if (day < 10){
day_str = '0' + day.toString();
};
            var month = dt.getMonth() + 1; // JS Date.getMonth() is zero-based, so add 1 for the calendar month
console.log(month);
month_str = month.toString();
if (month < 10) {
month_str = '0' + month.toString();
};
var date_str = month_str + '/' + day_str;
var labels_data = source_labels.get('data');
var i = source_i.get('data')['i'][0];
labels_data['labels'].splice(i, 1, date_str);
var labels = labels_data['labels'];
console.log(labels);
var p_data = source_p_values.get('data');
var total_turns = {'all': 0};
for (member in p_data) {
total_turns[member] = 0;
for (index in labels) {
var turns = 0;
var date = labels[index];
console.log(p_data[member]);
if (date in p_data[member]) {
turns = p_data[member][date]['speak'];
}
console.log(turns);
total_turns[member] += turns;
total_turns['all'] += turns;
console.log(total_turns[member]);
console.log(total_turns['all']);
}
}
for (member in p_data) {
p_data[member]['Average'] = {};
console.log(total_turns[member]);
p_data[member]['Average']['speak'] = total_turns[member] / total_turns['all'] * 100;
}
var colors = source_colors.get('data');
var members = source_members.get('data');
new_data = {}
bottom = 0
new_data['date'] = []
new_data['y'] = []
new_data['speak'] = []
new_data['member'] = []
new_data['color'] = []
for (i=0; i<Object.keys(members).length; i++){
member = members[i.toString()];
var turns = 0;
if (date_str in p_data[member]) {
turns = p_data[member][date_str]['speak'];
};
new_data['color'].push(colors[member]);
new_data['date'].push(date_str);
new_data['speak'].push(turns);
new_data['y'].push(bottom + turns/2);
new_data['member'].push(member);
bottom += turns;
}
new_avg_data = {}
bottom = 0
new_avg_data['date'] = []
new_avg_data['y'] = []
new_avg_data['speak'] = []
new_avg_data['member'] = []
new_avg_data['color'] = []
for (i=0; i<Object.keys(members).length; i++){
member = members[i.toString()];
turns = p_data[member]['Average']['speak'];
new_avg_data['color'].push(colors[member]);
new_avg_data['date'].push('Average');
new_avg_data['speak'].push(turns);
new_avg_data['y'].push(bottom + turns/2);
new_avg_data['member'].push(member);
bottom += turns;
}
var r_data = r_source.get('data');
var r_avg_data = r_source_avg.get('data');
var txt_data = txt_source.get('data');
for (key in new_data) {
r_data[key] = new_data[key];
txt_data[key] = new_data[key];
r_avg_data[key] = new_avg_data[key];
}
console.log(r_avg_data);
r_source.trigger('change');
r_source_avg.trigger('change');
txt_source.trigger('change');
"""
)
m = int(labels[i].split('/')[0])
d = int(labels[i].split('/')[1])
date_pickers.append(DatePicker(title='Day ' + str(i+1), min_date=datetime.datetime(2016,6,1),
max_date=datetime.datetime.now(),
value=datetime.datetime(datetime.datetime.now().year,m,d),
callback=cb,
width=width/5, height=200)
)
return column(children=[p, row(children=date_pickers)], sizing_mode='scale_width')
else:
return p
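# Usage sketch (illustrative; 'Member1' is a placeholder name, not from the original data):
#   participation = percentage_participation(df_stitched_all, labels, member_names)
#   plot = stack_bar(participation, member='Member1', labels=labels, metric='turns', choose=False)
# stack_bar returns a bokeh layout (figure plus date pickers) when choose is truthy,
# otherwise just the figure.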
# Creates a stacked bar graph showing each member's percentage of participation for each day/week, with hover info.
# return: bokeh plot
def percentage_participation(df_stitched_all, labels, member_names=None):
"""
Process data for percentage participation for a group
:param df_stitched_all: a list of lists of df_stitched
:param labels: a list of dates/weeks for which the df_stitched lists are for
:param member_names: A dictionary mapping member keys to member names (First Last format)
    :return: participation_values Dict{ Member : { date : {'turns': float, 'speak': float } }, Member : ... }
turns include self-turns
"""
participation_values = {}
for i in range(len(df_stitched_all)):
label = labels[i]
df_stitched_list = df_stitched_all[i]
if len(df_stitched_list) == 0:
print('No meetings for ' + str(label))
continue
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
Categorical,
Index,
IntervalIndex,
Series,
Timedelta,
bdate_range,
date_range,
isna,
)
import pandas._testing as tm
from pandas.core import nanops, ops
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
tm.assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.iloc[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq("D", how="end")
@pytest.mark.parametrize(
"target_add,input_value,expected_value",
[
("!", ["hello", "world"], ["hello!", "world!"]),
("m", ["hello", "world"], ["hellom", "worldm"]),
],
)
def test_string_addition(self, target_add, input_value, expected_value):
# GH28658 - ensure adding 'm' does not raise an error
a = Series(input_value)
result = a + target_add
expected = Series(expected_value)
tm.assert_series_equal(result, expected)
def test_divmod(self):
# GH#25557
a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])
b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])
result = a.divmod(b)
expected = divmod(a, b)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
result = a.rdivmod(b)
expected = divmod(b, a)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
@pytest.mark.parametrize("index", [None, range(9)])
def test_series_integer_mod(self, index):
# GH#24396
s1 = Series(range(1, 10))
s2 = Series("foo", index=index)
msg = "not all arguments converted during string formatting"
with pytest.raises(TypeError, match=msg):
s2 % s1
def test_add_with_duplicate_index(self):
# GH14227
s1 = Series([1, 2], index=[1, 1])
s2 = Series([10, 10], index=[1, 2])
result = s1 + s2
expected = Series([11, 12, np.nan], index=[1, 1, 2])
tm.assert_series_equal(result, expected)
def test_add_na_handling(self):
from datetime import date
from decimal import Decimal
s = Series(
[Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
)
result = s + s.shift(1)
result2 = s.shift(1) + s
assert isna(result[0])
assert isna(result2[0])
def test_add_corner_cases(self, datetime_series):
empty = Series([], index=Index([]), dtype=np.float64)
result = datetime_series + empty
assert np.isnan(result).all()
result = empty + empty.copy()
assert len(result) == 0
# FIXME: dont leave commented-out
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = datetime_series.astype(int)[:-5]
added = datetime_series + int_ts
expected = Series(
datetime_series.values[:-5] + int_ts.values,
index=datetime_series.index[:-5],
name="ts",
)
tm.assert_series_equal(added[:-5], expected)
def test_mul_empty_int_corner_case(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({"x": 0.0})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=["x"]))
def test_sub_datetimelike_align(self):
# GH#7500
# datetimelike ops need to align
dt = Series(date_range("2012-1-1", periods=3, freq="D"))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
tm.assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
tm.assert_series_equal(result, expected)
def test_alignment_doesnt_change_tz(self):
# GH#33671
dti = pd.date_range("2016-01-01", periods=10, tz="CET")
dti_utc = dti.tz_convert("UTC")
ser = Series(10, index=dti)
ser_utc = Series(10, index=dti_utc)
# we don't care about the result, just that original indexes are unchanged
ser * ser_utc
assert ser.index is dti
assert ser_utc.index is dti_utc
def test_arithmetic_with_duplicate_index(self):
# GH#8363
# integer ops with a non-unique index
index = [2, 2, 3, 3, 4]
ser = Series(np.arange(1, 6, dtype="int64"), index=index)
other = Series(np.arange(5, dtype="int64"), index=index)
result = ser - other
expected = Series(1, index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# GH#8363
# datetime ops with a non-unique index
ser = Series(date_range("20130101 09:00:00", periods=5), index=index)
other = Series(date_range("20130101", periods=5), index=index)
result = ser - other
expected = Series(Timedelta("9 hours"), index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Comparisons
class TestSeriesFlexComparison:
@pytest.mark.parametrize("axis", [0, None, "index"])
def test_comparison_flex_basic(self, axis, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
result = getattr(left, op)(right, axis=axis)
expected = getattr(operator, op)(left, right)
tm.assert_series_equal(result, expected)
def test_comparison_bad_axis(self, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
msg = "No axis named 1 for object type"
with pytest.raises(ValueError, match=msg):
getattr(left, op)(right, axis=1)
@pytest.mark.parametrize(
"values, op",
[
([False, False, True, False], "eq"),
([True, True, False, True], "ne"),
([False, False, True, False], "le"),
([False, False, False, False], "lt"),
([False, True, True, False], "ge"),
([False, True, False, False], "gt"),
],
)
def test_comparison_flex_alignment(self, values, op):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"values, op, fill_value",
[
([False, False, True, True], "eq", 2),
([True, True, False, False], "ne", 2),
([False, False, True, True], "le", 0),
([False, False, False, True], "lt", 0),
([True, True, True, False], "ge", 0),
([True, True, False, False], "gt", 0),
],
)
def test_comparison_flex_alignment_fill(self, values, op, fill_value):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right, fill_value=fill_value)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
class TestSeriesComparison:
def test_comparison_different_length(self):
a = Series(["a", "b", "c"])
b = Series(["b", "a"])
msg = "only compare identically-labeled Series"
with pytest.raises(ValueError, match=msg):
a < b
a = Series([1, 2])
b = Series([2, 3, 4])
with pytest.raises(ValueError, match=msg):
a == b
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes(self, opname):
# GH#15115
ser = Series([1, 3, 2], index=range(3))
const = 2
result = getattr(ser, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes_empty(self, opname):
# GH#15115 empty Series case
ser = Series([1, 3, 2], index=range(3))
empty = ser.iloc[:0]
const = 2
result = getattr(empty, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt],
)
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")]
)
def test_ser_cmp_result_names(self, names, op):
# datetime64 dtype
dti = pd.date_range("1949-06-07 03:00:00", freq="H", periods=5, name=names[0])
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# datetime64tz dtype
dti = dti.tz_localize("US/Central")
dti =
|
pd.DatetimeIndex(dti, freq="infer")
|
pandas.DatetimeIndex
|
# coding: utf-8
# In[193]:
#Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import plotly
import plotly.plotly as py
import plotly.tools as tls
import plotly.graph_objs as go
import time
import pandas_datareader as web
# Package and modules for importing data;
import datetime
import requests
import json as js
import csv
# In[195]:
# Calling API for Microsoft stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=MSFT&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file1 = api_call.text
file1=js.loads(api_call.text)
# In[197]:
file1['Time Series (Daily)']['2017-07-27']
# In[198]:
# To write into csv
from datetime import datetime
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file1
#f = csv.writer(open("abc.csv", ""))
# Write CSV Header, If you dont need that, remove this line
#f.writerow(["pk", "model", "codename", "name", "content_type"])
temp_data = file1['Time Series (Daily)']
with open('Microsoft_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Microsoft_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[199]:
# Changing time to Day Month Year format
temp_data = file1['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[200]:
Microsoft=pd.read_csv('Microsoft_stock.csv', parse_dates=True, index_col=0 )
Microsoft.dropna(inplace=True)
print(Microsoft.head(5))
# In[201]:
Microsoft.index.values
# In[202]:
#Cleaning the index values. Changing time to Day Month Year format
Address_M='Microsoft_stock.csv'
Microsoft=pd.read_csv(Address_M)
Microsoft['Date'] = pd.to_datetime(Microsoft['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[203]:
Microsoft[['High','Low']].plot()
plt.show()
print()
# In[204]:
a=Microsoft['Date']
b=Microsoft['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Microsoft')
# In[205]:
# Calling API for Apple's stock prices
headers = {
'X-API-KEY': 'Get api key ',
}
API_KEY = 'Get api key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=AAPL&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file2 = api_call.text
file2=js.loads(api_call.text)
# In[206]:
# To write into csv
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file2
temp_data = file2['Time Series (Daily)']
with open('Apple_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Apple_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[207]:
# Changing time to Day Month Year format
temp_data = file2['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[208]:
Apple=pd.read_csv('Apple_stock.csv', parse_dates=True, index_col=0 )
Apple.dropna(inplace=True)
# In[209]:
#Cleaning the index values. Changing time to Day Month Year format
Address_A='Apple_stock.csv'
Apple=pd.read_csv(Address_A)
Apple['Date'] = pd.to_datetime(Apple['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[210]:
a=Apple['Date']
b=Apple['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Apple')
# In[211]:
# Calling API for Facebook stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=FB&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file3 = api_call.text
file3=js.loads(api_call.text)
# In[212]:
# To write into csv
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file3
temp_data = file3['Time Series (Daily)']
with open('Facebook_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Facebook_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[213]:
# Changing time to Day Month Year format
temp_data = file3['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[214]:
Facebook = pd.read_csv('Facebook_stock.csv', parse_dates=True, index_col=0 )
Facebook.dropna(inplace=True)
# In[215]:
#Cleaning the index values. Changing time to Day Month Year format
Address_F='Facebook_stock.csv'
Facebook=pd.read_csv(Address_F)
Facebook['Date'] = pd.to_datetime(Facebook['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[216]:
a=Facebook['Date']
b=Facebook['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Facebook')
# In[217]:
# Calling API for Google stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=GOOG&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file4 = api_call.text
file4=js.loads(api_call.text)
a=file4['Time Series (Daily)']
# In[218]:
x = file4
temp_data = file4['Time Series (Daily)']
with open('Google_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Google_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[219]:
# Changing time to Day Month Year format
temp_data = file4['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[220]:
Google = pd.read_csv('Google_stock.csv', parse_dates=True, index_col=0 )
Google.dropna(inplace=True)
# In[221]:
#Cleaning the index values. Changing time to Day Month Year format
Address_G='Google_stock.csv'
Google=pd.read_csv(Address_G)
Google['Date'] = pd.to_datetime(Google['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[222]:
a=Google['Date']
b=Google['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Google')
# In[224]:
# Calling API for Disney stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=DIS&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file5 = api_call.text
file5=js.loads(api_call.text)
# In[225]:
# To write into csv
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file5
temp_data = file5['Time Series (Daily)']
with open('Disney_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Disney_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[226]:
# Changing time to Day Month Year format
temp_data = file5['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[227]:
Disney = pd.read_csv('Disney_stock.csv', parse_dates=True, index_col=0 )
Disney.dropna(inplace=True)
# In[228]:
#Cleaning the index values. Changing time to Day Month Year format
Address_D='Disney_stock.csv'
Disney=pd.read_csv(Address_D)
Disney['Date'] = pd.to_datetime(Disney['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[230]:
a=Disney['Date']
b=Disney['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Disney')
# In[231]:
# Calling API for Netflix stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=NFLX&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file6 = api_call.text
file6=js.loads(api_call.text)
# In[232]:
# To write into csv
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file6
temp_data = file6['Time Series (Daily)']
with open('Netflix_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Netflix_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[233]:
# Changing time to Day Month Year format
temp_data = file6['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[234]:
Netflix = pd.read_csv('Netflix_stock.csv', parse_dates=True, index_col=0 )
Netflix.dropna(inplace=True)
# In[235]:
#Cleaning the index values. Changing time to Day Month Year format
Address_N='Netflix_stock.csv'
Netflix=pd.read_csv(Address_N)
Netflix['Date'] =
|
pd.to_datetime(Netflix['Date'])
|
pandas.to_datetime
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 16:10:09 2019
@author: andreypoletaev
"""
import numpy as np
import pandas as pd
import freud
from scipy.spatial import Voronoi
from matplotlib import pyplot as plt
import matplotlib as mpl
from colorsys import rgb_to_hls, hls_to_rgb
from scipy.signal import butter, filtfilt
from scipy.optimize import root_scalar
from scipy.optimize import curve_fit as cf
from scipy.special import erf, gamma
from scipy.interpolate import interp1d
from scipy.ndimage import gaussian_filter1d
from timeit import default_timer as timer
from datetime import datetime as dt
import crystal_utils as cu
import networkx as nx
from itertools import chain, repeat
from os import path
from glob import glob
from re import split
from batlow import cm_data as batlow_cm
kb = 8.617e-05 ## [eV/Kelvin]
# =============================================================================
# %% cosmetic things
# =============================================================================
## colors for mobile ions
metal_colors = {'Na':'tab:orange', 'K':'#7d02d4', 'Ag':'tab:red',
'Li':'tab:green'}
## perceptually uniform color map, Crameri, F. Scientific Colour Maps, http://www.fabiocrameri.ch/colourmaps (2020).
## (and the Nat Comms paper, doi.org/10.1038/s41467-020-19160-7 )
# batlow_map = LinearSegmentedColormap.from_list('batlow', batlow_cm)
batlow_cdict = {'red' : np.array(batlow_cm)[:,0],
'green' : np.array(batlow_cm)[:,1],
'blue' : np.array(batlow_cm)[:,2]}
batlow_hls = [rgb_to_hls(*i) for i in batlow_cm]
def batlow_lightness_scaled(min_l=batlow_hls[0][1], max_l=batlow_hls[-1][1]):
''' Adjusts lightness on the otherwise perceptually uniform colormap.
Returns in rgb format. '''
linear_lightnesses = np.linspace(batlow_hls[0][1], batlow_hls[-1][1], 256)
nonlinearity = [ i[1]-j for i, j in zip(batlow_hls, linear_lightnesses)]
scaling = abs(max_l - min_l) / abs(batlow_hls[-1][1] - batlow_hls[0][1])
new_linear_lightnesses = np.linspace(min_l, max_l, 256)
new_lightnesses = [scaling*n + nll for nll, n in zip(new_linear_lightnesses, nonlinearity)]
return [hls_to_rgb(b[0], l, b[2]) for b,l in zip(batlow_hls, new_lightnesses)]
## dictionary of colors to make a LinearSegmentedColormap
## that is like coolwarm but with a darker middle
cdict = {'blue':[[0., 1., 1.], [0.5,0.6,0.6], [1., 0., 0.]],
'green':[[0., 0., 0.],[0.5,0.6,0.6], [1., 0., 0.]],
'red':[[0., 0., 0.], [0.5,0.6,0.6], [1., 1., 1.]] }
zs = ['z_all', '012', '037', '062', '087']
single_zs = ['012', '037', '062', '087']
dims = {'x':0, 'y':1, 'z':3, 'dx':0, 'dy':1, 'dz':3}
# =============================================================================
# %% a list flattening function for lists of strings (filenames)
# ## flatten returns an iterator (usually sufficient),
# ## flattened makes it into a proper list
# =============================================================================
flatten = lambda l: chain.from_iterable(repeat(x,1) if isinstance(x,str) else x for x in l)
def flattened(nested_list_input):
flat_list_output = list()
for x in flatten(nested_list_input) : flat_list_output.append(x)
return flat_list_output
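## hedged usage sketch (editor's addition, illustrative only, never called):
## mixed nesting of strings and lists of strings collapses to a flat list.
def _demo_flattened():
    mixed = ['run1.csv', ['run2.csv', 'run3.csv'], 'run4.csv']
    return flattened(mixed)  ## -> ['run1.csv', 'run2.csv', 'run3.csv', 'run4.csv']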
# =============================================================================
# %% aggregation function for edges of graphs
# ## this is used in plots of correlations, the col is typically 'dt'
# =============================================================================
agg_edges_time = lambda df, col: df.groupby(df[col].apply(lambda x: round(x, 2))).count()
# =============================================================================
# %% running-average function for smoothing curves - especially RDF
# =============================================================================
def running_mean(x,N) :
cumsum = np.cumsum(np.insert(x,0,0))
return (cumsum[N:] - cumsum[:-N])/N
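## hedged usage sketch (editor's addition, illustrative only, never called):
## a 5-point running mean of a length-100 signal gives 100-5+1 = 96 averages.
def _demo_running_mean():
    noisy = np.sin(np.linspace(0, 2*np.pi, 100))
    return running_mean(noisy, 5)  ## len(result) == 96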
# =============================================================================
# %% define helper functions
# =============================================================================
def s2n(s):
'''
check if string is a number and convert it to a number if so
'''
try :
return int(s)
except ValueError:
try :
return float(s)
except ValueError:
return False
def rot(angle):
'''rotation matrix'''
return np.array([[np.cos(angle), -np.sin(angle)],[np.sin(angle), np.cos(angle)]])
def in_bounds(pt, xymin, xymax):
''' check if point is between (xy)_min and (xy)_max
2020/06/13 : added left side equality
This is a general method for any number of dimensions '''
return (sum(pt >= xymin) == len(pt)) & (sum(pt <= xymax) == len(pt))
# =============================================================================
# %% visualization function from freud. This method started here:
# ## freud.readthedocs.io/en/v1.2.0/examples/module_intros/Voronoi-Voronoi.html
# =============================================================================
def draw_voronoi(box, points, cells, nlist=None, color_by_sides=False, ax=None,
draw_points=False, draw_box=False, single_poly_color=None,
cell_numbers=None, cell_text=None, color_by_property=None,
draw_nns=False, skip_polys=False, property_clim=[0, 1],
alpha=0.4, cmap='coolwarm', draw_colorbar=False):
''' This method started out here:
freud.readthedocs.io/en/v1.2.0/examples/module_intros/Voronoi-Voronoi.html
AP added simple flags for: draw_pts, draw_box, coloring by a property,
writing cell numbers next to the plotted polygons.
Distinction between 'points' and 'cells': points are ALL the points (centers
of Voronoi polygons) in the tessellation, while cells are the polygons
(polytopes in freud parlance) that are to be visualized. Therefore,
len(cells) must be <= len(points), otherwise an error will be thrown.
Coloring by a property: pass an iterable, color_by_property, with indices
matching the cells to be plotted, and min/max limits on it via property_clim
Cell numbers: pass an iterable, cell_numbers, of numbers that matches the
length of polygons (input points) to be plotted AND the indices of the cells
(among points) that are getting plotted. Custom labels with cell_text.
'''
## AP adds specifying axes in which to make the plot
if ax is None: ax = plt.gca()
# Draw Voronoi cells
patches = [plt.Polygon(cell[:, :2]) for cell in cells]
patch_collection = mpl.collections.PatchCollection(patches, alpha=alpha,
# edgecolors='black', ## AP took out
facecolors=single_poly_color)
if single_poly_color is not None: ## No color map needed
colors = [1 for cell in cells]
bounds = [1, 1]
patch_collection.set_edgecolor('black')
elif color_by_property is None: ## need to make a color map
if color_by_sides:
colors = [len(cell) for cell in cells] ## original said voro.polytopes for the full graph,
## this allows plotting a specific subset of cells
else: ## choose random colors for all polygons
colors = np.random.permutation(np.arange(len(patches)))
patch_collection.set_edgecolor('black')
# cmap = plt.cm.Set1 ## AP took out
cmap = plt.cm.get_cmap('Set1', np.unique(colors).size) ## this essentially ranks all the cells without checking absolute differences
bounds = np.array(range(min(colors), max(colors) + 2))
## more finagling can be done here to normalize color map if need be
elif color_by_property is not None: ## color by property
if type(color_by_property) == pd.core.series.Series:
colors = [color_by_property.iloc[i] for i, cell in enumerate(cells)] ## normalized below
else :
colors = [color_by_property[i] for i, cell in enumerate(cells)]
bounds = property_clim
cmap = plt.cm.get_cmap(cmap) ## assuming 256 colors resolution
patch_collection.set_array(np.array(colors))
patch_collection.set_cmap(cmap)
patch_collection.set_clim(bounds[0], bounds[-1])
## option to skip the polytopes and only do other stuff
if not skip_polys:
ax.add_collection(patch_collection)
# Draw points
if draw_points:
pt_colors = np.random.permutation(np.arange(len(points))) ## AP
plt.scatter(points[:, 0], points[:, 1], c=pt_colors, s=6) ## AP change to pt_colors
## AP: Write the numbers of polygons, given #cells ≤ #points
## 2020/07/09: comment out the old, add the option for custom text
if cell_numbers is not None:
# for c, cn in zip(cells, cell_numbers):
# ax.text(points[cn, 0], points[cn, 1], cn, fontsize=8)
for i, cn in enumerate(cell_numbers):
text = f'({cn},\n{cell_text[i]})' if cell_text is not None else cn
ax.text(points[cn, 0], points[cn, 1], text, fontsize=8)
plt.xlim((-box.Lx / 2, box.Lx / 2))
plt.ylim((-box.Ly / 2, box.Ly / 2))
## Set equal aspect and draw the box
if draw_box: ## added AP
# ax.set_aspect('equal', 'datalim') ## commented out AP
box_patch = plt.Rectangle([-box.Lx / 2, -box.Ly / 2], box.Lx, box.Ly, alpha=1, fill=None)
ax.add_patch(box_patch)
## Draw nearest-neighbor lines: this is freud 1.2.0 back-compatibility
## For freud 2.2.0, use the below with flag draw_neighbors
if nlist is not None:
bonds = np.asarray([points[j] - points[i] for i, j in zip(nlist.index_i, nlist.index_j)])
box.wrap(bonds)
line_data = np.asarray([[points[nlist.index_i[i]],
points[nlist.index_i[i]] + bonds[i]] for i in range(len(nlist.index_i))])
line_data = line_data[:, :, :2]
line_collection = mpl.collections.LineCollection(line_data, alpha=0.3)
ax.add_collection(line_collection)
## connect nearest neighbors, freud 2.2.0
if draw_nns > 0:
que = freud.locality.AABBQuery(box, points)
query_args = dict(mode='nearest', num_neighbors=draw_nns, exclude_ii=True)
result = list(que.query(points, query_args))
bond_vectors = np.asarray([points[x[1]] - points[x[0]] for x in result])
bond_vectors = box.wrap(bond_vectors)
line_data = [[points[result[i][0]], points[result[i][0]] + bond_vectors[i]] \
for i in range(len(result))]
line_data = np.asarray(line_data)[:, :, :2] ## planarize
line_collection = mpl.collections.LineCollection(line_data, alpha=0.3)
ax.add_collection(line_collection)
# Show colorbar for number of sides
if color_by_sides or draw_colorbar:
cb = plt.colorbar(patch_collection, ax=ax, ticks=bounds, boundaries=bounds)
cb.set_ticks(cb.formatter.locs + 0.5)
cb.set_ticklabels((cb.formatter.locs - 0.5).astype('int'))
cb.set_label("Number of sides", fontsize=12)
# =============================================================================
# %% generating a lattice of repeating points for making the Voronoi grid
# ## Deprecated: the grid is generated from the crystal structure file
# =============================================================================
def gen_grid(pt0, a1, a2, angle=0, angle2=np.pi/6, xymin=np.array([-0.05,-0.1]), xymax=np.array([1.05,1.15])):
''' generate array of points within bounds '''
## initiate list of points
xs = [pt0[0]]
ys = [pt0[1]]
va1 = np.dot(rot(angle), np.array([a1,0]))
va2 = np.dot(np.dot(rot(angle),rot(angle2)), np.array([a2,0]))
numa1 = (xymax-xymin) / va1
numa2 = (xymax-xymin) / va2
zeroa1 = (pt0-xymin) / va1
zeroa2 = (pt0-xymin) / va2
for a1i in np.round(np.arange(-max(zeroa1)*5,max(numa1-zeroa1)*5)) : ## x direction
for a2i in np.round(np.arange(-max(zeroa2)*5,max(numa2-zeroa2)*5)) : ## y direction
pt = pt0 + a1i * va1 + a2i * va2
if in_bounds(pt,xymin,xymax):
xs.append(pt[0])
ys.append(pt[1])
return xs, ys
# =============================================================================
# %% check neighbors with arbitrary connection indices
# ## this is only relevant for older versions of freud.
# ## DEPRECATED; was used with freud 1.2.0 back in 2019
# =============================================================================
def are_neighbors(cell1s, cell2s, index_i, index_j):
'''
return true if cell2s are nearest-neighbors of cell1s,
using indices index_i and index_j
Note that indices are arbitrary and could be 2-nearest, 1-nearest, or else
'''
assert len(cell2s) == len(cell1s), 'cell1s and cell2s must be same length'
return [cell2s[k] in index_j[np.where(index_i==cell1s[k])] for k in range(len(cell1s)) ]
# =============================================================================
# %% make dictionaries for distances to oxygens and for path lengths
# =============================================================================
def edge_distances_to_oi(folder='.', zs=['z_all', '012', '037', '062', '087'],max_r=4):
''' returns dict of (max_r+1) x (max_r+1) matrices with counts of edges
aggregated by their distances to Oi sites. max_r must be passed as input
max_r can be calculated from the "old_ox_r" or "new_ox_r" columns
2020/06/16: saving time, save a set of distances for every folder,
and first try to look it up and load it'''
distances = dict()
for zz in zs:
try: distances[zz] = np.loadtxt(folder+'/edge dists {}.csv'.format(zz)).astype(int)
except:
## calculate from paths and Oi distances
try:
dists = np.zeros((max_r+1,max_r+1))
oi_sites = np.loadtxt(folder+'/oxygen_cells_{}.csv'.format(zz)).astype(int)
paths = np.loadtxt(folder+'/paths{}.csv'.format(zz),delimiter=',').astype(int)
# print('loaded site-to-site paths')
for edge in np.argwhere(paths==1): ## count all 1-NN paths
dists[min(paths[oi_sites,edge[0]]),min(paths[oi_sites,edge[1]])] += 0.5 if edge[0] != edge[1] else 1
# print('processed all edges')
## remove zeros as this will be divided by
for i in range(len(dists)):
for j in range(len(dists[0])):
if dists[i,j] == 0: dists[i,j] = 1
# print('removed zeros')
## assign dictionary
distances[zz] = dists
## save
np.savetxt(folder+'/edge dists {}.csv'.format(zz),distances[zz],fmt='%d',delimiter=',')
except: print(f'missing paths or Oi cells at z = {zz}')
return distances
def sites_by_distance(folder='.', zs=['z_all', '012', '037', '062', '087']):
''' returns dict of lists w/indices of sites by their network distance
from the nearest Oi, from within the specific folder.
This is usually where sites_by_r[plane][radius] calls come from '''
sites = dict()
for zz in zs:
try:
oi_sites = np.loadtxt(folder+'/oxygen_cells_{}.csv'.format(zz)).astype(int)
paths = np.loadtxt(folder+'/paths{}.csv'.format(zz),delimiter=',').astype(int)
path_lengths = sorted(set(flatten(paths)))
_ = path_lengths.pop() ## take out longest
sites[zz] = [[] for p in range(max(path_lengths)+2)] ## could in principle throw an error
for s in range(len(paths)) : ## each path is a square matrix
sites[zz][min(paths[oi_sites,s])].append(s)
for r in range(len(sites[zz])): ## prune each from end
if not sites[zz][-1] : sites[zz] = sites[zz][:-1]
except: print(f'something missing for z = {zz}')
return sites
def BR_sites(folder='.', zs=['z_all', '012', '037', '062', '087']) :
''' retrieves (from pre-saved files) the 1D arrays with network indices of
Beevers-Ross sites for beta-aluminas. All this is only
in reference to a pre-defined "grid" that defines the network.
AP moved from hop_utils 2020/06/23 '''
BR_dict = dict()
for zz in zs:
try:
BR_dict[zz] = np.loadtxt(folder+'/sites BR {}.csv'.format(zz),delimiter=',')
        except: print(f'missing BR sites at z = {zz} in folder {folder}')
return BR_dict
def site_paths(folder = '.', zs=['z_all', '012', '037', '062', '087']):
''' retrieves number_of_polygons x number_of_polygons arrays with network path lengths
between mobile-ion sites from pre-saved files. All this is only in
reference to a pre-defined "grid" that defines the network.
AP moved from hop_utils 2020/06/23 '''
paths = dict()
for zz in zs:
try:
paths[zz] = np.loadtxt(folder+'/paths{}.csv'.format(zz),delimiter=',').astype(int)
except: print(f'missing paths at z = {zz}')
return paths
def o_sites(folder = '.', zs=['z_all', '012', '037', '062', '087']):
''' retrieves (from pre-saved files) the 1D arrays with network indices of sites
that have an edge that is occupied by an O_interstitial. All this is only
in reference to a pre-defined "grid" that defines the network.
AP moved from hop_utils 2020/06/23 '''
paths = dict()
for zz in zs:
try:
paths[zz] = np.loadtxt(folder+'/oxygen_cells_{}.csv'.format(zz),delimiter=',').astype(int)
except: print(f'missing O sites z = {zz}')
return paths
# =============================================================================
# %% load a list of hops for a plane nicely
# ## update for adding reverse column if it is not already there
# ## (it was recorded by ion and in simulation-chronological order)
# ## 2020/04/07: adding oxygen path input as 'old_ox_r', 'new_ox_r'
# ## 2020/06/13: adding the do_update flag
# =============================================================================
def load_plane(path, numpolys=200, numplanes=1, verbose=True, do_update=True,
oxygen_path=None, paths_path=None, BR=[]):
total_hops = pd.read_csv(path); save_update = False
zz = which_one_in(zs,path)
if 'new_mg_r' in total_hops.columns:
do_update=False
save_update=False
## check reverse column and update if needed
if do_update and 'rev_hop' not in total_hops.columns:
print('\nupdating reverse hops column...')
total_hops = total_hops.assign(rev_hop = np.zeros(len(total_hops)))
num_ions = len(total_hops.ion.unique())
for ii, ion in enumerate(total_hops.ion.unique()):
one_ion_hops = total_hops.loc[total_hops.ion == ion]
## account for non-hops
if len(one_ion_hops) > 1:
rev = list((one_ion_hops.new_cell[1:].values == one_ion_hops.old_cell[:-1].values))
rev.append(np.nan) ## last hop is unknown
else : rev = [np.nan]
total_hops.rev_hop.loc[total_hops.ion == ion] = rev
if ii % 25 == 0: print(f'{ii} of {num_ions} ions done')
save_update = True; print('... done and onwards.')
elif verbose: print('\nupdate complete: reverse hopping.')
## check residence time of previous hop for correlations
if do_update and 'old_resid_time' not in total_hops.columns :
print('updating old residence time column...')
total_hops = total_hops.assign(old_resid_time = np.zeros(len(total_hops)))
num_ions = len(total_hops.ion.unique())
for ii, ion in enumerate(total_hops.ion.unique()):
one_ion_hops = total_hops.loc[total_hops.ion == ion]
## account for non-hops
if len(one_ion_hops) > 1:
old = list(one_ion_hops.new_resid_time[:-1])
old.insert(0,one_ion_hops.time.iloc[0]) ## 1st hop unknown - and that is OK
else : old = [np.nan]
total_hops.old_resid_time.loc[total_hops.ion == ion] = old
if ii % 25 == 0: print(f'{ii} of {num_ions} ions done')
save_update = True; print('... done and onwards.')
elif verbose: print('update complete: old residence time.')
## add columns on the distance to nearest oxygen to all planes
while do_update and ('old_ox_r' not in total_hops.columns or 'new_ox_r' not in total_hops.columns) :
if oxygen_path is None or paths_path is None:
print('distances to Oi missing; add oxygen_path=... and paths_path=... to update')
break ## out of the while loop
elif oxygen_path == 'no interstitials' :
total_hops['new_ox_r'] = 100
total_hops['old_ox_r'] = 100
break
print('updating distances to oxygens...')
oi_sites = np.loadtxt(oxygen_path).astype(int)
paths = np.loadtxt(paths_path,delimiter=',').astype(int)
## add columns
total_hops['new_ox_r'] = total_hops.new_cell.apply(lambda x: min(paths[oi_sites,x]))
total_hops['old_ox_r'] = total_hops.old_cell.apply(lambda x: min(paths[oi_sites,x]))
## save & update
save_update = True; print('... done and onwards.')
if 'old_ox_r' in total_hops.columns and 'new_ox_r' in total_hops.columns and verbose :
if not save_update: print('update complete: distances to oxygens.')
## add the BR column here - if beta
if 'old_is_BR' not in total_hops.columns and do_update :
print('updating BR site column...')
total_hops['old_is_BR'] = total_hops.old_cell.isin(BR[zz])
## save & update
save_update = True; print('... done and onwards.')
elif verbose: print('update complete: BR sites (hop origin).')
if save_update: ## save all updates at once
print('saving updated hops...')
total_hops.to_csv(path,index=False)
## proceed to actually load stuff
all_residences = total_hops.groupby('new_cell').mean().new_resid_time
# new_cells = total_hops.new_cell.unique()
# old_cells = total_hops.old_cell.unique()
non_hops = total_hops.loc[total_hops.new_cell == total_hops.old_cell]
# empties = set(range(numpolys*numplanes))-set(new_cells)-set(old_cells)
empties = set(range(numpolys*numplanes))-set(list(all_residences.index))
non_hop_sites = list(non_hops.new_cell.unique())
## take out the placeholders for the ions that do not hop
## those were recorded as one hop with new_cell == old_cell
total_hops = total_hops.loc[total_hops.new_cell != total_hops.old_cell]
if verbose:
print('\n{} ions hopped {} times, {} ions stayed put'.format(
len(total_hops.ion.unique()),len(total_hops), len(non_hops)))
if 'old_is_BR' in total_hops.columns:
print('{} hops from BR sites, {} from aBR sites'.format(
len(total_hops.query('old_is_BR == True')),
len(total_hops.query('old_is_BR == False'))))
print('{} total onwards hops, {:.1f}% hops reversed'.format(
total_hops.rev_hop.loc[total_hops.rev_hop==False].size,
total_hops.rev_hop.loc[np.isnan(total_hops.rev_hop)==False].mean()*100.))
print(f'{numpolys*numplanes} sites, {len(empties)} remained empty')
else :
print('...plane loaded')
return total_hops, all_residences, non_hops, empties, non_hop_sites
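## hedged usage sketch (editor's addition, illustrative only, never called):
## 'hops_z_all.csv' is a hypothetical hop file with the expected columns.
def _demo_load_plane():
    hops, residences, non_hops, empties, non_hop_sites = load_plane(
        'hops_z_all.csv', numpolys=200, numplanes=1, verbose=True, do_update=False)
    return len(hops), len(empties)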
# =============================================================================
# %% combine planes after updating
# ## skipping the graphics for a moment
# =============================================================================
def combine_planes3(plane_paths, zs, numpolys=200, verbose=False):
combined_hops = pd.DataFrame()
## sort the inputs by z, while keeping them together
all_inputs = list(zip(plane_paths, zs))
all_inputs = sorted(all_inputs, key = lambda x: x[1])
if verbose: print(f'combining {len(all_inputs)} planes')
for i, (plane, z) in enumerate(all_inputs):
th, _, nh, _, _ = load_plane(plane, numpolys=numpolys, verbose=True)
these_hops = pd.concat([th,nh],ignore_index=True)
these_hops.new_cell += i * numpolys
these_hops.old_cell += i * numpolys
these_sites = set(these_hops.new_cell.unique()).union(set(these_hops.old_cell.unique()))
combined_hops = pd.concat([combined_hops, these_hops],ignore_index=True)
if verbose:
# print(f'ions at z={z:03d} :', these_hops.ion.unique())
print('sites from {} to {}, {} total\n'.format(min(these_sites),max(these_sites),len(these_sites)))
return combined_hops
# =============================================================================
# %% low-pass filter
# =============================================================================
def lopass(signal, cutoff, sampling_freq, order = 5):
nyquist = sampling_freq / 2
b, a = butter(order, cutoff/nyquist)
if not np.all(np.abs(np.roots(a)) < 1):
raise ValueError('Filter with cutoff {} unstable with '
'sampling frequency {}'.format(cutoff, sampling_freq))
filtered = filtfilt(b, a, signal, method='gust')
return filtered
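## hedged usage sketch (editor's addition, illustrative only, never called):
## keep a slow 2 Hz envelope of a 1 kHz-sampled trace, cutting off at 10 Hz.
def _demo_lopass():
    fs = 1000.
    t = np.arange(0, 1, 1./fs)
    signal = np.sin(2*np.pi*2*t) + 0.3*np.sin(2*np.pi*50*t)
    return lopass(signal, cutoff=10., sampling_freq=fs, order=5)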
# =============================================================================
# %% functions for traversing lists
# =============================================================================
def which_one_in(l, f):
"""
returns which one element of list l is in f, otherwise None
"""
included = [i for i in l if str(i) in f]
if len(included) == 1:
return included[0]
elif len(included) > 1:
return False
else:
return None
# =============================================================================
# %% simple exponential decay with x0 = 0, and baseline
# =============================================================================
def exp_decay(x, c0, tau, c1=0):
return c0 * np.exp(-x/tau) + c1
def exp_decay_cumsum(x, c0, tau, c1=0, c2=0):
return np.cumsum(c0 * np.exp(-x/tau) + c1)+c2
# =============================================================================
# %% double exponential decay with x0 = 0, and baseline
# =============================================================================
def two_exp_decay(x, c0, c1, tau0, tau1, y0=0):
return y0 + exp_decay(x, c0, tau0) + exp_decay(x, c1, tau1)
def two_exp_decay_cumsum(x, c0, c1, tau0, tau1, y0=0, y1=0):
return np.cumsum(y0 + exp_decay(x, c0, tau0) + exp_decay(x, c1, tau1)) + y1
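## hedged usage sketch (editor's addition, illustrative only, never called):
## recover (c0, tau, c1) of a synthetic single-exponential decay with
## scipy's curve_fit, imported above as cf.
def _demo_exp_decay_fit():
    x = np.linspace(0, 50, 200)
    y = exp_decay(x, c0=1.0, tau=8.0, c1=0.05)
    popt, pcov = cf(exp_decay, x, y, p0=[0.5, 5.0, 0.0])
    return popt  ## ~ [1.0, 8.0, 0.05]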
# =============================================================================
# %% KWW stretched exponential decay with x0 = 0, and baseline
# =============================================================================
def kww_decay(x, c0, tau, beta=1., c1=0):
return c0 * np.exp(-(x/float(tau))**float(beta)) + c1
# =============================================================================
# %% an erf rise wrapper for fitting functions
# =============================================================================
def rising_exp_decay(x, c0, tau, c1=0, x0=0, rt=np.inf):
return exp_decay(x,c0,tau,c1) * erf(rt*(x-x0))
def rising_exp_decay_cumsum(x, c0, tau, c1=0, c2=0, x0=0, rt=np.inf):
return np.cumsum(exp_decay(x,c0,tau,c1) * erf(rt*(x-x0))) +c2
def rising_two_exp_decay(x, c0, c1, tau0, tau1, y0=0, x0=0, rt=np.inf):
return two_exp_decay(x, c0, c1, tau0, tau1, y0) * erf(rt*(x-x0))
def rising_two_exp_decay_cumsum(x, c0, c1, tau0, tau1, y0=0, y1=0, x0=0, rt=np.inf):
return np.cumsum(two_exp_decay(x, c0, c1, tau0, tau1, y0) * erf(rt*(x-x0))) + y1
# =============================================================================
# %% KWW stretched exponential decay with x0 = 0, and baseline, plus:
# ## tail stretch turns on at x=tstar, returns simple exponential for x<tstar
# =============================================================================
def kww_decay_break(x, c0, tau, tstar=0, beta=1., c1=0):
simple = exp_decay(x[x<tstar],c0,tau,c1)
c02 = exp_decay(tstar, c0, tau, 0)/kww_decay(tstar, 1., tau, beta, 0)
stretched = kww_decay(x[x>=tstar], c02, tau, beta, c1)
# print x[x<tstar], simple
# print x[x>=tstar]-tstar, stretched
# return np.array( [(simple[i], stretched[i])[x[i]>tstar] for i in range(len(x))] )
return np.concatenate((simple, stretched), axis=None)
def kww_decay_cumsum(x, c0, tau, tstar=0, beta=1., c1=0, c2=0):
simple = exp_decay(x[x<tstar],c0,tau,c1)
c02 = exp_decay(tstar, c0, tau, 0)/kww_decay(tstar, 1., tau, beta, 0)
stretched = kww_decay(x[x>=tstar], c02, tau, beta, c1)
# print x[x<tstar], simple
# print x[x>=tstar]-tstar, stretched
# return np.array( [(simple[i], stretched[i])[x[i]>tstar] for i in range(len(x))] )
return np.cumsum(np.concatenate((simple, stretched), axis=None))+c2
# =============================================================================
# %% Mittag-Leffler function and a wrapper to fit to a lifetime with it
# =============================================================================
def mittag_leffler(x,a,b,terms):
'''
Computes the Mittag-Leffler function:
E_a,b(x) = sum_0^terms x**term / gamma(a*term+b)
This is typically called with x = -(t/tau)**a, hence the wrapper below.
Convergence is limited to when the function is not too small and the value
of the argument x is not too large. 100-200 terms typically gives precision
that is good enough for any practical application including fitting. This
translates to a reliable range of values for the ML function from around
1 (argument near zero) to 0.01 at best. If you think you need to compute
the ML function when it is small (e.g. 1e-3), most likely an approximation
will work just as fine.
Sokolov & Klafter in "First Steps in Random Walks" set b=1.
The Havriliak-Negami relaxation's FT is close to the ML function, but not
exact; Cole-Cole relaxation has b=1 - but still other terms in front too.
Parameters
----------
x : array or number
The argument of the function.
a : numerical
Typical first parameter of the Mittag-Leffler function.
b : numerical
        Typical second parameter of the Mittag-Leffler function.
terms : int
The number of terms to compute for the series. Exact is infinity.
100-200 is typically sufficient, using a number higher than that may
hamper convergence.
Returns
-------
output
Same dimensionality as x.
'''
return np.sum(np.array([x**k/gamma(k*a+b) for k in range(terms)]), axis=0)
def mittag_leffler_wrapper(x, tau, a, b, terms):
return mittag_leffler(-(x/tau)**a, a, b, terms)
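## hedged sanity-check sketch (editor's addition, illustrative only, never called):
## for a = b = 1 the Mittag-Leffler function reduces to a simple exponential,
## which is a quick way to verify convergence of the truncated series.
def _demo_mittag_leffler():
    t = np.linspace(0.1, 5, 50)
    ml = mittag_leffler_wrapper(t, tau=1.0, a=1.0, b=1.0, terms=150)
    return np.allclose(ml, np.exp(-t), atol=1e-6)  ## -> True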
# =============================================================================
# %% make freud Voronoi & NearestNeighbors objects from a grid of points (x,y)
# ## this method assumes that the box is square
# ## This is copied from crystal_utils on 2020/07/23
# =============================================================================
def make_voronoi(grid_path, L=1., z=0, debug=False):
'''input: grid_path is the (relative) path to the file with grid points'''
pts = pd.read_csv(grid_path)
gridpts = np.array([pts.x, pts.y]).T
print(f'read {len(gridpts)} grid points')
## help_vor is the inverse of unit cells.
## Vertices are maxima of mobile ion probability density
help_vor = Voronoi(gridpts)
site_pts = []
for i in range(len(help_vor.vertices[:, 0])):
if in_bounds(help_vor.vertices[i, :], np.array([0, 0]), np.array([L, L])):
site_pts.append((help_vor.vertices[i, 0] - 0.5 * L, help_vor.vertices[i, 1] - 0.5 * L, 0))
## remove duplicates around the edges of the box if needed,
## this is using brute force
to_remove = [];
thresh = L * 1e-4
new_site_pts = [];
for i, pt1 in enumerate(site_pts):
if i in to_remove: continue
for j, pt2 in enumerate(site_pts[i + 1:]):
if L - abs(pt1[0] - pt2[0]) < thresh and abs(pt1[1] - pt2[1]) < thresh:
# print pt1, pt2, i, j+i+1
to_remove.append(j + i + 1)
elif L - abs(pt1[1] - pt2[1]) < thresh and abs(pt1[0] - pt2[0]) < thresh:
# print pt1, pt2, i, j+i+1
to_remove.append(j + i + 1)
new_site_pts.append(pt1)
print(f'{len(site_pts)} points in bounds, removing {len(to_remove)} of them')
site_pts = np.asarray(new_site_pts)
box = freud.box.Box.square(L)
site_pts = box.wrap(site_pts)
site_vor = freud.locality.Voronoi(box, 0.5 * L)
site_vor.compute(system=(box, site_pts))
numpolys = len(site_vor.polytopes)
if debug: draw_voronoi(box, site_pts, site_vor.polytopes, draw_points=True,
cell_numbers=range(len(site_vor.polytopes)))
## points at centers of sites, approx. corresponding to oxygen locations
help_verts = site_vor.polytopes
these_sites = []
for s in help_verts:
for sh in s:
these_sites.append([sh[0], sh[1], 0]) ## append avg z given that z was previously lost
help_verts = np.asarray(these_sites)
## initialize nearest-neighbor object
# nn = freud.locality.NearestNeighbors(0.2, 4, 1.05)
return box, site_vor, site_pts, numpolys, help_verts
# =============================================================================
# %% TODO: plot multiple Voronoi lattices from a multi-plane file with hops
# =============================================================================
def draw_lattices():
return False
# =============================================================================
# %% count and output re-fill times for a single or composite plane
# =============================================================================
def count_fill_times(plane_data, numpolys):
## sort by time for calculating fill times below
plane_data.sort_values(by='time',axis=0,inplace=True,ascending=True)
plane_data.reset_index(inplace=True)
plane_data.drop(['index'],axis=1,inplace=True)
#numsites = len(total_hops.new_cell.unique()) ## use numpolys instead
fill_sites = np.zeros((numpolys,2)) ## keep track of which ion was here last
fill_times = list() ## keep track of which site gets which time to tell BR/a-BR apart
for i, r in plane_data.iterrows():
## count the non-hop with its (long) residence time
if r.old_cell == r.new_cell :
fill_times.append(np.array([r.new_resid_time,int(r.old_cell),np.nan]))
continue
## restart old site's counter. Even if not onwards hop, new ion could fill
## the site before the original returns
fill_sites[int(r.old_cell),:] = np.array([r.time, int(r.ion)])
## append new site's fill time with now minus last hop out
if np.array_equal(fill_sites[int(r.new_cell),:], np.array([0,0])) : ## the first hop into new site
## count the fill time of the initial site starting at zero
fill_times.append(np.array([r.time,int(r.old_cell),np.nan]))
else :
## flag back-hops / re-fills immediately
## 3rd column is True for re-fills
fill_times.append([r.time-fill_sites[int(r.new_cell),0],int(r.new_cell),r.ion==fill_sites[int(r.new_cell),1]])
## more conditions to go here
## TODO: add the fill time from last hop into new site to end of simulation
if (r.rev_hop != 1.) & (r.rev_hop != 0.):
fill_times.append([r.new_resid_time,int(r.new_cell),np.nan])
if not i % int(len(plane_data)/20) : print(f'{100*i/len(plane_data):.0f}% done')
fill_times = pd.DataFrame(data=fill_times, columns=['time','site','refill'])
fill_times.time = np.round(fill_times.time,3)
fill_times.site = fill_times.site.astype(int)
return fill_times
# =============================================================================
# %% calculate occupancy of high-energy site based on T and fixed stoichiometry
# ## assuming a two-state system with each level having Nsite sites
# ## assuming Nion total ions, of which Nexc are excited to the higher level
# =============================================================================
def two_state_occup(Nion, Nsite, energy, T):
''' energy [eV], T [K] '''
lb=max(0,Nion-Nsite)
ub=Nion/2.
f = lambda Nexc : (Nsite-Nexc)*(Nion-Nexc) - np.exp(energy/kb/T)*(Nsite-(Nion-Nexc))*Nexc
sol = root_scalar(f, bracket=[lb,ub], method='brentq')
return sol.root
# =============================================================================
# %% calculate energy from occupancy
# ## assuming Nion total ions, of which Nexc are excited to the higher level
# ## and 2 levels with Nsite sites each; energy is in eV based on kb
# =============================================================================
def two_state_energy(Nion, Nsite, Nexc, T, lb=0., ub = 1.):
''' bounds [eV], T [K] '''
assert (Nexc <= Nion / 2.) or (lb < 0.), 'N excited > 50%, T > inf @ energy > 0'
assert Nexc > 0., 'N excited should probably be > 0, or else T < 0'
f = lambda energy : (Nsite-Nexc)*(Nion-Nexc) - np.exp(energy/kb/T)*(Nsite-(Nion-Nexc))*Nexc
sol = root_scalar(f, bracket=[lb,ub], method='brentq')
return sol.root
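## hedged consistency sketch (editor's addition, illustrative only, never called):
## the occupancy solved for a given energy feeds back through two_state_energy
## to (numerically) the same energy.
def _demo_two_state_roundtrip():
    Nion, Nsite, T = 100, 120, 600.
    Nexc = two_state_occup(Nion, Nsite, energy=0.15, T=T)
    return two_state_energy(Nion, Nsite, Nexc, T)  ## ~ 0.15 eV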
# =============================================================================
# %% calculate occupancy from energy for a 2-state model w/ distinct #s o/sites
# ## (i.e. degeneracies of the levels 'g' for ground and 'e' for excited)
# ## assuming levels with degeneracy Ns_g and Ns_e, and N_i total ions
# =============================================================================
def two_state_nondeg_occupancy(Ns_g, Ns_e, Nion, energy, T):
''' energy [eV], T [K] '''
assert Nion < Ns_g+Ns_e, 'too many ions for {} total sites: {}'.format(Ns_g+Ns_e, Nion)
lb=max(0,Nion-Ns_g) ## minimum number of ions in excited level
ub=float(Ns_e)/(Ns_e+Ns_g) ## toward inf T, all levels have same occupancy
f = lambda Nexc : (Nion-Nexc)*(Ns_e - Nexc) - np.exp(energy/kb/T)*Nexc*(Ns_g-(Nion-Nexc))
sol = root_scalar(f, bracket=[lb,ub], method='brentq')
return sol.root
# =============================================================================
# %% calculate energy from occupancy for a 2-state model w/ distinct #s of sites
# ## (i.e. degeneracies of the levels 'g' for ground and 'e' for excited)
# ## assuming levels with degeneracy Ns_g and Ns_e, and N_i total ions
# =============================================================================
def two_state_nondeg_energy(Ns_g, Ns_e, Ni_g, Ni_e, T, lb=0., ub = 5.):
''' bounds [eV], T [K] '''
assert 0 < Ni_g < Ns_g, 'weird ground state: {:.2f} in {}'.format(Ni_g, Ns_g) ## strict <
assert 0 < Ni_e < Ns_e, 'weird excited state: {:.2f} in {}'.format(Ni_e, Ns_e) ## strict
f_g = float(Ni_g) / float(Ns_g) ## fraction of filled ground-state sites
f_e = float(Ni_e) / float(Ns_e) ## fraction of filled excited-state sites
# f = lambda energy : f_g*(1.-f_e)*Ns_g*Ns_e - np.exp(energy/kb/T)*f_e*(1.-f_g)*Ns_g*Ns_e
f = lambda energy : f_g*(1.-f_e) - np.exp(energy/kb/T)*f_e*(1.-f_g)
sol = root_scalar(f, bracket=[lb,ub], method='brentq')
return sol.root
# =============================================================================
# %% multi-method wrapper for calculating expectation values over distributions
# ## assumes that the 'dist' passed on is already a Series, not DataFrame
# =============================================================================
def expectation_multi_method(dist, method, aggregated=False, **kwargs):
if method == 'mean':
return dist.mean(), dist.std()
else:
if not aggregated :
## make pdf & cdf
freqs = dist.groupby(dist).agg('count').pipe(pd.DataFrame).rename(columns = {dist.name: 'frequency'})
freqs['pdf'] = freqs['frequency'] / sum(freqs.frequency)
freqs['cdf'] = freqs.pdf.cumsum()
# print(freqs.head())
## create PDF from a distribution
times = freqs.loc[(freqs.index.values > 1.) & (freqs.index.values < 500)].index.values
pdf = freqs.loc[(freqs.index.values > 1.) & (freqs.index.values < 500)].pdf.values
pdf_ub = freqs.pdf.min()
else:
times = dist.index.values
pdf = dist.values.reshape(-1)
pdf_ub = pdf.min()/100 if pdf.min() > 0 else 1e-8
## fit simple exponential time to PDF. Format: [pre-exponent, tau, constant offset]
ub = [1000., 1e5, pdf_ub] ## hard limit: 1e-7
lb = [1e-4, 1e-3, 0]
p0 = [1e-2,3,1e-15]
try:
popt, pcov = cf(exp_decay,times,pdf, p0=p0, bounds = (lb, ub))
perr = np.sqrt(np.diag(pcov))
        except ValueError:
            print('fitting one exponential did not work, trying a faster decay')
            # popt, pcov = cf(exp_decay,times,pdf, p0=[1e-2,0.5,1e-10], bounds = (lb, ub))
            # perr = np.sqrt(np.diag(pcov))
            popt = p0
            perr = np.full(len(p0), np.nan)  ## keep perr defined when the fit fails
if method == 'simple' :
if 'verbose' in kwargs.keys() and kwargs['verbose'] : return popt, perr
else : return popt[1], perr[1]
else:
## fit stretch tail with a break, p0 = [c0, tau, tstar, beta=1, c1=0]
ub = [1000., popt[1]*100., 2000., 1, pdf_ub] ## hard limit: 1e-7
lb = [1e-4, popt[1]*0.1, 0.1, 0, 0]
p0=[1e-2,popt[1],5,0.9,1e-15]
# print('lb:', lb)
# print('p0:', p0)
# print('ub:', ub)
popt, pcov = cf(kww_decay_break,times,pdf,
p0=p0, bounds = (lb, ub),
max_nfev=1e4)
perr = np.sqrt(np.diag(pcov))
if 'verbose' in kwargs.keys() and kwargs['verbose'] : return popt, perr
else : return popt[1], perr[1]
# =============================================================================
# %% TODO: functions to query z
# =============================================================================
# =============================================================================
# %% correlation factor
# =============================================================================
def avg_cos_hop(rev_hops):
''' honeycomb lattice <cos theta> '''
return (-1. * len(rev_hops[rev_hops==True]) + 0.5 * len(rev_hops[rev_hops==False]))/len(rev_hops)
def corr_factor(rev_hops):
cos_theta = avg_cos_hop(rev_hops)
return (1.+cos_theta)/(1.-cos_theta)
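## hedged usage sketch (editor's addition, illustrative only, never called):
## with half of all hops reversed, <cos theta> = -0.25 and f = 0.6.
def _demo_corr_factor():
    rev_hops = pd.Series([True, False] * 50)
    return avg_cos_hop(rev_hops), corr_factor(rev_hops)  ## -> (-0.25, 0.6)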
# =============================================================================
# %% parse LAMMPS output with multiple RDFs
# ## file structure: 2 lines of comments, then each step with number-of-rows,
# ## then that many rows: center of bin, then rdf, then coordination
# ## TODO: merge this with the standalone parse_rdf file
# =============================================================================
def parse_rdf(filepath):
bins = list()
steps = list()
first_rdf = list()
with open(filepath, 'r') as fp:
## read the first three sets of comments
line = fp.readline() #; print line[:-1]
line = fp.readline() #; print line[:-1]
line = fp.readline() #; print line[:-1]
## read first line with first time step and number-of-rows
## take off the endline character and split
line = fp.readline()
arr = line[:-1].split(' ')
steps.append(arr[0])
numrows = s2n(arr[1])
## get first set of bins
for i in range(numrows):
line= fp.readline()
arr = line[:-1].split(' ')
bins.append(s2n(arr[1]))
first_rdf.append(s2n(arr[2]))
## skip cdf / coordination
## check
# print len(bins), len(first_rdf)
## make a pandas dataframe
dfdict = {'bins':np.array(bins), '{}'.format(steps[-1]):first_rdf}
df = pd.DataFrame(data = dfdict)
## read next time step
line = fp.readline()
## loop through all other sets
while(line) :
## parse line with new time step
arr = line[:-1].split(' ')
steps.append(arr[0])
numrows = s2n(arr[1])
rdf = list()
bins = list()
for i in range(numrows):
line= fp.readline()
arr = line[:-1].split(' ')
bins.append(s2n(arr[1]))
rdf.append(s2n(arr[2]))
## skip cdf / coordination
df['{}'.format(steps[-1])] = np.array(rdf)
## check
# if int(steps[-1]) % 1000 == 0:
# print 'done {} ps'.format(int(steps[-1])/1000)
## read next time step
line = fp.readline()
return df.set_index('bins')
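## hedged usage sketch (editor's addition, illustrative only, never called):
## 'rdf.out' is a hypothetical LAMMPS RDF dump; columns of the parsed frame
## are time steps, indexed by bin center, and can be smoothed with running_mean.
def _demo_parse_rdf():
    rdf_frames = parse_rdf('rdf.out')
    last_frame = rdf_frames[rdf_frames.columns[-1]]
    return running_mean(last_frame.values, 5)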
# =============================================================================
# %% parse non-gaussian parameter output of LAMMPS
# ## TODO: update this as it is deprecated.
# =============================================================================
def parse_a2(fpath):
times = list(); r2s = list(); r4s = list(); a2s = list()
# df = pd.DataFrame(columns=['r2','r4','a2'])
with open(fpath,'r') as a2in:
for i in range(3) : a2in.readline() ## skip header
stepline = a2in.readline()
while stepline:
times.append(int(stepline[:-1].split(' ')[0])/1000.)
r2s.append(float(a2in.readline()[:-1].split(' ')[1]))
r4s.append(float(a2in.readline()[:-1].split(' ')[1]))
a2s.append(float(a2in.readline()[:-1].split(' ')[1]))
# df.iloc[int(step)] = {'r2':r2, 'r4':r4, 'a2':a2}
stepline = a2in.readline()
return pd.DataFrame({'time':times, 'r2':r2s, 'r4':r4s, 'a2':a2s}).set_index('time')
# =============================================================================
# %% calculate occupancies of sites properly
# ## counting method: total old time + final time
# =============================================================================
def site_occupancies(data2, sites, BR_sites, total_time=10000., plane_label=None,
r='some r',verbose=False):
'''
data2: pandas df with columns: time, old_resid_time, new_resid_time, old_is_BR
sites: (sub)set of sites for which to calculate occupancies
BR_sites: which sites are BR
verbose: boolean flag for printing debug statements
plane_label: has fields m, T1 if given;
r: optional for debugging'''
if plane_label is None : m='M'; T1 = 0; ph = 'beta'
else: m = plane_label.metal; T1 = plane_label.T1; ph = plane_label.phase
## columns with distances : new and old
new_r_col = 'new_ox_r' if ph == 'beta' else 'new_mg_count'
old_r_col = 'old_ox_r' if ph == 'beta' else 'old_mg_count'
## downselect plane & sites
data = data2.query(f'{new_r_col} == @r') ## ignores empties, catches non-hops
data_new = data2.query(f'{new_r_col} == @r & new_cell != old_cell') ## ignores empties, catches non-hops
data_old = data2.query(f'{old_r_col} == @r & old_cell != new_cell') ## ignores empties
data_non = data2.query(f'{old_r_col} == @r & {new_r_col} == @r & old_cell == new_cell')
## count sites
old_sites = set(data.old_cell.unique())
new_sites = set(data.new_cell.unique())
# ions = len(data.query('new_cell != old_cell').ion.unique()) ## cosmetic only
## subdivide data manually
data_new_aBR = data_new[~data_new.new_cell.isin(BR_sites)] ## query overcounts non-hops
data_new_BR = data_new[ data_new.new_cell.isin(BR_sites)]
data_old_BR = data_old[ data_old.old_cell.isin(BR_sites)]
data_old_aBR = data_old[~data_old.old_cell.isin(BR_sites)]
## count empty sites: the series for their zero occupancy will be created later
empties = sites - old_sites - new_sites
empties_BR = empties.intersection(BR_sites)
empties_aBR = empties - empties_BR
if verbose: print(f'\n{m} {T1}K r={r}: {len(data_non)} non-hops, {len(empties)} empties')
## non-hops in time - modified 2020/08/01
# time_non_BR = data_non.query('old_is_BR == True ').groupby('new_cell').new_resid_time.sum()
# time_non_aBR = data_non.query('old_is_BR == False').groupby('new_cell').new_resid_time.sum()
time_non_BR = data_non[ data_non.old_cell.isin(BR_sites)].groupby('new_cell').new_resid_time.sum()
time_non_aBR = data_non[~data_non.old_cell.isin(BR_sites)].groupby('new_cell').new_resid_time.sum()
old_BR_time = data_old_BR.groupby('old_cell').old_resid_time.sum()
old_aBR_time = data_old_aBR.groupby('old_cell').old_resid_time.sum()
## adjust for the final time at a final site. Only one sum() b/c each should be only one hop
final_times_BR = data_new_BR.query('rev_hop != True & rev_hop != False ').groupby('new_cell').new_resid_time.sum()
final_times_aBR = data_new_aBR.query('rev_hop != True & rev_hop != False ').groupby('new_cell').new_resid_time.sum()
## add site-specific values using pandas combine, check lengths.
csum = lambda s1, s2 : s1 + s2 ## need a function that takes two series to pass to df.combine()
total_BR = old_BR_time.combine(final_times_BR, csum, fill_value=0)
total_aBR = old_aBR_time.combine(final_times_aBR,csum, fill_value=0)
## add non-hops with pandas append
if not time_non_BR.empty : total_BR = total_BR.append(time_non_BR)
if not time_non_aBR.empty: total_aBR = total_aBR.append(time_non_aBR)
## create series of zeros for empties and append to the main
if empties_BR: total_BR = total_BR.append(pd.Series(data=0,index=empties_BR))
if empties_aBR: total_aBR = total_aBR.append(pd.Series(data=0, index=empties_aBR))
## check lengths and bounds
if verbose:
print(' BR: {} sites, max={:.2f}, min={:.2f}'.format(len(total_BR), total_BR.max(), total_BR.min()))
print('aBR: {} sites, max={:.2f}, min={:.2f}'.format(len(total_aBR), total_aBR.max(), total_aBR.min()))
## add the radius to make it look like it was just done with groupby calls
total_BR = pd.DataFrame({'total':total_BR/total_time, new_r_col:r, 'site':'BR'})
total_aBR = pd.DataFrame({'total':total_aBR/total_time, new_r_col:r, 'site':'aBR'})
return total_BR, total_aBR
# =============================================================================
# %% calculate multi-state (10-state) energies from occupancies
# =============================================================================
def multi_state_energies(gro, T1, lb=-2., ub=2.):
    ''' gro: list of tuples (degeneracy g, distance r, occupancy o, site label) in order
T1: temperature [K] '''
gro = sorted(gro, key = lambda x: abs(x[2]-0.5)) ## put closest-to-half-full 1st
es = [[] for x in gro]
es[0].append(0.) ## make a convention
rs = []
ab = []
for i, s1 in enumerate(gro):
rs.append(s1[1])
try: ab.append(s1[3])
except: pass
for j, s2 in enumerate(gro): ## no need to double up; use 1 as ground & 2 as exc
if j <= i : continue
E21 = two_state_nondeg_energy(s1[0], s2[0], s1[0]*s1[2], s2[0]*s2[2], T1, lb, ub)
es[j].append(E21+es[i][-1])
# print i, j, s1[1], s2[1], np.round(E21,3)
es = [np.mean(x) for x in es]
return pd.DataFrame( {'rs':rs, 'es':es, 'site':ab})
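## hedged usage sketch (editor's addition, illustrative only, never called):
## three site groups given as (degeneracy, distance, occupancy, site label);
## energies come out in eV relative to the group closest to half filling.
def _demo_multi_state_energies():
    gro = [(40, 1, 0.55, 'aBR'), (60, 2, 0.30, 'BR'), (100, 3, 0.10, 'BR')]
    return multi_state_energies(gro, T1=300.)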
# =============================================================================
# %% calculate multi-state (10-state) energies from occupancies
# ## while keeping site labels intact and attached
# =============================================================================
def multi_site_es(gro, T1, lb=-2., ub=2.):
''' gro: pandas df with columns: degeneracy g, distance r, occupancy o,
site kind 'site', and indexed by site number.
T1: temperature [K] '''
gro['o2'] = np.abs(gro.o - 0.5)
gro.sort_values(by='o2',inplace=True) ## put closest-to-half-full 1st
gro.drop(columns='o2',inplace=True)
gro.reset_index(inplace=True)
gro.rename(columns={'index':'cell'},inplace=True)
es = dict([(gro.iloc[x].cell,[]) for x in range(len(gro))])
es[gro.iloc[0].cell].append(0.) ## make a convention / starting point
try:
for row1 in gro.itertuples():
for row2 in gro.itertuples(): ## no need to double up; use row1 as ground & row2 as excited
if row2.Index <= row1.Index : continue
E21 = two_state_nondeg_energy(row1.g, row2.g, row1.o*row1.g, row2.g*row2.o, T1, lb, ub)
es[row2.cell].append(E21+es[row1.cell][-1])
except:
print(row2.cell)
es = dict([(k,np.mean(v)) for (k,v) in list(es.items())])
gro['e'] = gro.cell.map(es)
return gro
# =============================================================================
# %% load planes using the 2020/07/26 framework: return a big dataframe
# ## this requires the input plane to have been a row in big all_planes database
# =============================================================================
def load_plane_with_atoms(plane, frac=False, do_fill_times=False):
print(f'\nplane: {plane.hop_path}')
## extract all the metadata
mm = plane.metal
TK = int(plane.T1)
zz = plane.z
st = plane.stoich
ex = plane.exclude
ph = plane.phase
tt = plane.total_time
cn = plane.config ## config is st + ex together
## TODO: load everything that is auxiliary using the lammps file:
## mobile ion sites: site_pts, locations of defects, BR sites
## Load the lammps file
_, _, cell, all_atoms = cu.read_lmp(plane.lammps_path, fractional=frac)
conduction_planes = cu.get_conduction_planes(all_atoms, mm, inexact=False if ph == 'beta' else True)
planes_by_name = dict(zip([cu.standard_plane_name(z if frac else z/cell[2,2]) for z in conduction_planes], conduction_planes))
## separate oxygen defects: remove the max() of atom types that are oxygen
## and store the oxygens separately
if ph == 'beta' :
if len(all_atoms.query('atom == "O"').type.unique() ) > 1:
type_ointer = all_atoms.query('atom == "O"').type.max()
ointers = all_atoms.query(f'type == {type_ointer}')
atoms = all_atoms.query(f'type != {type_ointer}')
print(f'found {len(ointers)} defects')
else :
ointers = pd.DataFrame()
atoms = all_atoms
else :
## doubleprime: load distances to Mg instead
ointers = pd.DataFrame()
atoms = all_atoms
## Mg defects should be checked for both beta and beta"
mgs = atoms.query('atom == "Mg"')
mgs = atoms.query('type == 4 & atom != "O"')
## count the planes
## count the mobile-ion sites
num_planes = len(conduction_planes) if zz == 'z_all' else int(plane.num_planes)
dz = np.mean(np.diff(conduction_planes))
## Initialize variables for advanced data
all_sites_by_r = list()
edge_matrices = list()
all_BR_sites = list()
defect_coords = list()
## compute 2D matrix of from-to distances to defects
if num_planes == 1 and ph == 'beta':
all_site_pts = cu.get_mobile_ion_sites(atoms, planes_by_name[zz], cell if not frac else np.eye(3))
_, edges, _ = cu.get_mid_oxygen_sites_freud(all_site_pts, cell, viz=False)
num_polys = len(all_site_pts)
## auto-get BR sites - this just yields a list of false for beta-doubleprime
site_types = cu.auto_get_BR_sites(atoms, cell, all_site_pts, atoms_are_frac=frac)
all_BR_sites = [i for i in range(len(site_types)) if site_types[i] == 'BR']
print(f'{len(all_BR_sites)} BR sites')
## make graph path lengths
nxg = nx.from_edgelist(edges)
paths = cu.path_lengths(nxg)
## calculate distances to defects
if len(ointers) > 0 :
## get all sites and edges
oi_defects = ointers.query(f'z == {planes_by_name[zz]}')
oi_defect_pts = oi_defects[['x','y','z']].values
oi_adjacent_sites, _ = cu.get_defect_adjacent_sites(cell, all_site_pts, oi_defect_pts)
defect_coords = oi_defects
# print(f'sites next to O_i in plane {zz}: ', oi_adjacent_sites)
# calculate paths to interstitial-adjacent mobile-ion sites
paths_to_oi = [int(min(paths[oi_adjacent_sites,x])) for x in range(num_polys)]
max_r = max(paths_to_oi)
## bin sites by distance
for r in range(max_r+1):
all_sites_by_r.append(np.argwhere(np.array(paths_to_oi) == r).T[0])
## store paths to oxygens as a matrix
edge_matrix = np.zeros((max_r+1, max_r+1))
for edge in np.argwhere(paths == 1) :
edge_matrix[paths_to_oi[edge[0]], paths_to_oi[edge[1]]] += 0.5 if edge[0] != edge[1] else 1
## if there are Mg defects instead of Oi, then count distances to them
elif len(mgs) > 0 :
## create a placeholder for the coordinates of the created defects
defect_pts = mgs[['x', 'y', 'z']].values
## find the Mg closest to each mobile-ion site in this plane
e0, e1, d0, d1 = cu.get_nearest_points(all_site_pts, defect_pts, cell, num_nn=6)
e0 = np.array(e0)[np.array(d0)<dz]
e1 = np.array(e1)[np.array(d1)<dz]
## indices of mobile-ion sites
s0 = [x[1] for x in e0]
s1 = [x[1] for x in e1]
## Mg locations
mg0 = [x[0] for x in e0]
mg1 = [x[0] for x in e1]
defect_coords = mgs.iloc[list(set(mg0+mg1))]
# it will be more tidy for combining distances later to keep placeholder arrays
if len(s0) > 0: paths_to_mg_0 = [min(paths[s0, x]) for x in range(len(all_site_pts))]
else: paths_to_mg_0 = np.ones(len(all_site_pts))*len(all_site_pts)
if len(s1) > 0: paths_to_mg_1 = [min(paths[s1, x])+1 for x in range(len(all_site_pts))]
else: paths_to_mg_1 = np.ones(len(all_site_pts))*len(all_site_pts)
# combine path lengths to distance==1 and distance==0 sites using min()
paths_to_oi = [int(min(paths_to_mg_0[i], paths_to_mg_1[i])) for i in range(len(all_site_pts))]
max_r = max(paths_to_oi)
## bin sites by distance
for r in range(max_r+1):
all_sites_by_r.append(np.argwhere(np.array(paths_to_oi) == r).T[0])
## store paths to oxygens as a matrix
edge_matrix = np.zeros((max_r+1, max_r+1))
for edge in np.argwhere(paths == 1) :
edge_matrix[paths_to_oi[edge[0]], paths_to_oi[edge[1]]] += 0.5 if edge[0] != edge[1] else 1
# if there are no oxygens and no Mg, then make very long paths
else :
print(f'(whether intentional or not, no O_i in plane {zz})')
paths_to_oi = np.ones(num_polys).astype(int)*100
## TODO: make placeholders for sites_by_r and edge_distances for defect-free
# max_r = max(paths_to_oi)
## this needs to be broken down by plane for composite planes
elif ph == 'beta':
num_polys = 0
max_r = 0
all_paths_to_oi = list()
all_site_pts = list()
paths_list = list()
for c, p in enumerate(conduction_planes) :
## get all sites and edges
site_pts = cu.get_mobile_ion_sites(atoms, p, cell if not frac else np.eye(3))
_, edges, _ = cu.get_mid_oxygen_sites_freud(site_pts, cell, viz=False)
num_polys += len(site_pts)
all_site_pts.append(site_pts)
## auto-get BR sites - this just yields a list of false for beta-doubleprime
site_types = cu.auto_get_BR_sites(atoms, cell, site_pts, atoms_are_frac=frac)
all_BR_sites.append([s+len(site_pts)*c for s in range(len(site_types)) if site_types[s] == 'BR'])
## make graph path lengths
nxg = nx.from_edgelist(edges)
paths = cu.path_lengths(nxg)
paths_list.append(paths)
# print(f'found paths for plane {p:.4f}')
## calculate distances to defects
if len(ointers) > 0 :
## get all sites and edges
oi_defects = ointers.query(f'z == {p}')
oi_defect_pts = oi_defects[['x','y','z']].values
oi_adjacent_sites, _ = cu.get_defect_adjacent_sites(cell, site_pts, oi_defect_pts)
defect_coords.append(oi_defects)
# print(f'sites next to O_i in plane {p:.4f}: ', oi_adjacent_sites)
# calculate paths to interstitial-adjacent mobile-ion sites
paths_to_oi = [int(min(paths[oi_adjacent_sites,x])) for x in range(len(site_pts))]
all_paths_to_oi.append(paths_to_oi)
max_r = max(max_r, max(paths_to_oi))
# print(f'farthest distance: {max_r}')
## store paths to oxygens as a matrix
edge_matrix = np.zeros((max_r+1, max_r+1))
for edge in np.argwhere(paths == 1) :
edge_matrix[paths_to_oi[edge[0]], paths_to_oi[edge[1]]] += 0.5 if edge[0] != edge[1] else 1
edge_matrices.append(edge_matrix)
# print('added a matrix of edges')
## if there are Mg defects instead of Oi, then count distances to them
elif len(mgs) > 0 :
## create a placeholder for the coordinates of the created defects
defect_pts = mgs[['x', 'y', 'z']].values
## find the Mg closest to each mobile-ion site in this plane
e0, e1, d0, d1 = cu.get_nearest_points(site_pts, defect_pts, cell, num_nn=6)
e0 = np.array(e0)[np.array(d0)<dz]
e1 = np.array(e1)[np.array(d1)<dz]
## indices of mobile-ion sites
s0 = [x[1] for x in e0]
s1 = [x[1] for x in e1]
## Mg locations
mg0 = [x[0] for x in e0]
mg1 = [x[0] for x in e1]
defect_coords.append(mgs.iloc[list(set(mg0+mg1))])
# it will be more tidy for combining distances later to keep placeholder arrays
if len(s0) > 0: paths_to_mg_0 = [min(paths[s0, x]) for x in range(len(site_pts))]
else: paths_to_mg_0 = np.ones(len(site_pts))*len(site_pts)
if len(s1) > 0: paths_to_mg_1 = [min(paths[s1, x])+1 for x in range(len(site_pts))]
else: paths_to_mg_1 = np.ones(len(site_pts))*len(site_pts)
# combine path lengths to distance==1 and distance==0 sites using min()
paths_to_oi = [int(min(paths_to_mg_0[i], paths_to_mg_1[i])) for i in range(len(site_pts))]
all_paths_to_oi.append(paths_to_oi)
max_r = max(max_r, max(paths_to_oi))
# print(f'farthest distance: {max_r}')
## store paths to oxygens as a matrix
edge_matrix = np.zeros((max_r+1, max_r+1))
for edge in np.argwhere(paths == 1) :
edge_matrix[paths_to_oi[edge[0]], paths_to_oi[edge[1]]] += 0.5 if edge[0] != edge[1] else 1
edge_matrices.append(edge_matrix)
# if there are no oxygens and no Mg, then make very long paths
else:
print(f'(whether intentional or not, no O_i in plane {p:.4f})')
paths_to_oi = np.ones(len(site_pts)).astype(int)*100
## add all edge matrices together
edge_matrix = np.zeros((max_r+1, max_r+1))
for mat in edge_matrices: edge_matrix[:len(mat),:len(mat)] += mat
## bin sites by distance, add to list
all_paths_to_oi = list(cu.flatten(all_paths_to_oi))
for r in range(max_r+1):
all_sites_by_r.append(np.argwhere(np.array(all_paths_to_oi) == r).T[0])
## flatten the nested list of BR sites
all_BR_sites = list(cu.flatten(all_BR_sites))
print(f'{len(all_BR_sites)} BR sites')
## combine all paths into a large paths array
paths = np.zeros((num_polys, num_polys)) + num_polys
prev_sites = 0
for pz, paths_matrix in zip(conduction_planes, paths_list):
these_sites = len(paths_matrix)
paths[prev_sites:prev_sites + these_sites, prev_sites:prev_sites + these_sites] = paths_matrix
prev_sites += these_sites
print(f'plane at {pz:.4f} : {these_sites} sites with max {prev_sites}')
## doubleprime, single plane
## fields to use: max_r, edge_matrix, all_BR_sites, all_sites_by_r,
## defect_coords, all_site_pts, paths
elif num_planes == 1 :
## get sites & paths
all_site_pts, _, _, paths = cu.get_sites_above_below(planes_by_name[zz], atoms, \
cell if not frac else np.eye(3), metal=mm, frac=frac, viz=False)
num_polys = len(all_site_pts)
## get defect coordinates
defect_coords = cu.get_nearby_atoms_wrap(planes_by_name[zz], mgs,
np.mean(np.diff(conduction_planes)), cell[2,2])
mg_pts = defect_coords[['x','y','z']].values
## paths to defects, and count defects
paths_to_mg, mg_counts = cu.get_mg_distances(all_site_pts, paths, mg_pts, cell if not frac else np.eye(3))
max_r = max(mg_counts)
## bin sites by the number of nearest defects rather than by distance
for r in range(max_r+1):
all_sites_by_r.append(np.argwhere(np.array(mg_counts) == r).T[0])
## store counts of defects for every edge between sites as a matrix
edge_matrix = np.zeros((max_r+1, max_r+1))
for edge in np.argwhere(paths == 1) :
edge_matrix[mg_counts[edge[0]], mg_counts[edge[1]]] += 0.5 if edge[0] != edge[1] else 1
# pass
## pretend that 0 and 1 distances are BR and aBR
all_BR_sites = np.argwhere(np.array(paths_to_mg) == 0).T[0]
## doubleprime, combined planes. Aggregate all_site_pts from site_pts,
## add all edge matrices together, bin sites by distances & add, combine paths
else :
num_polys = 0
max_r = 0
paths_list = list()
all_mg_counts = list()
all_site_pts = list()
# conduction_planes = sorted(conduction_planes, reverse=True)
for c, p in enumerate(conduction_planes):
# print(f'\n starting {c} : {p:.2f}')
## get sites & paths
site_pts, _, _, paths = cu.get_sites_above_below(p, atoms, \
cell if not frac else np.eye(3), metal=mm, frac=frac, viz=False)
num_polys += len(site_pts)
all_site_pts.append(np.copy(site_pts))
paths_list.append(np.copy(paths))
# print(f' got paths {c} : {p:.4f}')
## get defect coordinates
mg_coords = cu.get_nearby_atoms_wrap(p, mgs, np.mean(np.diff(conduction_planes)), cell[2,2])
mg_pts = mg_coords[['x','y','z']].values
defect_coords.append(mg_coords.copy(deep=True))
# print(f'got defects {c} : {p:.4f}')
## paths to defects, and count defects
paths_to_mg, mg_counts = cu.get_mg_distances(site_pts, paths, mg_pts,
cell if not frac else np.eye(3), verbose=False)
max_r = max(max_r, max(mg_counts))
all_mg_counts.append(mg_counts)
# print(f'got dists {c} : {p:.4f}')
## bin sites by the number of nearest defects rather than by distance
# for r in range(max_r+1):
# sites_by_r.append(np.argwhere(np.array(mg_counts) == r).T[0])
## store counts of defects for every edge between sites as a matrix
edge_matrix = np.zeros((max_r+1, max_r+1))
for edge in np.argwhere(paths == 1) :
edge_matrix[mg_counts[edge[0]], mg_counts[edge[1]]] += 0.5 if edge[0] != edge[1] else 1
edge_matrices.append(edge_matrix)
## pretend that 0 and 1 distances are BR and aBR
BR_sites = np.argwhere(np.array(paths_to_mg) == 0).T[0]
all_BR_sites.append(BR_sites+len(site_pts)*c)
# print(f' finishing {c} : {p:.4f}')
del site_pts, paths, mg_coords
# print('putting it together')
## make sites_by_r
## bin sites by distance, add to list
all_mg_counts = list(cu.flatten(all_mg_counts))
for r in range(max_r+1):
all_sites_by_r.append(np.argwhere(np.array(all_mg_counts) == r).T[0])
print(f'{len(all_sites_by_r[r]):4d} sites w/ {r} Mg_Al neighbors')
## add all edge matrices together
edge_matrix = np.zeros((max_r+1, max_r+1))
for mat in edge_matrices: edge_matrix[:len(mat),:len(mat)] += mat
## flatten the nested list of BR sites
all_BR_sites = list(cu.flatten(all_BR_sites))
print(f'{len(all_BR_sites)} sites w/ Mg_Al directly above/below (accessible as BR_sites)')
## combine all paths into a large paths array
paths = np.zeros((num_polys, num_polys)) + num_polys
prev_sites = 0
for pz, paths_matrix in zip(conduction_planes, paths_list):
these_sites = len(paths_matrix)
paths[prev_sites:prev_sites + these_sites, prev_sites:prev_sites + these_sites] = paths_matrix
prev_sites += these_sites
print(f'plane at z = {pz:.4f} : {these_sites} sites with max {prev_sites}')
## load plane to get statistics
th, _, nh, _, _ = load_plane(plane.hop_path, numpolys=num_polys)
data = pd.concat([th,nh],ignore_index=True)
## TODO: deal with Voronoi objects using the lammps file
if s2n(zz) : ## one plane: Voronoi implemented
pass
elif zz == 'z_all' :
pass
## add the counting of fill times.
## This is separate, because filling time does not correspond 1-1 to hops
fill_times = None
if do_fill_times:
hop_folder = '/'.join(plane.hop_path.split('/')[:-1])
fill_time_path = hop_folder+f'/{mm}{TK}K_{zz}_fill_times.csv'
if path.isfile(fill_time_path) and path.getsize(fill_time_path) > 0:
print('found pre-processed fill times')
else :
print('processing fill times...')
ft = count_fill_times(data,num_polys)
ft.to_csv(fill_time_path, index=False)
print(f'... saved fill times to {fill_time_path}')
## actually load the fill times, add BR/aBR label, add distance to defect
fill_times = pd.read_csv(fill_time_path).sort_values(by='site')
fill_times['site_is_BR'] = [x in all_BR_sites for x in fill_times.site]
try: fill_times['r_to_defect'] = [all_paths_to_oi[x] if ph == 'beta' else all_mg_counts[x] for x in fill_times.site.values ]
except: fill_times['r_to_defect'] = 100
## return a dictionary with all the data and metadata
return dict(phase=ph, config=cn, stoich=st, exclude=ex, metal=mm, T1=TK,
z=zz, hops=data, atoms=atoms, max_r=max_r, total_time=tt,
edge_distances=edge_matrix, BR_sites=all_BR_sites,
sites_by_r=all_sites_by_r, defects=defect_coords,
num_planes=num_planes, cell=cell, atoms_are_frac=frac,
site_pts=all_site_pts, path_lengths=paths, hop_path=plane.hop_path,
fill_times=fill_times)
# =============================================================================
# %% a full autocorrelation function: assumes input has columns vx, vy, vz
# ## (so I guess a velocity autocorrelation function, not position)
# ## For a parallelized version, see
# =============================================================================
def autocorrelation(df, time_limit, dims=['x','y','z'], verbose=False, to_file=None):
    '''
    Velocity autocorrelation function of the columns vx, vy, vz of df (selected
    via dims) for every lag up to time_limit. Returns a DataFrame of the
    per-dimension autocorrelation indexed by lag time; optionally streams the
    result to to_file as CSV.
    '''
start_time = dt.now()
time = df.index.values
num_periods = np.argwhere(time > time_limit)[0,0]
df = df[['vx','vy','vz']]
for d in ['x','y','z'] :
if d not in dims : df.drop(f'v{d}', axis=1, inplace=True)
acf = list()
if to_file is not None :
fout = open(to_file,'w')
fout.write('time,' + ','.join(sorted(dims))+'\n')
for lag in range(num_periods):
cfun = dict(zip(sorted(dims),df.apply(lambda col: col.autocorr(lag))))
acf.append(cfun)
if verbose and np.isclose(time[lag] % 25, 0, atol=1e-4) :
print(f'done lag {time[lag]:.3f} ps, {dt.now()-start_time}')
if to_file is not None:
fout.write(','.join([str(time[lag]), ','.join([str(cfun[x]) for x in sorted(dims)]) ])+'\n')
acf = pd.DataFrame(acf, index=time[:num_periods]) #.set_index('time')
if to_file is not None: fout.close()
return acf
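# =============================================================================
# %% usage sketch for autocorrelation() -- toy white-noise data only; the
# ## _demo_* helper is illustrative and not part of the analysis pipeline
# =============================================================================
def _demo_autocorrelation():
    ''' Minimal sketch: the VACF of white-noise "velocities" is ~1 at lag 0
        and fluctuates around 0 for every later lag. '''
    rng = np.random.default_rng(0)
    t = np.arange(0, 10.0, 0.01)                        # 10 ps sampled every 0.01 ps
    toy = pd.DataFrame(rng.standard_normal((len(t), 3)),
                       columns=['vx', 'vy', 'vz'], index=t)
    return autocorrelation(toy, time_limit=1.0)         # index: lag time; columns: x, y, z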
# =============================================================================
# %% apply a function to a one-atom trajectory for an arbitrary number of starts
# =============================================================================
def multistart_apply(traj, duration, starts,func=lambda x: pow(x,2)):
'''
traj: pandas DataFrame with columns x,y,z and index time
    duration: for how long to count
    starts: iterable of start times over which to average
    func: function to apply element-wise, default x**2 for r2. r4 is the second obvious use
'''
num_points = sum(traj.index.values <= starts[0]+duration)
## build 3D array
traj_array = np.zeros((len(starts),num_points,3))
for i, start in enumerate(starts):
traj_array[i,:,:] = traj.loc[start:start+duration,['x','y','z']].values
## apply the function, then sum over last axis (which is xyz)
## then average over the first axis (which is over the multiple starts)
rn = np.mean(np.sum(func(traj_array), axis=-1), axis=0)
## make a dataframe
new_index = traj.index.values-traj.index.values[0]
new_index = new_index[:len(rn)]
return pd.DataFrame(data={'r2':rn, 'time':new_index})
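# =============================================================================
# %% usage sketch for multistart_apply() -- toy random walk; illustrative only.
# ## note: it squares absolute coordinates, so the output is an r2 only for a
# ## trajectory that starts at (or has been shifted to) the origin
# =============================================================================
def _demo_multistart_apply():
    ''' Minimal sketch: <r2>(t) of a toy 3D walk, averaged over six start times. '''
    rng = np.random.default_rng(1)
    steps = 0.1 * rng.standard_normal((101, 3))
    traj = pd.DataFrame(np.cumsum(steps, axis=0), columns=['x', 'y', 'z'],
                        index=np.arange(101, dtype=float))    # "time" in arbitrary units
    return multistart_apply(traj, duration=50, starts=[0., 10., 20., 30., 40., 50.])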
# =============================================================================
# %% calculate r2 for one atom given its trajectory, for an arbitrary number of starts
# =============================================================================
def multistart_r2r4(traj, duration, starts, cell=np.eye(3), timebins=None, do_avg=True, col='r2', twod=False):
'''
traj: pandas DataFrame with columns x,y,z and index time
duration: for how long to count
starts: iterable of time values over which to average
cell: simulation box, assumed orthogonal
timebins: time values for binning (e.g. logspace)
do_avg : boolean flag to perform averaging over the multiple starts
col : option for which column to output, e.g. 'r2' , 'dx', 'dy' etc
twod : boolean flag, set to True if z is to be ignored
'''
# num_points = sum(traj.index.values <= starts[0] + duration)
if timebins is None :
num_points = sum(traj.index.values <= starts[0] + duration)
else :
duration = max(timebins)
num_points = len(timebins) # -1
## repeat precision determination for rounding just below
traj.index -= traj.index.min()
ts = traj.index[1] - traj.index[0]
prec = -int(np.floor(np.log10(ts)))
while not np.isclose(ts % 10**-prec, 0) : prec += 1
# make a mask for which values to take
mask_all = np.in1d(np.round(traj.index.values,prec), timebins) ## -min(traj.index.values)
mask = mask_all[:len(traj.loc[:duration])]
print(sum(mask), len(mask), len(timebins))
if len(timebins) > sum(mask) :
        print('extra time bins getting skipped:', sorted(set(timebins) - set(np.round(traj.index.values, prec))))
num_points=sum(mask)
if num_points < 1 : num_points = 1
    assert col == 'r2' or col in dims.keys(), f'Passed value for "col" is not recognized. Passed: {col}. Recognized: "r2" or in {dims.keys()}'
## take differences for later computing r2
traj['dx'] = traj.x.diff() * cell[0,0]
traj['dy'] = traj.y.diff() * cell[1,1]
traj['dz'] = traj.z.diff() * cell[2,2]
traj.iloc[0,-3:] = 0
## account for atoms hopping the border of the cell
    traj.loc[traj.dx >  0.5*cell[0,0], 'dx'] -= cell[0,0]  ## left x boundary crossed
    traj.loc[traj.dy >  0.5*cell[1,1], 'dy'] -= cell[1,1]  ## left y boundary crossed
    traj.loc[traj.dz >  0.5*cell[2,2], 'dz'] -= cell[2,2]  ## left z boundary crossed
    traj.loc[traj.dx < -0.5*cell[0,0], 'dx'] += cell[0,0]  ## right x boundary crossed
    traj.loc[traj.dy < -0.5*cell[1,1], 'dy'] += cell[1,1]  ## right y boundary crossed
    traj.loc[traj.dz < -0.5*cell[2,2], 'dz'] += cell[2,2]  ## right z boundary crossed
## build 3D array of r2 using cumsum
traj_array = np.zeros((len(starts),num_points, 2 if twod else 3))
# print(traj_array.shape)
for i, start in enumerate(starts):
if twod: chunk = traj.loc[start-5e-4:start+duration+5e-4,['dx','dy']].values
else : chunk = traj.loc[start-5e-4:start+duration+5e-4,['dx','dy','dz']].values
chunk[0,:] = 0
chunk = np.cumsum(chunk, axis=0)
# if i < 1 : print(chunk.shape, mask.shape, traj_array.shape)
traj_array[i,:,:] = chunk if timebins is None else chunk[mask]
if i < 1 : print('at least one of multiple starts works.')
## apply the square, then sum over last axis (which is x,y,z)
## this gets the distribution of r2
r2 = np.sum(traj_array**2, axis=-1)
# print(f'r2 is {r2.shape}')
## bin by time if time bins exist
if timebins is not None :
# new_index = np.array(timebins[:-1])*0.5 + np.array(timebins[1:])*0.5
new_index = traj.index.values-traj.index.values[0]
new_index = new_index[mask_all]
else :
new_index = traj.index.values-traj.index.values[0]
new_index = new_index[:len(r2[0,:])]
if do_avg :
## average over the first axis (which is over the multiple starts) to get <r2>
exp_r2 = np.mean(r2, axis=0)
## calculate r4 from the distribution first, then average
exp_r4 = np.mean(r2**2, axis=0)
## make the output dataframe
out = pd.DataFrame(data={'r2':exp_r2, 'r4':exp_r4, 'time':new_index})
else :
# print(r2.shape, len(timebins), len(starts))
out = pd.DataFrame(r2.T) if col == 'r2' else pd.DataFrame(traj_array[:,:,dims[col]].T)
out['time'] = new_index
out.set_index('time', inplace=True)
out.columns = starts
return out #, traj.loc[:,['dx','dy','dz']]
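# =============================================================================
# %% usage sketch for multistart_r2r4() -- toy walk wrapped into a unit cell
# ## (cell=np.eye(3)); steps are small, so only wrap jumps trigger the
# ## periodic-boundary corrections. Illustrative only.
# =============================================================================
def _demo_multistart_r2r4():
    ''' Minimal sketch: <r2> and <r4> of a toy walk averaged over several starts. '''
    rng = np.random.default_rng(2)
    steps = 0.01 * rng.standard_normal((201, 3))              # |step| << 0.5 (fractional units)
    traj = pd.DataFrame(np.cumsum(steps, axis=0) % 1.0,       # wrapped into [0, 1)
                        columns=['x', 'y', 'z'], index=np.arange(201, dtype=float))
    return multistart_r2r4(traj, duration=50, starts=[0., 25., 50., 100., 150.],
                           cell=np.eye(3))                    # columns: r2, r4, time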
# =============================================================================
# %% calculate r2 for one atom at a fixed time lag, as a running average over starts
# =============================================================================
def multiduration_r2r4(traj, deltas, starts, cell=np.eye(3), discard=4):
'''
traj: pandas DataFrame with columns x,y,z and index time
deltas: time lags for computing r2 and r4, the longest is taken
starts: iterable of time values over which to average, typically a range()
cell: simulation box, assumed orthogonal
discard: toss out this multiple of short-time averages-over-duration
'''
##
    try: duration = max(deltas)
    except TypeError: duration = int(deltas)
# if not isinstance(starts, list) :
starts = np.array(starts)
# if min(starts) < duration * discard : starts += duration * discard - min(starts)
## number of time points in one lag-length of the trajectory
num_points = sum((traj.index.values >= starts[0]) & (traj.index.values <= starts[0] + duration)) # if timebins is not None else len(timebins)-1
if num_points < 1 : num_points = 1
## take differences for later computing r2
traj['dx'] = traj.x.diff() * cell[0,0]
traj['dy'] = traj.y.diff() * cell[1,1]
traj['dz'] = traj.z.diff() * cell[2,2]
traj.iloc[0,-3:] = 0
## account for atoms hopping the border of the cell
    traj.loc[traj.dx >  0.5*cell[0,0], 'dx'] -= cell[0,0]  ## left x boundary crossed
    traj.loc[traj.dy >  0.5*cell[1,1], 'dy'] -= cell[1,1]  ## left y boundary crossed
    traj.loc[traj.dz >  0.5*cell[2,2], 'dz'] -= cell[2,2]  ## left z boundary crossed
    traj.loc[traj.dx < -0.5*cell[0,0], 'dx'] += cell[0,0]  ## right x boundary crossed
    traj.loc[traj.dy < -0.5*cell[1,1], 'dy'] += cell[1,1]  ## right y boundary crossed
    traj.loc[traj.dz < -0.5*cell[2,2], 'dz'] += cell[2,2]  ## right z boundary crossed
## build 3D array of r2 using cumsum
traj_array = np.zeros((len(starts),num_points,3))
for i, start in enumerate(starts):
chunk = traj.loc[start:start+duration,['dx','dy','dz']].values
chunk[0,:] = 0
traj_array[i,:,:] = np.cumsum(chunk, axis=0)
## apply the square, then sum over last axis (which is dx,dy,dz)
## this gets the distribution of r2 and is a 2D array with shape (starts, time points)
## here, only the last time point is kept
r2 = np.sum(traj_array[:,-1,:]**2, axis=-1)
# print(f'r2 is {r2.shape}')
## starts_to_count is actually t minus delta in eq.1 from He(2008)
starts_to_count = np.argwhere(starts>=duration*discard)
this_r2 = (np.cumsum(r2)/ np.arange(1,1+len(r2)) )
this_r2 = this_r2[starts_to_count].reshape(-1)
this_r4 = this_r2**2
out = pd.DataFrame(data={'r2':this_r2, 'r4':this_r4, 'time':duration+starts[starts_to_count].reshape(-1)})
# out.time = out.time.astype(int)
## TODO: downselect time points with a mask
return out
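# =============================================================================
# %% usage sketch for multiduration_r2r4() -- same toy walk idea as above;
# ## cumulative average of r2 at a fixed 10-step lag, in the spirit of
# ## eq. 1 of He (2008) referenced in the function. Illustrative only.
# =============================================================================
def _demo_multiduration_r2r4():
    ''' Minimal sketch: running average of r2 at one time lag over many starts. '''
    rng = np.random.default_rng(5)
    steps = 0.01 * rng.standard_normal((500, 3))
    traj = pd.DataFrame(np.cumsum(steps, axis=0), columns=['x', 'y', 'z'],
                        index=np.arange(500, dtype=float))
    return multiduration_r2r4(traj, deltas=[10.], starts=np.arange(0., 490., 1.0),
                              cell=np.eye(3), discard=4)      # columns: r2, r4, time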
# =============================================================================
# %% calculate the fluctuation kernel for the diffusion coefficient
# ## using Laplace transforms
# =============================================================================
laplace = lambda x, y, s : np.trapz(y * np.exp(-s * x), x)
def fluctuation_kernel(a2, s_values, dim=3):
'''
Parameters
----------
a2 : pandas DataFrame
a2 is an output from MD, should be indexed to time and have fields r4 and r2.
s_values : numpy array
Values of the Laplace-space variable to use.
dim : int, optional
Number of dimensions. The default is 3.
Returns
-------
    int1s : scipy.interpolate.interp1d
        Interpolator of the fluctuation (correction) kernel over s_values.
'''
if 0 not in a2.index.values :
a2.loc[0] = np.zeros(len(a2.columns))
a2 = a2.sort_index()
a2['r22'] = a2.r2 **2
a2['x4'] = a2.r4-(dim+2)*a2.r22/dim
a2['burn'] = 0
    dx4dt = np.diff(a2.x4, 1) / np.diff(a2.index.values, 1)
    time_midpoints = 0.5 * (a2.index.values[1:] + a2.index.values[:-1])
    a2.loc[a2.index[1:-1], 'burn'] = np.diff(dx4dt, 1) / np.diff(time_midpoints, 1) / 24
# dt = a2.index.values[1] - a2.index.values[0]
# a2.burn.iloc[1:-1] = np.diff(np.diff(a2.x4,1),1)/dt**2 / 24
cd = list()
for s in s_values:
cds = 3*laplace(a2.index, a2.burn, s)/(dim+2) ## BCF term
cds += s**2 * laplace(a2.index, a2.r22, s) / 8 / dim**2 ## r22 term
cds -= s**3 * laplace(a2.index, a2.r2, s)**2 / 4 / dim**2 ## (r2)^2 term
cds /= (s**2 * laplace(a2.index, a2.r2, s) / 2 / dim)**2 ## Dgs^2 term
cd.append(cds)
int1s = interp1d(s_values, np.array(cd), bounds_error=False, fill_value='extrapolate')
# cd_interp = lambda x: int1s(x)
# cd2 = [ invertlaplace(cd_interp, x, method='dehoog', dps=5, degree=5) for x in a2.index.values[1::10].tolist() ]
# return a2.index.values[1::10], cd2
return int1s
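# =============================================================================
# %% usage sketch for fluctuation_kernel() -- ideal Gaussian diffusion, where
# ## r2 = 6*D*t and r4 = (5/3)*r2**2; the correction kernel should then come
# ## out small (it vanishes only in the limit s*t_max >> 1, since the np.trapz
# ## Laplace transforms are truncated at t_max). Illustrative only.
# =============================================================================
def _demo_fluctuation_kernel():
    ''' Minimal sketch: kernel of an ideal random walk on a log grid of s. '''
    t = np.linspace(0.0, 100.0, 10001)
    D = 0.1
    r2 = 6.0 * D * t
    toy = pd.DataFrame({'r2': r2, 'r4': (5.0 / 3.0) * r2 ** 2}, index=t)
    s_values = np.logspace(-1, 0.5, 20)          # keep s*t_max >= 10
    kernel = fluctuation_kernel(toy, s_values, dim=3)
    return kernel(s_values)                      # interp1d evaluated on the input grid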
# =============================================================================
# %% Stehfest algorithm for the inverse Laplace transform
# ## https://gist.github.com/AndrewWalker/5583653
# ## another method: https://github.com/mojtaba-komeili/numerical-inverse-laplace/blob/master/NumInvLaplace.py
# =============================================================================
from math import factorial
def stehfest_coeff(n, i):
acc = 0.
for k in range(int(np.floor((i+1)/2.0)), int(min(i, n/2.0))+1) :
num = k**(n/2.0) * factorial(2 * k)
        den = factorial(i - k) * factorial(k - 1) * factorial(k) * factorial(2*k - i) * factorial(int(n/2.0 - k))
acc += (num /den)
exponent = i+n/2.0
term = np.power(-1+0.0j,exponent)
res = term * acc
return res.real
def stehfest_inverse(f, t, n=6):
acc = 0.
lton2 = np.log(2) / t
for i in range(1, n+1):
a = stehfest_coeff(n, i)
b = f(i * lton2)
acc += (a * b)
return lton2 * acc
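# =============================================================================
# %% sanity check for the Stehfest inversion on a known transform pair:
# ## L{exp(-t)} = 1/(s+1), so inverting 1/(s+1) should recover exp(-t)
# ## to within a few percent for n=6. Illustrative only.
# =============================================================================
def _demo_stehfest():
    ''' Minimal sketch: invert 1/(s+1) at a few times and compare to exp(-t). '''
    ts = np.array([0.5, 1.0, 2.0])
    approx = np.array([stehfest_inverse(lambda s: 1.0 / (s + 1.0), t, n=6) for t in ts])
    return approx, np.exp(-ts)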
# =============================================================================
# %% 2-way smoothing for plotting hopping PDF, CDF, and filling times
# ## 2-way is for forward & back smoothing. Could use a convolution instead.
# =============================================================================
def pdf_smooth(df, halflife):
## assume series index is in [picoseconds], as it is for hopping residence times.
## this only works approx 59 [picoseconds] at a time - so needs recursion
# ## keeping real [ps] units doesn't work as pandas uses [nanosecond] precision ... sigh ...
# df.index = [np.datetime64(int(x*1000),'fs') for x in df.index.values]
## get everything int-indexed: in femtoseconds
df.index = np.round(df.index*1000).astype(int)
ts = round(min(np.diff(df.index)))
## reindex to bring back missing times
df = df.reindex(np.arange(df.index.min(),df.index.max()+1, ts).astype(int))
df = df.apply(lambda col : gaussian_filter1d(col, halflife / ts, truncate=3))
df.index = df.index.astype(float) / 1000
return df # .dropna()
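# =============================================================================
# %% usage sketch for pdf_smooth() -- toy residence-time histogram; assumes
# ## gaussian_filter1d (scipy.ndimage) is already imported in this module,
# ## as pdf_smooth itself expects. Illustrative only.
# =============================================================================
def _demo_pdf_smooth():
    ''' Minimal sketch: smooth a noisy PDF sampled every 0.05 ps with a ~0.5 ps width. '''
    rng = np.random.default_rng(4)
    t = np.round(np.arange(0.0, 50.0, 0.05), 3)             # residence times in ps
    pdf = pd.DataFrame({'counts': rng.poisson(5, size=len(t)).astype(float)}, index=t)
    return pdf_smooth(pdf, halflife=500)                     # halflife in fs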
# =============================================================================
# %% load a Gs (van Hove) file quickly. This is here to avoid bloat in the
# ## macroscopic analysis notebook
# =============================================================================
def load_gs(glob_query, option, **kwargs):
gs_glob = glob(glob_query)
gs = None
try:
gs = pd.read_csv(gs_glob[0])
# if option != 'spectra' :
gs.gs = gs.gs * gs.r**2
gs = gs.set_index(['r','time']).unstack().apply(lambda col: col/col.sum(), axis=0)
gs.columns = [x[1] for x in gs.columns]
gs.index = np.round(gs.index.values,4)
except:
print(f'could not load a Gs file for {glob_query}')
## return times at which Gs decays to 1/e
if option in ['a2', 'r2', 'exponent', 'cds', 'exp-vs-r', 'cdt', 'Funke'] and gs is not None:
if 'radii' in kwargs.keys() :
decay_times = list()
for r in kwargs['radii'] :
rmin=min(r); rmax=max(r)
s = gs.loc[rmin:rmax,:].sum().reset_index()
s.columns = ['time','gs']
# s.set_index('time', inplace=True)
## reverse interpolation: x-values are the function, y is time
gs_int = interp1d(s.gs, s.time)
decay_times.append(gs_int(1/np.e))
return decay_times
else : print('to calculate 1/e times, supply an iterable of tuples, "radii".')
return gs
# =============================================================================
# %% 4-point correlation - for Burnett and response functions
# ## assumes a regularly spaced, real-valued zero-mean series, e.g. velocities
# ## assumes time lags are measured in periods of the series, not in real units.
# =============================================================================
def four_point_autocorr(series, lag1, lag2, lag3):
'''
Compute a four-point (three-lag) correlation of a series with itself for
three lags. The series is assumed to be real-valued (complex conjugation is
ignored) and zero-mean.
Parameters
----------
    series : numpy array of shape (n_times, n_components)
        The series for which the autocorrelation is computed, one column per
        component (e.g. vx, vy, vz). Assumed to be real-valued and zero-mean.
lag1 : int
First lag (in sequence of 3), an integer number of indices of the series.
lag2 : int
Second lag (in sequence of 3), an integer number of indices of the series.
lag3 : int
Third lag (last in sequence of 3), an integer number of indices of the series.
Returns
-------
    numpy array of shape (n_components,)
        The four-point (three-lag) autocorrelation of the series, per component.
Throws
-------
AssertionError
Throws an AssertionError if the sum of lags is larger than the length
of the series.
'''
assert lag1 + lag2 + lag3 <= len(series), 'sum of 4-pt correlation time lags is larger than series is long'
return np.mean(series[:-lag1-lag2-lag3 if lag1+lag2+lag3 else None,:] *
series[lag1:-lag2-lag3 if lag2+lag3 else None,:] *
series[lag1+lag2:-lag3 if lag3 else None,:] *
series[lag1+lag2+lag3:,:], axis=0)
# =============================================================================
# %% 4th cumulant - for Burnett and response functions
# ## assumes a regularly spaced, real-valued zero-mean time series, e.g. velocities
# ## assumes time lags are measured in periods of the series, not in real units.
# =============================================================================
def fourth_cumulant(series, lag1, lag2, lag3):
'''
Compute the fourth cumulant of a series, e.g. of a velocity series in time.
The series is assumed to be regularly-spaced, zero-mean, real-valued.
Parameters
----------
    series : numpy array of shape (n_times, n_components)
        The series for which the cumulant is computed, one column per component.
        Assumed to be regularly spaced, real-valued, and zero-mean.
lag1 : int
First lag (in sequence of 3), an integer number of indices of the series.
lag2 : int
Second lag (in sequence of 3), an integer number of indices of the series.
lag3 : int
Third lag (last in sequence of 3), an integer number of indices of the series.
Returns
-------
    numpy array of shape (n_components,)
        Fourth cumulant of the series, per component.
'''
autocorr_4 = four_point_autocorr(series, lag1, lag2, lag3)
series_tau_1 = series[:-lag1-lag2-lag3 if lag1+lag2+lag3 else None,:]
series_tau_2 = series[lag1:-lag2-lag3 if lag2+lag3 else None,:]
series_tau_3 = series[lag1+lag2:-lag3 if lag3 else None,:]
series_tau_4 = series[lag1+lag2+lag3:,:]
prod_12 = np.mean(series_tau_1 * series_tau_2, axis=0)
prod_13 = np.mean(series_tau_1 * series_tau_3, axis=0)
prod_14 = np.mean(series_tau_1 * series_tau_4, axis=0)
prod_23 = np.mean(series_tau_2 * series_tau_3, axis=0)
prod_24 = np.mean(series_tau_2 * series_tau_4, axis=0)
prod_34 = np.mean(series_tau_3 * series_tau_4, axis=0)
return autocorr_4 - prod_12*prod_34 - prod_13*prod_24 - prod_14*prod_23
# =============================================================================
# %% compute Burnett CF from velocities by taking 4th cumulant and integrating
# =============================================================================
def burnett_from_velocity(velocity_series, total_lag, downsample = 0):
'''
Compute Burnett CF for series at one lag.
Sources:
Nieuwenhuizen & Ernst, J. Stat. Phys. (1985) vol. 41, p. 773
Song et al., PNAS (2019) vol. 116, p. 12733
Parameters
----------
    velocity_series : numpy array of shape (n_times, n_components)
        Time series of velocities, one column per component; assumed regularly
        spaced, real-valued, and zero-mean.
total_lag : int
Time lag, an integer number of indices/periods of the series.
This is the sum of 3 lags over which the 4-pt CF is computed.
downsample : int
Take every downsample-th set of lags if total_lag > downsample * 10.
If not zero, then speeds up calculation by downsample**2 times.
Returns
-------
    bcf : numpy array of shape (n_components,)
Burnett CF at the value of time lag.
Units: [(velocity series distance unit)^4/(velocity series time point spacing)^2].
As of 2021/02/25, diagonal terms.
'''
if total_lag == 0 : return fourth_cumulant(velocity_series, 0, 0, 0)
if (not downsample) or total_lag/downsample < 10 :
downsample = 1
    ## add the 4th cumulant for each combination of lag1 and lag2;
    ## the last lag is determined by the first two, hence double sum, not triple
bcf = 0
samples = 0
for lag1 in np.arange(0,total_lag+1, downsample, dtype=int) :
for lag2 in np.arange(0,total_lag+1-lag1, downsample, dtype=int) :
bcf += fourth_cumulant(velocity_series, lag1, lag2, total_lag-lag1-lag2)
samples += 1
    ## rescale so a downsampled sum matches the total number of (lag1, lag2) pathways
total_samples = sum(range(len(np.arange(0,total_lag+1))+1))
return bcf * total_samples / samples
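# =============================================================================
# %% usage sketch for burnett_from_velocity() -- Gaussian white-noise
# ## "velocities" have zero fourth cumulant, so the BCF should fluctuate
# ## around zero (within statistical noise) at every lag. Illustrative only.
# =============================================================================
def _demo_burnett():
    ''' Minimal sketch: BCF of white noise, per component, at a few small lags. '''
    rng = np.random.default_rng(3)
    v = rng.standard_normal((4000, 3))           # shape (time, components), zero mean
    return [burnett_from_velocity(v, total_lag=lag) for lag in (0, 1, 5)]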
# =============================================================================
# %% shorthand for loading macro planes
# =============================================================================
def load_macro_planes(planes_to_load, load_r2=False, load_com=False) :
## make a structure for loading data
planes_dicts = []
for plane in planes_to_load.itertuples(index=False):
mm = plane.metal
T1 = plane.T1
hp = plane.hop_path
ph = plane.phase
st = plane.stoich
ex = plane.exclude
# tt = plane.total_time ## based on hops, not CoM. Could be off.
cn = plane.config
## load lammps structure
_, _, cell, atoms = cu.read_lmp(plane.lammps_path, fractional=False)
a2_folder = '/'.join(plane.lammps_path.split('/')[:-1])
## load lammps r2 file for the diffusion coefficient
if load_r2 :
            r2_fname = glob(a2_folder+f'/a2_*{T1}K-{mm}.fix')
## load the r2 file if exactly one exists, else complain
if isinstance(r2_fname, list) and len(r2_fname) == 1:
## read the r2 file - options for fix file
this_r2 = pd.read_csv(r2_fname[0], names=['time','r2','r4','a2'],
skiprows=2, sep=' ')
this_r2.time /= 1000
this_r2.set_index('time', inplace=True)
## Look for a literature folder
lit_folder = '/'.join(a2_folder.split('/')[:-1])
print(f'\nLoaded r2 for plane {hp}')
else:
print(f'\nsomething off with plane {hp}.')
print(f'here are possible r2 outputs: {r2_fname}')
this_r2 = None
else : this_r2 = None
## the a2 fix file is LAMMPS output, csv is calculated with multiple starts
## this takes the longest-duration a2 file
a2_fnames = glob(a2_folder+f'/{mm}*a2-*{T1}K*ps.csv')
## load the a2 file if exactly one exists, else complain
if a2_fnames :
if len(a2_fnames) > 1 : a2_fnames = sorted(a2_fnames, reverse=True,
key = lambda x : eval(split('-|_| ',x)[-1][:-6]))
## read the a2 file - options for csv file
this_a2 = pd.read_csv(a2_fnames[0], sep=',').set_index('time')
## Look for a literature folder
lit_folder = '/'.join(a2_folder.split('/')[:-1])
print(f'Loaded a2: {a2_fnames[0]}')
else:
print(f'something off with plane {hp}.')
print(f'here are possible a2 outputs: {a2_fnames}')
this_a2 = None
## load the CoM trajectory if it exists
com_fname = glob(a2_folder + f'/cm*{T1}K*{mm}.fix')
if isinstance(com_fname, list) and len(com_fname) == 1 and load_com:
this_com = pd.read_csv(com_fname[0],sep=' ', names=['time', 'x', 'y', 'z', 'vx', 'vy', 'vz'], skiprows=2).drop(columns=['vx','vy','vz'])
this_com.time /= 1000. ## hard-coded conversion from steps to picoseconds
this_com.set_index('time', inplace=True)
print('Loaded CoM trajectory.')
elif not load_com :
this_com = True
# print('Skipping CoM trajectory.')
else :
print(f'Could not load CoM trajectory, found: {com_fname}')
this_com = None
## wrap the a2, CoM, and metadata into a dict
if (this_r2 is not None or not load_r2) and (this_a2 is not None) :
planes_dicts.append(dict(phase=ph, metal=mm, T1=T1, config=cn, stoich=st, exclude=ex,
a2=this_a2, lit_folder=lit_folder, com = this_com,
cell=cell, atoms=atoms, folder=a2_folder, r2=this_r2))
## make the holding structure into a dataframe
return
|
pd.DataFrame(planes_dicts)
|
pandas.DataFrame
|
######################### from JQ to get IDs for recommendations ###########################
import pickle
import sqlite3
db_path = './data/foursquare.db'
conn = sqlite3.connect(db_path)
c = conn.cursor()
recs = pickle.load(open('./data/rec_results/recs_full_venue.sav', 'rb'))
# get top 10 recs
top_10_rec_dict = {}
for key,value in recs.items():
uid = key
top_10_rec = value[1][0:10]
top_10_rec_dict[uid] = top_10_rec
# get recs using history
recs_using_self_history = {}
c.execute("SELECT DISTINCT uid FROM tips t GROUP BY uid HAVING COUNT(tid) >= 3")
hist_recommend_users = c.fetchall()
for u in hist_recommend_users:
uid = u[0]
recs_using_self_history[uid] = recs[uid]
# get recs using friends
recs_using_friends = {}
c.execute("SELECT DISTINCT uid FROM tips t GROUP BY uid HAVING COUNT(tid) < 3")
frd_recommend_users = c.fetchall()
for usr in frd_recommend_users:
uid = usr[0]
recs_using_friends[uid] = recs[uid]
# use
recs_using_friends
recs_using_self_history
###########################################################################################
######################### to get the dataset for visualization ############################
###########################################################################################
## objective 1 : to return 20 recommendations for each user from both tables
## objective 2 : to get friends' friend table - to plot network graph
## objective 3 : to get users and their respective 3 cluster ids, for parallel set
## step 1 - get IDs and Names
import pandas as pd
#################################### user name master db #############################
c.execute('SELECT uid, first_name,last_name, senti_clus_id, venue_clus_id, mrt_loc_clus_id from users')
user_id = c.fetchall()
user_id_df = pd.DataFrame(user_id)
user_id_df.columns = ['uid', 'first_name','last_name','senti_clus_id', 'venue_clus_id', 'mrt_loc_clus_id']
master_user_name = pd.DataFrame(user_id_df['uid'])
master_user_name['name'] = user_id_df.first_name + ' ' + user_id_df.last_name
master_user_name['senti_clus_id'] = user_id_df['senti_clus_id']
master_user_name['venue_clus_id'] = user_id_df['venue_clus_id']
master_user_name['mrt_loc_clus_id'] = user_id_df['mrt_loc_clus_id']
master_user_name = master_user_name.sort_values('uid').reset_index(drop = True)
#################################### resta name master db #############################
c.execute('SELECT rid, venue_name from venues')
resta_id = c.fetchall()
master_resta = pd.DataFrame(resta_id)
master_resta.columns = ['rid', 'venue_name']
######################## get friends' friends' list ###################################
c.execute('SELECT uid, friend_uid from user_friends')
user_friend_id = c.fetchall()
user_friend_id_df= pd.DataFrame(user_friend_id)
user_friend_id_df.columns = ['uid', 'friend_uid']
### df1.merge(df2[['MODEL', 'MAKE']], how = 'left')
user_friend = user_friend_id_df.merge(master_user_name[['uid','name']], how = 'left')
user_friend.columns = ['user_id', 'uid', 'user_name']
friend_list = user_friend.merge(master_user_name[['uid','name']], how = 'left')
friend_list.columns = ['uid', 'users', 'friend_uid', 'friend_name']
select_top_N = 20 ## can be modified
top_N_friends = friend_list.groupby('users').agg(['count'])
top_N_friends['users'] = top_N_friends.index
top_N_friends['total_friends'] = top_N_friends.iloc[:,0]
master_top_N_friends_list = top_N_friends[['users', 'total_friends']]
master_top_N_friends_list.columns =['users', 'total_friends']
master_top_N_friends_list = master_top_N_friends_list.sort_values('total_friends', ascending = False).reset_index(drop=True)
######################## get existing users, recommendation ###################################
## recs_using_self_history
select_top_N_recommendation = 20
master_recom_list_existing = []
## for i in range(len(recs_using_self_history)):
for uid, rec_value in recs_using_self_history.items():
    recent_visit, rec_list = rec_value[0], rec_value[1]
    temp_user_list = [uid, recent_visit, rec_list[0]]
    ## pad recommendations 2..20 with "NA" when fewer than 20 are available
    for i in range(1, select_top_N_recommendation):
        try:
            temp_user_list.append(rec_list[i])
        except IndexError:
            temp_user_list.append("NA")
    master_recom_list_existing.append(temp_user_list)
master_recomm_list_existing = pd.DataFrame(master_recom_list_existing)
master_recomm_list_existing.columns = ['uid', 'recent', \
'R1','R2','R3','R4','R5', 'R6', 'R7','R8','R9','R10',\
'R11','R12','R13','R14','R15', 'R16', 'R17','R18','R19','R20']
######################## get new users, recommendation ###################################
## recs_using_friends
select_top_N_recommendation = 20
master_recom_list_new = []
## for i in range(len(recs_using_self_history)):
for uid, rec_value in recs_using_friends.items():
    recent_visit, rec_list = rec_value[0], rec_value[1]
    temp_user_list = [uid, recent_visit, rec_list[0]]
    ## pad recommendations 2..20 with "NA" when fewer than 20 are available
    for i in range(1, select_top_N_recommendation):
        try:
            temp_user_list.append(rec_list[i])
        except IndexError:
            temp_user_list.append("NA")
    master_recom_list_new.append(temp_user_list)
master_recomm_list_new =
|
pd.DataFrame(master_recom_list_new)
|
pandas.DataFrame
|
from pathlib import Path
from matplotlib.colors import same_color
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns; sns.set()
from sklearn.decomposition import PCA
import gffpandas.gffpandas as gffpd
import math
# rpy2 imports
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri, Formula
from rpy2.robjects.conversion import localconverter
base = importr('base')
utils = importr('utils')
deseq2 = importr('DESeq2')
def makeOutDir(outputDir, folderName):
"""
Makes an out directory if there is not one. Returns file path as Path object
Inputs:
outputDir - Pathlib directory object that directory will be created within
folderName - Name of directory to be made/returned
"""
outdir = outputDir / folderName
if not outdir.exists():
outdir.mkdir()
return outdir
def get_htseq2_and_metadata_df(htseq2dir, organism_type_ID_df, feature_types_to_keep=None):
raw_counts_df_list = []
raw_metadata_df_list = []
first_unique_ids = []
for i, path in enumerate(sorted(list(htseq2dir.iterdir()))):
if path.suffix == '.tsv':
# get sample ID from path
sample_name = path.stem
# read in HTseq TSV
temp_df = pd.read_csv(path, sep='\t', names=['long_ID', sample_name])
# check that long_IDs match
if len(first_unique_ids) == 0:
first_unique_ids = temp_df['long_ID'].unique()
else:
temp_unique_ids = temp_df['long_ID'].unique()
                assert np.array_equal(first_unique_ids, temp_unique_ids)
temp_df = temp_df.set_index('long_ID')
temp_metadata_df = temp_df[temp_df.index.str.contains('__')]
temp_counts_df = temp_df[~temp_df.index.str.contains('__')]
# append df to raw_counts_df_list
raw_counts_df_list.append(temp_counts_df)
raw_metadata_df_list.append(temp_metadata_df)
counts_df = pd.concat(raw_counts_df_list, axis=1)
counts_df = counts_df.add_prefix('sample_')
metadata_df = pd.concat(raw_metadata_df_list, axis=1)
metadata_df = metadata_df.add_prefix('sample_')
metadata_df.index = metadata_df.index.str.replace('__', '')
counts_df = counts_df.join(organism_type_ID_df)
counts_df = counts_df.set_index(['organism', 'type'], append=True)
counts_df = counts_df.reorder_levels(['organism', 'type', 'long_ID'])
if feature_types_to_keep:
counts_df = counts_df[counts_df.index.get_level_values('type').isin(feature_types_to_keep)]
feature_df = counts_df.groupby(['type']).sum()
metadata_df = pd.concat([feature_df, metadata_df])
return metadata_df, counts_df
def get_dge_table(results_table, attributes_df, fdr=0.1, sort=True, dge_out_path=None, rearrange_columns=True):
if fdr:
dge_table_subset = results_table[results_table['padj'] < fdr]
else:
dge_table_subset = results_table
dge_table_subset = dge_table_subset.join(attributes_df)
# Symmetric Log function (never undefined)
def symlog10(x):
if x > 0:
return math.log10(x+1)
elif x < 0:
return -math.log10(-x+1)
else:
return 0
dge_table_subset['symlog10baseMean'] = dge_table_subset['baseMean'].apply(lambda x: symlog10(x))
if sort:
dge_table_subset = dge_table_subset.sort_values('padj', ascending=True)
if rearrange_columns:
cols = list(dge_table_subset.columns.values)
try:
index_of_padj = cols.index('padj')
index_of_baseMean = cols.index('baseMean')
cols.remove('product')
cols.insert(index_of_padj+1, 'product')
dge_table_subset = dge_table_subset[cols]
cols.remove('symlog10baseMean')
cols.insert(index_of_baseMean+1, 'symlog10baseMean')
dge_table_subset = dge_table_subset[cols]
except ValueError:
pass
if dge_out_path:
dge_table_subset.to_csv(dge_out_path, sep='\t')
return dge_table_subset
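# Minimal toy sketch of get_dge_table (illustrative, not part of the pipeline):
# a fake DESeq2-style results table plus a matching attributes table is enough
# to exercise the FDR filter, the symlog10baseMean column and the column
# re-ordering. The gene names and products below are made up.
def _demo_get_dge_table():
    results = pd.DataFrame(
        {'baseMean': [10.0, 200.0, 5.0],
         'log2FoldChange': [1.5, -2.0, 0.1],
         'padj': [0.01, 0.2, 0.05]},
        index=['geneA', 'geneB', 'geneC'])
    attributes = pd.DataFrame(
        {'product': ['kinase', 'porin', 'hypothetical protein']},
        index=['geneA', 'geneB', 'geneC'])
    return get_dge_table(results, attributes, fdr=0.1)   # keeps geneA and geneC only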
def save_out_metadata(metadata_df, outdir):
metadata_df.to_csv(outdir / 'metadata.tsv', sep='\t')
def plot_feature_pct_sample(metadata_df, outdir, normalized=True):
if normalized:
read_counts = metadata_df.sum()
metadata_df_pct_reads = metadata_df / read_counts
axes = metadata_df_pct_reads.T.plot.bar(stacked=True)
outpath = outdir / 'library_annotation_{}.png'.format('normalized')
else:
axes = metadata_df.T.plot.bar(stacked=True)
outpath = outdir / 'library_annotation_{}.png'.format('sum')
axes.legend(bbox_to_anchor=(1, 1))
fig = axes.get_figure()
fig.savefig(outpath, bbox_inches='tight', transparent=False, facecolor='w', edgecolor='w')
def plot_strain_featured_sample(counts_df, outdir, normalized = True):
    strain_counts_df = counts_df.copy()
strain_counts_df = strain_counts_df.groupby('organism').sum()
if normalized:
strain_counts_df_pct_reads = strain_counts_df / strain_counts_df.sum()
axes = strain_counts_df_pct_reads.T.plot.bar(stacked=True)
outpath = outdir / 'strain_{}_featured_sample.png'.format('normalized')
else:
axes = strain_counts_df.T.plot.bar(stacked=True)
outpath = outdir / 'strain_{}_featured_sample.png'.format('count')
axes.legend(bbox_to_anchor=(1, 1))
fig = axes.get_figure()
fig.savefig(outpath, bbox_inches='tight', transparent=False, facecolor='w', edgecolor='w')
def plot_heatmap(count_table, indices=None, fig_out_path=None):
if indices is not None:
figure = sns.clustermap(count_table.loc[indices])
else:
figure = sns.clustermap(count_table)
if fig_out_path:
figure.savefig(fig_out_path)
def get_PCA(count_table):
pca = PCA(n_components=2)
pca.fit(count_table)
print('pca components')
print(pca.components_)
print('pca explained variance')
print(pca.explained_variance_)
return pca
def plot_PCA(count_table, condition_table, fig_out_path):
pca = PCA(n_components=2)
projected = pca.fit_transform(count_table)
print('DF Components')
print(pd.DataFrame(pca.components_, columns = count_table.columns))
print('PCA explained variance')
explained_variance = list(pca.explained_variance_)
print(explained_variance)
transform = pca.fit_transform(count_table.T)
samples = count_table.columns
treatments = condition_table['treatment']
unique_treatments = treatments.unique()
color_dict = {}
color=iter(plt.cm.rainbow(np.linspace(0,1,len(unique_treatments))))
for i, t in enumerate(unique_treatments):
color_dict[t] = next(color)
fig = plt.figure(figsize=(10, 5), constrained_layout=True)
ax = fig.gca()
ax.scatter(transform[:,0], transform[:,1], c=list(map(color_dict.get, treatments)))
for i, txt in enumerate(samples):
ax.annotate(txt, transform[i])
ax.set_xlabel('PC 1 (explained variance: {})'.format(explained_variance[0]))
ax.set_ylabel('PC 2 (explained variance: {})'.format(explained_variance[1]))
patches = [mpatches.Patch(color=c, label=l) for l, c in color_dict.items()]
ax.legend(handles=patches)
plt.savefig(fig_out_path)
plt.close()
def plot_pct_genes_mapped_per_organism_sample(pct_genes_mapped_per_organism_sample_df, out_dir):
for organism in pct_genes_mapped_per_organism_sample_df.index.unique():
fig_out_path = out_dir / 'pct_genes_mapped_per_sample_{}.png'.format(organism)
fig = plt.figure(figsize=(10, 5), constrained_layout=True)
pct_genes_mapped_per_organism_sample_df.loc[organism].plot.bar()
plt.savefig(fig_out_path)
plt.close()
def plot_lfc_mean_normalized_counts(results_df, fig_out_path, fdr=0.1):
fig = plt.figure(figsize=(10, 5), constrained_layout=True)
ax = fig.gca()
color_dict = {True : 'royalblue', False : 'darkgray'}
color_map = list(map(color_dict.get, results_df['padj'] < fdr))
ax.scatter(results_df['baseMean'], results_df['log2FoldChange'], c=color_map, alpha=0.5)
ax.set_xscale('log')
ax.set_xlabel('baseMean')
ax.set_ylabel('log2FoldChange')
plt.savefig(fig_out_path)
plt.close()
def main(results_dir, htseq2dir, gff_dir, condition_table_path, raw_reads_dir, feature_types_to_keep=None):
experiments_dir = makeOutDir(results_dir, 'experiments')
metadata_figures_dir = makeOutDir(results_dir, 'metadata_figures')
metadata_tables_dir = makeOutDir(results_dir, 'metadata_tables')
# attributes and annotations
gffs = []
for gff_path in gff_dir.iterdir():
organism_name = str(gff_path.stem)
annotation = gffpd.read_gff3(gff_path)
attributes_df = annotation.attributes_to_columns()
attributes_df['organism'] = organism_name
gffs.append(attributes_df)
attributes_df =
|
pd.concat(gffs)
|
pandas.concat
|
import pandas as pd
import numpy as np
import datetime as dt
from tqdm import tqdm
from ..utils.parralel import *
def getDailyVol(close, span0=100, days=1):
# daily vol reindexed to close
df0 = close.index.searchsorted(close.index - pd.Timedelta(days=days))
df0 = df0[df0 > 0]
df0 = pd.Series(close.index[df0 - 1], index=close.index[close.shape[0] - df0.shape[0]:])
df0 = close.loc[df0.index] / close.loc[df0.values].values - 1 # daily returns
df0 = df0.ewm(span=span0).std()
return df0.dropna()
def getTEvents_mid(gRaw, h):
tEvents, sPos, sNeg = [], 0, 0
diff = np.log(gRaw).diff().dropna()
for i in tqdm(diff.index[1:]):
pos, neg = float(sPos+diff.loc[i]), float(sNeg+diff.loc[i])
sPos, sNeg=max(0., pos), min(0., neg)
if sNeg<-h: sNeg=0;tEvents.append(i)
elif sPos>h: sPos=0;tEvents.append(i)
return pd.DatetimeIndex(tEvents)
def getTEvents(gRaw, h):
if ('ask' not in gRaw) and ('bid' not in gRaw):
return getTEvents_mid(gRaw, h)
tEvents, sPos, sNeg = [], 0, 0
diff = np.log(gRaw).diff() # bid vs bid and ask vs ask
diff_short = np.log(gRaw.ask / gRaw.bid.shift(1)) # returns from selling @bid(T-1) and buying @ask(T+0)
diff_long = np.log(gRaw.bid / gRaw.ask.shift(1)) # returns from buying @ask(T-1) and selling @bid(T+0)
for i in tqdm(diff.index[1:]):
pos, neg = sPos + diff_long.loc[i], sNeg + diff_short[i]
sPos, sNeg = max(0., sPos + diff.ask.loc[i]), min(0., sNeg + diff.bid.loc[i])
if pos > h:
sPos = 0;
tEvents.append(i);
elif neg < -h:
sNeg = 0;
tEvents.append(i)
return pd.DatetimeIndex(tEvents)
def addVerticalBarrier(tEvents, close, numDays=1):
""" Generates timeindex of events where the vertical barrier was reached within numDays
:param tEvents: events when upper or lower barrier was reached
:param close: dataframe/series of closing prices
:param numDays: max number of days to hold the position
:return: sorted pillars
"""
t1 = close.index.searchsorted(tEvents + pd.Timedelta(days=numDays))
t1 = t1[t1 < close.shape[0]] #removing times that are beyond those in consideration
t1 = (pd.Series(close.index[t1], index=tEvents[:t1.shape[0]]))
return t1
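# Minimal toy sketch (synthetic prices, illustrative only): chain the daily
# volatility estimate, the symmetric CUSUM filter and the vertical barrier.
def _demo_event_sampling():
    rng = np.random.default_rng(0)
    idx = pd.date_range('2020-01-01', periods=500, freq='B')
    close = pd.Series(100.0 * np.exp(np.cumsum(rng.normal(0.0, 0.01, size=500))), index=idx)
    vol = getDailyVol(close)                          # EWM std of daily returns
    tEvents = getTEvents(close, h=2.0 * vol.mean())   # CUSUM filter on log prices
    t1 = addVerticalBarrier(tEvents, close, numDays=5)
    return vol, tEvents, t1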
def getEvents(close, tEvents, ptSl, trgt, minRet, numThreads, t1=False, side=None):
# 1) get target
# tEvents = [event for event in tEvents if event in trgt.index]
trgt = trgt[tEvents] # get target volatility
trgt = trgt[trgt > minRet] # filter out returns lower than the minRet threshold
# 2) get t1 (max holding period)
if t1 is False: t1 = pd.Series(pd.NaT, index=tEvents)
# 3) form events object, apply stop loss on t1
if side is None:
side_, ptSl_ = pd.Series(1., index=trgt.index), [ptSl[0], ptSl[0]]
else:
side_, ptSl_ = side[trgt.index], ptSl[:2]
events = (pd.concat({'t1': t1, 'trgt': trgt, 'side': side_}, axis=1)
.dropna(subset=['trgt']))
df0 = mpPandasObj(func=applyPtSlOnT1, pdObj=('molecule', events.index),
numThreads=numThreads, close=close, events=events,
ptSl=ptSl_)
events['t1'] = df0.dropna(how='all').min(axis=1) # pd.min ignores nan
if side is None: events = events.drop('side', axis=1)
return events
def applyPtSlOnT1(close, events, ptSl, molecule):
# apply stop loss/profit taking, if it takes place before t1 (end of event)
events_ = events.loc[molecule]
out = events_[['t1']].copy(deep=True)
if ptSl[0] > 0:
pt = ptSl[0] * events_['trgt']
else:
pt =
|
pd.Series(index=events.index)
|
pandas.Series
|
# coding: utf-8
# ---
#
# _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---
# # The Series Data Structure
# In[ ]:
import pandas as pd
# In[ ]:
animals = ['Tiger', 'Bear', 'Moose']
pd.Series(animals)
# In[ ]:
numbers = [1, 2, 3]
pd.Series(numbers)
# In[ ]:
animals = ['Tiger', 'Bear', None]
|
pd.Series(animals)
|
pandas.Series
|
import os
import streamlit as st
import pandas as pd
import altair as alt
import sqlite3
from sqlite3 import Connection
import requests
import json
import plotly.express as px
# spotify stuff
SPOTIFY_CLIENT_ID = os.environ.get('SPOTIFY_CLIENT_ID')
SPOTIFY_CLIENT_SECRET = os.environ.get('SPOTIFY_CLIENT_SECRET')
def get_spotify_token():
url='https://accounts.spotify.com/api/token'
grant_type = 'client_credentials'
body_params = {'grant_type' : grant_type}
r = requests.post(url, data=body_params, auth = (SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET))
r.raise_for_status()
token_raw = json.loads(r.text)
token = token_raw["access_token"]
return token
def spotify_search(song):
token = get_spotify_token()
url = f'https://api.spotify.com/v1/search?q={song}&type=track&limit=1'
headers = {
'Accept': 'application/json',
'Content-type': 'application/json',
'Authorization': f'Bearer {token}'
}
r = requests.get(url, headers=headers)
r.raise_for_status()
if r.status_code == 200:
data = r.json()
result = data['tracks']['items'][0]
thirty_sec_preview_url = result['preview_url']
return thirty_sec_preview_url
else:
raise Exception('Failed to get Spotify data.')
@st.cache(hash_funcs={Connection: id}) # add caching so we load the data only once
def get_connection(path_to_db):
# connect to db
try:
conn = sqlite3.connect(path_to_db, check_same_thread=False)
return conn
except Exception as e:
print(e)
def get_data(conn: Connection):
sql_query = """
SELECT
song, artist, album, date, energy, valence, danceability, instrumentalness, tempo
FROM
acoustic_features
WHERE
artist LIKE '%<NAME>%'
ORDER BY date DESC
"""
df = pd.read_sql(sql_query, con=conn)
df['date'] = pd.to_datetime(df['date'])
return df
def get_bowie_data(conn: Connection,feature):
df =
|
pd.read_sql(f'select song, tempo,round({feature},2) as {feature},cast(valence*10 as int) as valence,date,album from acoustic_features where artist="<NAME>"', con=conn)
|
pandas.read_sql
|
r"""
Tests for news results
Author: <NAME>
License: BSD-3
"""
from statsmodels.compat.pandas import NumericIndex
from statsmodels.compat.pandas import (
assert_frame_equal,
assert_series_equal,
)
import numpy as np
from numpy.testing import assert_, assert_allclose, assert_equal
import pandas as pd
import pytest
from statsmodels import datasets
from statsmodels.tsa.statespace import (
dynamic_factor,
sarimax,
structural,
varmax,
)
dta = datasets.macrodata.load_pandas().data
dta.index = pd.period_range(start='1959Q1', end='2009Q3', freq='Q')
def check_impact_indices(news, impact_dates, impacted_variables):
# Note: the index for impacts is only a time index, because we compute
# impacts for all variables during these time periods.
for attr in ['total_impacts', 'update_impacts', 'revision_impacts',
'post_impacted_forecasts', 'prev_impacted_forecasts']:
val = getattr(news, attr)
assert_(val.index.equals(impact_dates))
assert_equal(val.columns.tolist(), impacted_variables)
def check_revision_indices(news, revisions_index):
# Note: revision indices must be a MultiIndex, because for each time
# period, not all variables necessarily revised.
# If there are no revisions, we just check the length is zero
if news.news_results.revision_impacts is None:
assert_equal(len(news.revisions_iloc['revision date']), 0)
assert_equal(len(news.revisions_iloc['revised variable']), 0)
assert_equal(len(news.revisions_ix['revision date']), 0)
assert_equal(len(news.revisions_ix['revised variable']), 0)
# Otherwise, check that the revision indexes are correct
else:
# Get the integer locations of the given indexes
dates = news.previous.model._index
endog_names = news.previous.model.endog_names
if isinstance(endog_names, str):
endog_names = [endog_names]
desired_ix = revisions_index.to_frame().reset_index(drop=True)
desired_iloc = desired_ix.copy()
desired_iloc['revision date'] = [
dates.get_loc(date) for date in desired_ix['revision date']]
desired_iloc['revised variable'] = [
endog_names.index(name)
for name in desired_ix['revised variable']]
assert_(news.revisions_iloc.equals(desired_iloc))
assert_(news.revisions_ix.equals(desired_ix))
def check_update_indices(news, updates_index):
# Note: update indices are also a MultiIndex, for the same reason as the
# revision indices.
# If there are no updates, we just check the length is zero
if news.news_results.update_impacts is None:
assert_equal(len(news.updates_iloc['update date']), 0)
assert_equal(len(news.updates_iloc['updated variable']), 0)
assert_equal(len(news.updates_ix['update date']), 0)
assert_equal(len(news.updates_ix['updated variable']), 0)
# Otherwise, check that the revision indexes are correct
else:
# Get the integer locations of the given indexes
dates = news.updated.model._index
endog_names = news.updated.model.endog_names
if isinstance(endog_names, str):
endog_names = [endog_names]
desired_ix = updates_index.to_frame().reset_index(drop=True)
desired_iloc = desired_ix.copy()
desired_iloc['update date'] = [
dates.get_loc(date) for date in desired_ix['update date']]
desired_iloc['updated variable'] = [
endog_names.index(name)
for name in desired_ix['updated variable']]
assert_(news.updates_iloc.equals(desired_iloc))
assert_(news.updates_ix.equals(desired_ix))
def check_news_indices(news, updates_index, impact_dates):
# News are computed only from updates, so the news indices are the same as
# the update indices
if len(updates_index):
news_index = updates_index
else:
news_index = pd.MultiIndex.from_product(
[[], []], names=['update date', 'updated variable'])
endog_names = news.previous.model.endog_names
if isinstance(endog_names, str):
endog_names = [endog_names]
assert_(news.news.index.equals(news_index))
assert_(news.update_forecasts.index.equals(news_index))
assert_(news.update_realized.index.equals(news_index))
assert_(news.weights.index.equals(news_index))
weights_columns = pd.MultiIndex.from_product([impact_dates, endog_names])
assert_(news.weights.columns.equals(weights_columns))
def check_news(news, revisions, updates, impact_dates, impacted_variables,
revisions_index, updates_index,
revision_impacts, update_impacts,
prev_impacted_forecasts, post_impacted_forecasts,
update_forecasts, update_realized, news_desired, weights):
# Note: we use atol=1e-12 to handle cases where impacts, etc. are equal to
# zero, but numerical precision of the Kalman filter procedures gives an
# answer of e.g. 1e-16.
# Note: Here we set the tolerance to be slightly negative, since some of
# the tests have weights or impacts exactly equal to zero, while we still
# want to include those in tests.
news.tolerance = -1e-10
# - Indexes --------------------------------------------------------------
# Index of impacts
check_impact_indices(news, impact_dates, impacted_variables)
    # Revision indices
check_revision_indices(news, revisions_index)
# Update indices
check_update_indices(news, updates_index)
# News indices
check_news_indices(news, updates_index, impact_dates)
# - Impacts --------------------------------------------------------------
if updates:
assert_allclose(news.update_impacts, update_impacts, atol=1e-12)
else:
assert_(np.all(news.update_impacts.isnull()))
# Impacts from revisions
if revisions:
assert_allclose(news.revision_impacts, revision_impacts, atol=1e-12)
else:
assert_(news.news_results.revision_impacts is None)
assert_(np.all(news.revision_impacts.isnull()))
# Total impacts
total_impacts = (news.revision_impacts.fillna(0) +
news.update_impacts.fillna(0))
assert_allclose(news.total_impacts, total_impacts, atol=1e-12)
# - Impacted variable forecasts ------------------------------------------
assert_allclose(news.prev_impacted_forecasts, prev_impacted_forecasts,
atol=1e-12)
assert_allclose(news.post_impacted_forecasts, post_impacted_forecasts,
atol=1e-12)
# - News -----------------------------------------------------------------
assert_allclose(news.update_forecasts, update_forecasts, atol=1e-12)
assert_allclose(news.update_realized, update_realized, atol=1e-12)
# The "news" is simply the forecast error
assert_allclose(news.news, news_desired, atol=1e-12)
# The weight is zero on previously known data, and is geometrically
# declining (according to the AR parameter) in the forecast period
assert_allclose(news.weights, weights, atol=1e-12)
# - Table: data revisions ------------------------------------------------
assert_equal(news.data_revisions.columns.tolist(),
['observed (prev)', 'revised'])
assert_equal(news.data_revisions.index.names,
['revision date', 'revised variable'])
assert_(news.data_revisions.index.equals(revisions_index))
# - Table: data updates --------------------------------------------------
assert_equal(news.data_updates.columns.tolist(),
['observed', 'forecast (prev)'])
assert_equal(news.data_updates.index.names,
['update date', 'updated variable'])
assert_(news.data_updates.index.equals(news.news.index))
assert_allclose(news.data_updates['forecast (prev)'],
news.update_forecasts, atol=1e-12)
assert_allclose(news.data_updates['observed'], news.update_realized,
atol=1e-12)
# - Table: details_by_impact ---------------------------------------------
details_by_impact = news.details_by_impact
desired = ['observed', 'forecast (prev)', 'news', 'weight', 'impact']
assert_equal(details_by_impact.columns.tolist(), desired)
desired = ['impact date', 'impacted variable',
'update date', 'updated variable']
assert_equal(details_by_impact.index.names, desired)
if updates:
actual = (news.details_by_impact['forecast (prev)']
.drop_duplicates()
.reset_index([0, 1])['forecast (prev)'])
assert_allclose(actual, news.update_forecasts, atol=1e-12)
actual = (news.details_by_impact['observed']
.drop_duplicates().reset_index([0, 1])['observed'])
assert_allclose(actual, news.update_realized, atol=1e-12)
actual = (news.details_by_impact['news']
.drop_duplicates().reset_index([0, 1])['news'])
assert_allclose(actual, news.news, atol=1e-12)
# Weights
assert_allclose(details_by_impact['weight'].unstack([0, 1]),
news.weights, atol=1e-12)
# Impact of news
actual = (news.details_by_impact['impact']
.unstack([2, 3]).sum(axis=1).unstack(1))
assert_allclose(actual, news.update_impacts, atol=1e-12)
# - Table: details_by_update ---------------------------------------------
details_by_update = news.details_by_update
desired = ['news', 'weight', 'impact']
assert_equal(details_by_update.columns.tolist(), desired)
desired = ['update date', 'updated variable', 'observed',
'forecast (prev)', 'impact date', 'impacted variable']
assert_equal(details_by_update.index.names, desired)
if updates:
# News
# Special case for Pandas = 0.23, see above
actual = (news.details_by_update['news']
.drop_duplicates().reset_index([2, 3, 4, 5])['news'])
assert_allclose(actual, news.news, atol=1e-12)
# Weights
assert_allclose(news.details_by_update['weight'].unstack([4, 5]),
news.weights, atol=1e-12)
# Impact of news
actual = (news.details_by_update['impact']
.unstack([4, 5]).sum(axis=0).unstack(1))
assert_allclose(actual, news.update_impacts, atol=1e-12)
# - Table: impacts -------------------------------------------------------
impacts = news.impacts
desired = ['estimate (prev)', 'impact of revisions', 'impact of news',
'total impact', 'estimate (new)']
assert_equal(impacts.columns.tolist(), desired)
desired = ['impact date', 'impacted variable']
assert_equal(impacts.index.names, desired)
assert_allclose(impacts.loc[:, 'estimate (prev)'],
news.prev_impacted_forecasts.stack(), atol=1e-12)
assert_allclose(impacts.loc[:, 'impact of revisions'],
news.revision_impacts.fillna(0).stack(), atol=1e-12)
assert_allclose(impacts.loc[:, 'impact of news'],
news.update_impacts.fillna(0).stack(), atol=1e-12)
assert_allclose(impacts.loc[:, 'total impact'],
news.total_impacts.stack(), atol=1e-12)
assert_allclose(impacts.loc[:, 'estimate (new)'],
news.post_impacted_forecasts.stack(), atol=1e-12)
@pytest.mark.parametrize('revisions', [True, False])
@pytest.mark.parametrize('updates', [True, False])
def test_sarimax_time_invariant(revisions, updates):
# Construct previous and updated datasets
endog = dta['infl'].copy()
comparison_type = None
if updates:
endog1 = endog.loc[:'2009Q2'].copy()
endog2 = endog.loc[:'2009Q3'].copy()
else:
endog1 = endog.loc[:'2009Q3'].copy()
endog2 = endog.loc[:'2009Q3'].copy()
# Without updates and without NaN values, we need to specify that
# the type of the comparison object that we're passing is "updated"
comparison_type = 'updated'
if revisions:
endog1.iloc[-1] = 0.
# Get the previous results object and compute the news
mod = sarimax.SARIMAX(endog1)
res = mod.smooth([0.5, 1.0])
news = res.news(endog2, start='2009Q2', end='2010Q1',
comparison_type=comparison_type)
# Compute the true values for each combination of (revisions, updates)
impact_dates = pd.period_range(start='2009Q2', end='2010Q1', freq='Q')
impacted_variables = ['infl']
# Revisions
if revisions and updates:
revisions_index = pd.MultiIndex.from_arrays(
[endog1.index[-1:], ['infl']],
names=['revision date', 'revised variable'])
# If we have updates, the revision is to 2009Q2
revision_impacts = endog2.iloc[-2] * 0.5**np.arange(4).reshape(4, 1)
elif revisions:
revisions_index = pd.MultiIndex.from_arrays(
[endog1.index[-1:], ['infl']],
names=['revision date', 'revised variable'])
# With no updates, the revision is to 2009Q3
revision_impacts = np.r_[
0, endog2.iloc[-1] * 0.5**np.arange(3)].reshape(4, 1)
else:
revisions_index = pd.MultiIndex.from_arrays(
[[], []], names=['revision date', 'revised variable'])
revision_impacts = None
# Updates
if updates:
updates_index = pd.MultiIndex.from_arrays(
[pd.period_range(start='2009Q3', periods=1, freq='Q'), ['infl']],
names=['update date', 'updated variable'])
update_impacts = np.array([[
0, endog.loc['2009Q3'] - 0.5 * endog.loc['2009Q2'],
0.5 * endog.loc['2009Q3'] - 0.5**2 * endog.loc['2009Q2'],
0.5**2 * endog.loc['2009Q3'] - 0.5**3 * endog.loc['2009Q2']]]).T
else:
updates_index = pd.MultiIndex.from_arrays(
[[], []], names=['update date', 'updated variable'])
update_impacts = None
# Impact forecasts
if updates:
prev_impacted_forecasts = np.r_[
endog1.iloc[-1] * 0.5**np.arange(4)].reshape(4, 1)
else:
prev_impacted_forecasts = np.r_[
endog1.iloc[-2], endog1.iloc[-1] * 0.5**np.arange(3)].reshape(4, 1)
post_impacted_forecasts = np.r_[
endog2.iloc[-2], 0.5 ** np.arange(3) * endog2.iloc[-1]].reshape(4, 1)
# News
if updates:
# Note: update_forecasts is created using the endog2 dataset even if
# there were revisions, because it should be computed after revisions
# have already been taken into account
update_forecasts = [0.5 * endog2.loc['2009Q2']]
update_realized = [endog2.loc['2009Q3']]
news_desired = [update_realized[i] - update_forecasts[i]
for i in range(len(update_forecasts))]
weights = pd.DataFrame(np.r_[0, 0.5**np.arange(3)]).T
else:
update_forecasts = pd.Series([], dtype=np.float64)
update_realized = pd.Series([], dtype=np.float64)
news_desired = pd.Series([], dtype=np.float64)
weights = pd.DataFrame(np.zeros((0, 4)))
# Run unit tests
check_news(news, revisions, updates, impact_dates, impacted_variables,
revisions_index, updates_index,
revision_impacts, update_impacts,
prev_impacted_forecasts, post_impacted_forecasts,
update_forecasts, update_realized, news_desired, weights)
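# Illustrative note (not part of the original test): for an AR(1) model
# y_t = phi * y_{t-1} + e_t with phi = 0.5, the h-step-ahead forecast of
# y_{T+h} is phi**h * y_T. This is why the update impacts and news weights
# constructed above decay geometrically in powers of 0.5 across the impact
# dates, with a weight of zero on the already-observed 2009Q2 value.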
@pytest.mark.parametrize('revisions', [True, False])
@pytest.mark.parametrize('updates', [True, False])
@pytest.mark.parametrize('which', ['exog', 'trend'])
def test_sarimax_time_varying(revisions, updates, which):
# This is primarily a test that the `news` method works with a time-varying
# setup (i.e. time-varying state space matrices). It tests a time-varying
# SARIMAX model where the time-varying component has been set to zeros
# against a time-invariant version of the model.
# Construct previous and updated datasets
endog = dta['infl'].copy()
comparison_type = None
if updates:
endog1 = endog.loc[:'2009Q2'].copy()
endog2 = endog.loc[:'2009Q3'].copy()
else:
endog1 = endog.loc[:'2009Q3'].copy()
endog2 = endog.loc[:'2009Q3'].copy()
# Without updates and without NaN values, we need to specify that
# the type of the comparison object that we're passing is "updated"
comparison_type = 'updated'
if revisions:
endog1.iloc[-1] = 0.
exog1 = None
exog2 = None
trend = 'n'
if which == 'exog':
exog1 = np.ones_like(endog1)
exog2 = np.ones_like(endog2)
elif which == 'trend':
trend = 't'
# Compute the news from a model with a trend/exog term (so the model is
# time-varying), but with the coefficient set to zero (so that it will be
# equivalent to the time-invariant model)
mod1 = sarimax.SARIMAX(endog1, exog=exog1, trend=trend)
res1 = mod1.smooth([0., 0.5, 1.0])
news1 = res1.news(endog2, exog=exog2, start='2008Q1', end='2009Q3',
comparison_type=comparison_type)
# Compute the news from a model without a trend term
mod2 = sarimax.SARIMAX(endog1)
res2 = mod2.smooth([0.5, 1.0])
news2 = res2.news(endog2, start='2008Q1', end='2009Q3',
comparison_type=comparison_type)
attrs = ['total_impacts', 'update_impacts', 'revision_impacts', 'news',
'weights', 'update_forecasts', 'update_realized',
'prev_impacted_forecasts', 'post_impacted_forecasts',
'revisions_iloc', 'revisions_ix', 'updates_iloc', 'updates_ix']
for attr in attrs:
w = getattr(news1, attr)
x = getattr(news2, attr)
if isinstance(x, pd.Series):
assert_series_equal(w, x)
else:
assert_frame_equal(w, x)
@pytest.mark.parametrize('revisions', [True, False])
@pytest.mark.parametrize('updates', [True, False])
def test_unobserved_components_time_varying(revisions, updates):
# This is primarily a test that the `news` method works with a time-varying
# setup (i.e. time-varying state space matrices). It tests a time-varying
# UnobservedComponents model where the time-varying component has been set
# to zeros against a time-invariant version of the model.
# Construct previous and updated datasets
endog = dta['infl'].copy()
comparison_type = None
if updates:
endog1 = endog.loc[:'2009Q2'].copy()
endog2 = endog.loc[:'2009Q3'].copy()
else:
endog1 = endog.loc[:'2009Q3'].copy()
endog2 = endog.loc[:'2009Q3'].copy()
# Without updates and without NaN values, we need to specify that
# the type of the comparison object that we're passing is "updated"
comparison_type = 'updated'
if revisions:
endog1.iloc[-1] = 0.
exog1 = np.ones_like(endog1)
exog2 = np.ones_like(endog2)
# Compute the news from a model with a trend/exog term (so the model is
# time-varying), but with the coefficient set to zero (so that it will be
# equivalent to the time-invariant model)
mod1 = structural.UnobservedComponents(endog1, 'llevel', exog=exog1)
res1 = mod1.smooth([0.5, 0.2, 0.0])
news1 = res1.news(endog2, exog=exog2, start='2008Q1', end='2009Q3',
comparison_type=comparison_type)
# Compute the news from a model without a trend term
mod2 = structural.UnobservedComponents(endog1, 'llevel')
res2 = mod2.smooth([0.5, 0.2])
news2 = res2.news(endog2, start='2008Q1', end='2009Q3',
comparison_type=comparison_type)
attrs = ['total_impacts', 'update_impacts', 'revision_impacts', 'news',
'weights', 'update_forecasts', 'update_realized',
'prev_impacted_forecasts', 'post_impacted_forecasts',
'revisions_iloc', 'revisions_ix', 'updates_iloc', 'updates_ix']
for attr in attrs:
w = getattr(news1, attr)
x = getattr(news2, attr)
if isinstance(x, pd.Series):
assert_series_equal(w, x)
else:
assert_frame_equal(w, x)
@pytest.mark.parametrize('revisions', [True, False])
@pytest.mark.parametrize('updates', [True, False])
def test_varmax_time_invariant(revisions, updates):
# Construct previous and updated datasets
endog = dta[['realgdp', 'unemp']].copy()
endog['realgdp'] = np.log(endog['realgdp']).diff() * 400
endog = endog.iloc[1:]
comparison_type = None
if updates:
endog1 = endog.loc[:'2009Q2'].copy()
endog2 = endog.loc[:'2009Q3'].copy()
else:
endog1 = endog.loc[:'2009Q3'].copy()
endog2 = endog.loc[:'2009Q3'].copy()
# Without updates and without NaN values, we need to specify that
# the type of the comparison object that we're passing is "updated"
comparison_type = 'updated'
if revisions:
# TODO: add test for only one of the variables revising?
endog1.iloc[-1] = 0.
# Get the previous results object and compute the news
mod = varmax.VARMAX(endog1, trend='n')
params = np.r_[0.5, 0.1, 0.2, 0.9, 1., 0.1, 1.1]
res = mod.smooth(params)
news = res.news(endog2, start='2009Q2', end='2010Q1',
comparison_type=comparison_type)
# Compute the true values for each combination of (revisions, updates)
impact_dates = pd.period_range(start='2009Q2', end='2010Q1', freq='Q')
impacted_variables = ['realgdp', 'unemp']
# Matrix powers of transition matrix
Z = np.zeros((2, 2))
T0 = np.eye(2)
T1 = mod['transition']
T2 = T1 @ T1
T3 = T1 @ T2
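# Illustrative note (not part of the original test): for a VAR(1), the
# h-step-ahead forecast of y_{T+h} given y_T is the h-th matrix power of the
# transition matrix applied to y_T, so T0..T3 play the same role here as the
# scalar powers of 0.5 in the SARIMAX tests above.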
# Revisions
if revisions and updates:
revisions_index = pd.MultiIndex.from_product(
[endog1.index[-1:], ['realgdp', 'unemp']],
names=['revision date', 'revised variable'])
# If we have updates, the revision is to 2009Q2
# Note: this ".values" and all of those below are only required for
# Pandas = 0.23, and can be removed once that is no longer a supported
# dependency
tmp = endog2.iloc[-2].values
revision_impacts = np.c_[T0 @ tmp, T1 @ tmp, T2 @ tmp, T3 @ tmp].T
elif revisions:
revisions_index = pd.MultiIndex.from_product(
[endog1.index[-1:], ['realgdp', 'unemp']],
names=['revision date', 'revised variable'])
# With no updates, the revision is to 2009Q3
tmp = endog2.iloc[-1].values
revision_impacts = np.c_[Z @ tmp, T0 @ tmp, T1 @ tmp, T2 @ tmp].T
else:
revisions_index = pd.MultiIndex.from_product(
[[], []],
names=['revision date', 'revised variable'])
revision_impacts = None
# Impact forecasts
if updates:
tmp = endog1.iloc[-1].values
prev_impacted_forecasts = np.c_[T0 @ tmp, T1 @ tmp,
T2 @ tmp, T3 @ tmp].T
tmp = endog2.iloc[-2].values
rev_impacted_forecasts = np.c_[T0 @ tmp, T1 @ tmp,
T2 @ tmp, T3 @ tmp].T
else:
tmp = endog1.iloc[-1].values
prev_impacted_forecasts = np.c_[
T0 @ endog1.iloc[-2], T0 @ tmp, T1 @ tmp, T2 @ tmp].T
tmp = endog2.iloc[-1].values
rev_impacted_forecasts = np.c_[
T0 @ endog2.iloc[-2], T0 @ tmp, T1 @ tmp, T2 @ tmp].T
tmp = endog2.iloc[-1].values
post_impacted_forecasts = np.c_[
T0 @ endog2.iloc[-2], T0 @ tmp, T1 @ tmp, T2 @ tmp].T
# Updates
if updates:
updates_index = pd.MultiIndex.from_product(
[pd.period_range(start='2009Q3', periods=1, freq='Q'),
['realgdp', 'unemp']], names=['update date', 'updated variable'])
update_impacts = post_impacted_forecasts - rev_impacted_forecasts
else:
updates_index = pd.MultiIndex.from_product(
[[], []], names=['update date', 'updated variable'])
update_impacts = None
# News
if updates:
# Note: update_forecasts is created using the endog2 dataset even if
# there were revisions, because it should be computed after revisions
# have already been taken into account
update_forecasts = T1 @ endog2.loc['2009Q2'].values
update_realized = endog2.loc['2009Q3'].values
news_desired = [update_realized[i] - update_forecasts[i]
for i in range(len(update_forecasts))]
columns = pd.MultiIndex.from_product(
[impact_dates, impacted_variables],
names=['impact dates', 'impacted variables'])
weights = pd.DataFrame(np.zeros((2, 8)), index=updates_index,
columns=columns)
weights.loc[:, '2009Q2'] = Z
weights.loc[:, '2009Q3'] = T0
weights.loc[:, '2009Q4'] = T1.T
weights.loc[:, '2010Q1'] = T2.T
else:
update_forecasts = pd.Series([], dtype=np.float64)
update_realized = pd.Series([], dtype=np.float64)
news_desired =
|
pd.Series([], dtype=np.float64)
|
pandas.Series
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
""" test get/set & misc """
import pytest
from datetime import timedelta
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas import (Series, DataFrame, MultiIndex,
Timestamp, Timedelta, Categorical)
from pandas.tseries.offsets import BDay
from pandas.compat import lrange, range
from pandas.util.testing import (assert_series_equal)
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestMisc(TestData):
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
assert result == 4
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
assert self.series[idx1] == self.series.get(idx1)
assert self.objSeries[idx2] == self.objSeries.get(idx2)
assert self.series[idx1] == self.series[5]
assert self.objSeries[idx2] == self.objSeries[5]
assert self.series.get(-1) == self.series.get(self.series.index[-1])
assert self.series[5] == self.series.get(self.series.index[5])
# missing
d = self.ts.index[0] - BDay()
pytest.raises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
assert result is None
def test_getitem_int64(self):
idx = np.int64(5)
assert self.ts[idx] == self.ts[5]
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
assert self.series.index[2] == slice1.index[1]
assert self.objSeries.index[2] == slice2.index[1]
assert self.series[2] == slice1[1]
assert self.objSeries[2] == slice2[1]
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
@pytest.mark.parametrize(
'result_1, duplicate_item, expected_1',
[
[
pd.Series({1: 12, 2: [1, 2, 2, 3]}), pd.Series({1: 313}),
pd.Series({1: 12, }, dtype=object),
],
[
pd.Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
pd.Series({1: [1, 2, 3]}), pd.Series({1: [1, 2, 3], }),
],
])
def test_getitem_with_duplicates_indices(
self, result_1, duplicate_item, expected_1):
# GH 17610
result = result_1.append(duplicate_item)
expected = expected_1.append(duplicate_item)
assert_series_equal(result[1], expected)
assert result[2] == result_1[2]
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
pytest.raises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
pytest.raises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
assert s.iloc[0] == s['a']
s.iloc[0] = 5
tm.assert_almost_equal(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
assert isinstance(value, np.float64)
def test_series_box_timestamp(self):
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng)
assert isinstance(ser[5], pd.Timestamp)
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng, index=rng)
assert isinstance(ser[5], pd.Timestamp)
assert isinstance(ser.iat[5], pd.Timestamp)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
pytest.raises(KeyError, s.__getitem__, 1)
pytest.raises(KeyError, s.loc.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
assert is_scalar(obj['c'])
assert obj['c'] == 0
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .loc internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
expected = s.loc[['foo', 'bar', 'bah', 'bam']]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
pytest.raises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
assert result == s.loc['A']
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.loc[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
assert self.series.index[9] not in numSlice.index
assert self.objSeries.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert self.series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == self.series.index[11]
assert tm.equalContents(numSliceEnd, np.array(self.series)[-10:])
# Test return view.
sl = self.series[10:20]
sl[:] = 0
assert (self.series[10:20] == 0).all()
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
assert np.isnan(self.ts[6])
assert np.isnan(self.ts[2])
self.ts[np.isnan(self.ts)] = 5
assert not np.isnan(self.ts[2])
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
assert (series[::2] == 0).all()
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = self.ts.set_value(idx, 0)
assert res is self.ts
assert self.ts[idx] == 0
# equiv
s = self.series.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = s.set_value('foobar', 0)
assert res is s
assert res.index[-1] == 'foobar'
assert res['foobar'] == 0
s = self.series.copy()
s.loc['foobar'] = 0
assert s.index[-1] == 'foobar'
assert s['foobar'] == 0
def test_setslice(self):
sl = self.ts[5:20]
assert len(sl) == len(sl.index)
assert sl.index.is_unique
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
pytest.raises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
pytest.raises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.loc[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
assert result == expected
result = s.iloc[0]
assert result == expected
result = s['a']
assert result == expected
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00-04:00', tz=tz),
pd.Timestamp('2011-01-01 00:00-05:00', tz=tz),
pd.Timestamp('2016-11-06 01:00-05:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_categorial_assigning_ops(self):
orig = Series(Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(Categorical(["b", "a"], categories=["a", "b"]),
index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3], categories=[1, 2, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_take(self):
s = Series([-1, 5, 6, 2, 4])
actual = s.take([1, 3, 4])
expected = Series([5, 2, 4], index=[1, 3, 4])
tm.assert_series_equal(actual, expected)
actual = s.take([-1, 3, 4])
expected = Series([4, 2, 4], index=[4, 3, 4])
tm.assert_series_equal(actual, expected)
pytest.raises(IndexError, s.take, [1, 10])
pytest.raises(IndexError, s.take, [2, 5])
with tm.assert_produces_warning(FutureWarning):
s.take([-1, 3, 4], convert=False)
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.loc[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.iloc[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.loc[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.loc[d1] = 4
self.series.loc[d2] = 6
assert self.series[d1] == 4
assert self.series[d2] == 6
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
pytest.raises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame({c: [1, 2, 3] for c in ['a', 'b', 'c']})
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
assert df['bb'].iloc[0] == 0.15
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
assert not np.isnan(self.ts[10])
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
pytest.raises(KeyError, s.drop, 'bc')
pytest.raises(KeyError, s.drop, ('a',))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.iloc[1:]
assert_series_equal(result, expected)
# bad axis
pytest.raises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
assert s.index.is_object()
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
# GH 16877
s = Series([2, 3], index=[0, 1])
with
|
tm.assert_raises_regex(KeyError, 'not contained in axis')
|
pandas.util.testing.assert_raises_regex
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from sklearn.linear_model import LinearRegression
import numpy as np
import datetime as dt
import pandas as pd
import statsmodels.tsa.stattools as ts
class Detrend():
"""
Only for daily data.
1. remove trend and periodicty of time series.
methods
[1]: two-step method
(1) First, using timestep to fit time seires,like y = at+b,
get the detrend time series, i.e., b.
(2) Second, get climatology average of each feature, and remove it
from detrend time series, i.e., b = x + db.
[2]: fourier method
(1) First, construct cos and sin terms of periodicity(e.g., annual,
seasonal) to represent periodicity of variables. record as X
(2) Second, X fit Y using linear models(e.g., GLM) and nonlinear models
(e.g., RF) to remove linearity and nonlinearity of periodictiy
2. stability test using Augmented Dickey–Fuller test
***TODO: dataframe interface need to be constructed
paramters
_________
target_data: array-like, (n_length, n_features)
begin_date: begin date of target_data, ***TODO: auto define method need
type as "YYYY-MM-DD"
end_date: end date of target_data
attributes
__________
_detrend_seasonal_t: time series remove seasonality
_detrend_t: time series remove trending
_adf_value: result of ADF test
"""
def __init__(self,
target_data,
begin_date='2019-05-30',
end_date='2019-06-04'):
self.target_data = target_data
self.begin_date = begin_date
self.end_date = end_date
def _detrend(self, target_data,
begin_date,
end_date):
"""
only for daily data.
1. default method. [1]
2. fourier method used in [2]
[1] P<NAME>., <NAME>., <NAME>.,
<NAME>., <NAME>., <NAME>., & <NAME>. (2017).
A non-linear Granger-causality framework to investigate
climate-vegetation dynamics.
Geoscientific Model Development, 10(5), 1945-1960.
[2] <NAME>., & <NAME>. (2016).
Empirical evidence of contrasting soil moisture–precipitation
feedbacks across the United States.
Science, 352(6287), 825-828.
"""
# [1]
# detrend timesteps
_detrend_t, _trend_t = self._detrend_timestep(target_data)
# remove seasonality
_detrend_seasonal_t, _seasonal_avg = self._detrend_seasonality(
_detrend_t, begin_date, end_date)
# plot module
# plt.figure()
# plt.subplot(2,2,1)
# plt.plot(target_data[:,1],label='raw data')
# plt.plot(_trend_t[:,1],label='trend_t')
# plt.legend(loc='best')
#
# plt.subplot(2,2,2)
# plt.plot(_detrend_t[:,1],label='detrend_t')
# plt.legend(loc='best')
#
# plt.subplot(2,2,3)
# plt.plot(_seasonal_avg[:,1],label='seasonality')
# plt.legend(loc='best')
#
# plt.subplot(2,2,4)
# plt.plot(_detrend_seasonal_t[:,1],label='de_seasonality')
# plt.legend(loc='best')
# adf test
_adf_value = self._adf_test(_detrend_seasonal_t)
# if the series pass the ADF test after removing trend and seasonality, return them
# if not, they need to be differenced
if sum(np.array(_adf_value)-1) == 0:
print() # return _detrend_seasonal_t
else:
print("Time series aren't stationary! \nNeed to be differenced")
return _detrend_seasonal_t
"""
TODO: [2] need to be developed
"""
def _detrend_timestep(self,
target_data,
default=None):
"""
remove the trend from the time series.
default uses linear regression.
"""
if default is None:
# set the timestep index t used as a predictor variable.
_t = np.array(range(len(target_data))).reshape(-1, 1)
# fit raw time series using t and remove trending
_detrend_t = target_data - \
LinearRegression().fit(_t, target_data).predict(_t)
_trend_t = LinearRegression().fit(_t, target_data).predict(_t)
# plt.plot(_detrend_t[:,0])
return _detrend_t, _trend_t
else:
"""
TODO: nonlinear method needs improvement
"""
print('Sorry, lilu love panjinjing ')
def _detrend_seasonality(self,
target_data,
begin_date,
end_date,
type_reg=None,
year_length=366):
"""
remove the seasonality from the time series.
parameters:
__________
target_data: matrix of time series from which to remove seasonality,
as a T x N matrix
begin_date: begin date of time series. as '2019-05-30'
end_date: end date of time series. as '2019-05-30'
type_reg: regression type, either linear or nonlinear.
default is the linear method shown in _detrend [1] above.
TODO: nonlinear method needs improvement
year_length: length of year used to get the average seasonality.
default of 366 accounts for leap years.
Attributes:
___________
_detrend_seasonal: matrix of time series after removing seasonality
shape is the same as target_data. as T x N matrix
_seasonal_avg: matrix of seasonality.
shape is year_length x N.
"""
# create DatetimeIndex array as '2019-05-30'...'2019-10-04'
dates = self._date_array(begin_date, end_date)
# calculate the corresponding day in each year of the time series
jd = self._date_jd(dates)
# shape of target data
_T, _N = np.shape(target_data)
# Initialize the array to contain the de-periodicized time series
_detrend_seasonal = np.zeros((_T, _N))
_seasonal_avg = np.zeros((year_length, _N))
# main loop for 1,2,3,...,366
for j in range(1, year_length+1):
# collect the indices of day-of-year j across the time series
_jd_list = [i for i, value in enumerate(jd) if value == j]
# subtract the day-of-year j average from the series (deseasonalize)
_detrend_seasonal[_jd_list, :] = target_data[_jd_list, :] - \
np.mean(target_data[_jd_list, :], 0)
# seasonal average time series for plot
_seasonal_avg[j-1, :] = np.mean(target_data[_jd_list, :], 0)
return _detrend_seasonal, _seasonal_avg
def _date_array(self, begin_date, end_date):
"""
create DatetimeIndex array as '2019-05-30','2019-05-31',...
"""
# Initialize the list from begin_date to end_date
dates = []
# Initialize the datetime object that will be appended to the dates array
_dates = dt.datetime.strptime(begin_date, "%Y-%m-%d")
# Initialize the date string used to decide when to break the loop
_date = begin_date[:]
# main loop
while _date <= end_date:
# append the current date to the array
dates.append(_dates)
# advance the date by one day
_dates = _dates + dt.timedelta(1)
# update the loop-condition string
_date = _dates.strftime("%Y-%m-%d")
return dates
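# Note (illustrative alternative, not in the original code): for daily data the
# same list can be built in one call, e.g.
# dates = pd.date_range(begin_date, end_date, freq='D').to_pydatetime().tolist()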
def _date_jd(self, dates):
"""
create a list of the corresponding day in each year of the time series
"""
# convert the dates array to pandas datetime format
dates =
|
pd.to_datetime(dates, format='%Y%m%d')
|
pandas.to_datetime
|
from datetime import datetime
import numpy as np
import pytest
from pandas import Series, _testing as tm
def test_title():
values = Series(["FOO", "BAR", np.nan, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", np.nan, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0])
mixed = mixed.str.title()
exp = Series(["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", np.nan, np.nan, np.nan])
tm.assert_almost_equal(mixed, exp)
def test_lower_upper():
values = Series(["om", np.nan, "nom", "nom"])
result = values.str.upper()
exp = Series(["OM", np.nan, "NOM", "NOM"])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(["a", np.nan, "b", np.nan, np.nan, "foo", np.nan, np.nan, np.nan])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
def test_capitalize():
values = Series(["FOO", "BAR", np.nan, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", np.nan, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0])
mixed = mixed.str.capitalize()
exp = Series(["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", np.nan, np.nan, np.nan])
tm.assert_almost_equal(mixed, exp)
def test_swapcase():
values = Series(["FOO", "BAR", np.nan, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", np.nan, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", np.nan, "bar", True, datetime.today(), "Blah", None, 1, 2.0])
mixed = mixed.str.swapcase()
exp = Series(["foo", np.nan, "BAR", np.nan, np.nan, "bLAH", np.nan, np.nan, np.nan])
tm.assert_almost_equal(mixed, exp)
def test_casemethods():
values = ["aaa", "bbb", "CCC", "Dddd", "eEEE"]
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_pad():
values = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"])
result = values.str.pad(5, side="left")
exp = Series([" a", " b", np.nan, " c", np.nan, "eeeeee"])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side="right")
exp = Series(["a ", "b ", np.nan, "c ", np.nan, "eeeeee"])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side="both")
exp = Series([" a ", " b ", np.nan, " c ", np.nan, "eeeeee"])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(["a", np.nan, "b", True, datetime.today(), "ee", None, 1, 2.0])
rs = Series(mixed).str.pad(5, side="left")
xp = Series(
[" a", np.nan, " b", np.nan, np.nan, " ee", np.nan, np.nan, np.nan]
)
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(["a", np.nan, "b", True, datetime.today(), "ee", None, 1, 2.0])
rs = Series(mixed).str.pad(5, side="right")
xp = Series(
["a ", np.nan, "b ", np.nan, np.nan, "ee ", np.nan, np.nan, np.nan]
)
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(["a", np.nan, "b", True, datetime.today(), "ee", None, 1, 2.0])
rs = Series(mixed).str.pad(5, side="both")
xp = Series(
[" a ", np.nan, " b ", np.nan, np.nan, " ee ", np.nan, np.nan, np.nan]
)
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_pad_fillchar():
values = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"])
result = values.str.pad(5, side="left", fillchar="X")
exp = Series(["XXXXa", "XXXXb", np.nan, "XXXXc", np.nan, "eeeeee"])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side="right", fillchar="X")
exp = Series(["aXXXX", "bXXXX", np.nan, "cXXXX", np.nan, "eeeeee"])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side="both", fillchar="X")
exp = Series(["XXaXX", "XXbXX", np.nan, "XXcXX", np.nan, "eeeeee"])
tm.assert_almost_equal(result, exp)
msg = "fillchar must be a character, not str"
with pytest.raises(TypeError, match=msg):
result = values.str.pad(5, fillchar="XY")
msg = "fillchar must be a character, not int"
with pytest.raises(TypeError, match=msg):
result = values.str.pad(5, fillchar=5)
@pytest.mark.parametrize("f", ["center", "ljust", "rjust", "zfill", "pad"])
def test_pad_width(f):
# see gh-13598
s = Series(["1", "22", "a", "bb"])
msg = "width must be of integer type, not*"
with pytest.raises(TypeError, match=msg):
getattr(s.str, f)("f")
def test_center_ljust_rjust():
values = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"])
result = values.str.center(5)
exp = Series([" a ", " b ", np.nan, " c ", np.nan, "eeeeee"])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(["a ", "b ", np.nan, "c ", np.nan, "eeeeee"])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([" a", " b", np.nan, " c", np.nan, "eeeeee"])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(["a", np.nan, "b", True, datetime.today(), "c", "eee", None, 1, 2.0])
rs = Series(mixed).str.center(5)
xp = Series(
[
" a ",
np.nan,
" b ",
np.nan,
np.nan,
" c ",
" eee ",
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(
[
"a ",
np.nan,
"b ",
np.nan,
np.nan,
"c ",
"eee ",
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series(
[
" a",
np.nan,
" b",
np.nan,
np.nan,
" c",
" eee",
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_center_ljust_rjust_fillchar():
values = Series(["a", "bb", "cccc", "ddddd", "eeeeee"])
result = values.str.center(5, fillchar="X")
expected = Series(["XXaXX", "XXbbX", "Xcccc", "ddddd", "eeeeee"])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, "X") for v in values.values], dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar="X")
expected = Series(["aXXXX", "bbXXX", "ccccX", "ddddd", "eeeeee"])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, "X") for v in values.values], dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar="X")
expected = Series(["XXXXa", "XXXbb", "Xcccc", "ddddd", "eeeeee"])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, "X") for v in values.values], dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
# If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
template = "fillchar must be a character, not {dtype}"
with pytest.raises(TypeError, match=template.format(dtype="str")):
values.str.center(5, fillchar="XY")
with pytest.raises(TypeError, match=template.format(dtype="str")):
values.str.ljust(5, fillchar="XY")
with pytest.raises(TypeError, match=template.format(dtype="str")):
values.str.rjust(5, fillchar="XY")
with pytest.raises(TypeError, match=template.format(dtype="int")):
values.str.center(5, fillchar=1)
with pytest.raises(TypeError, match=template.format(dtype="int")):
values.str.ljust(5, fillchar=1)
with pytest.raises(TypeError, match=template.format(dtype="int")):
values.str.rjust(5, fillchar=1)
def test_zfill():
values = Series(["1", "22", "aaa", "333", "45678"])
result = values.str.zfill(5)
expected = Series(["00001", "00022", "00aaa", "00333", "45678"])
|
tm.assert_series_equal(result, expected)
|
pandas._testing.assert_series_equal
|
# Cleaning previously wrangled IRS migration data (need to fill in missing values: res, inmig, outmig)
import pandas as pd
import numpy as np
import math
source = 'C:/users/mahmoud/desktop/projects/migration/'
# IRS migration data source: https://www.irs.gov/statistics/soi-tax-stats-migration-data
file0 = pd.read_csv(source + 'county_migration_91to92.csv')
file1 = pd.read_csv(source + 'county_migration_93to11.csv')
file2 = pd.read_csv(source + 'county_migration_12to18.csv')
combined = pd.concat([file0, file1, file2]).sort_values(['FIPS', 'Year']).reset_index(drop=True)
combined = combined[combined.FIPS != 15005] # Missing almost all values
# Changing Dade County (FL) code to updated Miami-Dade County code
combined.loc[combined.FIPS == 12025, 'FIPS'] = 12086
#combined.loc[combined.FIPS == 12086, 'County'] = 'Miami-Dade'
#-------------------------------------------------------------------------------------------------------------------------------
"""# Adding missing WY 1994 records (replacing w/ avg. of '93 & '95 values)
temp = combined.copy()
wy94 = temp[(temp.FIPS > 56000) & (temp.FIPS < 57000) & (temp.Year == 1993)]
wy94.Year = list(np.full(len(wy94), 1994))
for col in wy94.columns[3:]: # Columns: ['Year', 'County', 'FIPS', 'Residents', 'Inmigrants', 'Outmigrants']
wy94[col] = pd.Series()
wy9395 = temp[(temp.FIPS > 56000) & (temp.FIPS < 57000) & (temp.Year.isin([1993,1995]))] # 93 & 95 Wyoming records
wy = pd.concat([wy9395, wy94]).sort_values(['FIPS', 'Year'])
# Filling missing values (Res, Inmig, Outmig) w/ avg. of 93 & 95 values
for col in wy.columns[3:]:
wy[col] = wy[col].fillna((wy[col].shift() + wy[col].shift(-1))/2).fillna(method='bfill')
wynew = wy[wy.Year == 1994].sort_values('FIPS')
combined = pd.concat([combined, wynew]).sort_values(['FIPS', 'Year']).reset_index(drop=True)"""
#-------------------------------------------------------------------------------------------------------------------------------
# Adding rows if county is missing years (applies to about 40 counties)
new_rows = pd.DataFrame()
missing_fips1 = []
for fips in combined.FIPS.unique():
df = combined[combined.FIPS == fips]
if len(df) < 28: # Complete year range (91-18)
missing_fips1.append(fips)
missing = [yr for yr in range(1991, 2019) if yr not in df.Year.values]
length = len(missing)
fips_new = pd.DataFrame({'Year':list(np.full(length, missing)), 'County':list(np.full(length, df.County.iloc[0])),
'FIPS':list(np.full(length, fips)), 'Residents':list(np.full(length, np.nan)),
'Inmigrants':list(np.full(length, np.nan)), 'Outmigrants':list(np.full(length, np.nan))})
new_rows = pd.concat([new_rows, fips_new])
combined = pd.concat([combined, new_rows]).sort_values(['FIPS', 'Year'])
combined['State'] = combined.FIPS.astype(str).apply(lambda x:x.zfill(5)[:2]) # Needed for filtering in next section
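# e.g. FIPS 4013 (Maricopa County, AZ) -> '04013' -> state prefix '04'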
#-------------------------------------------------------------------------------------------------------------------------------
# Filling in Missing Res, Inmig, & Outmig Values
missing_fips2 = set(list(combined.FIPS[combined.Inmigrants.isnull()].unique()) + list(combined.FIPS[combined.Outmigrants.isnull()].unique()))
not_missing_df = combined[~combined.FIPS.isin(missing_fips2)]
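# Gap-filling rule used below (illustrative toy example, not original code):
# a missing value is replaced by the mean of its neighbours, and any remaining
# edge gaps are back-/forward-filled, e.g.
# s = pd.Series([10, np.nan, 14])
# s.fillna((s.shift() + s.shift(-1)) / 2) # -> [10.0, 12.0, 14.0]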
for fips in missing_fips2:
df = combined[combined.FIPS == fips]
temp_df = df.copy()
# Assigning all nulls if 4 or fewer mig values are available (b/c too few to calculate accurate estimates)
if len(df[df.Inmigrants.isnull()]) > 23 or len(df[df.Outmigrants.isnull()]) > 23:
df['Inmigrants'] = np.full(28, np.nan)
df['Outmigrants'] = np.full(28, np.nan)
df['Residents'] = df['Residents'].fillna((df['Residents'].shift() + df['Residents'].shift(-1))/2).fillna(method='bfill').fillna(method='ffill').round(0).astype(int)
else:
for col in ['Residents', 'Inmigrants', 'Outmigrants']:
df[col] = df[col].fillna((df[col].shift() + df[col].shift(-1))/2).fillna(method='bfill').fillna(method='ffill').round(0).astype(int)
not_missing_df =
|
pd.concat([not_missing_df, df])
|
pandas.concat
|
'''
Manipulate multiple data series together
v0.1 feb 2019
<EMAIL>
'''
from hdlib.data.dataseries.timeseries import *
##############
# Unit tests #
##############
import unittest
from typing import ClassVar, Any, Tuple, Type, List
import pandas as pd
import matplotlib.pyplot as plt
plot : bool = False
seriesFN : str = "unittest1.csv"
#Dates are set at the beginning and end of time series
#seriesDates : Tuple [str, str] = ("2018-8-1 00:00:00", "2018-8-31 23:59:00")
#Dates are set before/after time series
seriesDates : Tuple [str, str] = ("2018-7-25 00:00:00", "2018-9-5 23:59:00")
#Missing samples in August (3830)
marketValuesColumns : List[str] = ['Open','High','Low','Close','Volume']
allColumNames : List[str] = ['UnixTimeStamp','Date','Symbol']+marketValuesColumns
class TestTimeSeries(unittest.TestCase):
"""Unit tests."""
marketDataTS : ClassVar[TimeSeries]
rndts0 : ClassVar[TimeSeries]
rndts1 : ClassVar[TimeSeries]
min : ClassVar[float]
max : ClassVar[float]
'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
#code to be executed before all tests
#with python 3 prefer: setUpClass(cls) class method
def setUp(self):
#code to be executed before each test
'''
@classmethod
def setUpClass(cls) -> None:
#code to be executed only once before all tests start
#Get market data
cls.marketDataTS = TimeSeries.fromCSV(seriesFN, allColumNames, 'Date')
#Sort ascending time index
cls.marketDataTS.sortIndex()
cls.marketDataTS = cls.marketDataTS.loc[seriesDates[0]:seriesDates[1]] # type: ignore
#Generate random TimeSeries
cls.min = 0.000001
cls.max = 0.000005
cls.rndts0 = TimeSeries.random(cls.min, cls.max, ['A', 'B', 'Cr'], "2018-1-1 00:00:00", "2018-1-1 00:30:00", 60)
cls.rndts1 = TimeSeries.random(cls.min, cls.max, ['A', 'B', 'Cr'], "2018-1-1 00:30:00", "2018-1-1 00:00:00", 60)
''' Indexing & accessing '''
def testIndexes0(self) -> None:
'''
Note: Gives a deprecation warning on .ix when using loc with a DatetimeIndex
and a string, but only if a breakpoint is set
'''
#ok
type(self).marketDataTS['2018-08-01 02:00:00':'2018-08-02 04:00:00'] #type:ignore
#ok
type(self).marketDataTS.loc['2018-08-01 02:00:00':'2018-08-02 04:00:00'] #type:ignore
#ok
#To get a value with an exact match Pandas needs a DateTime.
#If it finds a label with time it assumes it is a column name and gives a KeyError
#When assigning a value it does ok
#But it's safer to use DateTime objects
buf = type(self).marketDataTS.at[pd.to_datetime('2018-08-01 00:00:00'), 'Open'] #type:ignore
type(self).marketDataTS.loc['2018-08-01 00:00:00','Open'] = 1
type(self).marketDataTS.loc['2018-08-01 00:00:00', 'Open'] = buf
#ok
type(self).marketDataTS.iloc[1,4]
#gets just the number
self.assertEqual(type(self).marketDataTS.iat[1,4], 7731.18)
#ok
type(self).marketDataTS.iloc[1,1]
#gets just the string: 'BTCUSD'
self.assertEqual(type(self).marketDataTS.iat[1,1], 'BTCUSD')
#test raise
#Error cannot iloc by string, use loc
with self.assertRaises(TypeError) as context:
type(self).marketDataTS.iloc['2018-08-01 02:00:00':'2018-08-02 04:00:00'] #type:ignore
self.assertTrue("cannot do slice indexing on <class 'pandas.core.indexes.datetimes.DatetimeIndex'> with these indexers [2018-08-01 02:00:00] of <class 'str'>" \
in str(context.exception))
def testReversed0(self) -> None:
ts0 = type(self).marketDataTS.reversed()
ts1 = ts0.reversed()
self.assertNotEqual(ts0, type(self).marketDataTS)
self.assertEqual(ts1, type(self).marketDataTS)
self.assertEqual(type(ts1), TimeSeries)
def testReturnType(self) -> None:
#returns TimeSeries
self.assertEqual(type(type(self).marketDataTS[:]), type(type(self).marketDataTS))
self.assertEqual(type(type(self).marketDataTS.iloc[0,:]), TimeSeries)
#returns DataFrame
self.assertTrue(type(type(self).marketDataTS.toDataFrame()) is pd.DataFrame)
def testFindExpectedSamples0(self) -> Any:
df0 = pd.DataFrame([[0,1],[2,3],[4,5]], columns=['A', 'B'])
ts0 = TimeSeries.fromDataFrame(df0, pd.date_range(start='2018-1-1', periods=3, freq='T'))
self.assertEqual(ts0.findExpectedSamples(0.1), 1201)
self.assertEqual(ts0.findExpectedSamples(1), 121)
self.assertEqual(ts0.findExpectedSamples(60), 3)
self.assertEqual(ts0.findExpectedSamples(360), 1)
ts1 = ts0.reversed()
self.assertEqual(ts1.findExpectedSamples(0.1), 1201)
''' Copy and creation '''
def testDeepCopy0(self) -> None:
ts0 = type(self).marketDataTS
ts1 = ts0.copy()
#Test different storage for data
#buf = ts1.loc['2018-08-01 00:00:00','Open'].toDataFrame().values[0,0]
buf = ts1.at[pd.to_datetime('2018-08-01 00:00:00'),'Open']
ts1.at['2018-08-01 00:00:00','Open'] = buf+1
b0 = (ts0.toDataFrame() == ts1.toDataFrame())
t = b0.at[pd.to_datetime('2018-08-01 00:00:00'), 'Open']
self.assertFalse(t)
ts1.at['2018-08-01 00:00:00','Open'] = buf #restore to make equal again for next test
#Test same data and index
self.assertTrue(ts0==ts1)
def testRandom0values(self) -> None:
buf = type(self).rndts0.iat[1,1]
type(self).rndts0.iat[1,1] = 1 #set a value outside [min, max] so bounded() returns False
b0 = type(self).rndts0.bounded(type(self).min, type(self).max)
type(self).rndts0.iat[1,1] = buf #restore the object for the next tests
b1 = type(self).rndts0.bounded(type(self).min, type(self).max)
self.assertFalse(b0)
self.assertTrue(b1)
def testRandom0timeRange(self) -> None:
rindx0 = type(self).rndts0.toDataFrame().index
rindx1 = type(self).rndts1.toDataFrame().index
self.assertLessEqual(rindx0.values[0], rindx0.values[-1])
self.assertLessEqual(rindx1.values[0], rindx1.values[-1])
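#A 30-minute range sampled every 60 s yields 31 points when both endpoints are
#included (30*60/60 + 1); this is inferred from the assertions below, not from
#the TimeSeries.random implementation itself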
self.assertEqual(len(type(self).rndts0), 31)
self.assertEqual(len(type(self).rndts1), 31)
self.assertEqual(rindx0.values[0], rindx1.values[0])
self.assertEqual(rindx0.values[30], rindx1.values[30])
def testFromDataFrame0(self) -> None:
df0 = pd.DataFrame([[0,1],[2,3],[4,5]], columns=['A', 'B'])
ts = TimeSeries.fromDataFrame(df0, pd.date_range(start='2018-1-1', periods=3, freq='T'))
df1 = df0.copy()
df1 = df1.set_index(pd.date_range(start='2018-1-1', periods=3, freq='T'))
b0 = ts.toDataFrame() == df1
b1 = ts.toDataFrame().index.values == df1.index.values
self.assertTrue(all(b0.values.flatten()))
self.assertTrue(all(b1))
def testFromDataFrame1(self) -> None:
df0 = pd.DataFrame([[0,1, 0],[2,3, 60],[4,5, 120]], columns=['A', 'B', 't'])
ts = TimeSeries.fromDataFrame(df0, pd.to_datetime(df0.iloc[:,2]))
ts = ts.iloc[:,:2]
df1 =
|
pd.DataFrame([[0,1],[2,3],[4,5]], columns=['A', 'B'])
|
pandas.DataFrame
|
import sys
sys.path.insert(0, "../models/")
import gr4h
from anns import constructor
import pandas as pd
import numpy as np
from scipy.optimize import differential_evolution
import keras
from keras.models import load_model
### IMPORTANT ###
# It is highly recommended to use any modern GPU
# (e.g., NVIDIA 1080Ti, P100, V100, 2060, 2070, 2080)
# for running this script.
# The average time needed to perform the entire set
# of experiments is almost two weeks when using 1080Ti
# or P100 GPUs.
# Probably, running this script on a standard CPU will take forever.
### uncomment when using GPU ###
#import os
#import tensorflow as tf
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
#config = tf.ConfigProto(log_device_placement=True)
#config.gpu_options.per_process_gpu_memory_fraction = 0.5
#config.gpu_options.allow_growth = True
#session = tf.Session(config=config)
# 1. Reading the data
# uncomment if you requested the data from the authors
#data = pd.read_pickle("../data/data.pkl")
# placeholder for input data
# comment out with # mark if you use the observational data
# which was provided to you by the authors
data = pd.read_pickle("../data/data_dummy.pkl")
# split data for calibration and validation periods
data_cal = data["1968":"1987"]
data_val = data["1988":"2004"]
# 2. Utils
def mse(y_true, y_pred):
return np.nanmean((y_true - y_pred) ** 2, axis=0)
def nse(y_true, y_pred):
return 1 - np.nansum((y_true-y_pred)**2)/np.nansum((y_true-np.nanmean(y_true))**2)
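# Quick sanity check of the metrics above (added for illustration, not part of
# the original experiments): a perfect simulation gives NSE = 1, while
# predicting the mean of the observations gives NSE = 0.
_q_check = np.array([1.0, 2.0, 3.0, 4.0])
assert np.isclose(nse(_q_check, _q_check), 1.0)
assert np.isclose(nse(_q_check, np.full_like(_q_check, _q_check.mean())), 0.0)
assert np.isclose(mse(_q_check, _q_check), 0.0)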
def data_generator(data_instance, model_type, history=720, mode="calibration", runoff_threshold=0.0):
"""
Data generator function for efficient training of neural networks
Input:
data_instance: pandas dataframe with Q, P, and PE columns representing
discharge, precipitation, and potential evapotranspiration timeseries, respectively
model_type: one of "GR4H", "MLP", "RNN", "LSTM", "GRU"
history: the number of antecedent timesteps to consider, hours (default=720, aka one month)
mode: "training" applies the NaN / runoff-threshold filtering below; other values
(e.g. "calibration", "validation") keep all samples (default="calibration")
runoff_threshold: the discharge value below which samples are dropped in "training" mode, float (default=0.0)
Output:
list of variables needed for model calibration / validation
"""
if model_type == "GR4H":
_Q = data_instance["Q"].values
_P = data_instance["P"].values
_PE = data_instance["PE"].values
# add warmup
# simply add a full period as a warm-up
Qobs = np.concatenate([_Q, _Q])
P = np.concatenate([_P, _P])
PE = np.concatenate([_PE, _PE])
output = [Qobs, P, PE]
elif model_type in ["RNN", "GRU", "LSTM", "MLP"]:
X_matrix = data_instance[["P", "PE"]].values
y_matrix = data_instance[["Q"]].values
X, y = [], []
for i in range(history, len(data_instance)):
X_chunk = X_matrix[i-history:i, ::]
y_chunk = y_matrix[i, ::]
if mode == "training":
# check for NaNs and non-zero runoff
if np.isnan(np.sum(X_chunk)) or np.isnan(np.sum(y_chunk)) or y_chunk<runoff_threshold:
pass
else:
X.append(X_chunk)
y.append(y_chunk)
else:
X.append(X_chunk)
y.append(y_chunk)
# from lists to np.array
X, y = np.array(X), np.array(y)
# normalization
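# note (added): the means/stds below are scalars computed over the whole array,
# so P and PE (and the target) are normalized jointly rather than per feature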
X_mean = np.nanmean(X)
X_std = np.nanstd(X)
y_mean = np.nanmean(y)
y_std = np.nanstd(y)
X -= X_mean
X /= X_std
y -= y_mean
y /= y_std
if model_type == "MLP":
X = X.reshape(X.shape[0], -1)
else:
pass
output = [X, np.squeeze(y), y_mean, y_std]
return output
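# Illustrative calls (a sketch added for clarity; the actual calls appear later
# in the original pipeline and may differ):
# Qobs, P, PE = data_generator(data_cal, model_type="GR4H")
# X, y, y_mean, y_std = data_generator(data_cal, model_type="LSTM", history=720, mode="training")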
def calibration(data_instance, model_type, history=720):
"""
Calibration routine
Input:
data_instance: pandas dataframe (the same as for the data_generator function)
model_type: one of "GR4H", "MLP", "RNN", "LSTM", "GRU"
history: the number of antecedent timesteps to consider, hours (default=720, aka one month)
Output:
list of: (1) the optimal parameters (or a Keras model instance) and (2) a pandas
dataframe with the simulation results
"""
if model_type == "GR4H":
Qobs, P, PE = data_generator(data_instance=data_instance, model_type=model_type)
def loss_gr4h(params):
# calculate runoff
Qsim = gr4h.run(P, PE, params)
# MSE on the period with the warm-up cropped
return np.nanmean((Qobs[-len(data_instance):] - Qsim[-len(data_instance):]) ** 2, axis=0)
# optimization
opt_par = differential_evolution(loss_gr4h, bounds=gr4h.bounds(), maxiter=100, polish=True, disp=False, seed=42).x
# calculate runoff with optimal parameters
Qsim = gr4h.run(P, PE, opt_par)
# cut the warmup period + history (for consistency with DL)
Qobs = Qobs[-len(data_instance)+history:]
Qsim = Qsim[-len(data_instance)+history:]
print(f"NSE on calibration is {np.round(nse(Qobs, Qsim), 2)}")
# save results from calibration period separately
calib_res =
|
pd.DataFrame({"Qobs": Qobs, "Qsim": Qsim})
|
pandas.DataFrame
|
import collections
import numpy as np
import pandas as pd
from sklearn.base import RegressorMixin
from sklearn.linear_model.base import LinearModel
from sklearn.utils.validation import check_is_fitted
from .utils import QuantileMapper, ensure_samples_features
MONTH_GROUPER = lambda x: x.month
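# Usage note (added): passing a DatetimeIndex-ed frame to df.groupby(MONTH_GROUPER)
# yields one group per calendar month, with integer keys 1..12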
class BcsdBase(LinearModel, RegressorMixin):
""" Base class for BCSD model.
"""
_fit_attributes = ["y_climo_", "quantile_mappers_"]
def __init__(self, time_grouper=MONTH_GROUPER, **qm_kwargs):
if isinstance(time_grouper, str):
self.time_grouper = pd.Grouper(freq=time_grouper)
else:
self.time_grouper = time_grouper
self.qm_kwargs = qm_kwargs
def _qm_fit_by_group(self, groups):
""" helper function to fit quantile mappers by group
Note that we store these mappers for later
"""
self.quantile_mappers_ = {}
for key, group in groups:
data = ensure_samples_features(group)
self.quantile_mappers_[key] = QuantileMapper(**self.qm_kwargs).fit(data)
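# Illustrative sketch (not part of the original class): with a monthly grouper,
# a fit implementation would typically build the groups as
#   groups = pd.DataFrame(y, index=time_index).groupby(self.time_grouper)
#   self._qm_fit_by_group(groups)
# storing one QuantileMapper per calendar month (`time_index` is just a
# placeholder name for the sample timestamps).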
def _qm_transform_by_group(self, groups):
""" helper function to apply quantile mapping by group
Note that we recombine the dataframes using pd.concat; there may be a better way to do this
"""
dfs = []
for key, group in groups:
data = ensure_samples_features(group)
qmapped = self.quantile_mappers_[key].transform(data)
dfs.append(
|
pd.DataFrame(qmapped, index=group.index, columns=data.columns)
|
pandas.DataFrame
|